python_code | repo_name | file_path |
---|---|---|
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(
name: str,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit: bool = False, download_root: str = None,
# evl
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.0, 0.0, 0.0, 0.0], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False, init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None,
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
'''
with open(model_path, 'rb') as opened_file:
try:
# loading JIT archive
model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
'''
init_state_dict = torch.load(model_path, map_location='cpu')['state_dict']
state_dict = {}
for k, v in init_state_dict.items():
k = k.replace('clip.','')
state_dict[k] = v
if not jit:
model = build_model(
state_dict or model.state_dict(),  # note: `model` is only defined by the commented-out JIT path above; state_dict is always populated here
n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size,
use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain,
init_zero=init_zero, mergeclip=mergeclip, mergeweight=mergeweight, use_capdecoder=use_capdecoder, clip_state_dict=clip_state_dict,
).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
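# Minimal usage sketch (illustrative, not part of the original module): the checkpoint
# path below is a placeholder, and the checkpoint is assumed to store its weights under
# a 'state_dict' key with 'clip.'-prefixed names, as the loading code above expects.
#
#   model, preprocess = load("/path/to/evl_checkpoint.pt", device="cpu")
#   image = preprocess(Image.open("frame.jpg")).unsqueeze(0)   # -> [1, 3, n_px, n_px]
#   tokens = tokenize(["a person riding a bike"])              # -> [1, 77]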
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
else:
result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
special_tokens_mask[i, len(tokens):] = 1
if return_special_tokens_mask:
return result, special_tokens_mask
return result
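# Tokenizer output sketch (illustrative, assuming the default context_length of 77):
#
#   tokens = tokenize(["a cat", "a dog"])                      # shape [2, 77]
#   tokens, pad_mask = tokenize("a cat", return_special_tokens_mask=True)
#   # pad_mask is True at the padding positions after the encoded text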
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/clip.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns a dict mapping utf-8 bytes to unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings,
and we avoid mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
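# Small illustration of the table built above: printable ASCII bytes map to themselves,
# while bytes that would be whitespace/control characters are remapped to unused code
# points at or above 256.
#
#   b2u = bytes_to_unicode()
#   assert b2u[ord("a")] == "a"     # printable bytes are kept as-is
#   assert ord(b2u[0]) >= 256       # byte 0 is remapped, never to whitespace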
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
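# Example (illustrative): get_pairs(("h", "e", "l", "l", "o</w>"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}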
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
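# Round-trip sketch (illustrative; assumes the bundled BPE vocab file is present where
# default_bpe() expects it):
#
#   tok = SimpleTokenizer()
#   ids = tok.encode("hello world")
#   assert tok.decode(ids).strip() == "hello world"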
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/simple_tokenizer.py |
#!/usr/bin/env python
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from .attention_module import multi_head_attention_forward
class _LinearWithBias(Linear):
bias: Tensor
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
\text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None, return_qk=False):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the position
with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- return_qk: whether to return Q and K.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if return_qk:
if not self._qkv_same_embed_dim:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, return_qk=True)
else:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, return_qk=True)
return q, k, attn_output, attn_output_weights
else:
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask)
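# Usage sketch for the return_qk extension (illustrative; shapes follow the docstring,
# and the exact content of q/k is determined by the patched multi_head_attention_forward
# in attention_module.py):
#
#   mha = MultiheadAttention(embed_dim=768, num_heads=12)
#   q, k, out, attn_w = mha(query, key, value, return_qk=True)
#   # `out` has the usual attn_output shape (L, N, E); without return_qk the call
#   # returns (attn_output, attn_output_weights) as in nn.MultiheadAttention.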
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention.py |
#!/usr/bin/env python
from collections import OrderedDict
from timm.models.layers import trunc_normal_, DropPath
import torch
import torch.nn as nn
import torch.nn.functional as F
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
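# QuickGELU is the sigmoid-based GELU approximation used by CLIP: gelu(x) ≈ x * sigmoid(1.702 * x).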
def conv_1x1x1(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)
def conv_3x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)
def conv_1x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups)
def bn_3d(dim):
return nn.BatchNorm3d(dim)
class STM(nn.Module):
def __init__(self, n_dim, reduction=4):
super(STM, self).__init__()
reduced_c = n_dim // reduction
self.reduce = nn.Sequential(
nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False),
nn.BatchNorm2d(reduced_c)
)
self.shift = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad = (0, 0, 0, 0, 0, 0, 0, 1)
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.reduce(fea) # NT, C//r, H, W
# t feature
reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W
t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
# apply transformation conv to t+1 feature
conv_bottleneck = self.shift(bottleneck) # NT, C//r, H, W
# reshape fea: N, T, C//r, H, W
reshape_conv_bottleneck = conv_bottleneck.view((-1, T) + conv_bottleneck.size()[1:])
__, tPlusone_fea = reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
# motion fea = t+1_fea - t_fea
# pad the last timestamp
diff_fea = tPlusone_fea - t_fea # N, T-1, C//r, H, W
# pad = (0,0,0,0,0,0,0,1)
diff_fea_pluszero = F.pad(diff_fea, self.pad, mode="constant", value=0) # N, T, C//r, H, W
diff_fea_pluszero = diff_fea_pluszero.view((-1,) + diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
y = self.recover(diff_fea_pluszero) # NT, C, H, W
# reshape
y = y.reshape(N, T, C, L).permute(3, 0, 1, 2)
y = torch.cat([cls_token, y], dim=0)
return y
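# Shape sketch (illustrative): STM is shape-preserving on its [1 + H*W, N, T, C] input.
#
#   stm = STM(n_dim=768)
#   x = torch.zeros(1 + 14 * 14, 2, 8, 768)   # cls token + 14x14 patch tokens
#   assert stm(x).shape == x.shape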
class DSTM(nn.Module):
def __init__(self, n_dim, reduction=4):
super(DSTM, self).__init__()
reduced_c = n_dim // reduction
self.reduce = nn.Sequential(
nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False),
nn.BatchNorm2d(reduced_c)
)
# DW(T+1) - T
self.shift_pre = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover_pre = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad_pre = (0, 0, 0, 0, 0, 0, 0, 1)
# DW(T-1) - T
self.shift_back = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover_back = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad_back = (0, 0, 0, 0, 0, 0, 0, 1)
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.reduce(fea) # NT, C//r, H, W
# t feature
reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W
pre_t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
back_t_fea, __ = reshape_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
# apply transformation conv to t+1/t-1 feature
pre_conv_bottleneck = self.shift_pre(bottleneck) # NT, C//r, H, W
back_conv_bottleneck = self.shift_back(bottleneck) # NT, C//r, H, W
# reshape fea: N, T, C//r, H, W
pre_reshape_conv_bottleneck = pre_conv_bottleneck.view((-1, T) + pre_conv_bottleneck.size()[1:])
back_reshape_conv_bottleneck = back_conv_bottleneck.view((-1, T) + back_conv_bottleneck.size()[1:])
__, tPlusone_fea = pre_reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
tMinusone_fea, _ = back_reshape_conv_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
# pre_fea = t+1_fea - t_fea
# back_fea = t-1_fea - t_fea
pre_diff_fea = tPlusone_fea - pre_t_fea # N, T-1, C//r, H, W
back_diff_fea = tMinusone_fea - back_t_fea # N, T-1, C//r, H, W
# pad the last/first timestamp
pre_diff_fea_pluszero = F.pad(pre_diff_fea, self.pad_pre, mode="constant", value=0) # N, T, C//r, H, W
pre_diff_fea_pluszero = pre_diff_fea_pluszero.view((-1,) + pre_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
back_diff_fea_pluszero = F.pad(back_diff_fea, self.pad_back, mode="constant", value=0) # N, T, C//r, H, W
back_diff_fea_pluszero = back_diff_fea_pluszero.view((-1,) + back_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
# recover channel
pre_y = self.recover_pre(pre_diff_fea_pluszero) # NT, C, H, W
back_y = self.recover_back(back_diff_fea_pluszero) # NT, C, H, W
# reshape
y = (pre_y + back_y).reshape(N, T, C, L).permute(3, 0, 1, 2)
# cat cls_token
y = torch.cat([cls_token, y], dim=0)
return y
class TDN(nn.Module):
def __init__(self, channel, n_segment=8, index=1, reduction=4):
super(TDN, self).__init__()
self.channel = channel
self.reduction = reduction
self.n_segment = n_segment
self.stride = 2**(index-1)
self.conv1 = nn.Conv2d(in_channels=self.channel,
out_channels=self.channel//self.reduction,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
self.conv2 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel//self.reduction,
kernel_size=3, padding=1, groups=self.channel//self.reduction, bias=False)
self.avg_pool_forward2 = nn.AvgPool2d(kernel_size=2, stride=2)
self.avg_pool_forward4 = nn.AvgPool2d(kernel_size=4, stride=4)
self.sigmoid_forward = nn.Sigmoid()
self.avg_pool_backward2 = nn.AvgPool2d(kernel_size=2, stride=2)#nn.AdaptiveMaxPool2d(1)
self.avg_pool_backward4 = nn.AvgPool2d(kernel_size=4, stride=4)
self.sigmoid_backward = nn.Sigmoid()
self.pad1_forward = (0, 0, 0, 0, 0, 0, 0, 1)
self.pad1_backward = (0, 0, 0, 0, 0, 0, 1, 0)
self.conv3 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.channel)
self.conv3_smallscale2 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False)
self.bn3_smallscale2 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
self.conv3_smallscale4 = nn.Conv2d(in_channels = self.channel//self.reduction,
out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False)
self.bn3_smallscale4 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
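# NOTE: spatial_pool below references self.conv_mask and self.softmax, which are not
# defined in __init__; the method is not called from forward() and appears to be
# leftover from an earlier variant of this block.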
def spatial_pool(self, x):
nt, channel, height, width = x.size()
input_x = x
# [N, C, H * W]
input_x = input_x.view(nt, channel, height * width)
# [N, 1, C, H * W]
input_x = input_x.unsqueeze(1)
# [N, 1, H, W]
context_mask = self.conv_mask(x)
# [N, 1, H * W]
context_mask = context_mask.view(nt, 1, height * width)
# [N, 1, H * W]
context_mask = self.softmax(context_mask)
context_mask = context_mask.view(nt,1,height,width)
return context_mask
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.conv1(fea) # nt, c//r, h, w
bottleneck = self.bn1(bottleneck) # nt, c//r, h, w
reshape_bottleneck = bottleneck.view((-1, self.n_segment) + bottleneck.size()[1:]) # n, t, c//r, h, w
t_fea_forward, _ = reshape_bottleneck.split([self.n_segment -1, 1], dim=1) # n, t-1, c//r, h, w
_, t_fea_backward = reshape_bottleneck.split([1, self.n_segment -1], dim=1) # n, t-1, c//r, h, w
conv_bottleneck = self.conv2(bottleneck) # nt, c//r, h, w
reshape_conv_bottleneck = conv_bottleneck.view((-1, self.n_segment) + conv_bottleneck.size()[1:]) # n, t, c//r, h, w
_, tPlusone_fea_forward = reshape_conv_bottleneck.split([1, self.n_segment-1], dim=1) # n, t-1, c//r, h, w
tPlusone_fea_backward ,_ = reshape_conv_bottleneck.split([self.n_segment-1, 1], dim=1) # n, t-1, c//r, h, w
diff_fea_forward = tPlusone_fea_forward - t_fea_forward # n, t-1, c//r, h, w
diff_fea_backward = tPlusone_fea_backward - t_fea_backward# n, t-1, c//r, h, w
diff_fea_pluszero_forward = F.pad(diff_fea_forward, self.pad1_forward, mode="constant", value=0) # n, t, c//r, h, w
diff_fea_pluszero_forward = diff_fea_pluszero_forward.view((-1,) + diff_fea_pluszero_forward.size()[2:]) #nt, c//r, h, w
diff_fea_pluszero_backward = F.pad(diff_fea_backward, self.pad1_backward, mode="constant", value=0) # n, t, c//r, h, w
diff_fea_pluszero_backward = diff_fea_pluszero_backward.view((-1,) + diff_fea_pluszero_backward.size()[2:]) #nt, c//r, h, w
y_forward_smallscale2 = self.avg_pool_forward2(diff_fea_pluszero_forward) # nt, c//r, 1, 1
y_backward_smallscale2 = self.avg_pool_backward2(diff_fea_pluszero_backward) # nt, c//r, 1, 1
y_forward_smallscale4 = diff_fea_pluszero_forward
y_backward_smallscale4 = diff_fea_pluszero_backward
y_forward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_forward_smallscale2))
y_backward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_backward_smallscale2))
y_forward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_forward_smallscale4))
y_backward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_backward_smallscale4))
y_forward_smallscale2 = F.interpolate(y_forward_smallscale2, diff_fea_pluszero_forward.size()[2:])
y_backward_smallscale2 = F.interpolate(y_backward_smallscale2, diff_fea_pluszero_backward.size()[2:])
y_forward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_forward + 1.0/3.0*y_forward_smallscale2 + 1.0/3.0*y_forward_smallscale4))# nt, c, 1, 1
y_backward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_backward + 1.0/3.0*y_backward_smallscale2 + 1.0/3.0*y_backward_smallscale4)) # nt, c, 1, 1
y_forward = self.sigmoid_forward(y_forward) - 0.5
y_backward = self.sigmoid_backward(y_backward) - 0.5
y = 0.5 * y_forward + 0.5 * y_backward
attn = fea * y
x = x + attn.reshape(N, T, C, L).permute(3, 0, 1, 2)
x = torch.cat([cls_token, x], dim=0)
return x
class CMlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = conv_1x1x1(in_features, hidden_features)
self.act = act_layer()
self.fc2 = conv_1x1x1(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class CBlock(nn.Module):
def __init__(self, dim, mlp_ratio=4., dropout=0., drop_path=0., uni_type='3d', add_ffn=True):
super().__init__()
self.norm1 = bn_3d(dim)
self.conv1 = conv_1x1x1(dim, dim, 1)
self.conv2 = conv_1x1x1(dim, dim, 1)
if uni_type == '3d':
print('Use 3d conv for local MHRA')
self.attn = conv_3x3x3(dim, dim, groups=dim)
else:
print('Use 2d conv for local MHRA')
self.attn = conv_1x3x3(dim, dim, groups=dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.add_ffn = add_ffn
if add_ffn:
print('Add FFN in local MHRA')
self.norm2 = bn_3d(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=nn.GELU, drop=dropout)
print('Init zero')
nn.init.constant_(self.conv2.weight, 0.)
nn.init.constant_(self.conv2.bias, 0.)
if add_ffn:
nn.init.constant_(self.mlp.fc2.weight, 0.)
nn.init.constant_(self.mlp.fc2.bias, 0.)
def forward(self, x):
x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
if self.add_ffn:
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ResidualDecoderBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None,
mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
nn.init.xavier_uniform_(self.attn.in_proj_weight)
# nn.init.xavier_uniform_(self.attn.out_proj.weight)
nn.init.constant_(self.attn.out_proj.weight, 0.)
nn.init.constant_(self.attn.out_proj.bias, 0.)
nn.init.xavier_uniform_(self.mlp[0].weight)
# nn.init.xavier_uniform_(self.mlp[-1].weight)
nn.init.constant_(self.mlp[-1].weight, 0.)
nn.init.constant_(self.mlp[-1].bias, 0.)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class TransformerDecoder_uniformer_diff_conv_balance(nn.Module):
def __init__(self, n_layers=4,
uni_layer=4, uni_type='3d', add_ffn=True, t_conv_type='1d', pre_prompt=True,
n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
balance=0.,
use_t_conv=True, after_me=True, before_me=False, me_type='dstm', me_reduction=4,
use_t_pos_embed=True, num_classes=400):
super().__init__()
n_layers += uni_layer
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.uni_layer = uni_layer
self.uni_dec = nn.ModuleList([
CBlock(n_dim, mlp_ratio=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i], uni_type=uni_type, add_ffn=add_ffn)
for i in range(uni_layer)
])
self.dec = nn.ModuleList([
ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i])
for i in range(n_layers)
])
self.proj = nn.Sequential(
nn.LayerNorm(n_dim),
nn.Dropout(cls_dropout),
nn.Linear(n_dim, num_classes),
)
self.pre_prompt = pre_prompt
if pre_prompt:
print('Add pre prompt')
self.pre_temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
if use_t_conv:
self.t_conv_type = t_conv_type
if t_conv_type == '1d':
print('Use 1d t_conv for CPE')
self.tconv = nn.ModuleList([
nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
m.weight.data[...] = torch.Tensor([0, 1, 0])
else:
print('Use 3d t_conv for CPE')
self.tconv = nn.ModuleList([
nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
else:
self.tconv = None
self.before_me = before_me
self.after_me = after_me
if before_me or after_me:
assert before_me != after_me
print(f'Use {me_type} attention, Before {before_me}, After {after_me}')
if me_type == 'stm':
me_op = STM
elif me_type == 'dstm':
me_op = DSTM
elif me_type == 'tdn':
me_op = TDN
self.me = nn.ModuleList([me_op(n_dim, reduction=me_reduction) for i in range(n_layers)])
if use_t_pos_embed:
self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim]))
else:
self.pemb_t = None
print(f'Balance weight {balance}')
self.balance = nn.Parameter(torch.ones((n_dim)) * balance)
self.sigmoid = nn.Sigmoid()
def forward(self, clip_feats_all, mode='video'):
# clip_feats_all = clip_feats_all[-len(self.dec):]
# only return n_layers features, save memory
clip_feats = [x for x in clip_feats_all]
if self.after_me:
origin_clip_feats = [x for x in clip_feats_all]
L, N, T, C = clip_feats[0].size()
x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
for i in range(len(clip_feats)):
if self.before_me:
# contain residual
clip_feats[i] = self.me[i](clip_feats[i])
if self.tconv is not None:
L, N, T, C = clip_feats[i].shape
if self.t_conv_type == '1d':
clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T
clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C)
else:
H = W = int((L - 1) ** 0.5)
_, tmp_feats = clip_feats[i][:1], clip_feats[i][1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W)
tmp_feats = self.tconv[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1)
clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats
if self.pemb_t is not None and mode == 'video':
clip_feats[i] = clip_feats[i] + self.pemb_t[i]
if self.after_me:
clip_feats[i] = clip_feats[i] + self.me[i](origin_clip_feats[i])
if i < self.uni_layer:
# L, N, T, C
L, N, T, C = clip_feats[i].shape
H = W = int((L - 1) ** 0.5)
_, tmp_feats = clip_feats[i][:1], clip_feats[i][1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W)
tmp_feats = self.uni_dec[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1)
clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats
clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
if self.pre_prompt:
pre_x = self.pre_temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
for i in range(len(self.dec)):
if i < self.uni_layer:
pre_x = self.dec[i](pre_x, clip_feats[i])
elif i == self.uni_layer:
clip_feats[i] = torch.cat([pre_x, clip_feats[i]], dim=0)
x = self.dec[i](x, clip_feats[i])
else:
x = self.dec[i](x, clip_feats[i])
else:
for i in range(len(self.dec)):
x = self.dec[i](x, clip_feats[i])
# real residual
# L, N, T, C
residual = clip_feats_all[-1][0].mean(1)
weight = self.sigmoid(self.balance)
return self.proj((1 - weight) * x[0, :, :] + weight * residual)
if __name__ == '__main__':
model = TransformerDecoder_uniformer_diff_conv_balance()
# construct a fake input to demonstrate input tensor shape
L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim
# we use intermediate feature maps from multiple blocks, so input features should be a list
input_features = []
for i in range(8): # vit-b has 12 blocks
# every item in input_features contains features maps from a single block
# every item is a tuple containing 3 feature maps:
# (1) block output features (i.e. after mlp) with shape L, N, T, C
# (2) projected query features with shape L, N, T, C
# (3) projected key features with shape L, N, T, C
input_features.append(
tuple(torch.zeros([L, N, T, C]) for _ in range(3)))
# some small optimizations:
# (1) We only decode from the last $n$ blocks, so it's fine as long as the last $n$ items of input_features are valid; all previous items can be filled with None to save memory. By default $n=4$.
# (2) Projected query/key features are optional. If you are using an incompatible image backbone without query/key (e.g. a CNN), you can fill those positions with None (i.e. the tuple should be (Tensor, None, None)) and set use_image_attnmap=False when constructing the model.
print(model)
print(model(input_features).shape) # should be N, 400
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/evl_module_uniformer_diff_conv_balance.py |
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
import logging
logger = logging.getLogger(__name__)
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
def conv_1x1x1(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)
def conv_3x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)
def conv_1x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups)
def bn_3d(dim):
return nn.BatchNorm3d(dim)
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None, drop_path=0.0,
):
super().__init__()
self.n_head = n_head
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
logger.info(f'Drop path rate: {drop_path}')
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, T=8, use_checkpoint=False):
# x: 1+HW, NT, C
# MHSA
if use_checkpoint:
attn_out = checkpoint.checkpoint(self.attention, self.ln_1(x))
x = x + self.drop_path(attn_out)
else:
x = x + self.drop_path(self.attention(self.ln_1(x)))
# FFN
if use_checkpoint:
mlp_out = checkpoint.checkpoint(self.mlp, self.ln_2(x))
x = x + self.drop_path(mlp_out)
else:
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Extractor(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None,
mlp_factor=4.0, dropout=0.0, drop_path=0.0,
):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
logger.info(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
# zero init
nn.init.xavier_uniform_(self.attn.in_proj_weight)
nn.init.constant_(self.attn.out_proj.weight, 0.)
nn.init.constant_(self.attn.out_proj.bias, 0.)
nn.init.xavier_uniform_(self.mlp[0].weight)
nn.init.constant_(self.mlp[-1].weight, 0.)
nn.init.constant_(self.mlp[-1].bias, 0.)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(
self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0.,
use_checkpoint=False, checkpoint_num=[0], t_size=8,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.T = t_size
self.return_list = return_list
# Backbone
b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(
width, heads, attn_mask,
drop_path=b_dpr[i],
) for i in range(layers)
])
# checkpoint
self.use_checkpoint = use_checkpoint
self.checkpoint_num = checkpoint_num
logger.info(f'Use checkpoint: {self.use_checkpoint}')
logger.info(f'Checkpoint number: {self.checkpoint_num}')
# Extractor
assert n_layers == len(return_list)
self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim))
self.dpe = nn.ModuleList([
nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.dpe:
nn.init.constant_(m.bias, 0.)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.dec = nn.ModuleList([
Extractor(
n_dim, n_head, mlp_factor=mlp_factor,
dropout=mlp_dropout[i], drop_path=dpr[i],
) for i in range(n_layers)
])
# # projection
# self.proj = nn.Sequential(
# nn.LayerNorm(n_dim),
# nn.Dropout(cls_dropout),
# nn.Linear(n_dim, num_classes),
# )
self.balance = nn.Parameter(torch.zeros((n_dim)))
self.sigmoid = nn.Sigmoid()
def forward(self, x, mode='video', return_all_feats=False):
if mode == 'video':
T_down = self.T
else:
T_down = 1
L, NT, C = x.shape
N = NT // T_down
H = W = int((L - 1) ** 0.5)
cls_token = self.temporal_cls_token.repeat(1, N, 1)
j = -1
for i, resblock in enumerate(self.resblocks):
if self.use_checkpoint and i < self.checkpoint_num[0]:
x = resblock(x, self.T, use_checkpoint=True)
else:
x = resblock(x, T_down)
if i in self.return_list:
j += 1
tmp_x = x.clone()
tmp_x = tmp_x.view(L, N, T_down, C)
# dpe
_, tmp_feats = tmp_x[:1], tmp_x[1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W)
tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1)
tmp_x[1:] = tmp_x[1:] + tmp_feats
# enhancer
tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
cls_token = self.dec[j](cls_token, tmp_x)
weight = self.sigmoid(self.balance)
residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C
# return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual)
feats = (1 - weight) * cls_token[0, :, :] + weight * residual
if return_all_feats:
return feats, x.view(L, N, T_down, C)
return feats
class VisionTransformer(nn.Module):
def __init__(
self,
# backbone
input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0.,
use_checkpoint=False, checkpoint_num=[0], t_size=8,
# extractor
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(
width, layers, heads,
backbone_drop_path_rate=backbone_drop_path_rate,
use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size,
return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout, num_classes=num_classes,
)
def forward(self, x, mode='video', return_all_feats=False):
x = self.conv1(x) # shape = [*, width, grid, grid]
N, C, T, H, W = x.shape
x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C)
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
out = self.transformer(x, mode=mode, return_all_feats=return_all_feats)
return out
def inflate_weight(weight_2d, time_dim, center=True):
if center:
weight_3d = torch.zeros(*weight_2d.shape)
weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
middle_idx = time_dim // 2
weight_3d[:, :, middle_idx, :, :] = weight_2d
else:
weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
weight_3d = weight_3d / time_dim
return weight_3d
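# Illustrative example of the inflation (values/shapes here are placeholders):
#
#   w2d = torch.randn(768, 3, 16, 16)        # a 2D patch-embedding kernel
#   w3d = inflate_weight(w2d, time_dim=3)    # -> shape (768, 3, 3, 16, 16)
#   assert torch.equal(w3d[:, :, 1], w2d)    # center=True puts the 2D kernel at t=1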
def load_state_dict(model, state_dict):
state_dict_3d = model.state_dict()
for k in state_dict.keys():
if state_dict[k].shape != state_dict_3d[k].shape:
if len(state_dict_3d[k].shape) <= 2:
logger.info(f'Ignore: {k}')
continue
logger.info(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}')
time_dim = state_dict_3d[k].shape[2]
state_dict[k] = inflate_weight(state_dict[k], time_dim)
model.load_state_dict(state_dict, strict=False)
def vit_only_global_b32(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_b16(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_l14(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[20, 21, 22, 23],
n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_l14_336(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[20, 21, 22, 23],
n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import numpy as np
seed = 4217
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
num_frames = 8
model = vit_only_global_l14(
pretrained=False,
t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4,
use_checkpoint=True, checkpoint_num=[0],
)
flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224))
s = time.time()
logger.info(flop_count_table(flops, max_depth=1))
logger.info(time.time()-s)
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_only_global.py |
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x, return_qk=False):
if return_qk:
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
q, k, attn_output, _ = self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, return_qk=True)
return q, k, attn_output
else:
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, return_qk=False):
if return_qk:
q, k, attn_output = self.attention(self.ln_1(x), return_qk=True)
x = x + self.drop_path(attn_output)
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x, q, k
else:
x = x + self.drop_path(self.attention(self.ln_1(x)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers)
])
def forward(self, x, return_num=4, T=8):
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x)
if i >= self.layers - return_num:
L, NT, C = x.shape
N = NT // T
features.append(x.view(L, N, T, C))
return features
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim, drop_path_rate=0.,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)
def forward(self, x, return_num=4, return_qk=True):
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T,
)
return features
def vit_b32(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_b16(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_l14(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_l14_336(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
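# A minimal sketch (not in the original file) showing what the factories above return at
# inference time: a frozen backbone whose forward yields the features of the last
# `return_num` blocks, each reshaped to (L, N, T, C). Shapes assume a 224x224, 8-frame
# clip and ViT-B/32 (7x7 patches + CLS -> L = 50).
def _demo_vit_b32_features():
    model = vit_b32(pretrained=False)
    video = torch.rand(1, 3, 8, 224, 224)  # (N, C, T, H, W)
    with torch.no_grad():
        features = model(video)            # list of 4 tensors
    assert len(features) == 4
    assert features[-1].shape == (50, 1, 8, 768)  # (L, N, T, C)
    return features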
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit.py |
from .evl_module import TransformerDecoder
from .evl_module_uniformer_diff_conv_balance import TransformerDecoder_uniformer_diff_conv_balance
from .clip_vit import vit_b32, vit_b16, vit_l14, vit_l14_336
from .clip_vit_2plus1d import vit_2plus1d_b32, vit_2plus1d_b16, vit_2plus1d_l14, vit_2plus1d_l14_336
from .clip_vit_2plus1d_dw_bias import vit_2plus1d_dw_bias_b32, vit_2plus1d_dw_bias_b16, vit_2plus1d_dw_bias_l14, vit_2plus1d_dw_bias_l14_336
from .clip_vit_fusion import vit_fusion_b32, vit_fusion_b16, vit_fusion_l14, vit_fusion_l14_336
from .clip_vit_only_global import vit_only_global_b32, vit_only_global_b16, vit_only_global_l14, vit_only_global_l14_336 | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__init__.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import _pad, linear, softmax, dropout
Tensor = torch.Tensor
pad = _pad
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- return_qk: whether return Q and K.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(
attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
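# A small self-contained sketch (not in the original file) of calling the functional
# attention above directly for self-attention with return_qk=True. All weights here are
# made up for illustration; in the library they come from a MultiheadAttention module's
# parameters.
def _demo_multi_head_attention_forward():
    L, N, E, H = 4, 2, 8, 2                      # tokens, batch, embed dim, heads
    x = torch.randn(L, N, E)
    in_proj_weight = torch.randn(3 * E, E) * 0.02
    in_proj_bias = torch.zeros(3 * E)
    out_proj_weight = torch.randn(E, E) * 0.02
    out_proj_bias = torch.zeros(E)
    q, k, out, _ = multi_head_attention_forward(
        x, x, x, E, H, in_proj_weight, in_proj_bias,
        None, None, False, 0.0, out_proj_weight, out_proj_bias,
        training=False, need_weights=False, return_qk=True)
    # q/k are the (unscaled) projected queries/keys, out is the attention output,
    # all shaped (L, N, E).
    assert q.shape == k.shape == out.shape == (L, N, E)
    return out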
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_module.py |
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
from einops import rearrange
from .attention import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
# temporal
self.attn_t = MultiheadAttention(d_model, n_head)
self.ln_t = LayerNorm(d_model)
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
# init zero
print('Init zero for (2+1)d')
nn.init.constant_(self.attn_t.in_proj_weight, 0)
nn.init.constant_(self.attn_t.in_proj_bias, 0)
nn.init.constant_(self.attn_t.out_proj.weight, 1)
nn.init.constant_(self.attn_t.out_proj.bias, 0)
def attention(self, x):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def attention_temporal(self, x):
self.attn_mask = None
return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, T=8):
# temporal
# x: 1+HWT, N, C
xt = x[1:, :, :]
_, N, C = xt.shape
xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T)
res_temporal = self.attention_temporal(self.ln_t(xt))
res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T)
xt = x[1:, :, :] + self.drop_path(res_temporal)
# spatial
init_cls_token = x[:1, :, :]
cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C)
xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T)
xs = torch.cat((cls_token, xs), 0)
res_spatial = self.attention(self.ln_1(xs))
# Taking care of CLS token
cls_token = res_spatial[0, :, :]
cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N)
        cls_token = torch.mean(cls_token, 0, True)  # average the CLS token over frames
res_spatial = res_spatial[1:, :, :]
res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N)
x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers)
])
def forward(self, x, return_num=4, T=8):
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x, T=T)
if i >= self.layers - return_num:
# LT + 1, N, C
LT, N, C = x.shape
L = (LT - 1) // T
cls_x, tmp_x = x[:1], x[1:]
cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1)
tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C
tmp_x = torch.cat([cls_x, tmp_x], dim=0 )# L + 1, N, T, C
features.append(tmp_x)
return features
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim, num_frames=8, drop_path_rate=0.,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)
def forward(self, x, return_num=4):
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
cls_tokens = x[:N, :1, :]
x = x[:, 1:]
x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T)
x = x + self.temporal_positional_embedding
x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T)
x = torch.cat((cls_tokens, x), dim=1)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T,
)
return features
def vit_2plus1d_b32(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_b16(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_l14(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_l14_336(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
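# A hedged sketch (not in the original file) of the divided space-time backbone above:
# with an 8-frame 224x224 clip and ViT-B/32, each returned feature keeps the CLS token
# plus the 7x7 spatial grid per frame, i.e. (L + 1, N, T, C) = (50, 1, 8, 768).
def _demo_vit_2plus1d_b32_features():
    model = vit_2plus1d_b32(pretrained=False, num_frames=8)
    video = torch.rand(1, 3, 8, 224, 224)  # (N, C, T, H, W)
    with torch.no_grad():
        features = model(video)            # list of the last `return_num` blocks
    assert len(features) == 4
    assert features[-1].shape == (50, 1, 8, 768)
    return features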
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_2plus1d_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_2plus1d.py |
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
def conv_1x1x1(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)
def conv_3x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)
def conv_1x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups)
def bn_3d(dim):
return nn.BatchNorm3d(dim)
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class Local_MHRA(nn.Module):
def __init__(self, d_model, dw_reduction=1.5, pos_kernel_size=3):
super().__init__()
padding = pos_kernel_size // 2
re_d_model = int(d_model // dw_reduction)
self.pos_embed = nn.Sequential(
nn.BatchNorm3d(d_model),
nn.Conv3d(d_model, re_d_model, kernel_size=1, stride=1, padding=0),
nn.Conv3d(re_d_model, re_d_model, kernel_size=(pos_kernel_size, 1, 1), stride=(1, 1, 1), padding=(padding, 0, 0), groups=re_d_model),
nn.Conv3d(re_d_model, d_model, kernel_size=1, stride=1, padding=0),
)
# init zero
print('Init zero for Conv in pos_emb')
nn.init.constant_(self.pos_embed[3].weight, 0)
nn.init.constant_(self.pos_embed[3].bias, 0)
def forward(self, x):
return self.pos_embed(x)
class ResidualAttentionBlock(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None, drop_path=0.0,
dw_reduction=1.5,
):
super().__init__()
self.n_head = n_head
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.lmhra1 = Local_MHRA(d_model, dw_reduction=dw_reduction)
self.lmhra2 = Local_MHRA(d_model, dw_reduction=dw_reduction)
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, T=8):
# x: 1+HW, NT, C
# Local MHRA
tmp_x = x[1:, :, :]
L, NT, C = tmp_x.shape
N = NT // T
H = W = int(L ** 0.5)
tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous()
tmp_x = tmp_x + self.drop_path(self.lmhra1(tmp_x))
tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C)
x = torch.cat([x[:1, :, :], tmp_x], dim=0)
# MHSA
x = x + self.drop_path(self.attention(self.ln_1(x)))
# Local MHRA
tmp_x = x[1:, :, :]
tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous()
tmp_x = tmp_x + self.drop_path(self.lmhra2(tmp_x))
tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C)
x = torch.cat([x[:1, :, :], tmp_x], dim=0)
# FFN
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Extractor(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None,
mlp_factor=4.0, dropout=0.0, drop_path=0.0,
):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
# zero init
nn.init.xavier_uniform_(self.attn.in_proj_weight)
nn.init.constant_(self.attn.out_proj.weight, 0.)
nn.init.constant_(self.attn.out_proj.bias, 0.)
nn.init.xavier_uniform_(self.mlp[0].weight)
nn.init.constant_(self.mlp[-1].weight, 0.)
nn.init.constant_(self.mlp[-1].bias, 0.)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
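# A minimal shape sketch (not in the original file) of one Extractor step: a single
# learnable CLS query cross-attends over all T*L backbone tokens and comes back with the
# same (1, N, d_model) shape. Sizes below are made up for illustration.
def _demo_extractor():
    d_model, n_head, N, T, L = 768, 12, 2, 8, 196
    dec = Extractor(d_model, n_head, mlp_factor=4.0, dropout=0.0, drop_path=0.0)
    cls_token = torch.zeros(1, N, d_model)       # query
    tokens = torch.randn(T * L, N, d_model)      # keys/values from the backbone
    out = dec(cls_token, tokens)
    assert out.shape == (1, N, d_model)
    return out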
class Transformer(nn.Module):
def __init__(
self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0.,
t_size=8, dw_reduction=2,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.T = t_size
self.return_list = return_list
# Backbone
b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(
width, heads, attn_mask,
drop_path=b_dpr[i],
dw_reduction=dw_reduction,
) for i in range(layers)
])
# Extractor
assert n_layers == len(return_list)
self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim))
self.dpe = nn.ModuleList([
nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.dpe:
nn.init.constant_(m.bias, 0.)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.dec = nn.ModuleList([
Extractor(
n_dim, n_head, mlp_factor=mlp_factor,
dropout=mlp_dropout[i], drop_path=dpr[i],
) for i in range(n_layers)
])
# # projection
# self.proj = nn.Sequential(
# nn.LayerNorm(n_dim),
# nn.Dropout(cls_dropout),
# nn.Linear(n_dim, num_classes),
# )
self.balance = nn.Parameter(torch.zeros((n_dim)))
self.sigmoid = nn.Sigmoid()
def forward(self, x, mode='video', return_all_feats=False):
if mode == 'video':
T_down = self.T
else:
T_down = 1
L, NT, C = x.shape
N = NT // T_down
H = W = int((L - 1) ** 0.5)
assert H * W == L - 1
cls_token = self.temporal_cls_token.repeat(1, N, 1)
j = -1
for i, resblock in enumerate(self.resblocks):
x = resblock(x, T_down)
if i in self.return_list:
j += 1
tmp_x = x.clone()
tmp_x = tmp_x.view(L, N, T_down, C)
# dpe
_, tmp_feats = tmp_x[:1], tmp_x[1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W)
tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1)
tmp_x[1:] = tmp_x[1:] + tmp_feats
# enhancer
tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
cls_token = self.dec[j](cls_token, tmp_x)
weight = self.sigmoid(self.balance)
        residual = x.view(L, N, T_down, C)[0].mean(1)  # (N, C): first token, averaged over T_down
# return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual)
feats = (1 - weight) * cls_token[0, :, :] + weight * residual
if return_all_feats:
return feats, x.view(L, N, T_down, C)
return feats
class VisionTransformer(nn.Module):
def __init__(
self,
# backbone
input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0.,
t_size=8, kernel_size=3, dw_reduction=1.5,
# extractor
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
padding = (kernel_size - 1) // 2
self.conv1 = nn.Conv3d(3, width, (kernel_size, patch_size, patch_size), (2, patch_size, patch_size), (padding, 0, 0), bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(
width, layers, heads, dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate, t_size=t_size // 2,
return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout, num_classes=num_classes,
)
def forward(self, x, mode='video', return_all_feats=False):
# taken from https://github.com/facebookresearch/omnivore/blob/main/omnivore/models/swin_transformer_3d.py#L703
if mode == 'image': # for image, stride 2
# ! Use replicate here
x = F.pad(x, (0, 0, 0, 0, 0, 1), mode="replicate")
x = self.conv1(x) # shape = [*, width, grid, grid]
N, C, T, H, W = x.shape
x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C)
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
out = self.transformer(x, mode=mode, return_all_feats=return_all_feats)
return out
def inflate_weight(weight_2d, time_dim, center=True):
if center:
weight_3d = torch.zeros(*weight_2d.shape)
weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
middle_idx = time_dim // 2
weight_3d[:, :, middle_idx, :, :] = weight_2d
else:
weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
weight_3d = weight_3d / time_dim
return weight_3d
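# A quick hedged sketch (not in the original file) of inflate_weight: a 2D patch-embedding
# kernel is copied into the middle temporal slice of a 3D kernel (center=True), so the
# inflated conv initially reproduces the 2D behaviour on the centre frame.
def _demo_inflate_weight():
    weight_2d = torch.randn(768, 3, 16, 16)            # (out, in, kH, kW)
    weight_3d = inflate_weight(weight_2d, time_dim=3)
    assert weight_3d.shape == (768, 3, 3, 16, 16)      # (out, in, kT, kH, kW)
    assert torch.equal(weight_3d[:, :, 1], weight_2d)  # centre slice holds the 2D kernel
    assert torch.equal(weight_3d[:, :, 0], torch.zeros_like(weight_2d))
    return weight_3d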
def load_state_dict(model, state_dict):
state_dict_3d = model.state_dict()
for k in state_dict.keys():
if state_dict[k].shape != state_dict_3d[k].shape:
if len(state_dict_3d[k].shape) <= 2:
print(f'Ignore: {k}')
continue
print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}')
time_dim = state_dict_3d[k].shape[2]
state_dict[k] = inflate_weight(state_dict[k], time_dim)
model.load_state_dict(state_dict, strict=False)
def clip_load_state_dict(model, state_dict):
state_dict_3d = model.state_dict()
for k in state_dict.keys():
# print(k, k in state_dict_3d, k in state_dict)
if k in state_dict and k in state_dict_3d and state_dict[k].shape != state_dict_3d[k].shape:
if len(state_dict_3d[k].shape) <= 2:
print(f'Ignore: {k}')
continue
print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}')
time_dim = state_dict_3d[k].shape[2]
state_dict[k] = inflate_weight(state_dict[k], time_dim)
model.load_state_dict(state_dict, strict=False)
def vit_fusion_b32(
pretrained=True,
t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
t_size=t_size,
dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_fusion_b16(
pretrained=True,
t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
t_size=t_size,
dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_fusion_l14(
pretrained=True,
t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
t_size=t_size,
dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_fusion_l14_336(
pretrained=True,
t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
t_size=t_size,
dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
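# A hedged usage sketch (not in the original file): the fusion backbone accepts either a
# 16-frame clip or a single image (mode='image'), where the lone frame is replicate-padded
# so the stride-2 temporal conv still applies; both paths return an (N, n_dim) feature.
def _demo_vit_fusion_b16():
    model = vit_fusion_b16(pretrained=False, t_size=16)
    video = torch.rand(1, 3, 16, 224, 224)
    image = torch.rand(1, 3, 1, 224, 224)
    with torch.no_grad():
        video_feat = model(video)                 # (1, 768)
        image_feat = model(image, mode='image')   # (1, 768)
    assert video_feat.shape == image_feat.shape == (1, 768)
    return video_feat, image_feat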
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import numpy as np
seed = 4217
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
num_frames = 16
model = vit_fusion_b16(
pretrained=False,
t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4,
dw_reduction=1.5,
)
flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_fusion.py |
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from .attention_module_bias import multi_head_attention_forward
class _LinearWithBias(Linear):
bias: Tensor
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
        \text{where } head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None, return_qk=False, rpb=None):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
            If a ByteTensor is provided, the non-zero positions will be ignored while the zero
            positions will be unchanged. If a BoolTensor is provided, the positions with the
            value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
            S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
            positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
            while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
            are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- return_qk: whether return Q and K.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if return_qk:
if not self._qkv_same_embed_dim:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, return_qk=True, rpb=rpb)
else:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, return_qk=True, rpb=rpb)
return q, k, attn_output, attn_output_weights
else:
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, rpb=rpb)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, rpb=rpb)
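# A minimal sketch (not in the original file) of using this wrapper as a drop-in
# self-attention layer. The exact broadcasting rule for `rpb` lives in
# attention_module_bias.multi_head_attention_forward, so it is left as None here; the
# commented call only illustrates the shape this module is assumed to expect (a bias
# broadcastable over the (N * num_heads, L, S) attention logits).
def _demo_multihead_attention_rpb():
    embed_dim, num_heads, L, N = 64, 4, 10, 2
    attn = MultiheadAttention(embed_dim, num_heads)
    x = torch.randn(L, N, embed_dim)
    out, _ = attn(x, x, x, need_weights=False)
    # out, _ = attn(x, x, x, need_weights=False, rpb=torch.zeros(N * num_heads, L, L))
    assert out.shape == (L, N, embed_dim)
    return out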
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_bias.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import _pad, linear, softmax, dropout
Tensor = torch.Tensor
pad = _pad
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False,
rpb: Tensor = None,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- return_qk: whether return Q and K.
        - rpb: relative position bias
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
if rpb is not None:
attn_output_weights = attn_output_weights + rpb
attn_output_weights = softmax(attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
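if __name__ == "__main__":
    # Hedged sanity-check sketch (not part of the original file): it only illustrates the
    # expected shape of `rpb` -- (bsz * num_heads, tgt_len, src_len) -- and how the bias is
    # added to the scaled attention logits right before the softmax, mirroring the code above.
    import torch
    bsz, num_heads, tgt_len, src_len, head_dim = 2, 4, 5, 5, 8
    q = torch.rand(bsz * num_heads, tgt_len, head_dim)
    k = torch.rand(bsz * num_heads, src_len, head_dim)
    rpb = torch.zeros(bsz * num_heads, tgt_len, src_len)  # e.g. a learned relative position bias
    logits = torch.bmm(q, k.transpose(1, 2)) * head_dim ** -0.5 + rpb
    attn = logits.softmax(dim=-1)
    print(attn.shape)  # torch.Size([8, 5, 5])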
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_module_bias.py |
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
from einops import rearrange
import torch.utils.checkpoint as checkpoint
from .attention_bias import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0, t_size=8, spatial_size=7):
super().__init__()
self.n_head = n_head
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
print(f'Add RPB: t_size {t_size}, spatial_size {spatial_size}')
self.pos_embed = nn.Conv3d(d_model, d_model, kernel_size=3, stride=1, padding=1, groups=d_model)
# temporal
self.attn_t = MultiheadAttention(d_model, n_head)
self.ln_t = LayerNorm(d_model)
self.rpb_t = nn.Parameter(torch.zeros([t_size * 2 - 1, n_head]))
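        # The bias table has one row per relative temporal offset (2 * t_size - 1 offsets in total);
        # idx_tensor_t maps each (query frame, key frame) pair to its offset row via q - k + t_size - 1,
        # so indexing the table with it yields a (T, T, n_head) bias.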
idx_tensor_t = torch.zeros([t_size, t_size], dtype=torch.long)
for q in range(t_size):
for k in range(t_size):
offs = q - k + t_size - 1
idx_tensor_t[q, k] = offs
self.idx_tensor_t = idx_tensor_t
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.rpb = nn.Parameter(torch.zeros([(spatial_size * 2 - 1) ** 2, n_head]))
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
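        # Spatial counterpart of idx_tensor_t: each (query patch, key patch) pair maps to a flattened
        # 2D relative offset, indexing a ((2 * spatial_size - 1) ** 2, n_head) bias table.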
idx_tensor = torch.zeros([spatial_size ** 2, spatial_size ** 2], dtype=torch.long)
for q in range(spatial_size ** 2):
qi, qj = q // spatial_size, q % spatial_size
for k in range(spatial_size ** 2):
ki, kj = k // spatial_size, k % spatial_size
i_offs = qi - ki + spatial_size - 1
j_offs = qj - kj + spatial_size - 1
idx_tensor[q, k] = i_offs * (spatial_size * 2 - 1) + j_offs
self.idx_tensor = idx_tensor
# init zero
print('Init zero for (2+1)d')
nn.init.constant_(self.pos_embed.weight, 0)
nn.init.constant_(self.pos_embed.bias, 0)
nn.init.constant_(self.attn_t.in_proj_weight, 0)
nn.init.constant_(self.attn_t.in_proj_bias, 0)
nn.init.constant_(self.attn_t.out_proj.weight, 1)
nn.init.constant_(self.attn_t.out_proj.bias, 0)
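        # Zero-initialising the depth-wise conv and the temporal attention projections makes both
        # added branches output zero at the start of training, so the block initially behaves like
        # the pretrained spatial-only CLIP block.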
def attention(self, x, rpb=None):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0]
def attention_temporal(self, x, rpb=None):
self.attn_mask = None
return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0]
def forward(self, x, T=8, mode='video'):
# temporal
# x: 1+HWT, N, C
# pos_emb
tmp_x = x[1:, :, :]
LT, N, C = tmp_x.shape
L = LT // T
H = W = int(L ** 0.5)
tmp_x = tmp_x.view(H, W, T, N, C).permute(3, 4, 2, 0, 1)
tmp_x = tmp_x + self.pos_embed(tmp_x)
tmp_x = tmp_x.view(N, C, T, L).permute(3, 2, 0, 1).view(LT, N, C)
x[1:, :, :] = tmp_x
xt = x[1:, :, :]
_, N, C = xt.shape
xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T)
# no rpb_t for image
if mode == 'image':
rpb_t = None
else:
# rpb_t: T, T, H => B*H, T, T
self.idx_tensor_t = self.idx_tensor_t.to(xt.device)
rpb_t = self.rpb_t[self.idx_tensor_t].permute(2, 0, 1).repeat(N*L, 1, 1)
res_temporal = self.attention_temporal(self.ln_t(xt), rpb=rpb_t)
res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T)
xt = x[1:, :, :] + self.drop_path(res_temporal)
# spatial
init_cls_token = x[:1, :, :]
cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C)
xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T)
xs = torch.cat((cls_token, xs), 0)
# rpb: L, L, H => B*H, L+1, L+1
rpb = torch.zeros((self.n_head, L+1, L+1), device=xs.device, dtype=xs.dtype)
self.idx_tensor = self.idx_tensor.to(xs.device)
rpb[:, 1:, 1:] = self.rpb[self.idx_tensor].permute(2, 0, 1)
rpb = rpb.repeat(T*N, 1, 1)
res_spatial = self.attention(self.ln_1(xs), rpb=rpb)
# Taking care of CLS token
cls_token = res_spatial[0, :, :]
cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N)
cls_token = torch.mean(cls_token, 0, True) # averaging for every frame
res_spatial = res_spatial[1:, :, :]
res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N)
x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0., t_size=8, spatial_size=7):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask,
drop_path=dpr[i], t_size=t_size, spatial_size=spatial_size) for i in range(layers)
])
def forward(self, x, return_num=4, T=8, mode='video'):
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x, T=T, mode=mode)
if i >= self.layers - return_num:
# LT + 1, N, C
LT, N, C = x.shape
L = (LT - 1) // T
cls_x, tmp_x = x[:1], x[1:]
cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1)
tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C
                tmp_x = torch.cat([cls_x, tmp_x], dim=0)  # L + 1, N, T, C
features.append(tmp_x)
return features
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim,
num_frames=8, drop_path_rate=0., t_size=8, spatial_size=7
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate, t_size=t_size, spatial_size=spatial_size)
def forward(self, x, return_num=4, mode='video'):
if len(x.size()) == 5:
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
cls_tokens = x[:N, :1, :]
x = x[:, 1:]
# add temporal position embedding for video
if mode == 'video':
x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T)
x = x + self.temporal_positional_embedding
x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T)
else:
pass
x = torch.cat((cls_tokens, x), dim=1)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T, mode=mode
)
return features
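# Hedged usage sketch (not part of the original file; shapes assume the ViT-B/16 variant below
# on an 8-frame 224x224 clip):
#   backbone = vit_2plus1d_dw_bias_b16(pretrained=False)
#   feats = backbone(torch.rand(2, 3, 8, 224, 224))
#   # feats is a list of `return_num` tensors, each shaped (L + 1, N, T, C) = (197, 2, 8, 768)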
def vit_2plus1d_dw_bias_b32(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
t_size=t_size,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_b16(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
t_size=t_size,
spatial_size=14,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_l14(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
t_size=t_size,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_l14_336(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
t_size=t_size,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_2plus1d_dw_bias_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(4, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_2plus1d_dw_bias.py |
#!/usr/bin/env python
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
import torch.nn as nn
import torch.nn.functional as F
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualDecoderBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None,
mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
nn.init.xavier_uniform_(self.attn.in_proj_weight)
nn.init.xavier_uniform_(self.attn.out_proj.weight)
nn.init.xavier_uniform_(self.mlp[0].weight)
nn.init.xavier_uniform_(self.mlp[-1].weight)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
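        # nn.MultiheadAttention stacks the query/key/value projections in in_proj_weight along
        # dim 0, so the three d_model-row slices below recover W_q, W_k and W_v separately.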
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class TransformerDecoder(nn.Module):
def __init__(self, n_layers=4,
n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8,
use_t_conv=True, use_t_pos_embed=True, num_classes=400,
add_residual=False,
):
super().__init__()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.dec = nn.ModuleList([
ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i])
for i in range(n_layers)
])
self.proj = nn.Sequential(
nn.LayerNorm(n_dim),
nn.Dropout(cls_dropout),
nn.Linear(n_dim, num_classes),
)
self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
self.add_residual = add_residual
print(f'Add residual {add_residual}')
if use_t_conv:
self.tconv = nn.ModuleList([
nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
m.weight.data[...] = torch.Tensor([0, 1, 0])
else:
self.tconv = None
if use_t_pos_embed:
self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim]))
else:
self.pemb_t = None
self.t_size = t_size
def forward(self, clip_feats_all):
# clip_feats_all = clip_feats_all[-len(self.dec):]
# only return n_layers features, save memory
clip_feats = [x for x in clip_feats_all]
L, N, T, C = clip_feats[0].size()
x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
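        # x starts as a single learnable query token of shape (1, N, C); each decoder layer below
        # lets it cross-attend to the (temporally enriched) frame tokens of one selected block.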
for i in range(len(clip_feats)):
if self.tconv is not None:
L, N, T, C = clip_feats[i].shape
clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T
clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C)
if self.pemb_t is not None:
clip_feats[i] = clip_feats[i] + self.pemb_t[i]
clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
for i in range(len(self.dec)):
x = self.dec[i](x, clip_feats[i])
if self.add_residual:
residual = clip_feats_all[-1][0].mean(1)
return self.proj(x[0, :, :] + residual)
else:
return self.proj(x[0, :, :])
if __name__ == '__main__':
model = TransformerDecoder()
# construct a fake input to demonstrate input tensor shape
L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim
# we use intermediate feature maps from multiple blocks, so input features should be a list
input_features = []
for i in range(4): # vit-b has 12 blocks
# every item in input_features contains features maps from a single block
# every item is a tuple containing 3 feature maps:
# (1) block output features (i.e. after mlp) with shape L, N, T, C
# (2) projected query features with shape L, N, T, C
# (3) projected key features with shape L, N, T, C
input_features.append(
torch.zeros([L, N, T, C]))
# some small optimizations:
    # (1) We only decode from the last $n$ blocks, so it is enough for the last $n$ items of input_features to be valid; all earlier items can be filled with None to save memory. By default $n=4$.
    # (2) Projected query/key features are optional. If you are using an incompatible image backbone without query/key (e.g. a CNN), you can fill those positions with None (i.e. the tuple should be (Tensor, None, None)) and set use_image_attnmap=False when constructing the model.
print(model(input_features).shape) # should be N, 400
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/evl_module.py |
from .clip import *
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/__init__.py |
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange
from . import evl_utils
from .evl_utils import TransformerDecoder_uniformer_diff_conv_balance
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int,
# evl
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True,
backbone='vit_2plus1d_dw_bias_b16',
uni_layer=0,
uni_type='2d',
add_ffn=False,
t_conv_type='3d',
pre_prompt=False,
balance=0.,
after_me=True,
before_me=False,
me_type='stm',
me_reduction=4,
):
super().__init__()
        # All assertions are for the ad hoc clip_kc and should be removed
# assert vision_layers == 12, vision_layers
assert image_resolution == 224, image_resolution
# assert vision_patch_size == 32, vision_patch_size
assert vision_width == n_dim, (vision_width, n_dim)
self.vision_width = n_dim
self.context_length = context_length
vision_heads = vision_width // 64
self.visual = evl_utils.__dict__[backbone](pretrained=False)
self.evl = TransformerDecoder_uniformer_diff_conv_balance(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type,
pre_prompt=pre_prompt, balance=balance,
after_me=after_me, before_me=before_me,
me_type=me_type, me_reduction=me_reduction,
)
self.visual_ln_post = nn.LayerNorm(n_dim)
scale = n_dim ** -0.5
self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim))
self.return_qk = use_image_attnmap
self.return_num = n_layers
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.embed_dim = embed_dim
        # We separate the mask embedding so the pretrained model can still be loaded
self.mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
nn.init.normal_(self.mask_embedding, std=0.02)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
nn.init.constant_(self.visual_ln_post.weight, 1.0)
nn.init.constant_(self.visual_ln_post.bias, 0.0)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_video(self, video, return_all_feats=False):
if len(video.size()) == 4: #[bs * T, C, H, W]
#set_trace()
frames = 8
video = rearrange(video, '(b t) c h w -> b t c h w', b=int(video.size(0)/frames), t=frames)
video = rearrange(video, 'b t c h w -> b c t h w')
# video: [N, C, T, H, W]
features = self.visual(video, return_num=self.return_num)
x = self.visual_ln_post(self.evl(features))
x = x @ self.visual_proj
if return_all_feats:
return x, features[-1] # [N, T, C], [L, N, T, C]
return x
def encode_text(self, text, masked_indices=None, return_all_feats=False):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
if masked_indices is not None:
x[masked_indices] = self.mask_embedding
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
if return_all_feats:
return feats, x
return feats
def forward(self, video, text):
video_features = self.encode_video(video)
text_features = self.encode_text(text)
# normalized features
video_features = video_features / video_features.norm(dim=1, keepdim=True)
text_features = text_features / text_features.norm(dim=1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
        logits_per_video = logit_scale * video_features @ text_features.t()
logits_per_text = logits_per_video.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_video, logits_per_text
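# Hedged usage sketch (illustration only, not part of the original file):
#   model = build_model(state_dict)                      # see build_model() below
#   logits_v, logits_t = model(video, tokenized_text)    # video: (N*T, C, H, W) or (N, C, T, H, W)
#   probs = logits_v.softmax(dim=-1)                     # per-video distribution over the texts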
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name) and not isinstance(l, TransformerDecoder_uniformer_diff_conv_balance):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(
state_dict: dict,
# evl
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, no_pretrain=False,
):
vit = "visual.proj" in state_dict or "visual.positional_embedding" in state_dict
if "visual.proj" in state_dict:
state_dict["visual_proj"] = state_dict["visual.proj"]
state_dict["visual_ln_post.weight"] = state_dict["visual.ln_post.weight"]
state_dict["visual_ln_post.bias"] = state_dict["visual.ln_post.bias"]
del state_dict["visual.proj"], state_dict["visual.ln_post.weight"], state_dict["visual.ln_post.bias"]
# new_state_dict = OrderedDict()
# for k, v in state_dict.items():
# if k.startswith("backbone."):
# k = k.replace("backbone.", "visual.")
# new_state_dict[k] = v
# state_dict = new_state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
# embed_dim = 512
# context_length = 77
# vocab_size = 49408
# transformer_width = 512
# transformer_layers = 12
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
vision_width = state_dict["visual_proj"].shape[0]
n_dim = vision_width
if vision_width == 768:
backbone = "vit_2plus1d_dw_bias_b16"
n_head = 12
elif vision_width == 1024:
backbone = "vit_2plus1d_dw_bias_l14"
n_head = 16
else:
raise NotImplementedError
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers,
n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size,
use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, backbone=backbone
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
# convert_weights(model)
# strict=False, for parameters of decoder
# assert False, (len(model.state_dict()), len(state_dict))
if not no_pretrain:
model.load_state_dict(state_dict, strict=False)
return model.eval()
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/model.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder_uniformer_diff_conv_balance
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_b16',
n_layers=12,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
uni_layer=4,
uni_type='3d',
add_ffn=True,
t_conv_type='1d',
pre_prompt=True,
balance=0.,
after_me=True,
before_me=False,
me_type='dstm',
me_reduction=4,
num_classes=400,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False)
self.evl = TransformerDecoder_uniformer_diff_conv_balance(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type,
pre_prompt=pre_prompt, balance=balance,
after_me=after_me, before_me=before_me,
me_type=me_type, me_reduction=me_reduction,
num_classes=num_classes
)
self.return_num = n_layers
def forward(self, x):
features = self.backbone(x, return_num=self.return_num)
output = self.evl(features)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_2plus1d_diff_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.5
model = EVL(
backbone='vit_2plus1d_dw_bias_b16',
n_layers=12,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
uni_layer=0,
uni_type='2d',
add_ffn=False,
t_conv_type='3d',
pre_prompt=False,
balance=0.,
after_me=True,
before_me=False,
me_type='stm',
me_reduction=4,
num_classes=400,
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_diff_b_sparse8.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_2plus1d_diff_b_sparse8()
cal_flops(model, frame=8, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/model_no_freeze_diff.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False)
self.evl = TransformerDecoder(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
num_classes=num_classes,
)
self.return_num = n_layers
def forward(self, x):
features = self.backbone(x, return_num=self.return_num)
output = self.evl(features)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_l_sparse16(pretrained=True):
# 16x224x224
# k400 1x1: 86.5
model = EVL(
backbone='vit_l14',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse16.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_l_sparse32(pretrained=True):
# 32x224x224
# k400 1x1: 87.0
model = EVL(
backbone='vit_l14',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse32.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_l336_sparse32(pretrained=True):
# 32x336x336
# k400 1x1: 87.4
model = EVL(
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l336_sparse32.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_l_sparse16()
cal_flops(model, frame=16, size=224)
# model = vit_l_sparse32()
# cal_flops(model, frame=32, size=224)
# model = vit_l336_sparse32()
# cal_flops(model, frame=32, size=336) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/model_freeze.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=False,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False)
self.evl = TransformerDecoder(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
num_classes=num_classes, add_residual=add_residual
)
self.return_num = n_layers
def forward(self, x):
features = self.backbone(x, return_num=self.return_num)
output = self.evl(features)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.4
model = EVL(
backbone='vit_b16',
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=True,
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_b_sparse8.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_2plus1d_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.5
model = EVL(
backbone='vit_2plus1d_b16',
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=True,
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_b_sparse8.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
# model = vit_b_sparse8()
# cal_flops(model, frame=8, size=224)
model = vit_2plus1d_b_sparse8()
cal_flops(model, frame=8, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/model_no_freeze.py |
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(
name: str,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit: bool = False, download_root: str = None,
# evl
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False,
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
'''
with open(model_path, 'rb') as opened_file:
try:
# loading JIT archive
model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
'''
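    # The checkpoint loaded here is expected to store the CLIP weights under a 'state_dict' key
    # with every parameter name prefixed by 'clip.'; the loop below strips that prefix so the
    # names match what build_model() expects.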
init_state_dict = torch.load(model_path, map_location='cpu')['state_dict']
state_dict = {}
for k, v in init_state_dict.items():
k = k.replace('clip.','')
state_dict[k] = v
#set_trace()
if not jit:
model = build_model(
state_dict or model.state_dict(),
n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size,
use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain
).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
else:
result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
special_tokens_mask[i, len(tokens):] = 1
if return_special_tokens_mask:
return result, special_tokens_mask
return result
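# Hedged usage sketch (not part of the original file):
#   tokens, pad_mask = tokenize(["a video of a dog running"], return_special_tokens_mask=True)
#   # tokens: (1, 77) integer tensor; pad_mask marks the padded positions after <|endoftext|> as True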
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/clip.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
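if __name__ == "__main__":
    # Hedged round-trip sketch (not part of the original file); assumes ftfy/regex are installed
    # and the default BPE vocabulary file sits next to this module.
    tokenizer = SimpleTokenizer()
    ids = tokenizer.encode("a man is playing guitar")
    print(ids)
    print(tokenizer.decode(ids))  # should read back as the whitespace-normalised input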
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/simple_tokenizer.py |
#!/usr/bin/env python
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from .attention_module import multi_head_attention_forward
class _LinearWithBias(Linear):
bias: Tensor
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
\text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None, return_qk=False):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
            If a ByteTensor is provided, the non-zero positions will be ignored while the zero
            positions will be unchanged. If a BoolTensor is provided, the positions with the
            value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
            S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
            positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
            while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
            are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- return_qk: whether return Q and K.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if return_qk:
if not self._qkv_same_embed_dim:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, return_qk=True)
else:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, return_qk=True)
return q, k, attn_output, attn_output_weights
else:
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask)
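if __name__ == '__main__':
    # Minimal smoke-test sketch for this MultiheadAttention variant: it exercises the
    # return_qk path, which additionally yields the pre-reshape query/key projections.
    # Because of the relative import above, run it as a module from the package root.
    embed_dim, num_heads, L, N = 64, 4, 7, 2
    mha = MultiheadAttention(embed_dim, num_heads)
    x = torch.zeros(L, N, embed_dim)
    q, k, attn_output, _ = mha(x, x, x, need_weights=False, return_qk=True)
    print(q.shape, k.shape, attn_output.shape)  # each should be (L, N, embed_dim)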
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention.py |
#!/usr/bin/env python
from collections import OrderedDict
from timm.models.layers import trunc_normal_, DropPath
import torch
import torch.nn as nn
import torch.nn.functional as F
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
def conv_1x1x1(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)
def conv_3x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)
def conv_1x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups)
def bn_3d(dim):
return nn.BatchNorm3d(dim)
class STM(nn.Module):
def __init__(self, n_dim, reduction=4):
super(STM, self).__init__()
reduced_c = n_dim // reduction
self.reduce = nn.Sequential(
nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False),
nn.BatchNorm2d(reduced_c)
)
self.shift = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad = (0, 0, 0, 0, 0, 0, 0, 1)
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.reduce(fea) # NT, C//r, H, W
# t feature
reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W
t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
# apply transformation conv to t+1 feature
conv_bottleneck = self.shift(bottleneck) # NT, C//r, H, W
# reshape fea: N, T, C//r, H, W
reshape_conv_bottleneck = conv_bottleneck.view((-1, T) + conv_bottleneck.size()[1:])
__, tPlusone_fea = reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
# motion fea = t+1_fea - t_fea
# pad the last timestamp
diff_fea = tPlusone_fea - t_fea # N, T-1, C//r, H, W
# pad = (0,0,0,0,0,0,0,1)
diff_fea_pluszero = F.pad(diff_fea, self.pad, mode="constant", value=0) # N, T, C//r, H, W
diff_fea_pluszero = diff_fea_pluszero.view((-1,) + diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
y = self.recover(diff_fea_pluszero) # NT, C, H, W
# reshape
y = y.reshape(N, T, C, L).permute(3, 0, 1, 2)
y = torch.cat([cls_token, y], dim=0)
return y
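    # In short: STM reduces channels, takes the depthwise-shifted feature at t+1 minus the
    # feature at t as a motion cue, zero-pads the missing last timestep, recovers the channel
    # dimension, and re-attaches the untouched cls token.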
class DSTM(nn.Module):
def __init__(self, n_dim, reduction=4):
super(DSTM, self).__init__()
reduced_c = n_dim // reduction
self.reduce = nn.Sequential(
nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False),
nn.BatchNorm2d(reduced_c)
)
# DW(T+1) - T
self.shift_pre = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover_pre = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad_pre = (0, 0, 0, 0, 0, 0, 0, 1)
# DW(T-1) - T
self.shift_back = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover_back = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad_back = (0, 0, 0, 0, 0, 0, 0, 1)
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.reduce(fea) # NT, C//r, H, W
# t feature
reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W
pre_t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
back_t_fea, __ = reshape_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
# apply transformation conv to t+1/t-1 feature
pre_conv_bottleneck = self.shift_pre(bottleneck) # NT, C//r, H, W
back_conv_bottleneck = self.shift_back(bottleneck) # NT, C//r, H, W
# reshape fea: N, T, C//r, H, W
pre_reshape_conv_bottleneck = pre_conv_bottleneck.view((-1, T) + pre_conv_bottleneck.size()[1:])
back_reshape_conv_bottleneck = back_conv_bottleneck.view((-1, T) + back_conv_bottleneck.size()[1:])
__, tPlusone_fea = pre_reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
tMinusone_fea, _ = back_reshape_conv_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
# pre_fea = t+1_fea - t_fea
# back_fea = t-1_fea - t_fea
pre_diff_fea = tPlusone_fea - pre_t_fea # N, T-1, C//r, H, W
back_diff_fea = tMinusone_fea - back_t_fea # N, T-1, C//r, H, W
# pad the last/first timestamp
pre_diff_fea_pluszero = F.pad(pre_diff_fea, self.pad_pre, mode="constant", value=0) # N, T, C//r, H, W
pre_diff_fea_pluszero = pre_diff_fea_pluszero.view((-1,) + pre_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
back_diff_fea_pluszero = F.pad(back_diff_fea, self.pad_back, mode="constant", value=0) # N, T, C//r, H, W
back_diff_fea_pluszero = back_diff_fea_pluszero.view((-1,) + back_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
# recover channel
pre_y = self.recover_pre(pre_diff_fea_pluszero) # NT, C, H, W
back_y = self.recover_back(back_diff_fea_pluszero) # NT, C, H, W
# reshape
y = (pre_y + back_y).reshape(N, T, C, L).permute(3, 0, 1, 2)
# cat cls_token
y = torch.cat([cls_token, y], dim=0)
return y
class TDN(nn.Module):
def __init__(self, channel, n_segment=8, index=1, reduction=4):
super(TDN, self).__init__()
self.channel = channel
self.reduction = reduction
self.n_segment = n_segment
self.stride = 2**(index-1)
self.conv1 = nn.Conv2d(in_channels=self.channel,
out_channels=self.channel//self.reduction,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
self.conv2 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel//self.reduction,
kernel_size=3, padding=1, groups=self.channel//self.reduction, bias=False)
self.avg_pool_forward2 = nn.AvgPool2d(kernel_size=2, stride=2)
self.avg_pool_forward4 = nn.AvgPool2d(kernel_size=4, stride=4)
self.sigmoid_forward = nn.Sigmoid()
        self.avg_pool_backward2 = nn.AvgPool2d(kernel_size=2, stride=2)  # nn.AdaptiveMaxPool2d(1)
self.avg_pool_backward4 = nn.AvgPool2d(kernel_size=4, stride=4)
self.sigmoid_backward = nn.Sigmoid()
self.pad1_forward = (0, 0, 0, 0, 0, 0, 0, 1)
self.pad1_backward = (0, 0, 0, 0, 0, 0, 1, 0)
self.conv3 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.channel)
self.conv3_smallscale2 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False)
self.bn3_smallscale2 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
self.conv3_smallscale4 = nn.Conv2d(in_channels = self.channel//self.reduction,
out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False)
self.bn3_smallscale4 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
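    # NOTE: spatial_pool below is not called from forward() and references self.conv_mask
    # and self.softmax, which are not defined in __init__; those modules would have to be
    # added before this helper can be used.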
def spatial_pool(self, x):
nt, channel, height, width = x.size()
input_x = x
# [N, C, H * W]
input_x = input_x.view(nt, channel, height * width)
# [N, 1, C, H * W]
input_x = input_x.unsqueeze(1)
# [N, 1, H, W]
context_mask = self.conv_mask(x)
# [N, 1, H * W]
context_mask = context_mask.view(nt, 1, height * width)
# [N, 1, H * W]
context_mask = self.softmax(context_mask)
context_mask = context_mask.view(nt,1,height,width)
return context_mask
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.conv1(fea) # nt, c//r, h, w
bottleneck = self.bn1(bottleneck) # nt, c//r, h, w
reshape_bottleneck = bottleneck.view((-1, self.n_segment) + bottleneck.size()[1:]) # n, t, c//r, h, w
t_fea_forward, _ = reshape_bottleneck.split([self.n_segment -1, 1], dim=1) # n, t-1, c//r, h, w
_, t_fea_backward = reshape_bottleneck.split([1, self.n_segment -1], dim=1) # n, t-1, c//r, h, w
conv_bottleneck = self.conv2(bottleneck) # nt, c//r, h, w
reshape_conv_bottleneck = conv_bottleneck.view((-1, self.n_segment) + conv_bottleneck.size()[1:]) # n, t, c//r, h, w
_, tPlusone_fea_forward = reshape_conv_bottleneck.split([1, self.n_segment-1], dim=1) # n, t-1, c//r, h, w
        tPlusone_fea_backward, _ = reshape_conv_bottleneck.split([self.n_segment-1, 1], dim=1)  # n, t-1, c//r, h, w
        diff_fea_forward = tPlusone_fea_forward - t_fea_forward  # n, t-1, c//r, h, w
        diff_fea_backward = tPlusone_fea_backward - t_fea_backward  # n, t-1, c//r, h, w
diff_fea_pluszero_forward = F.pad(diff_fea_forward, self.pad1_forward, mode="constant", value=0) # n, t, c//r, h, w
diff_fea_pluszero_forward = diff_fea_pluszero_forward.view((-1,) + diff_fea_pluszero_forward.size()[2:]) #nt, c//r, h, w
diff_fea_pluszero_backward = F.pad(diff_fea_backward, self.pad1_backward, mode="constant", value=0) # n, t, c//r, h, w
diff_fea_pluszero_backward = diff_fea_pluszero_backward.view((-1,) + diff_fea_pluszero_backward.size()[2:]) #nt, c//r, h, w
        y_forward_smallscale2 = self.avg_pool_forward2(diff_fea_pluszero_forward)  # nt, c//r, h//2, w//2
        y_backward_smallscale2 = self.avg_pool_backward2(diff_fea_pluszero_backward)  # nt, c//r, h//2, w//2
y_forward_smallscale4 = diff_fea_pluszero_forward
y_backward_smallscale4 = diff_fea_pluszero_backward
y_forward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_forward_smallscale2))
y_backward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_backward_smallscale2))
y_forward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_forward_smallscale4))
y_backward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_backward_smallscale4))
y_forward_smallscale2 = F.interpolate(y_forward_smallscale2, diff_fea_pluszero_forward.size()[2:])
y_backward_smallscale2 = F.interpolate(y_backward_smallscale2, diff_fea_pluszero_backward.size()[2:])
        y_forward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_forward + 1.0/3.0*y_forward_smallscale2 + 1.0/3.0*y_forward_smallscale4))  # nt, c, h, w
        y_backward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_backward + 1.0/3.0*y_backward_smallscale2 + 1.0/3.0*y_backward_smallscale4))  # nt, c, h, w
y_forward = self.sigmoid_forward(y_forward) - 0.5
y_backward = self.sigmoid_backward(y_backward) - 0.5
y = 0.5 * y_forward + 0.5 * y_backward
attn = fea * y
x = x + attn.reshape(N, T, C, L).permute(3, 0, 1, 2)
x = torch.cat([cls_token, x], dim=0)
return x
class CMlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = conv_1x1x1(in_features, hidden_features)
self.act = act_layer()
self.fc2 = conv_1x1x1(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class CBlock(nn.Module):
def __init__(self, dim, mlp_ratio=4., dropout=0., drop_path=0., uni_type='3d', add_ffn=True):
super().__init__()
self.norm1 = bn_3d(dim)
self.conv1 = conv_1x1x1(dim, dim, 1)
self.conv2 = conv_1x1x1(dim, dim, 1)
if uni_type == '3d':
print('Use 3d conv for local MHRA')
self.attn = conv_3x3x3(dim, dim, groups=dim)
else:
print('Use 2d conv for local MHRA')
self.attn = conv_1x3x3(dim, dim, groups=dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.add_ffn = add_ffn
if add_ffn:
print('Add FFN in local MHRA')
self.norm2 = bn_3d(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=nn.GELU, drop=dropout)
print('Init zero')
nn.init.constant_(self.conv2.weight, 0.)
nn.init.constant_(self.conv2.bias, 0.)
if add_ffn:
nn.init.constant_(self.mlp.fc2.weight, 0.)
nn.init.constant_(self.mlp.fc2.bias, 0.)
def forward(self, x):
x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
if self.add_ffn:
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
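    # CBlock is the local MHRA unit: a depthwise 3D (or 1x3x3) convolution stands in for
    # attention between two pointwise convs, with BatchNorm3d as the pre-norm; because conv2
    # (and the FFN's fc2, when present) are zero-initialized, the block starts as identity.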
class ResidualDecoderBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None,
mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
nn.init.xavier_uniform_(self.attn.in_proj_weight)
# nn.init.xavier_uniform_(self.attn.out_proj.weight)
nn.init.constant_(self.attn.out_proj.weight, 0.)
nn.init.constant_(self.attn.out_proj.bias, 0.)
nn.init.xavier_uniform_(self.mlp[0].weight)
# nn.init.xavier_uniform_(self.mlp[-1].weight)
nn.init.constant_(self.mlp[-1].weight, 0.)
nn.init.constant_(self.mlp[-1].bias, 0.)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
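    # attention() above performs cross-attention manually with the fused in_proj weights of
    # nn.MultiheadAttention: x (the queries, e.g. a [1, N, C] temporal cls token) attends over
    # y (keys/values, e.g. [T*L, N, C] flattened video tokens); the zero-initialized out_proj
    # makes the block start as an identity mapping.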
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class TransformerDecoder_uniformer_diff_conv_balance(nn.Module):
def __init__(self, n_layers=4,
uni_layer=4, uni_type='3d', add_ffn=True, t_conv_type='1d', pre_prompt=True,
n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
balance=0.,
use_t_conv=True, after_me=True, before_me=False, me_type='dstm', me_reduction=4,
use_t_pos_embed=True, num_classes=400):
super().__init__()
n_layers += uni_layer
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.uni_layer = uni_layer
self.uni_dec = nn.ModuleList([
CBlock(n_dim, mlp_ratio=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i], uni_type=uni_type, add_ffn=add_ffn)
for i in range(uni_layer)
])
self.dec = nn.ModuleList([
ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i])
for i in range(n_layers)
])
self.pre_prompt = pre_prompt
if pre_prompt:
print('Add pre prompt')
self.pre_temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
if use_t_conv:
self.t_conv_type = t_conv_type
if t_conv_type == '1d':
print('Use 1d t_conv for CPE')
self.tconv = nn.ModuleList([
nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
m.weight.data[...] = torch.Tensor([0, 1, 0])
else:
print('Use 3d t_conv for CPE')
self.tconv = nn.ModuleList([
nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
else:
self.tconv = None
self.before_me = before_me
self.after_me = after_me
if before_me or after_me:
assert before_me != after_me
print(f'Use {me_type} attention, Before {before_me}, After {after_me}')
if me_type == 'stm':
me_op = STM
elif me_type == 'dstm':
me_op = DSTM
elif me_type == 'tdn':
me_op = TDN
self.me = nn.ModuleList([me_op(n_dim, reduction=me_reduction) for i in range(n_layers)])
if use_t_pos_embed:
self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim]))
else:
self.pemb_t = None
        print(f'Balance weight {balance}')
self.balance = nn.Parameter(torch.ones((n_dim)) * balance)
self.sigmoid = nn.Sigmoid()
def forward(self, clip_feats_all):
# clip_feats_all = clip_feats_all[-len(self.dec):]
# only return n_layers features, save memory
clip_feats = [x for x in clip_feats_all]
if self.after_me:
origin_clip_feats = [x for x in clip_feats_all]
L, N, T, C = clip_feats[0].size()
x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
for i in range(len(clip_feats)):
if self.before_me:
                # note: TDN adds a residual internally, while STM/DSTM return the motion feature in place of the input
clip_feats[i] = self.me[i](clip_feats[i])
if self.tconv is not None:
L, N, T, C = clip_feats[i].shape
if self.t_conv_type == '1d':
clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T
clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C)
else:
H = W = int((L - 1) ** 0.5)
_, tmp_feats = clip_feats[i][:1], clip_feats[i][1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W)
tmp_feats = self.tconv[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1)
clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats
if self.pemb_t is not None:
clip_feats[i] = clip_feats[i] + self.pemb_t[i]
if self.after_me:
clip_feats[i] = clip_feats[i] + self.me[i](origin_clip_feats[i])
if i < self.uni_layer:
# L, N, T, C
L, N, T, C = clip_feats[i].shape
H = W = int((L - 1) ** 0.5)
_, tmp_feats = clip_feats[i][:1], clip_feats[i][1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W)
tmp_feats = self.uni_dec[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1)
clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats
clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
if self.pre_prompt:
pre_x = self.pre_temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
for i in range(len(self.dec)):
if i < self.uni_layer:
pre_x = self.dec[i](pre_x, clip_feats[i])
elif i == self.uni_layer:
clip_feats[i] = torch.cat([pre_x, clip_feats[i]], dim=0)
x = self.dec[i](x, clip_feats[i])
else:
x = self.dec[i](x, clip_feats[i])
else:
for i in range(len(self.dec)):
x = self.dec[i](x, clip_feats[i])
# real residual
# L, N, T, C
residual = clip_feats_all[-1][0].mean(1)
weight = self.sigmoid(self.balance)
# return self.proj((1 - weight) * x[0, :, :] + weight * residual)
return (1 - weight) * x[0, :, :] + weight * residual
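        # NOTE: the returned tensor has shape [N, n_dim]; the projection to num_classes is
        # commented out above, so no classification head is applied here.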
if __name__ == '__main__':
model = TransformerDecoder_uniformer_diff_conv_balance()
# construct a fake input to demonstrate input tensor shape
L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim
# we use intermediate feature maps from multiple blocks, so input features should be a list
input_features = []
    for i in range(8):  # take features from 8 of ViT-B's 12 blocks (the default decoder consumes n_layers + uni_layer = 8 maps)
    # every item in input_features contains feature maps from a single block
# every item is a tuple containing 3 feature maps:
# (1) block output features (i.e. after mlp) with shape L, N, T, C
# (2) projected query features with shape L, N, T, C
# (3) projected key features with shape L, N, T, C
        # this balance decoder only consumes (1), the block outputs, so pass the tensors directly
        input_features.append(torch.zeros([L, N, T, C]))
    # some small optimizations:
    # (1) We only decode from the last $n$ blocks, so it is enough for the last $n$ items of input_features to be valid; all earlier items can be filled with None to save memory. By default $n=4$.
    # (2) Projected query/key features are optional. If you are using an incompatible image backbone without query/key (e.g. a CNN), you can fill those positions with None (i.e. the tuple should be (Tensor, None, None)) and set use_image_attnmap=False when constructing the model.
print(model)
    print(model(input_features).shape)  # (N, n_dim), e.g. torch.Size([1, 768]); the num_classes head is not applied inside this module
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/evl_module_uniformer_diff_conv_balance.py |
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x, return_qk=False):
if return_qk:
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
q, k, attn_output, _ = self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, return_qk=True)
return q, k, attn_output
else:
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, return_qk=False):
if return_qk:
q, k, attn_output = self.attention(self.ln_1(x), return_qk=True)
x = x + self.drop_path(attn_output)
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x, q, k
else:
x = x + self.drop_path(self.attention(self.ln_1(x)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers)
])
def forward(self, x, return_num=4, T=8):
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x)
if i >= self.layers - return_num:
L, NT, C = x.shape
N = NT // T
features.append(x.view(L, N, T, C))
return features
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim, drop_path_rate=0.,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)
def forward(self, x, return_num=4, return_qk=True):
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T,
)
return features
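    # forward() returns a list of `return_num` feature maps, one per late transformer block,
    # each reshaped to [L, N, T, C] with L = (input_resolution // patch_size) ** 2 + 1
    # (spatial tokens plus the cls token), N the video batch size and T the number of frames.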
def vit_b32(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_b16(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_l14(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_l14_336(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit.py |
from .evl_module import TransformerDecoder
from .evl_module_uniformer_diff_conv_balance import TransformerDecoder_uniformer_diff_conv_balance
from .clip_vit import vit_b32, vit_b16, vit_l14, vit_l14_336
from .clip_vit_2plus1d import vit_2plus1d_b32, vit_2plus1d_b16, vit_2plus1d_l14, vit_2plus1d_l14_336
from .clip_vit_2plus1d_dw_bias import vit_2plus1d_dw_bias_b32, vit_2plus1d_dw_bias_b16, vit_2plus1d_dw_bias_l14, vit_2plus1d_dw_bias_l14_336 | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__init__.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import _pad, linear, softmax, dropout
Tensor = torch.Tensor
pad = _pad
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
        be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- return_qk: whether return Q and K.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(
attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
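if __name__ == '__main__':
    # Sketch of a direct functional call; the projection weights are borrowed from a stock
    # nn.MultiheadAttention so that all shapes line up (the names below are illustrative only).
    import torch.nn as nn
    E, H, L, N = 32, 4, 5, 2
    ref = nn.MultiheadAttention(E, H)
    x = torch.randn(L, N, E)
    attn_output, attn_weights = multi_head_attention_forward(
        x, x, x, E, H,
        ref.in_proj_weight, ref.in_proj_bias,
        None, None, False, 0.0,
        ref.out_proj.weight, ref.out_proj.bias,
        training=False)
    print(attn_output.shape, attn_weights.shape)  # (L, N, E) and (N, L, L)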
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_module.py |
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
from einops import rearrange
from .attention import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
# temporal
self.attn_t = MultiheadAttention(d_model, n_head)
self.ln_t = LayerNorm(d_model)
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
# init zero
print('Init zero for (2+1)d')
nn.init.constant_(self.attn_t.in_proj_weight, 0)
nn.init.constant_(self.attn_t.in_proj_bias, 0)
nn.init.constant_(self.attn_t.out_proj.weight, 1)
nn.init.constant_(self.attn_t.out_proj.bias, 0)
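        # with the input projection zeroed, q = k = v = 0, so the temporal attention output is
        # out_proj(0) = 0 at initialization (even though out_proj.weight is set to 1); the
        # temporal branch therefore starts as a no-op on the residual stream.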
def attention(self, x):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def attention_temporal(self, x):
self.attn_mask = None
return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, T=8):
# temporal
# x: 1+HWT, N, C
xt = x[1:, :, :]
_, N, C = xt.shape
xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T)
res_temporal = self.attention_temporal(self.ln_t(xt))
res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T)
xt = x[1:, :, :] + self.drop_path(res_temporal)
# spatial
init_cls_token = x[:1, :, :]
cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C)
xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T)
xs = torch.cat((cls_token, xs), 0)
res_spatial = self.attention(self.ln_1(xs))
# Taking care of CLS token
cls_token = res_spatial[0, :, :]
cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N)
cls_token = torch.mean(cls_token, 0, True) # averaging for every frame
res_spatial = res_spatial[1:, :, :]
res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N)
x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers)
])
def forward(self, x, return_num=4, T=8):
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x, T=T)
if i >= self.layers - return_num:
# LT + 1, N, C
LT, N, C = x.shape
L = (LT - 1) // T
cls_x, tmp_x = x[:1], x[1:]
cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1)
tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C
                tmp_x = torch.cat([cls_x, tmp_x], dim=0)  # L + 1, N, T, C
features.append(tmp_x)
return features
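    # each returned feature map has shape [L + 1, N, T, C]: the single cls token is repeated
    # along the T axis so downstream decoders can treat it like the per-frame spatial tokens.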
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim, num_frames=8, drop_path_rate=0.,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)
def forward(self, x, return_num=4):
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
cls_tokens = x[:N, :1, :]
x = x[:, 1:]
x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T)
x = x + self.temporal_positional_embedding
x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T)
x = torch.cat((cls_tokens, x), dim=1)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T,
)
return features
def vit_2plus1d_b32(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_b16(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_l14(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_l14_336(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_2plus1d_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit_2plus1d.py |
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from .attention_module_bias import multi_head_attention_forward
class _LinearWithBias(Linear):
bias: Tensor
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
\text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None, return_qk=False, rpb=None):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
            If a ByteTensor is provided, the non-zero positions will be ignored while the zero
            positions will be unchanged. If a BoolTensor is provided, the positions with the
            value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
            S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
            positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
            while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
            are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
        - return_qk: whether to return Q and K.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if return_qk:
if not self._qkv_same_embed_dim:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, return_qk=True, rpb=rpb)
else:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, return_qk=True, rpb=rpb)
return q, k, attn_output, attn_output_weights
else:
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, rpb=rpb)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, rpb=rpb)
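# --- Usage sketch (added for illustration; not part of the original module) ---
# Compared with nn.MultiheadAttention, forward() accepts two extra arguments:
#   attn = MultiheadAttention(embed_dim=768, num_heads=12)
#   x = torch.randn(197, 2, 768)                           # (L, N, E)
#   out, weights = attn(x, x, x)                           # standard path
#   q, k, out, weights = attn(x, x, x, return_qk=True)     # also returns the projected Q and K
# `rpb` (relative position bias) is assumed to be broadcastable to
# (batch * num_heads, L, S); it is added to the attention logits before the softmax.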
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_bias.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import _pad, linear, softmax, dropout
Tensor = torch.Tensor
pad = _pad
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False,
rpb: Tensor = None,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - return_qk: whether to return Q and K.
        - rpb: relative position bias
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
if rpb is not None:
attn_output_weights = attn_output_weights + rpb
attn_output_weights = softmax(attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
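if __name__ == "__main__":
    # Minimal self-check (added for illustration; not part of the original file).
    # `rpb` must be broadcastable to (bsz * num_heads, tgt_len, src_len); it is added to
    # the raw attention logits right before the softmax.
    L, N, E, H = 4, 2, 8, 2
    x = torch.randn(L, N, E)
    in_proj_w, in_proj_b = torch.randn(3 * E, E), torch.zeros(3 * E)
    out_proj_w, out_proj_b = torch.randn(E, E), torch.zeros(E)
    rpb = torch.zeros(N * H, L, L)  # an all-zero bias, i.e. plain attention
    out, attn = multi_head_attention_forward(
        x, x, x, E, H, in_proj_w, in_proj_b, None, None, False, 0.0,
        out_proj_w, out_proj_b, rpb=rpb)
    print(out.shape, attn.shape)  # torch.Size([4, 2, 8]) torch.Size([2, 4, 4])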
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_module_bias.py |
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
from einops import rearrange
import torch.utils.checkpoint as checkpoint
from .attention_bias import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0, t_size=8, spatial_size=7):
super().__init__()
self.n_head = n_head
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
print(f'Add RPB: t_size {t_size}, spatial_size {spatial_size}')
self.pos_embed = nn.Conv3d(d_model, d_model, kernel_size=3, stride=1, padding=1, groups=d_model)
# temporal
self.attn_t = MultiheadAttention(d_model, n_head)
self.ln_t = LayerNorm(d_model)
self.rpb_t = nn.Parameter(torch.zeros([t_size * 2 - 1, n_head]))
idx_tensor_t = torch.zeros([t_size, t_size], dtype=torch.long)
for q in range(t_size):
for k in range(t_size):
offs = q - k + t_size - 1
idx_tensor_t[q, k] = offs
self.idx_tensor_t = idx_tensor_t
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.rpb = nn.Parameter(torch.zeros([(spatial_size * 2 - 1) ** 2, n_head]))
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
idx_tensor = torch.zeros([spatial_size ** 2, spatial_size ** 2], dtype=torch.long)
for q in range(spatial_size ** 2):
qi, qj = q // spatial_size, q % spatial_size
for k in range(spatial_size ** 2):
ki, kj = k // spatial_size, k % spatial_size
i_offs = qi - ki + spatial_size - 1
j_offs = qj - kj + spatial_size - 1
idx_tensor[q, k] = i_offs * (spatial_size * 2 - 1) + j_offs
self.idx_tensor = idx_tensor
# init zero
print('Init zero for (2+1)d')
nn.init.constant_(self.pos_embed.weight, 0)
nn.init.constant_(self.pos_embed.bias, 0)
nn.init.constant_(self.attn_t.in_proj_weight, 0)
nn.init.constant_(self.attn_t.in_proj_bias, 0)
nn.init.constant_(self.attn_t.out_proj.weight, 1)
nn.init.constant_(self.attn_t.out_proj.bias, 0)
def attention(self, x, rpb=None):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0]
def attention_temporal(self, x, rpb=None):
self.attn_mask = None
return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0]
def forward(self, x, T=8):
# temporal
# x: 1+HWT, N, C
# pos_emb
tmp_x = x[1:, :, :]
LT, N, C = tmp_x.shape
L = LT // T
H = W = int(L ** 0.5)
tmp_x = tmp_x.view(H, W, T, N, C).permute(3, 4, 2, 0, 1)
tmp_x = tmp_x + self.pos_embed(tmp_x)
tmp_x = tmp_x.view(N, C, T, L).permute(3, 2, 0, 1).view(LT, N, C)
x[1:, :, :] = tmp_x
xt = x[1:, :, :]
_, N, C = xt.shape
xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T)
# rpb_t: T, T, H => B*H, T, T
self.idx_tensor_t = self.idx_tensor_t.to(xt.device)
rpb_t = self.rpb_t[self.idx_tensor_t].permute(2, 0, 1).repeat(N*L, 1, 1)
res_temporal = self.attention_temporal(self.ln_t(xt), rpb=rpb_t)
res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T)
xt = x[1:, :, :] + self.drop_path(res_temporal)
# spatial
init_cls_token = x[:1, :, :]
cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C)
xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T)
xs = torch.cat((cls_token, xs), 0)
# rpb: L, L, H => B*H, L+1, L+1
rpb = torch.zeros((self.n_head, L+1, L+1), device=xs.device, dtype=xs.dtype)
self.idx_tensor = self.idx_tensor.to(xs.device)
rpb[:, 1:, 1:] = self.rpb[self.idx_tensor].permute(2, 0, 1)
rpb = rpb.repeat(T*N, 1, 1)
res_spatial = self.attention(self.ln_1(xs), rpb=rpb)
# Taking care of CLS token
cls_token = res_spatial[0, :, :]
cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N)
cls_token = torch.mean(cls_token, 0, True) # averaging for every frame
res_spatial = res_spatial[1:, :, :]
res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N)
x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
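# Worked example (added for clarity; not part of the original file): with t_size = 3 the
# temporal index table built in __init__ maps each (query frame q, key frame k) pair to
# one of the 2 * t_size - 1 = 5 learnable bias rows via offset = q - k + t_size - 1:
#   idx_tensor_t = [[2, 1, 0],
#                   [3, 2, 1],
#                   [4, 3, 2]]
# The spatial table does the same in 2D, giving (spatial_size * 2 - 1) ** 2 bias rows
# indexed by the (row, column) offsets between query and key patches.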
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0., t_size=8, spatial_size=7):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask,
drop_path=dpr[i], t_size=t_size, spatial_size=spatial_size) for i in range(layers)
])
def forward(self, x, return_num=4, T=8):
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x, T=T)
if i >= self.layers - return_num:
# LT + 1, N, C
LT, N, C = x.shape
L = (LT - 1) // T
cls_x, tmp_x = x[:1], x[1:]
cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1)
tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C
                tmp_x = torch.cat([cls_x, tmp_x], dim=0)  # L + 1, N, T, C
features.append(tmp_x)
return features
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim, num_frames=8, drop_path_rate=0.,
t_size=8, spatial_size=7
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate, t_size=t_size, spatial_size=spatial_size)
def forward(self, x, return_num=4):
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
cls_tokens = x[:N, :1, :]
x = x[:, 1:]
x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T)
x = x + self.temporal_positional_embedding
x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T)
x = torch.cat((cls_tokens, x), dim=1)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T,
)
return features
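# Shape note (added for illustration; not part of the original file): for a 224x224 input
# with patch_size=16 and T=8 frames, the sequence handed to the transformer has
#   1 (class token) + 14 * 14 * 8 = 1569 tokens per video,
# ordered as [CLS, (patch 0 at t=0..T-1), (patch 1 at t=0..T-1), ...] by the rearranges above.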
def vit_2plus1d_dw_bias_b32(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_b16(
pretrained=True, num_frames=8, drop_path_rate=0.,
t_size=8
):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
t_size=t_size,
spatial_size=14
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_l14(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_l14_336(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_2plus1d_dw_bias_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(4, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit_2plus1d_dw_bias.py |
#!/usr/bin/env python
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
import torch.nn as nn
import torch.nn.functional as F
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualDecoderBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None,
mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
nn.init.xavier_uniform_(self.attn.in_proj_weight)
nn.init.xavier_uniform_(self.attn.out_proj.weight)
nn.init.xavier_uniform_(self.mlp[0].weight)
nn.init.xavier_uniform_(self.mlp[-1].weight)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
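# Shape walk-through of the manual cross-attention above (added for clarity; not part of
# the original file): with x of shape (Tx, N, C) and y of shape (Ty, N, C),
#   q: (Tx, N, C) -> (N, heads, Tx, head_dim)
#   k, v: (Ty, N, C) -> (N, heads, Ty, head_dim)
#   aff = softmax(q @ k^T / sqrt(head_dim)): (N, heads, Tx, Ty)
#   out = aff @ v, then permuted/flattened back to (Tx, N, C) and passed through out_proj.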
class TransformerDecoder(nn.Module):
def __init__(self, n_layers=4,
n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8,
use_t_conv=True, use_t_pos_embed=True, num_classes=400,
add_residual=False,
):
super().__init__()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.dec = nn.ModuleList([
ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i])
for i in range(n_layers)
])
self.proj = nn.Sequential(
nn.LayerNorm(n_dim),
nn.Dropout(cls_dropout),
nn.Linear(n_dim, num_classes),
)
self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
self.add_residual = add_residual
print(f'Add residual {add_residual}')
if use_t_conv:
self.tconv = nn.ModuleList([
nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
m.weight.data[...] = torch.Tensor([0, 1, 0])
else:
self.tconv = None
if use_t_pos_embed:
self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim]))
else:
self.pemb_t = None
self.t_size = t_size
def forward(self, clip_feats_all):
# clip_feats_all = clip_feats_all[-len(self.dec):]
# only return n_layers features, save memory
clip_feats = [x for x in clip_feats_all]
L, N, T, C = clip_feats[0].size()
x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
for i in range(len(clip_feats)):
if self.tconv is not None:
L, N, T, C = clip_feats[i].shape
clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T
clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C)
if self.pemb_t is not None:
clip_feats[i] = clip_feats[i] + self.pemb_t[i]
clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
for i in range(len(self.dec)):
x = self.dec[i](x, clip_feats[i])
if self.add_residual:
residual = clip_feats_all[-1][0].mean(1)
return self.proj(x[0, :, :] + residual)
else:
return self.proj(x[0, :, :])
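# Note on the temporal convolutions above (added for clarity; not part of the original
# file): each depthwise Conv1d kernel is initialized to [0, 1, 0] with zero bias, so at
# initialization the temporal conv is an identity map over the T axis and only begins
# mixing neighbouring frames as training updates the weights.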
if __name__ == '__main__':
model = TransformerDecoder()
# construct a fake input to demonstrate input tensor shape
L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim
# we use intermediate feature maps from multiple blocks, so input features should be a list
input_features = []
    for i in range(4):  # the decoder reads features from 4 blocks (vit-b has 12 in total)
        # every item in input_features contains the feature map from a single block.
        # In the general interface each item can be a tuple of 3 feature maps:
        # (1) block output features (i.e. after mlp) with shape L, N, T, C
        # (2) projected query features with shape L, N, T, C
        # (3) projected key features with shape L, N, T, C
        # This decoder only consumes the block output features, so plain tensors are passed here.
        input_features.append(
            torch.zeros([L, N, T, C]))
# some small optimizations:
    # (1) We only decode from the last $n$ blocks, so it is enough for the last $n$ items of input_features to be valid; all earlier items can be filled with None to save memory. By default $n=4$.
    # (2) projected query/key features are optional. If you are using an incompatible image backbone without query/key (e.g. a CNN), you can fill those positions with None (i.e. the tuple should be (Tensor, None, None)) and set use_image_attnmap=False when constructing the model.
    print(model(input_features).shape)  # should be N, 400
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/evl_module.py |
import os
import time
import torch
import torch.nn as nn
# from fvcore.nn import FlopCountAnalysis
# from fvcore.nn import flop_count_table
from modules.clip_kc_new import evl_utils
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_b16',
t_size=16,
backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=174,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](
pretrained=False,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
def forward(self, x, mode='video'):
output = self.backbone(x, mode=mode)
return output
def cal_flops(model, frame=8, size=224):
    # fvcore is imported lazily here so the module can be imported without it;
    # the module-level imports above are commented out for the same reason.
    from fvcore.nn import FlopCountAnalysis, flop_count_table
    flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
    s = time.time()
    print(flop_count_table(flops, max_depth=1))
    print(time.time()-s)
def vit_only_global_b_sparse8_k400(pretrained=True):
model = EVL(
backbone='vit_only_global_b16',
t_size=8,
backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=400,
)
# if pretrained:
# pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth')
    #     print(f'load model from: {pretrained_path}')
# state_dict = torch.load(pretrained_path, map_location='cpu')
# model.load_state_dict(state_dict)
return model
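# Note (added for clarity; not part of the original file): return_list=[8, 9, 10, 11]
# feeds the last four blocks of the 12-block ViT-B backbone to the n_layers=4 decoder;
# the ViT-L variant below does the same with blocks [20, 21, 22, 23] of its 24 blocks.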
def vit_only_global_l_sparse8_k400(pretrained=True):
model = EVL(
backbone='vit_only_global_l14',
t_size=8,
backbone_drop_path_rate=0.,
return_list=[20, 21, 22, 23],
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=400,
)
# if pretrained:
# pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth')
# print(f'lodel model from: {pretrained_path}')
# state_dict = torch.load(pretrained_path, map_location='cpu')
# model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_only_global_l_sparse8_k400()
# cal_flops(model, frame=1, size=224)
cal_flops(model, frame=8, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_only_global.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
from . import evl_utils
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_b16',
t_size=16,
dw_reduction=1.5,
backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=174,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](
pretrained=False,
t_size=t_size,
dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
def forward(self, x, mode='video'):
output = self.backbone(x, mode=mode)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_fusion_b_sparse16_k400(pretrained=True):
model = EVL(
backbone='vit_fusion_b16',
t_size=16,
dw_reduction=1.5,
backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=400,
)
# if pretrained:
# pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth')
    #     print(f'load model from: {pretrained_path}')
# state_dict = torch.load(pretrained_path, map_location='cpu')
# model.load_state_dict(state_dict)
return model
def vit_fusion_b_sparse16_sthsth(pretrained=True):
model = EVL(
backbone='vit_fusion_b16',
t_size=16,
dw_reduction=1.5,
backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_classes=174,
)
# if pretrained:
# pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth')
    #     print(f'load model from: {pretrained_path}')
# state_dict = torch.load(pretrained_path, map_location='cpu')
# model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_fusion_b_sparse16_k400()
# cal_flops(model, frame=1, size=224)
cal_flops(model, frame=16, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_uniformer.py |
from .clip import *
from .evl_utils import * | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/__init__.py |
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from . import evl_utils
from .evl_utils import TransformerDecoder_uniformer_diff_conv_balance
from einops import rearrange
from ipdb import set_trace
from copy import deepcopy
# from .clip_decoders import CaptionDecoder
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
# if self.use_checkpoint and self.checkpoint_num[1] > 0:
# segments = min(len(self.resblocks), self.checkpoint_num[1])
# return checkpoint_sequential(self.resblocks, segments, x)
# else:
return self.resblocks(x)
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int,
# evl
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True,
backbone='vit_2plus1d_dw_bias_b16',
uni_layer=0,
uni_type='2d',
add_ffn=False,
t_conv_type='3d',
pre_prompt=False,
balance=0.,
after_me=True,
before_me=False,
me_type='stm',
me_reduction=4,
init_zero=True,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
use_capdecoder=False,
):
super().__init__()
        # All assertions are for the ad-hoc clip_kc and should be removed
# assert vision_layers == 12, vision_layers
assert image_resolution == 224, image_resolution
# assert vision_patch_size == 32, vision_patch_size
assert vision_width == n_dim, (vision_width, n_dim)
self.vision_width = n_dim
self.context_length = context_length
vision_heads = vision_width // 64
self.visual = evl_utils.__dict__[backbone](
pretrained=False, t_size=t_size, mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, n_dim=n_dim,
n_head=n_head, return_list=return_list, drop_path_rate=drop_path_rate, backbone_drop_path_rate=drop_path_rate)
# self.evl = TransformerDecoder_uniformer_diff_conv_balance(
# n_layers=n_layers, n_dim=n_dim, n_head=n_head,
# mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
# mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
# use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
# uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type,
# pre_prompt=pre_prompt, balance=balance,
# after_me=after_me, before_me=before_me,
# me_type=me_type, me_reduction=me_reduction,
# init_zero=init_zero,
# )
self.visual_ln_post = nn.LayerNorm(n_dim)
scale = n_dim ** -0.5
self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim))
self.return_qk = use_image_attnmap
self.return_num = n_layers
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask(),
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.embed_dim = embed_dim
        # We separate the mask embedding so the pretrained model can still be loaded
self.text_mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width))
# # To keep the num_embeddings unchanged, we add this to embedded text
# self.eot_token_embedding = nn.Parameter(torch.empty(1, transformer_width))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
nn.init.normal_(self.text_mask_embedding, std=0.02)
# nn.init.constant_(self.eot_token_embedding, 0.0)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
nn.init.constant_(self.visual_ln_post.weight, 1.0)
nn.init.constant_(self.visual_ln_post.bias, 0.0)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
        mask.triu_(1)  # keep -inf above the diagonal; zero out the diagonal and lower triangle
return mask
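    # Worked example (added for illustration; not part of the original file): for
    # context_length = 4 the mask built above is
    #   [[0., -inf, -inf, -inf],
    #    [0.,   0., -inf, -inf],
    #    [0.,   0.,   0., -inf],
    #    [0.,   0.,   0.,   0.]]
    # so, with additive masking, text token i can only attend to tokens j <= i.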
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_video(self, video, return_all_feats=False, masked_indices=None, mode="video"):
# video: [N, C, T, H, W]
feats = self.visual(video, return_all_feats=return_all_feats, mode=mode)
if return_all_feats:
x, feats = feats
x = self.visual_ln_post(x)
if self.visual_proj is not None:
x = x @ self.visual_proj
if return_all_feats:
return x, feats # [N, C], [L, N, T, C]
return x
def encode_text(self, text, masked_indices=None, return_all_feats=False):
# assert (text.max(dim=-1)[0] + 1 == self.token_embedding.num_embeddings).all(), \
# "The last token of each sentence should be eot_token, check the input"
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
# x[torch.arange(x.shape[0]), text.argmax(dim=-1)] += self.eot_token_embedding
if masked_indices is not None:
x[masked_indices] = self.text_mask_embedding
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)]
if self.text_projection is not None:
feats = feats @ self.text_projection
if return_all_feats:
return feats, x
return feats
def forward(self, video, text):
video_features = self.encode_video(video)
text_features = self.encode_text(text)
# normalized features
video_features = video_features / video_features.norm(dim=1, keepdim=True)
text_features = text_features / text_features.norm(dim=1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_video= logit_scale * video_features @ text_features.t()
logits_per_text = logits_per_video.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_video, logits_per_text
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name) and not isinstance(l, TransformerDecoder_uniformer_diff_conv_balance):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def interpolate_temporal_pos_embed(pos_embed, T):
# [1, t, d] -> [1, d, t]
pos_embed = pos_embed.transpose(-2, -1)
# [1, d, t] -> [1, d, T]
pos_embed = F.interpolate(pos_embed, size=(T), mode='linear')
# [1, d, T] -> [1, T, d]
return pos_embed.transpose(-2, -1)
def interploate_rpb(rpb, T):
t1 = T * 2 - 1
rpb = rpb.transpose(0, 1).unsqueeze(0)
rpb = F.interpolate(rpb, size=(t1), mode='linear')
return rpb.squeeze(0).transpose(0, 1)
def build_model(
state_dict: dict,
# evl
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, no_pretrain=False,
init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None,
):
vit = "visual.proj" in state_dict or "visual.positional_embedding" in state_dict
if "visual.proj" in state_dict:
state_dict["visual_proj"] = state_dict["visual.proj"]
state_dict["visual_ln_post.weight"] = state_dict["visual.ln_post.weight"]
state_dict["visual_ln_post.bias"] = state_dict["visual.ln_post.bias"]
del state_dict["visual.proj"], state_dict["visual.ln_post.weight"], state_dict["visual.ln_post.bias"]
# new_state_dict = OrderedDict()
# for k, v in state_dict.items():
# if k.startswith("backbone."):
# k = k.replace("backbone.", "visual.")
# new_state_dict[k] = v
# state_dict = new_state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
# embed_dim = 512
# context_length = 77
# vocab_size = 49408
# transformer_width = 512
# transformer_layers = 12
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
########### add this ############
# for k, v in state_dict.items():
# print(k, v.shape)
################################################
vision_width = state_dict["visual_proj"].shape[0]
n_dim = vision_width
if vision_width == 768:
backbone = "vit_only_global_b16"
n_head = 12
return_list = [8, 9, 10, 11]
# comment this as we always use vit-L/14
# elif vision_width == 1024 and state_dict["input_resolution"] == 224:
elif vision_width == 1024:
backbone = "vit_only_global_l14"
n_head = 16
return_list = [20, 21, 22, 23]
elif vision_width == 1024 and state_dict["input_resolution"] == 336:
backbone = "vit_only_global_l14_336"
n_head = 16
return_list = [20, 21, 22, 23]
else:
raise NotImplementedError
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers,
n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size,
use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, backbone=backbone,
init_zero=init_zero, return_list=return_list,
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
# convert_weights(model)
temporal_key = 'visual.temporal_positional_embedding'
temporal_key2 = 'evl.pemb_t'
if temporal_key in state_dict and t_size != state_dict[temporal_key].size(1):
state_dict[temporal_key] = interpolate_temporal_pos_embed(state_dict[temporal_key], t_size)
state_dict[temporal_key2] = interpolate_temporal_pos_embed(state_dict[temporal_key2], t_size)
for kk, vv in state_dict.items():
if 'rpb_t' in kk:
size_old = state_dict[kk].shape
state_dict[kk] = interploate_rpb(vv, t_size)
size_new = state_dict[kk].shape
            print('Interpolating', kk, size_old, '-->', size_new)
# set_trace()
# print('$' * 100)
# for k, v in model.state_dict().items():
# print(k, v.shape)
if mergeclip:
assert 0.0 <= mergeweight <= 1.0
assert clip_state_dict is not None
clip_sd = {k: v.cpu() for k, v in clip_state_dict.items()}
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in clip_sd:
del clip_sd[key]
        ## trick: use this func to transfer the 2d clip weights to 3d, then read them back from the model below
evl_utils.clip_vit_fusion.clip_load_state_dict(model, clip_sd)
loaded_dict = model.state_dict()
clip_sd_new = {k: v.cpu() for k, v in loaded_dict.items() if k in clip_sd}
# set_trace()
new_sd = deepcopy(state_dict)
for k in new_sd:
if k not in clip_sd_new:
continue
if any(x in k for x in clip_sd_new.keys()):
print('merging: ', k, '\t', clip_sd_new[k].shape, state_dict[k].shape)
new_sd[k] = clip_sd_new[k] * mergeweight + state_dict[k] * (1.0 - mergeweight)
############## only merge the clip text features, this is for ActivityNet ###########
# if 'visual' in k:
# new_sd[k] = clip_sd[k]
# else:
# new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight)
################################################################################
############## only merge the clip visual features, this is for MSVD ###########
# if 'visual' in k:
# new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight)
# else:
# new_sd[k] = clip_sd[k]
################################################################################
state_dict = new_sd
# print('$' * 100)
# for k, v in state_dict.items():
# print(k, v.shape)
if not no_pretrain:
# msg = evl_utils.clip_vit_fusion.clip_load_state_dict(model, state_dict)
msg = model.load_state_dict(state_dict, strict=False)
print(msg)
return model.eval()
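if __name__ == "__main__":
    # Minimal self-check of the interpolation helpers (added for illustration; not part
    # of the original training code). Temporal positional embeddings and temporal
    # relative position biases are resized with linear interpolation when the requested
    # t_size differs from the checkpoint.
    pemb = torch.zeros(1, 8, 768)                          # (1, t, d) from an 8-frame checkpoint
    print(interpolate_temporal_pos_embed(pemb, 16).shape)  # torch.Size([1, 16, 768])
    rpb_t = torch.zeros(2 * 8 - 1, 12)                     # (2 * t - 1, n_head)
    print(interploate_rpb(rpb_t, 16).shape)                # torch.Size([31, 12])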
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/model.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder_uniformer_diff_conv_balance
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_b16',
n_layers=12,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_frames=8,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
uni_layer=4,
uni_type='3d',
add_ffn=True,
t_conv_type='1d',
pre_prompt=True,
balance=0.,
after_me=True,
before_me=False,
me_type='dstm',
me_reduction=4,
num_classes=400,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False, num_frames=num_frames, t_size=t_size)
self.evl = TransformerDecoder_uniformer_diff_conv_balance(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type,
pre_prompt=pre_prompt, balance=balance,
after_me=after_me, before_me=before_me,
me_type=me_type, me_reduction=me_reduction,
num_classes=num_classes
)
self.return_num = n_layers
def forward(self, x, mode='image'):
features = self.backbone(x, return_num=self.return_num, mode=mode)
output = self.evl(features, mode=mode)
return output
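# Input note (added for clarity; not part of the original file): as in cal_flops below,
# the model expects a (batch, channels, frames, height, width) tensor,
# e.g. torch.rand(1, 3, 8, 224, 224) for an 8-frame clip at 224x224 resolution.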
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_2plus1d_diff_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.5
model = EVL(
backbone='vit_2plus1d_dw_bias_b16',
n_layers=12,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
num_frames=8,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
uni_layer=0,
uni_type='2d',
add_ffn=False,
t_conv_type='3d',
pre_prompt=False,
balance=0.,
after_me=True,
before_me=False,
me_type='stm',
me_reduction=4,
num_classes=400,
)
# if pretrained:
# pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_diff_b_sparse8.pyth')
    #     print(f'load model from: {pretrained_path}')
# state_dict = torch.load(pretrained_path, map_location='cpu')
# model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_2plus1d_diff_b_sparse8()
cal_flops(model, frame=1, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_diff.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False)
self.evl = TransformerDecoder(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
num_classes=num_classes,
)
self.return_num = n_layers
def forward(self, x):
features = self.backbone(x, return_num=self.return_num)
output = self.evl(features)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_l_sparse16(pretrained=True):
# 16x224x224
# k400 1x1: 86.5
model = EVL(
backbone='vit_l14',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse16.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_l_sparse32(pretrained=True):
# 32x224x224
# k400 1x1: 87.0
model = EVL(
backbone='vit_l14',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse32.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_l336_sparse32(pretrained=True):
# 32x336x336
# k400 1x1: 87.4
model = EVL(
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_l336_sparse32.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = vit_l_sparse16()
cal_flops(model, frame=16, size=224)
# model = vit_l_sparse32()
# cal_flops(model, frame=32, size=224)
# model = vit_l336_sparse32()
# cal_flops(model, frame=32, size=336) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_freeze.py |
import os
import time
import torch
import torch.nn as nn
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import evl_utils
from evl_utils import TransformerDecoder
PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model'
class EVL(nn.Module):
def __init__(self,
backbone='vit_l14_336',
n_layers=4,
n_dim=1024,
n_head=16,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=32,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=False,
):
super().__init__()
# pre-trained from CLIP
self.backbone = evl_utils.__dict__[backbone](pretrained=False)
self.evl = TransformerDecoder(
n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size,
use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed,
num_classes=num_classes, add_residual=add_residual
)
self.return_num = n_layers
def forward(self, x):
features = self.backbone(x, return_num=self.return_num)
output = self.evl(features)
return output
def cal_flops(model, frame=8, size=224):
flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s)
def vit_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.4
model = EVL(
backbone='vit_b16',
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=True,
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_b_sparse8.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
def vit_2plus1d_b_sparse8(pretrained=True):
# 8x224x224
# k400 1x1: 82.5
model = EVL(
backbone='vit_2plus1d_b16',
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_t_conv=True,
use_t_pos_embed=True,
num_classes=400,
add_residual=True,
)
if pretrained:
pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_b_sparse8.pyth')
        print(f'load model from: {pretrained_path}')
state_dict = torch.load(pretrained_path, map_location='cpu')
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
# model = vit_b_sparse8()
# cal_flops(model, frame=8, size=224)
model = vit_2plus1d_b_sparse8()
cal_flops(model, frame=8, size=224) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze.py |
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(
name: str,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit: bool = False, download_root: str = None,
# evl
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.0, 0.0, 0.0, 0.0], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False, init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None,
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
'''
with open(model_path, 'rb') as opened_file:
try:
# loading JIT archive
model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
'''
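    # NOTE: the JIT-archive loading above is disabled, so checkpoints are read as plain
    # state dicts below; the JIT patching branch further down assumes `model` came from
    # torch.jit.load and is effectively unused here (call this function with jit=False).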
init_state_dict = torch.load(model_path, map_location='cpu')['state_dict']
state_dict = {}
for k, v in init_state_dict.items():
k = k.replace('clip.','')
state_dict[k] = v
if not jit:
model = build_model(
state_dict or model.state_dict(),
n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size,
use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain,
init_zero=init_zero, mergeclip=mergeclip, mergeweight=mergeweight, use_capdecoder=use_capdecoder, clip_state_dict=clip_state_dict,
).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
else:
result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
special_tokens_mask[i, len(tokens):] = 1
if return_special_tokens_mask:
return result, special_tokens_mask
return result
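# Minimal usage sketch for the pieces that run without a checkpoint: `tokenize` and the
# preprocessing pipeline returned by `_transform`. It assumes the bundled BPE vocab (and
# the ftfy/regex dependencies) are available; run as a module so the package-relative
# imports at the top resolve. Loading a full model goes through `load(...)` instead.
if __name__ == '__main__':
    tokens, special_mask = tokenize(
        ["a person riding a horse", "a dog catching a frisbee"],
        return_special_tokens_mask=True,
    )
    print(tokens.shape)        # torch.Size([2, 77])
    print(special_mask.shape)  # torch.Size([2, 77]); True marks padding positions
    preprocess = _transform(224)
    dummy_image = Image.new("RGB", (640, 480))
    print(preprocess(dummy_image).shape)  # torch.Size([3, 224, 224])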
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/clip.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
    Returns a list of utf-8 bytes and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # `first` does not occur again; copy the rest of the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
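# Minimal round-trip sketch: `encode` lower-cases and BPE-encodes the text, and `decode`
# maps ids back to lower-cased, re-spaced text. It assumes the default BPE vocab file
# returned by `default_bpe()` ships alongside this module.
if __name__ == '__main__':
    tokenizer = SimpleTokenizer()
    ids = tokenizer.encode("A man is riding a horse.")
    print(ids)                    # list of BPE token ids
    print(tokenizer.decode(ids))  # roughly: "a man is riding a horse . "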
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/simple_tokenizer.py |
#!/usr/bin/env python
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from .attention_module import multi_head_attention_forward
class _LinearWithBias(Linear):
bias: Tensor
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
\text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None, return_qk=False):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the position
with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
          S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- return_qk: whether return Q and K.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if return_qk:
if not self._qkv_same_embed_dim:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, return_qk=True)
else:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, return_qk=True)
return q, k, attn_output, attn_output_weights
else:
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask)
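# Shape-check sketch following the (L, N, E) convention documented above. It assumes the
# functional helper in `.attention_module` mirrors the return signature of
# torch.nn.functional.multi_head_attention_forward; run as a module so the
# package-relative import resolves.
if __name__ == '__main__':
    embed_dim, num_heads = 768, 12
    mha = MultiheadAttention(embed_dim, num_heads)
    query = torch.rand(197, 2, embed_dim)        # (L, N, E)
    key = value = torch.rand(196, 2, embed_dim)  # (S, N, E)
    attn_output, attn_weights = mha(query, key, value)
    print(attn_output.shape)   # expected: torch.Size([197, 2, 768])
    print(attn_weights.shape)  # expected: torch.Size([2, 197, 196])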
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention.py |
#!/usr/bin/env python
from collections import OrderedDict
from timm.models.layers import trunc_normal_, DropPath
import torch
import torch.nn as nn
import torch.nn.functional as F
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
def conv_1x1x1(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)
def conv_3x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)
def conv_1x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups)
def bn_3d(dim):
return nn.BatchNorm3d(dim)
class STM(nn.Module):
def __init__(self, n_dim, reduction=4):
super(STM, self).__init__()
reduced_c = n_dim // reduction
self.reduce = nn.Sequential(
nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False),
nn.BatchNorm2d(reduced_c)
)
self.shift = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad = (0, 0, 0, 0, 0, 0, 0, 1)
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.reduce(fea) # NT, C//r, H, W
# t feature
reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W
t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
# apply transformation conv to t+1 feature
conv_bottleneck = self.shift(bottleneck) # NT, C//r, H, W
# reshape fea: N, T, C//r, H, W
reshape_conv_bottleneck = conv_bottleneck.view((-1, T) + conv_bottleneck.size()[1:])
__, tPlusone_fea = reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
# motion fea = t+1_fea - t_fea
# pad the last timestamp
diff_fea = tPlusone_fea - t_fea # N, T-1, C//r, H, W
# pad = (0,0,0,0,0,0,0,1)
diff_fea_pluszero = F.pad(diff_fea, self.pad, mode="constant", value=0) # N, T, C//r, H, W
diff_fea_pluszero = diff_fea_pluszero.view((-1,) + diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
y = self.recover(diff_fea_pluszero) # NT, C, H, W
# reshape
y = y.reshape(N, T, C, L).permute(3, 0, 1, 2)
y = torch.cat([cls_token, y], dim=0)
return y
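# Shape sketch for STM (and the DSTM/TDN variants below): inputs are token maps laid out
# as [L, N, T, C] with L = 1 + H*W (a cls token followed by H*W patch tokens), matching
# the layout assumed in `forward`. The numbers below are arbitrary.
if __name__ == '__main__':
    _stm = STM(n_dim=768)
    _tokens = torch.rand(1 + 14 * 14, 2, 8, 768)  # [L, N, T, C]
    print(_stm(_tokens).shape)                    # expected: torch.Size([197, 2, 8, 768])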
class DSTM(nn.Module):
def __init__(self, n_dim, reduction=4):
super(DSTM, self).__init__()
reduced_c = n_dim // reduction
self.reduce = nn.Sequential(
nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False),
nn.BatchNorm2d(reduced_c)
)
# DW(T+1) - T
self.shift_pre = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover_pre = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad_pre = (0, 0, 0, 0, 0, 0, 0, 1)
# DW(T-1) - T
self.shift_back = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False)
self.recover_back = nn.Sequential(
nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(n_dim)
)
self.pad_back = (0, 0, 0, 0, 0, 0, 0, 1)
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.reduce(fea) # NT, C//r, H, W
# t feature
reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W
pre_t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
back_t_fea, __ = reshape_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
# apply transformation conv to t+1/t-1 feature
pre_conv_bottleneck = self.shift_pre(bottleneck) # NT, C//r, H, W
back_conv_bottleneck = self.shift_back(bottleneck) # NT, C//r, H, W
# reshape fea: N, T, C//r, H, W
pre_reshape_conv_bottleneck = pre_conv_bottleneck.view((-1, T) + pre_conv_bottleneck.size()[1:])
back_reshape_conv_bottleneck = back_conv_bottleneck.view((-1, T) + back_conv_bottleneck.size()[1:])
__, tPlusone_fea = pre_reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W
tMinusone_fea, _ = back_reshape_conv_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W
# pre_fea = t+1_fea - t_fea
# back_fea = t-1_fea - t_fea
pre_diff_fea = tPlusone_fea - pre_t_fea # N, T-1, C//r, H, W
back_diff_fea = tMinusone_fea - back_t_fea # N, T-1, C//r, H, W
# pad the last/first timestamp
pre_diff_fea_pluszero = F.pad(pre_diff_fea, self.pad_pre, mode="constant", value=0) # N, T, C//r, H, W
pre_diff_fea_pluszero = pre_diff_fea_pluszero.view((-1,) + pre_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
back_diff_fea_pluszero = F.pad(back_diff_fea, self.pad_back, mode="constant", value=0) # N, T, C//r, H, W
back_diff_fea_pluszero = back_diff_fea_pluszero.view((-1,) + back_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W
# recover channel
pre_y = self.recover_pre(pre_diff_fea_pluszero) # NT, C, H, W
back_y = self.recover_back(back_diff_fea_pluszero) # NT, C, H, W
# reshape
y = (pre_y + back_y).reshape(N, T, C, L).permute(3, 0, 1, 2)
# cat cls_token
y = torch.cat([cls_token, y], dim=0)
return y
class TDN(nn.Module):
def __init__(self, channel, n_segment=8, index=1, reduction=4):
super(TDN, self).__init__()
self.channel = channel
self.reduction = reduction
self.n_segment = n_segment
self.stride = 2**(index-1)
self.conv1 = nn.Conv2d(in_channels=self.channel,
out_channels=self.channel//self.reduction,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
self.conv2 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel//self.reduction,
kernel_size=3, padding=1, groups=self.channel//self.reduction, bias=False)
self.avg_pool_forward2 = nn.AvgPool2d(kernel_size=2, stride=2)
self.avg_pool_forward4 = nn.AvgPool2d(kernel_size=4, stride=4)
self.sigmoid_forward = nn.Sigmoid()
        self.avg_pool_backward2 = nn.AvgPool2d(kernel_size=2, stride=2)  # nn.AdaptiveMaxPool2d(1)
self.avg_pool_backward4 = nn.AvgPool2d(kernel_size=4, stride=4)
self.sigmoid_backward = nn.Sigmoid()
self.pad1_forward = (0, 0, 0, 0, 0, 0, 0, 1)
self.pad1_backward = (0, 0, 0, 0, 0, 0, 1, 0)
self.conv3 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.channel)
self.conv3_smallscale2 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False)
self.bn3_smallscale2 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
self.conv3_smallscale4 = nn.Conv2d(in_channels = self.channel//self.reduction,
out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False)
self.bn3_smallscale4 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
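    # NOTE: `spatial_pool` below is not called by `forward` and references `self.conv_mask`
    # and `self.softmax`, which are not defined in this class; it appears to be leftover
    # from a context-gating variant and would fail if invoked as-is.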
def spatial_pool(self, x):
nt, channel, height, width = x.size()
input_x = x
# [N, C, H * W]
input_x = input_x.view(nt, channel, height * width)
# [N, 1, C, H * W]
input_x = input_x.unsqueeze(1)
# [N, 1, H, W]
context_mask = self.conv_mask(x)
# [N, 1, H * W]
context_mask = context_mask.view(nt, 1, height * width)
# [N, 1, H * W]
context_mask = self.softmax(context_mask)
context_mask = context_mask.view(nt,1,height,width)
return context_mask
def forward(self, x):
# x: [L, N, T, C]
cls_token, x = x[:1], x[1:]
L, N, T, C = x.shape
H = W = int(L**0.5)
fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W)
bottleneck = self.conv1(fea) # nt, c//r, h, w
bottleneck = self.bn1(bottleneck) # nt, c//r, h, w
reshape_bottleneck = bottleneck.view((-1, self.n_segment) + bottleneck.size()[1:]) # n, t, c//r, h, w
t_fea_forward, _ = reshape_bottleneck.split([self.n_segment -1, 1], dim=1) # n, t-1, c//r, h, w
_, t_fea_backward = reshape_bottleneck.split([1, self.n_segment -1], dim=1) # n, t-1, c//r, h, w
conv_bottleneck = self.conv2(bottleneck) # nt, c//r, h, w
reshape_conv_bottleneck = conv_bottleneck.view((-1, self.n_segment) + conv_bottleneck.size()[1:]) # n, t, c//r, h, w
_, tPlusone_fea_forward = reshape_conv_bottleneck.split([1, self.n_segment-1], dim=1) # n, t-1, c//r, h, w
tPlusone_fea_backward ,_ = reshape_conv_bottleneck.split([self.n_segment-1, 1], dim=1) # n, t-1, c//r, h, w
diff_fea_forward = tPlusone_fea_forward - t_fea_forward # n, t-1, c//r, h, w
diff_fea_backward = tPlusone_fea_backward - t_fea_backward# n, t-1, c//r, h, w
diff_fea_pluszero_forward = F.pad(diff_fea_forward, self.pad1_forward, mode="constant", value=0) # n, t, c//r, h, w
diff_fea_pluszero_forward = diff_fea_pluszero_forward.view((-1,) + diff_fea_pluszero_forward.size()[2:]) #nt, c//r, h, w
diff_fea_pluszero_backward = F.pad(diff_fea_backward, self.pad1_backward, mode="constant", value=0) # n, t, c//r, h, w
diff_fea_pluszero_backward = diff_fea_pluszero_backward.view((-1,) + diff_fea_pluszero_backward.size()[2:]) #nt, c//r, h, w
y_forward_smallscale2 = self.avg_pool_forward2(diff_fea_pluszero_forward) # nt, c//r, 1, 1
y_backward_smallscale2 = self.avg_pool_backward2(diff_fea_pluszero_backward) # nt, c//r, 1, 1
y_forward_smallscale4 = diff_fea_pluszero_forward
y_backward_smallscale4 = diff_fea_pluszero_backward
y_forward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_forward_smallscale2))
y_backward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_backward_smallscale2))
y_forward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_forward_smallscale4))
y_backward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_backward_smallscale4))
y_forward_smallscale2 = F.interpolate(y_forward_smallscale2, diff_fea_pluszero_forward.size()[2:])
y_backward_smallscale2 = F.interpolate(y_backward_smallscale2, diff_fea_pluszero_backward.size()[2:])
y_forward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_forward + 1.0/3.0*y_forward_smallscale2 + 1.0/3.0*y_forward_smallscale4))# nt, c, 1, 1
y_backward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_backward + 1.0/3.0*y_backward_smallscale2 + 1.0/3.0*y_backward_smallscale4)) # nt, c, 1, 1
y_forward = self.sigmoid_forward(y_forward) - 0.5
y_backward = self.sigmoid_backward(y_backward) - 0.5
y = 0.5 * y_forward + 0.5 * y_backward
attn = fea * y
x = x + attn.reshape(N, T, C, L).permute(3, 0, 1, 2)
x = torch.cat([cls_token, x], dim=0)
return x
class CMlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = conv_1x1x1(in_features, hidden_features)
self.act = act_layer()
self.fc2 = conv_1x1x1(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class CBlock(nn.Module):
def __init__(self, dim, mlp_ratio=4., dropout=0., drop_path=0., uni_type='3d', add_ffn=True):
super().__init__()
self.norm1 = bn_3d(dim)
self.conv1 = conv_1x1x1(dim, dim, 1)
self.conv2 = conv_1x1x1(dim, dim, 1)
if uni_type == '3d':
print('Use 3d conv for local MHRA')
self.attn = conv_3x3x3(dim, dim, groups=dim)
else:
print('Use 2d conv for local MHRA')
self.attn = conv_1x3x3(dim, dim, groups=dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.add_ffn = add_ffn
if add_ffn:
print('Add FFN in local MHRA')
self.norm2 = bn_3d(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=nn.GELU, drop=dropout)
print('Init zero')
nn.init.constant_(self.conv2.weight, 0.)
nn.init.constant_(self.conv2.bias, 0.)
if add_ffn:
nn.init.constant_(self.mlp.fc2.weight, 0.)
nn.init.constant_(self.mlp.fc2.bias, 0.)
def forward(self, x):
x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
if self.add_ffn:
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ResidualDecoderBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None,
mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
nn.init.xavier_uniform_(self.attn.in_proj_weight)
# nn.init.xavier_uniform_(self.attn.out_proj.weight)
nn.init.constant_(self.attn.out_proj.weight, 0.)
nn.init.constant_(self.attn.out_proj.bias, 0.)
nn.init.xavier_uniform_(self.mlp[0].weight)
# nn.init.xavier_uniform_(self.mlp[-1].weight)
nn.init.constant_(self.mlp[-1].weight, 0.)
nn.init.constant_(self.mlp[-1].bias, 0.)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class TransformerDecoder_uniformer_diff_conv_balance(nn.Module):
def __init__(self, n_layers=4,
uni_layer=4, uni_type='3d', add_ffn=True, t_conv_type='1d', pre_prompt=True,
n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14,
balance=0.,
use_t_conv=True, after_me=True, before_me=False, me_type='dstm', me_reduction=4,
use_t_pos_embed=True, num_classes=400):
super().__init__()
n_layers += uni_layer
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.uni_layer = uni_layer
self.uni_dec = nn.ModuleList([
CBlock(n_dim, mlp_ratio=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i], uni_type=uni_type, add_ffn=add_ffn)
for i in range(uni_layer)
])
self.dec = nn.ModuleList([
ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i])
for i in range(n_layers)
])
self.proj = nn.Sequential(
nn.LayerNorm(n_dim),
nn.Dropout(cls_dropout),
nn.Linear(n_dim, num_classes),
)
self.pre_prompt = pre_prompt
if pre_prompt:
print('Add pre prompt')
self.pre_temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
if use_t_conv:
self.t_conv_type = t_conv_type
if t_conv_type == '1d':
print('Use 1d t_conv for CPE')
self.tconv = nn.ModuleList([
nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
m.weight.data[...] = torch.Tensor([0, 1, 0])
else:
print('Use 3d t_conv for CPE')
self.tconv = nn.ModuleList([
nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
else:
self.tconv = None
self.before_me = before_me
self.after_me = after_me
if before_me or after_me:
assert before_me != after_me
print(f'Use {me_type} attention, Before {before_me}, After {after_me}')
if me_type == 'stm':
me_op = STM
elif me_type == 'dstm':
me_op = DSTM
elif me_type == 'tdn':
me_op = TDN
self.me = nn.ModuleList([me_op(n_dim, reduction=me_reduction) for i in range(n_layers)])
if use_t_pos_embed:
self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim]))
else:
self.pemb_t = None
        print(f'Balance weight {balance}')
self.balance = nn.Parameter(torch.ones((n_dim)) * balance)
self.sigmoid = nn.Sigmoid()
def forward(self, clip_feats_all, mode='video'):
# clip_feats_all = clip_feats_all[-len(self.dec):]
# only return n_layers features, save memory
clip_feats = [x for x in clip_feats_all]
if self.after_me:
origin_clip_feats = [x for x in clip_feats_all]
L, N, T, C = clip_feats[0].size()
x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
for i in range(len(clip_feats)):
if self.before_me:
# contain residual
clip_feats[i] = self.me[i](clip_feats[i])
if self.tconv is not None:
L, N, T, C = clip_feats[i].shape
if self.t_conv_type == '1d':
clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T
clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C)
else:
H = W = int((L - 1) ** 0.5)
_, tmp_feats = clip_feats[i][:1], clip_feats[i][1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W)
tmp_feats = self.tconv[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1)
clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats
if self.pemb_t is not None and mode == 'video':
clip_feats[i] = clip_feats[i] + self.pemb_t[i]
if self.after_me:
clip_feats[i] = clip_feats[i] + self.me[i](origin_clip_feats[i])
if i < self.uni_layer:
# L, N, T, C
L, N, T, C = clip_feats[i].shape
H = W = int((L - 1) ** 0.5)
_, tmp_feats = clip_feats[i][:1], clip_feats[i][1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W)
tmp_feats = self.uni_dec[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1)
clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats
clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
if self.pre_prompt:
pre_x = self.pre_temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
for i in range(len(self.dec)):
if i < self.uni_layer:
pre_x = self.dec[i](pre_x, clip_feats[i])
elif i == self.uni_layer:
clip_feats[i] = torch.cat([pre_x, clip_feats[i]], dim=0)
x = self.dec[i](x, clip_feats[i])
else:
x = self.dec[i](x, clip_feats[i])
else:
for i in range(len(self.dec)):
x = self.dec[i](x, clip_feats[i])
# real residual
# L, N, T, C
residual = clip_feats_all[-1][0].mean(1)
weight = self.sigmoid(self.balance)
return self.proj((1 - weight) * x[0, :, :] + weight * residual)
if __name__ == '__main__':
model = TransformerDecoder_uniformer_diff_conv_balance()
# construct a fake input to demonstrate input tensor shape
L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim
# we use intermediate feature maps from multiple blocks, so input features should be a list
input_features = []
    for i in range(8):  # vit-b has 12 blocks; this decoder consumes 8 of them (uni_layer + n_layers)
        # every item in input_features contains feature maps from a single block.
        # Other decoder variants in this repo take a tuple of 3 maps per block
        # (block output, projected query, projected key), but this balance decoder
        # consumes plain block-output tensors of shape L, N, T, C, so we append
        # tensors here.
        input_features.append(torch.zeros([L, N, T, C]))
# some small optimizations:
    # (1) We only decode from the last $n$ blocks, so it is fine as long as the last $n$ items of input_features are valid; all previous items can be filled with None to save memory. By default $n=4$.
    # (2) projected query/key features are optional. If you are using an incompatible image backbone without query/key (e.g. a CNN), you can fill those positions with None (i.e. the tuple should be (Tensor, None, None)) and set use_image_attnmap=False when constructing the model.
print(model)
print(model(input_features).shape) # should be N, 400
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/evl_module_uniformer_diff_conv_balance.py |
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
import logging
logger = logging.getLogger(__name__)
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
def conv_1x1x1(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)
def conv_3x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)
def conv_1x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups)
def bn_3d(dim):
return nn.BatchNorm3d(dim)
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None, drop_path=0.0,
):
super().__init__()
self.n_head = n_head
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
logger.info(f'Drop path rate: {drop_path}')
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, T=8, use_checkpoint=False):
# x: 1+HW, NT, C
# MHSA
if use_checkpoint:
attn_out = checkpoint.checkpoint(self.attention, self.ln_1(x))
x = x + self.drop_path(attn_out)
else:
x = x + self.drop_path(self.attention(self.ln_1(x)))
# FFN
if use_checkpoint:
mlp_out = checkpoint.checkpoint(self.mlp, self.ln_2(x))
x = x + self.drop_path(mlp_out)
else:
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Extractor(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None,
mlp_factor=4.0, dropout=0.0, drop_path=0.0,
):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
logger.info(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
# zero init
nn.init.xavier_uniform_(self.attn.in_proj_weight)
nn.init.constant_(self.attn.out_proj.weight, 0.)
nn.init.constant_(self.attn.out_proj.bias, 0.)
nn.init.xavier_uniform_(self.mlp[0].weight)
nn.init.constant_(self.mlp[-1].weight, 0.)
nn.init.constant_(self.mlp[-1].bias, 0.)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(
self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0.,
use_checkpoint=False, checkpoint_num=[0], t_size=8,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.T = t_size
self.return_list = return_list
# Backbone
b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(
width, heads, attn_mask,
drop_path=b_dpr[i],
) for i in range(layers)
])
# checkpoint
self.use_checkpoint = use_checkpoint
self.checkpoint_num = checkpoint_num
logger.info(f'Use checkpoint: {self.use_checkpoint}')
logger.info(f'Checkpoint number: {self.checkpoint_num}')
# Extractor
assert n_layers == len(return_list)
self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim))
self.dpe = nn.ModuleList([
nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.dpe:
nn.init.constant_(m.bias, 0.)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.dec = nn.ModuleList([
Extractor(
n_dim, n_head, mlp_factor=mlp_factor,
dropout=mlp_dropout[i], drop_path=dpr[i],
) for i in range(n_layers)
])
# # projection
# self.proj = nn.Sequential(
# nn.LayerNorm(n_dim),
# nn.Dropout(cls_dropout),
# nn.Linear(n_dim, num_classes),
# )
self.balance = nn.Parameter(torch.zeros((n_dim)))
self.sigmoid = nn.Sigmoid()
def forward(self, x, mode='video', return_all_feats=False):
if mode == 'video':
T_down = self.T
else:
T_down = 1
L, NT, C = x.shape
N = NT // T_down
H = W = int((L - 1) ** 0.5)
cls_token = self.temporal_cls_token.repeat(1, N, 1)
j = -1
for i, resblock in enumerate(self.resblocks):
if self.use_checkpoint and i < self.checkpoint_num[0]:
x = resblock(x, self.T, use_checkpoint=True)
else:
x = resblock(x, T_down)
if i in self.return_list:
j += 1
tmp_x = x.clone()
tmp_x = tmp_x.view(L, N, T_down, C)
# dpe
_, tmp_feats = tmp_x[:1], tmp_x[1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W)
tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1)
tmp_x[1:] = tmp_x[1:] + tmp_feats
# enhancer
tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
cls_token = self.dec[j](cls_token, tmp_x)
weight = self.sigmoid(self.balance)
residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C
# return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual)
feats = (1 - weight) * cls_token[0, :, :] + weight * residual
if return_all_feats:
return feats, x.view(L, N, T_down, C)
return feats
class VisionTransformer(nn.Module):
def __init__(
self,
# backbone
input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0.,
use_checkpoint=False, checkpoint_num=[0], t_size=8,
# extractor
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(
width, layers, heads,
backbone_drop_path_rate=backbone_drop_path_rate,
use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size,
return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout, num_classes=num_classes,
)
def forward(self, x, mode='video', return_all_feats=False):
x = self.conv1(x) # shape = [*, width, grid, grid]
N, C, T, H, W = x.shape
x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C)
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
out = self.transformer(x, mode=mode, return_all_feats=return_all_feats)
return out
def inflate_weight(weight_2d, time_dim, center=True):
if center:
weight_3d = torch.zeros(*weight_2d.shape)
weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
middle_idx = time_dim // 2
weight_3d[:, :, middle_idx, :, :] = weight_2d
else:
weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
weight_3d = weight_3d / time_dim
return weight_3d
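# Sketch of the inflation above: a 2D kernel of shape (out_c, in_c, k, k) becomes a 3D
# kernel of shape (out_c, in_c, time_dim, k, k). With center=True the 2D weights occupy
# only the middle temporal slice; otherwise they are tiled over time and scaled by 1/time_dim.
if __name__ == '__main__':
    _w2d = torch.rand(768, 3, 16, 16)
    _w3d = inflate_weight(_w2d, time_dim=3, center=True)
    print(_w3d.shape)  # expected: torch.Size([768, 3, 3, 16, 16])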
def load_state_dict(model, state_dict):
state_dict_3d = model.state_dict()
for k in state_dict.keys():
if state_dict[k].shape != state_dict_3d[k].shape:
if len(state_dict_3d[k].shape) <= 2:
logger.info(f'Ignore: {k}')
continue
logger.info(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}')
time_dim = state_dict_3d[k].shape[2]
state_dict[k] = inflate_weight(state_dict[k], time_dim)
model.load_state_dict(state_dict, strict=False)
def vit_only_global_b32(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_b16(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_l14(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[20, 21, 22, 23],
n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_l14_336(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[20, 21, 22, 23],
n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import numpy as np
seed = 4217
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
num_frames = 8
model = vit_only_global_l14(
pretrained=False,
t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4,
use_checkpoint=True, checkpoint_num=[0],
)
flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224))
s = time.time()
logger.info(flop_count_table(flops, max_depth=1))
logger.info(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_only_global.py |
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x, return_qk=False):
if return_qk:
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
q, k, attn_output, _ = self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, return_qk=True)
return q, k, attn_output
else:
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, return_qk=False):
if return_qk:
q, k, attn_output = self.attention(self.ln_1(x), return_qk=True)
x = x + self.drop_path(attn_output)
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x, q, k
else:
x = x + self.drop_path(self.attention(self.ln_1(x)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers)
])
def forward(self, x, return_num=4, T=8):
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x)
if i >= self.layers - return_num:
L, NT, C = x.shape
N = NT // T
features.append(x.view(L, N, T, C))
return features
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim, drop_path_rate=0.,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)
def forward(self, x, return_num=4, return_qk=True):
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T,
)
return features
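# Hedged sketch (not part of the original file): shape contract of the forward above.
# A (N, 3, T, H, W) clip yields `return_num` per-layer feature maps, each of shape
# (1 + (H // patch) * (W // patch), N, T, width), with the CLS token at index 0.
def _example_clip_vit_shapes():
    model = vit_b32(pretrained=False)
    feats = model(torch.rand(2, 3, 8, 224, 224), return_num=4)
    assert len(feats) == 4 and feats[0].shape == (1 + 7 * 7, 2, 8, 768)
    return feats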
def vit_b32(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_b16(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_l14(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
def vit_l14_336(pretrained=True, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit.py |
from .evl_module import TransformerDecoder
from .evl_module_uniformer_diff_conv_balance import TransformerDecoder_uniformer_diff_conv_balance
from .clip_vit import vit_b32, vit_b16, vit_l14, vit_l14_336
from .clip_vit_2plus1d import vit_2plus1d_b32, vit_2plus1d_b16, vit_2plus1d_l14, vit_2plus1d_l14_336
from .clip_vit_2plus1d_dw_bias import vit_2plus1d_dw_bias_b32, vit_2plus1d_dw_bias_b16, vit_2plus1d_dw_bias_l14, vit_2plus1d_dw_bias_l14_336
from .clip_vit_fusion import vit_fusion_b32, vit_fusion_b16, vit_fusion_l14, vit_fusion_l14_336
from .clip_vit_only_global import vit_only_global_b32, vit_only_global_b16, vit_only_global_l14, vit_only_global_l14_336 | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__init__.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import _pad, linear, softmax, dropout
Tensor = torch.Tensor
pad = _pad
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if it is ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - return_qk: whether to return Q and K.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(
attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
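# Hedged sketch (not part of the original file): calling this functional interface
# directly with ad-hoc projection weights, to show the extra (q, k) outputs enabled by
# `return_qk=True`. All tensor sizes here are illustrative.
def _example_functional_return_qk():
    seq_len, bsz, embed_dim, num_heads = 10, 2, 64, 8
    x = torch.randn(seq_len, bsz, embed_dim)
    in_w, in_b = torch.randn(3 * embed_dim, embed_dim), torch.zeros(3 * embed_dim)
    out_w, out_b = torch.randn(embed_dim, embed_dim), torch.zeros(embed_dim)
    q, k, out, _ = multi_head_attention_forward(
        x, x, x, embed_dim, num_heads, in_w, in_b, None, None, False, 0.0, out_w, out_b,
        training=False, need_weights=False, return_qk=True)
    return q.shape, k.shape, out.shape      # each (seq_len, bsz, embed_dim)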
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_module.py |
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
from einops import rearrange
from .attention import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
# temporal
self.attn_t = MultiheadAttention(d_model, n_head)
self.ln_t = LayerNorm(d_model)
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
# init zero
print('Init zero for (2+1)d')
nn.init.constant_(self.attn_t.in_proj_weight, 0)
nn.init.constant_(self.attn_t.in_proj_bias, 0)
nn.init.constant_(self.attn_t.out_proj.weight, 1)
nn.init.constant_(self.attn_t.out_proj.bias, 0)
def attention(self, x):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def attention_temporal(self, x):
self.attn_mask = None
return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, T=8):
# temporal
# x: 1+HWT, N, C
xt = x[1:, :, :]
_, N, C = xt.shape
xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T)
res_temporal = self.attention_temporal(self.ln_t(xt))
res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T)
xt = x[1:, :, :] + self.drop_path(res_temporal)
# spatial
init_cls_token = x[:1, :, :]
cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C)
xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T)
xs = torch.cat((cls_token, xs), 0)
res_spatial = self.attention(self.ln_1(xs))
# Taking care of CLS token
cls_token = res_spatial[0, :, :]
cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N)
        cls_token = torch.mean(cls_token, 0, True)  # average the spatial CLS token over the T frames
res_spatial = res_spatial[1:, :, :]
res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N)
x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers)
])
def forward(self, x, return_num=4, T=8):
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x, T=T)
if i >= self.layers - return_num:
# LT + 1, N, C
LT, N, C = x.shape
L = (LT - 1) // T
cls_x, tmp_x = x[:1], x[1:]
cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1)
tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C
                tmp_x = torch.cat([cls_x, tmp_x], dim=0)  # L + 1, N, T, C
features.append(tmp_x)
return features
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim, num_frames=8, drop_path_rate=0.,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)
def forward(self, x, return_num=4):
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
cls_tokens = x[:N, :1, :]
x = x[:, 1:]
x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T)
x = x + self.temporal_positional_embedding
x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T)
x = torch.cat((cls_tokens, x), dim=1)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T,
)
return features
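# Hedged sketch (not part of the original file): the divided space-time blocks above keep
# the CLIP token layout, so each returned feature map is (1 + grid**2, N, T, width) with
# the frame-broadcast CLS token at index 0.
def _example_vit_2plus1d_shapes():
    model = vit_2plus1d_b32(pretrained=False, num_frames=8)
    feats = model(torch.rand(2, 3, 8, 224, 224), return_num=4)
    assert len(feats) == 4 and feats[0].shape == (1 + 7 * 7, 2, 8, 768)
    return feats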
def vit_2plus1d_b32(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_b16(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_l14(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_l14_336(pretrained=True, num_frames=8, drop_path_rate=0.):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_2plus1d_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_2plus1d.py |
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
from ipdb import set_trace
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
def conv_1x1x1(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)
def conv_3x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)
def conv_1x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups)
def bn_3d(dim):
return nn.BatchNorm3d(dim)
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class Local_MHRA(nn.Module):
def __init__(self, d_model, dw_reduction=1.5, pos_kernel_size=3):
super().__init__()
padding = pos_kernel_size // 2
re_d_model = int(d_model // dw_reduction)
self.pos_embed = nn.Sequential(
nn.BatchNorm3d(d_model),
nn.Conv3d(d_model, re_d_model, kernel_size=1, stride=1, padding=0),
nn.Conv3d(re_d_model, re_d_model, kernel_size=(pos_kernel_size, 1, 1), stride=(1, 1, 1), padding=(padding, 0, 0), groups=re_d_model),
nn.Conv3d(re_d_model, d_model, kernel_size=1, stride=1, padding=0),
)
# init zero
print('Init zero for Conv in pos_emb')
nn.init.constant_(self.pos_embed[3].weight, 0)
nn.init.constant_(self.pos_embed[3].bias, 0)
def forward(self, x):
return self.pos_embed(x)
class ResidualAttentionBlock(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None, drop_path=0.0,
dw_reduction=1.5,
):
super().__init__()
self.n_head = n_head
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.lmhra1 = Local_MHRA(d_model, dw_reduction=dw_reduction)
self.lmhra2 = Local_MHRA(d_model, dw_reduction=dw_reduction)
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, T=8):
# x: 1+HW, NT, C
# Local MHRA
tmp_x = x[1:, :, :]
L, NT, C = tmp_x.shape
N = NT // T
H = W = int(L ** 0.5)
tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous()
tmp_x = tmp_x + self.drop_path(self.lmhra1(tmp_x))
tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C)
x = torch.cat([x[:1, :, :], tmp_x], dim=0)
# MHSA
x = x + self.drop_path(self.attention(self.ln_1(x)))
# Local MHRA
tmp_x = x[1:, :, :]
tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous()
tmp_x = tmp_x + self.drop_path(self.lmhra2(tmp_x))
tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C)
x = torch.cat([x[:1, :, :], tmp_x], dim=0)
# FFN
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Extractor(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None,
mlp_factor=4.0, dropout=0.0, drop_path=0.0,
):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
# zero init
nn.init.xavier_uniform_(self.attn.in_proj_weight)
nn.init.constant_(self.attn.out_proj.weight, 0.)
nn.init.constant_(self.attn.out_proj.bias, 0.)
nn.init.xavier_uniform_(self.mlp[0].weight)
nn.init.constant_(self.mlp[-1].weight, 0.)
nn.init.constant_(self.mlp[-1].bias, 0.)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(
self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0.,
t_size=8, dw_reduction=2,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.T = t_size
self.return_list = return_list
# Backbone
b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(
width, heads, attn_mask,
drop_path=b_dpr[i],
dw_reduction=dw_reduction,
) for i in range(layers)
])
# Extractor
assert n_layers == len(return_list)
self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim))
self.dpe = nn.ModuleList([
nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.dpe:
nn.init.constant_(m.bias, 0.)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.dec = nn.ModuleList([
Extractor(
n_dim, n_head, mlp_factor=mlp_factor,
dropout=mlp_dropout[i], drop_path=dpr[i],
) for i in range(n_layers)
])
# # projection
# self.proj = nn.Sequential(
# nn.LayerNorm(n_dim),
# nn.Dropout(cls_dropout),
# nn.Linear(n_dim, num_classes),
# )
self.balance = nn.Parameter(torch.zeros((n_dim)))
self.sigmoid = nn.Sigmoid()
def forward(self, x, mode='video', return_all_feats=False):
if mode == 'video':
T_down = self.T
else:
T_down = 1
L, NT, C = x.shape
N = NT // T_down
H = W = int((L - 1) ** 0.5)
assert H * W == L - 1
cls_token = self.temporal_cls_token.repeat(1, N, 1)
j = -1
for i, resblock in enumerate(self.resblocks):
x = resblock(x, T_down)
if i in self.return_list:
j += 1
tmp_x = x.clone()
tmp_x = tmp_x.view(L, N, T_down, C)
# dpe
_, tmp_feats = tmp_x[:1], tmp_x[1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W)
tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1)
tmp_x[1:] = tmp_x[1:] + tmp_feats
# enhancer
tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
cls_token = self.dec[j](cls_token, tmp_x)
weight = self.sigmoid(self.balance)
        residual = x.view(L, N, T_down, C)[0].mean(1)  # CLS row of the (L, N, T, C) tokens, averaged over T -> (N, C)
# return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual)
feats = (1 - weight) * cls_token[0, :, :] + weight * residual
if return_all_feats:
return feats, x.view(L, N, T_down, C)
return feats
class VisionTransformer(nn.Module):
def __init__(
self,
# backbone
input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0.,
t_size=8, kernel_size=3, dw_reduction=1.5,
# extractor
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
padding = (kernel_size - 1) // 2
self.conv1 = nn.Conv3d(3, width, (kernel_size, patch_size, patch_size), (2, patch_size, patch_size), (padding, 0, 0), bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(
width, layers, heads, dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate, t_size=t_size // 2,
return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout, num_classes=num_classes,
)
def forward(self, x, mode='video', return_all_feats=False):
# taken from https://github.com/facebookresearch/omnivore/blob/main/omnivore/models/swin_transformer_3d.py#L703
if mode == 'image': # for image, stride 2
# ! Use replicate here
x = F.pad(x, (0, 0, 0, 0, 0, 1), mode="replicate")
x = self.conv1(x) # shape = [*, width, grid, grid]
N, C, T, H, W = x.shape
x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C)
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
out = self.transformer(x, mode=mode, return_all_feats=return_all_feats)
return out
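# Hedged usage sketch (not part of the original file), following the __main__ FLOP check
# at the bottom of this file: the patch conv halves the temporal axis (stride 2), and the
# transformer returns one (N, width) feature blending the extractor's temporal CLS token
# with the time-averaged backbone CLS token; `return_all_feats=True` also hands back the
# final token grid.
def _example_fusion_forward():
    num_frames = 16
    model = vit_fusion_b32(pretrained=False, t_size=num_frames)
    video = torch.rand(1, 3, num_frames, 224, 224)
    feats, tokens = model(video, return_all_feats=True)
    assert feats.shape == (1, 768) and tokens.shape[1:] == (1, num_frames // 2, 768)
    return feats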
def inflate_weight(weight_2d, time_dim, center=True):
if center:
weight_3d = torch.zeros(*weight_2d.shape)
weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
middle_idx = time_dim // 2
weight_3d[:, :, middle_idx, :, :] = weight_2d
else:
weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
weight_3d = weight_3d / time_dim
return weight_3d
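# Hedged sketch (not part of the original file): the two inflation modes above on a tiny
# kernel. With center=True the 2D kernel sits at the middle time step and the rest is
# zero, so the inflated conv initially reproduces the frame-wise 2D output; with
# center=False the kernel is spread uniformly (divided by time_dim).
def _example_inflate_modes():
    w2d = torch.ones(1, 1, 3, 3)
    centered = inflate_weight(w2d, time_dim=3, center=True)    # (1, 1, 3, 3, 3)
    uniform = inflate_weight(w2d, time_dim=3, center=False)
    assert centered[:, :, 1].equal(w2d) and centered[:, :, 0].sum() == 0
    assert torch.allclose(uniform.sum(dim=2), w2d)
    return centered, uniform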
def load_state_dict(model, state_dict):
state_dict_3d = model.state_dict()
for k in state_dict.keys():
if state_dict[k].shape != state_dict_3d[k].shape:
if len(state_dict_3d[k].shape) <= 2:
print(f'Ignore: {k}')
continue
print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}')
time_dim = state_dict_3d[k].shape[2]
state_dict[k] = inflate_weight(state_dict[k], time_dim)
model.load_state_dict(state_dict, strict=False)
def clip_load_state_dict(model, state_dict):
state_dict_3d = model.state_dict()
for k in state_dict.keys():
# print(k, k in state_dict_3d, k in state_dict)
if k in state_dict and k in state_dict_3d and state_dict[k].shape != state_dict_3d[k].shape:
if len(state_dict_3d[k].shape) <= 2:
print(f'Ignore: {k}')
continue
print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}')
time_dim = state_dict_3d[k].shape[2]
state_dict[k] = inflate_weight(state_dict[k], time_dim)
model.load_state_dict(state_dict, strict=False)
def vit_fusion_b32(
pretrained=True,
t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
t_size=t_size,
dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_fusion_b16(
pretrained=True,
t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
t_size=t_size,
dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_fusion_l14(
pretrained=True,
t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
t_size=t_size,
dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_fusion_l14_336(
pretrained=True,
t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0.,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
t_size=t_size,
dw_reduction=dw_reduction,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import numpy as np
seed = 4217
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
num_frames = 16
model = vit_fusion_b16(
pretrained=False,
t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4,
dw_reduction=1.5,
)
flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_fusion.py |
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from .attention_module_bias import multi_head_attention_forward
class _LinearWithBias(Linear):
bias: Tensor
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
        \text{where } head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None, return_qk=False, rpb=None):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
          If a ByteTensor is provided, the non-zero positions will be ignored while the zero
          positions will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
          S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
          positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
          while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
          are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
        - return_qk: whether to return Q and K.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if return_qk:
if not self._qkv_same_embed_dim:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, return_qk=True, rpb=rpb)
else:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, return_qk=True, rpb=rpb)
return q, k, attn_output, attn_output_weights
else:
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, rpb=rpb)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, rpb=rpb)
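# Hedged usage sketch (not part of the original file): with `return_qk=True` this module
# also returns the projected Q and K in (L, N, E) layout next to the usual attention
# output; `rpb` (relative position bias) is simply forwarded to the functional op and may
# be left as None.
def _example_return_qk():
    attn = MultiheadAttention(embed_dim=64, num_heads=8)
    x = torch.randn(10, 2, 64)                   # (L, N, E)
    q, k, out, weights = attn(x, x, x, return_qk=True)
    assert q.shape == k.shape == out.shape == x.shape
    return weights                               # (N, L, L), averaged over heads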
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_bias.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import _pad, linear, softmax, dropout
Tensor = torch.Tensor
pad = _pad
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False,
rpb: Tensor = None,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if it is ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - return_qk: whether to return Q and K.
        - rpb: relative position bias
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
if rpb is not None:
attn_output_weights = attn_output_weights + rpb
attn_output_weights = softmax(attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
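if __name__ == '__main__':
    # Minimal smoke test (an added sketch, not part of the original module): it calls the function
    # above with the (L, N, E) layout from the docstring and a fused (3E, E) input projection.
    # All tensor sizes below are arbitrary illustrative values.
    import torch
    L_, N_, E_, H_ = 4, 2, 8, 2
    x = torch.randn(L_, N_, E_)
    in_proj_w = torch.randn(3 * E_, E_) * (E_ ** -0.5)
    in_proj_b = torch.zeros(3 * E_)
    out_proj_w = torch.randn(E_, E_) * (E_ ** -0.5)
    out_proj_b = torch.zeros(E_)
    out, attn = multi_head_attention_forward(
        x, x, x, E_, H_,
        in_proj_w, in_proj_b, None, None, False, 0.0,
        out_proj_w, out_proj_b,
        training=False, need_weights=True, return_qk=False)
    print(out.shape, attn.shape)  # (L, N, E) = (4, 2, 8) and (N, L, S) = (2, 4, 4)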
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_module_bias.py |
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
from einops import rearrange
import torch.utils.checkpoint as checkpoint
from .attention_bias import MultiheadAttention
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0, t_size=8, spatial_size=7):
super().__init__()
self.n_head = n_head
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
print(f'Add RPB: t_size {t_size}, spatial_size {spatial_size}')
self.pos_embed = nn.Conv3d(d_model, d_model, kernel_size=3, stride=1, padding=1, groups=d_model)
# temporal
self.attn_t = MultiheadAttention(d_model, n_head)
self.ln_t = LayerNorm(d_model)
self.rpb_t = nn.Parameter(torch.zeros([t_size * 2 - 1, n_head]))
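        # rpb_t holds 2*t_size-1 learnable biases per head, one for each temporal offset q - k;
        # idx_tensor_t[q, k] = q - k + t_size - 1 maps each (query, key) frame pair into this table.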
idx_tensor_t = torch.zeros([t_size, t_size], dtype=torch.long)
for q in range(t_size):
for k in range(t_size):
offs = q - k + t_size - 1
idx_tensor_t[q, k] = offs
self.idx_tensor_t = idx_tensor_t
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.rpb = nn.Parameter(torch.zeros([(spatial_size * 2 - 1) ** 2, n_head]))
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
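        # Same idea in 2D: idx_tensor[q, k] flattens the relative (row, col) offset between query
        # patch q and key patch k into an index over the (2*spatial_size - 1)**2 spatial bias table.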
idx_tensor = torch.zeros([spatial_size ** 2, spatial_size ** 2], dtype=torch.long)
for q in range(spatial_size ** 2):
qi, qj = q // spatial_size, q % spatial_size
for k in range(spatial_size ** 2):
ki, kj = k // spatial_size, k % spatial_size
i_offs = qi - ki + spatial_size - 1
j_offs = qj - kj + spatial_size - 1
idx_tensor[q, k] = i_offs * (spatial_size * 2 - 1) + j_offs
self.idx_tensor = idx_tensor
# init zero
print('Init zero for (2+1)d')
nn.init.constant_(self.pos_embed.weight, 0)
nn.init.constant_(self.pos_embed.bias, 0)
nn.init.constant_(self.attn_t.in_proj_weight, 0)
nn.init.constant_(self.attn_t.in_proj_bias, 0)
nn.init.constant_(self.attn_t.out_proj.weight, 1)
nn.init.constant_(self.attn_t.out_proj.bias, 0)
def attention(self, x, rpb=None):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0]
def attention_temporal(self, x, rpb=None):
self.attn_mask = None
return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0]
def forward(self, x, T=8, mode='video'):
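        # Factorised (2+1)D block: depth-wise 3D conv positional encoding on the patch tokens,
        # temporal attention over T (with rpb_t), spatial attention over patches + CLS (with rpb),
        # then the MLP; the attention and MLP stages are residual through drop_path.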
# temporal
# x: 1+HWT, N, C
# pos_emb
tmp_x = x[1:, :, :]
LT, N, C = tmp_x.shape
L = LT // T
H = W = int(L ** 0.5)
tmp_x = tmp_x.view(H, W, T, N, C).permute(3, 4, 2, 0, 1)
tmp_x = tmp_x + self.pos_embed(tmp_x)
tmp_x = tmp_x.view(N, C, T, L).permute(3, 2, 0, 1).view(LT, N, C)
x[1:, :, :] = tmp_x
xt = x[1:, :, :]
_, N, C = xt.shape
xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T)
# no rpb_t for image
if mode == 'image':
rpb_t = None
else:
# rpb_t: T, T, H => B*H, T, T
self.idx_tensor_t = self.idx_tensor_t.to(xt.device)
rpb_t = self.rpb_t[self.idx_tensor_t].permute(2, 0, 1).repeat(N*L, 1, 1)
res_temporal = self.attention_temporal(self.ln_t(xt), rpb=rpb_t)
res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T)
xt = x[1:, :, :] + self.drop_path(res_temporal)
# spatial
init_cls_token = x[:1, :, :]
cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C)
xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T)
xs = torch.cat((cls_token, xs), 0)
# rpb: L, L, H => B*H, L+1, L+1
rpb = torch.zeros((self.n_head, L+1, L+1), device=xs.device, dtype=xs.dtype)
self.idx_tensor = self.idx_tensor.to(xs.device)
rpb[:, 1:, 1:] = self.rpb[self.idx_tensor].permute(2, 0, 1)
rpb = rpb.repeat(T*N, 1, 1)
res_spatial = self.attention(self.ln_1(xs), rpb=rpb)
# Taking care of CLS token
cls_token = res_spatial[0, :, :]
cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N)
        cls_token = torch.mean(cls_token, 0, True) # average the CLS token over frames
res_spatial = res_spatial[1:, :, :]
res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N)
x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0., t_size=8, spatial_size=7):
super().__init__()
self.width = width
self.layers = layers
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, attn_mask,
drop_path=dpr[i], t_size=t_size, spatial_size=spatial_size) for i in range(layers)
])
def forward(self, x, return_num=4, T=8, mode='video'):
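        # Returns the (L + 1, N, T, C) token maps of the last `return_num` blocks;
        # the CLS token is replicated along T so it can be concatenated with the patch tokens.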
features = []
for i, resblock in enumerate(self.resblocks):
x = resblock(x, T=T, mode=mode)
if i >= self.layers - return_num:
# LT + 1, N, C
LT, N, C = x.shape
L = (LT - 1) // T
cls_x, tmp_x = x[:1], x[1:]
cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1)
tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C
                tmp_x = torch.cat([cls_x, tmp_x], dim=0)  # L + 1, N, T, C
features.append(tmp_x)
return features
class VisionTransformer(nn.Module):
def __init__(
self, input_resolution, patch_size, width, layers, heads, output_dim,
num_frames=8, drop_path_rate=0., t_size=8, spatial_size=7
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate, t_size=t_size, spatial_size=spatial_size)
def forward(self, x, return_num=4, mode='video'):
if len(x.size()) == 5:
N, C, T, H, W = x.shape
x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W)
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
cls_tokens = x[:N, :1, :]
x = x[:, 1:]
# add temporal position embedding for video
if mode == 'video':
x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T)
x = x + self.temporal_positional_embedding
x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T)
else:
pass
x = torch.cat((cls_tokens, x), dim=1)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
features = self.transformer(
x, return_num=return_num, T=T, mode=mode
)
return features
def vit_2plus1d_dw_bias_b32(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
t_size=t_size,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_b16(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
t_size=t_size,
spatial_size=14,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_l14(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
t_size=t_size,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
def vit_2plus1d_dw_bias_l14_336(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
num_frames=num_frames,
drop_path_rate=drop_path_rate,
t_size=t_size,
)
if pretrained:
print('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
model.load_state_dict(state_dict, strict=False)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
model = vit_2plus1d_dw_bias_b32(pretrained=True)
flops = FlopCountAnalysis(model, torch.rand(4, 3, 8, 224, 224))
s = time.time()
print(flop_count_table(flops, max_depth=1))
print(time.time()-s) | InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_2plus1d_dw_bias.py |
#!/usr/bin/env python
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
import torch.nn as nn
import torch.nn.functional as F
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualDecoderBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None,
mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
print(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
nn.init.xavier_uniform_(self.attn.in_proj_weight)
nn.init.xavier_uniform_(self.attn.out_proj.weight)
nn.init.xavier_uniform_(self.mlp[0].weight)
nn.init.xavier_uniform_(self.mlp[-1].weight)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class TransformerDecoder(nn.Module):
def __init__(self, n_layers=4,
n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8,
use_t_conv=True, use_t_pos_embed=True, num_classes=400,
add_residual=False,
):
super().__init__()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.dec = nn.ModuleList([
ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i])
for i in range(n_layers)
])
self.proj = nn.Sequential(
nn.LayerNorm(n_dim),
nn.Dropout(cls_dropout),
nn.Linear(n_dim, num_classes),
)
self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim))
self.add_residual = add_residual
print(f'Add residual {add_residual}')
if use_t_conv:
self.tconv = nn.ModuleList([
nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.tconv:
nn.init.constant_(m.bias, 0.)
m.weight.data[...] = torch.Tensor([0, 1, 0])
else:
self.tconv = None
if use_t_pos_embed:
self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim]))
else:
self.pemb_t = None
self.t_size = t_size
def forward(self, clip_feats_all):
# clip_feats_all = clip_feats_all[-len(self.dec):]
# only return n_layers features, save memory
clip_feats = [x for x in clip_feats_all]
L, N, T, C = clip_feats[0].size()
x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1)
for i in range(len(clip_feats)):
if self.tconv is not None:
L, N, T, C = clip_feats[i].shape
clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T
clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C)
if self.pemb_t is not None:
clip_feats[i] = clip_feats[i] + self.pemb_t[i]
clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
for i in range(len(self.dec)):
x = self.dec[i](x, clip_feats[i])
if self.add_residual:
residual = clip_feats_all[-1][0].mean(1)
return self.proj(x[0, :, :] + residual)
else:
return self.proj(x[0, :, :])
if __name__ == '__main__':
model = TransformerDecoder()
# construct a fake input to demonstrate input tensor shape
L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim
# we use intermediate feature maps from multiple blocks, so input features should be a list
input_features = []
for i in range(4): # vit-b has 12 blocks
        # every item in input_features contains feature maps from a single block
# every item is a tuple containing 3 feature maps:
# (1) block output features (i.e. after mlp) with shape L, N, T, C
# (2) projected query features with shape L, N, T, C
# (3) projected key features with shape L, N, T, C
input_features.append(
torch.zeros([L, N, T, C]))
# some small optimizations:
    # (1) We only decode from the last $n$ blocks, so it is enough for the last $n$ items of input_features to be valid; all earlier items can be filled with None to save memory. By default $n=4$.
    # (2) projected query/key features are optional. If you are using an incompatible image backbone without query/key (e.g. a CNN), you can fill those positions with None (i.e. the tuple should be (Tensor, None, None)) and set use_image_attnmap=False when constructing the model.
print(model(input_features).shape) # should be N, 400
| InternVideo-main | Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/evl_module.py |
import warnings
# This ignores the scheduler warning, see https://github.com/Lightning-AI/lightning/issues/5558
warnings.filterwarnings("ignore", "Detected call of", UserWarning)
import os
import copy
import pytorch_lightning as pl
from CoTrain.config import ex
from CoTrain.modules import CoTrainTransformerSS, CLIP
from CoTrain.datamodules.video.multitask_datamodule import MTDataModule
import datetime
from pytorch_lightning.utilities.cloud_io import get_filesystem
from pytorch_lightning.utilities.cloud_io import load as pl_load
import torch
import numpy as np
from pytorch_lightning.strategies import DDPStrategy
torch.manual_seed(0)
class CustomDDPStrategy(DDPStrategy):
def configure_ddp(self):
super().configure_ddp()
self._model._set_static_graph() # THIS IS THE MAGIC LINE
def deterministic_index_select(x, dim, indices):
"""
input_tensor: Tensor
dim: dim
indices: 1D tensor
"""
tensor_transpose = torch.transpose(x, 0, dim)
return tensor_transpose[indices].transpose(dim, 0)
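# Usage sketch (added for clarity; equivalent to torch.index_select):
#   x = torch.arange(12).view(3, 4)
#   deterministic_index_select(x, dim=1, indices=torch.tensor([0, 2]))  # -> shape (3, 2), columns 0 and 2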
@ex.automain
def main(_config):
_config = copy.deepcopy(_config)
pl.seed_everything(_config["seed"])
dm = MTDataModule(_config, dist=True)
if not _config["clip"]:
model = CoTrainTransformerSS(_config)
else:
model = CLIP(_config)
# assert False, [n for n, p in model.named_parameters() if p.requires_grad][:10]
exp_name = f'{_config["exp_name"]}'
os.makedirs(_config["log_dir"], exist_ok=True)
checkpoint_callback = pl.callbacks.ModelCheckpoint(
save_top_k=_config["save_top_k"],
# every_n_epochs=_config["save_checkpoints_interval"],
every_n_train_steps=_config["val_check_interval"],
verbose=True,
monitor="contrastive/train/loss",
mode="min",
save_last=_config["save_last"],
dirpath=_config["model_dir"],
)
now = datetime.datetime.now()
if not isinstance(_config["load_path"], str):
instance_name = f'{exp_name}_seed{_config["seed"]}_from_multiple'
else:
instance_name = f'{exp_name}_seed{_config["seed"]}_from_{"_".join(_config["load_path"].split("/")[-2:])[:-5]}'
logger = pl.loggers.TensorBoardLogger(
_config["log_dir"],
name=instance_name,
version="version_0",
)
lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
summary_callback = pl.callbacks.ModelSummary(max_depth=1)
callbacks = [checkpoint_callback, lr_callback, summary_callback]
num_gpus = (
_config["num_gpus"]
if isinstance(_config["num_gpus"], int)
else len(_config["num_gpus"])
)
# print all config at the begin
print('='*70+'Config: '+'='*70)
print(instance_name)
print(_config)
print('='*150)
    # note: _config["batch_size"] is the desired global batch size across all machines (e.g. at least 1024);
    # gradients are accumulated until the effective per-step batch matches it.
grad_steps = _config["batch_size"] // (
_config["per_gpu_batchsize"] * num_gpus * _config["num_nodes"]
)
assert grad_steps > 0, (_config["batch_size"], _config["per_gpu_batchsize"])
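    # e.g. (illustrative numbers) batch_size=2048 with per_gpu_batchsize=32 on 8 GPUs x 2 nodes
    # gives grad_steps = 2048 // (32 * 8 * 2) = 4 accumulation steps.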
if not _config["clip_use_checkpoint"]:
# assert not _config["clip_use_checkpoint"], "Do not use gradient accumulation and checkpoint at the same time"
if _config["loss_names"]["openend_vqa"] >= 1:
find_unused_paramters = True
else:
find_unused_paramters = False
strategy = DDPStrategy(find_unused_parameters=find_unused_paramters)
else:
assert grad_steps == 1
strategy = CustomDDPStrategy()
    max_steps = _config["max_steps"]
    resume_ckpt = _config["resume_from"]
    if max_steps is None:
        max_steps = -1  # Lightning treats -1 as "no step limit"
trainer = pl.Trainer(
devices=_config["num_gpus"],
num_nodes=_config["num_nodes"],
precision=_config["precision"],
accelerator="gpu",
benchmark=True,
deterministic=False,
max_epochs=_config["max_epoch"] if max_steps == -1 else 100,
max_steps=max_steps,
callbacks=callbacks,
logger=logger,
# prepare_data_per_node=False,
replace_sampler_ddp=False,
accumulate_grad_batches=grad_steps,
log_every_n_steps=10,
fast_dev_run=_config["fast_dev_run"],
val_check_interval=_config["val_check_interval"],
strategy=strategy,
# show_progress_bar=False,
# progress_bar_refresh_rate=0
)
fs = get_filesystem(resume_ckpt)
if fs.exists(resume_ckpt):
with fs.open(resume_ckpt, "rb") as f:
ckpt = torch.load(f, map_location='cpu')
# This hacks pl wrong steps for logger
global_step_offset = ckpt["global_step"]
trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
del ckpt
pass
else:
resume_ckpt = None
print("accumulate grad batches is: ", trainer.accumulate_grad_batches)
if not _config["test_only"]:
with torch.autograd.set_detect_anomaly(True):
trainer.fit(model, datamodule=dm, ckpt_path=resume_ckpt)
else:
trainer.test(model, datamodule=dm, ckpt_path=resume_ckpt)
| InternVideo-main | Downstream/multi-modalities-downstream/run.py |
from sacred import Experiment
ex = Experiment("CoTrain", save_git_info=False)
def _loss_names(d):
ret = {
# pretrain
"vtm": 0,
"mlm": 0,
"mpp": 0,
"vtc": 0,
"vcop": 0,
"dino": 0,
# downstream
"vqa": 0,
"openend_vqa": 0,
"mc_vqa": 0,
"nlvr2": 0,
"irtr": 0,
"multiple_choice": 0,
'vcr_q2a': 0,
'zs_classify': 0,
'contrastive': 0,
'cap': 0,
'mim': 0,
}
ret.update(d)
return ret
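# e.g. _loss_names({"vtm": 1, "mlm": 1}) enables vtm and mlm at weight 1 and leaves every other task at 0.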
@ex.config
def config():
exp_name = "CoTrain"
seed = 0
video_datasets = ["wevid", "howto100m", "yttemporal"]
image_datasets = ["cc3m", "cc12m"]
val_datasets = []
loss_names = _loss_names({"vtm": 1, "mlm": 1})
val_loss_names = _loss_names({})
batch_size = 4096 # 128 x 32
# this is a desired batch size; pl trainer will accumulate gradients when per step batch is smaller.
linear_evaluation = False
draw_false_image = 1
# video setting
train_transform_keys = ["pixelbert"]
val_transform_keys = ["pixelbert"]
image_size = 224 # 384/224
patch_size = 16 # 16/32
max_image_len = -1
draw_false_video = 1
video_only = False
num_frames = 3 # input video frames
# Text Setting
vqav2_label_size = 3129
msrvttqa_label_size = 1501
max_text_len = 40 # original: 40, 200: for long sentences/paragraph
tokenizer = "pretrained/bert-base-uncased"
vocab_size = 30522
whole_word_masking = False
mlm_prob = 0.15
draw_false_text = 0
draw_options_text = 0
# Transformer Setting
vit = "vit_base_patch16_224" # "vit_base_patch32_384" / "vit_base_patch16_224"
hidden_size = 768
num_heads = 12
num_layers = 12
mlp_ratio = 4
drop_rate = 0.1
shared_embedding_dim = 512 # add for contrastive learning 512/256
# model_temporal_frames = 4 # add for model define, may not consistent with input data
    save_checkpoints_interval = 1 # save a checkpoint every N epochs
# Optimizer Setting
optim_type = "adamw"
learning_rate = 1e-4
weight_decay = 0.01
decay_power = 1
max_epoch = 100
max_steps = 25000
warmup_steps = 2500
end_lr = 0
lr_mult = 1 # multiply lr for downstream heads
backend = 'a100' # gpu: a100/v100/others
# Downstream Setting
get_recall_metric = False
get_ind_recall_metric = False
retrieval_views = 3 # how many views for retrieval
# PL Trainer Setting
resume_from = None
fast_dev_run = False
val_check_interval = 1.0
test_only = False
# below params varies with the environment
data_root = ""
log_dir = "result"
    per_gpu_batchsize = 0 # must be set manually, e.g. on the command line with per_gpu_batchsize=<N>
num_gpus = 1
num_nodes = 1
load_path = ""
    num_workers = 16 # 0 avoids unstable memory usage but slows training
precision = 16
model_dir = None
# clip related settings
clip = ""
clip_type = "ori" # In ["evl", "ori"]
clip_freeze = False
clip_freeze_text = False
clip_dpr = 0.0
prompt_type = "all"
clip_lr_mult = 1
clip_no_pretrain = False
clip_grad_unfreeze_int = 0 # <= 0 for nothing
clip_evl_dropout = 0.5
mim_prob = 0.90
clip_mlm_decoder_n_layers = 4
clip_mim_decoder_n_layers = 4
clip_mim_decoder_width = 512
clip_cap_decoder_n_layers = 4
clip_init_zero = True
clip_qa_type = "vtc" # vtc for contrastive, cap for caption head, both for both
clip_mc_type = "vtc" # vtc for contrastive, cap for caption head, both for both
# weight = clip_weight * clip_wiseft_coef + load_path * (1 - clip_wiseft_coef), <= 0 for not using
clip_wiseft_coef = -1.0
clip_mmt = False
clip_alt_data = False
image_data_mult = 1
clip_cls_dropout = 0.5
save_last = True
save_top_k = 1
clip_use_checkpoint = False
clip_checkpoint_num = [0, 0, 0]
clip_momentum_ckpt = 1
clip_momentum_interval = 1
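# How the pieces fit together (illustrative): Sacred merges the defaults above with any named config
# passed on the command line, e.g. `python run.py with task_finetune_msrvttchoice num_gpus=8 per_gpu_batchsize=16`;
# explicit key=value updates override the named config, which in turn overrides the defaults.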
# Named configs for "environment" which define gpus and nodes, and paths
@ex.named_config
def env_dandelin():
data_root = "/data2/dsets/dataset"
log_dir = "/data2/CoTrain/result"
num_gpus = 8
num_nodes = 1
# ================================ begin: pretrain ======================
@ex.named_config
def task_mlm_vtm_cotrain():
exp_name = "mlm_vtm"
video_datasets = ["webvid"] # "howto100m",
image_datasets = ["cc3m"]
loss_names = _loss_names({"vtm": 1, "mlm": 1})
batch_size = 2048
max_epoch = 30
max_image_len = -1
val_check_interval = 1.0
    save_checkpoints_interval = 3 # save a checkpoint every 3 epochs
@ex.named_config
def task_mlm_vtm_cotrain_seven():
exp_name = "mlm_vtm"
video_datasets = ["webvid", 'yttemporal', "howto100m"] # 'yttemporal', "howto100m",
image_datasets = ["cc3m", "cc12m", "vg", 'coco'] # , "vg", 'coco'
loss_names = _loss_names({"vtm": 1, "mlm": 1})
batch_size = 2048
max_epoch = 30
max_image_len = -1
val_check_interval = 1.0
    save_checkpoints_interval = 1 # save a checkpoint every epoch
@ex.named_config
def task_mlm_vtm_vcop_cotrain():
exp_name = "mlm_vtm"
video_datasets = ["howto100m", "webvid"]
image_datasets = ["cc3m"]
loss_names = _loss_names({"vtm": 1, "mlm": 1, "vcop": 1})
batch_size = 2048
max_epoch = 30
max_image_len = -1
val_check_interval = 1.0
    save_checkpoints_interval = 5 # save a checkpoint every 5 epochs
@ex.named_config
def task_mlm_vtm_dino_cotrain():
exp_name = "mlm_vtm_dino_1f"
video_datasets = ["webvid"] # "howto100m",
image_datasets = ["cc3m"]
loss_names = _loss_names({"vtm": 1, "mlm": 1, "dino": 1}) # already include dino
train_transform_keys = ["pixelbert_randaug"]
val_transform_keys = ["pixelbert_randaug"]
batch_size = 1024
max_epoch = 100
max_image_len = -1
val_check_interval = 1.0
    save_checkpoints_interval = 1 # save a checkpoint every epoch
# ================================ end: pretrain ======================
# ================================ begin: finetune ======================
# ========== begin: multiple choice ================
# = for lsmdc multiple choice
@ex.named_config
def task_finetune_lsmdcchoice():
exp_name = "finetune_lsmdc_choice"
video_datasets = ["lsmdc_choice"]
image_datasets = []
loss_names = _loss_names({"multiple_choice": 1})
batch_size = 256
max_epoch = 20
max_steps = None
warmup_steps = 0.1
draw_false_text = 5 # 5 choices
learning_rate = 1e-5
val_check_interval = 0.5
lr_mult = 10
# = for msrvtt multiple choice
@ex.named_config
def task_finetune_msrvttchoice():
exp_name = "finetune_msrvtt_choice"
video_datasets = ["msrvtt_choice"]
image_datasets = []
loss_names = _loss_names({"multiple_choice": 1})
batch_size = 256
max_epoch = 10
max_steps = None
warmup_steps = 0.1
draw_false_text = 5 # 5 choices
learning_rate = 1e-4
val_check_interval = 0.5
lr_mult = 10
# ========== end: multiple choice ================
# ind itc
# ========== begin: retrieval ================
@ex.named_config
def task_finetune_vtc_irtr_msrvtt():
exp_name = "finetune_vtc_irtr_msrvtt"
video_datasets = ["msrvtt"]
image_datasets = []
train_transform_keys = ["pixelbert_randaug"]
loss_names = _loss_names({"vtc": 1})
batch_size = 1024
max_epoch = 50
max_steps = None
warmup_steps = 0.1 # 0.1/0.3
    retrieval_views = 1 # number of views used for retrieval
get_recall_metric = False
get_ind_recall_metric = True
draw_false_text = 15
learning_rate = 6e-4 # 1/3e-4
# ========== end: retrieval ================
# ========== begin: vqa ================
# for msvd qa
@ex.named_config
def task_finetune_msvdqa():
exp_name = "finetune_msvd_qa"
video_datasets = ["msvdqa"]
image_datasets = []
loss_names = _loss_names({"openend_vqa": 1}) # msvd have same number of answers with msrvtt
batch_size = 512
    msrvttqa_label_size = 1001 # vqa vocabulary length 1000 + 1 background
max_epoch = 20
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
learning_rate = 1e-4 # 1e-4
val_check_interval = 1.0
lr_mult = 10
# = add by for msrvtt qa
@ex.named_config
def task_finetune_msrvttqa():
exp_name = "finetune_msrvtt_qa"
video_datasets = ["msrvttqa"]
image_datasets = []
loss_names = _loss_names({"openend_vqa": 1})
batch_size = 512
msrvttqa_label_size = 1501 # 1501 / 4540
max_epoch = 20
max_steps = None
warmup_steps = 0.1 # 0.1
draw_false_image = 1
draw_false_text = 1
learning_rate = 1e-4 # 1e-4 normal
val_check_interval = 1.0
lr_mult = 10
# = for tgif qa on frameqa
@ex.named_config
def task_finetune_tgifqa():
exp_name = "finetune_tgif_qa"
video_datasets = ["tgif"]
image_datasets = []
loss_names = _loss_names({"openend_vqa": 1})
batch_size = 512
    msrvttqa_label_size = 1541 # vqa vocabulary length 1540 + 1 background
max_epoch = 20
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
learning_rate = 1e-4 # 1e-4
val_check_interval = 1.0
lr_mult = 10
# = for tgif qa on action/trans
@ex.named_config
def task_finetune_tgif_action_trans():
exp_name = "finetune_tgif_action_trans"
video_datasets = ["tgifqa"]
image_datasets = []
loss_names = _loss_names({"mc_vqa": 1})
batch_size = 512
max_epoch = 100
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
draw_options_text = 5 # 5 choices
learning_rate = 1e-4 # 1e-4
val_check_interval = 1.0
lr_mult = 10
# ========== end: vqa ================
# Task5: ===================== action recognition =====================
@ex.named_config
def task_finetune_action_recognition_hmdb51():
exp_name = "finetune_action_recognition_hmdb51"
video_datasets = ["hmdb51"]
image_datasets = []
loss_names = _loss_names({"openend_vqa": 1}) # have
msrvttqa_label_size = 52 # 51 + 1
batch_size = 256
max_epoch = 50
max_steps = None
warmup_steps = 0.1
draw_false_text = 15
learning_rate = 1e-4
@ex.named_config
def task_finetune_action_recognition_k400():
exp_name = "finetune_action_recognition_k400"
video_datasets = ["k400"]
image_datasets = []
loss_names = _loss_names({"openend_vqa": 1}) # have
msrvttqa_label_size = 401 # 400 + 1
batch_size = 256
max_epoch = 50
max_steps = None
warmup_steps = 0.1
draw_false_text = 15
learning_rate = 3e-4
val_check_interval = 1.0
# end: ===================== action recognition =====================
# ================================ end: finetune ======================
@ex.named_config
def step25k():
max_epoch = 100
max_steps = 25000
@ex.named_config
def step50k():
max_epoch = 100
max_steps = 50000
@ex.named_config
def step100k():
max_epoch = 100
max_steps = 100000
@ex.named_config
def step200k():
max_epoch = 200
max_steps = 200000
@ex.named_config
def step400k():
max_epoch = 400
max_steps = 400000
@ex.named_config
def epoch1():
max_epoch = 1
max_steps = None
@ex.named_config
def vit32_base():
vit = "vit_base_patch32_384"
patch_size = 32
hidden_size = 768
num_heads = 12
num_layers = 12
# ============================= begin: clip_kc pretrain ===================
# = for msrvtt multiple choice
@ex.named_config
def clip_kc_finetune_msrvttchoice():
exp_name = "clip_kc_finetune_msrvtt_choice"
video_datasets = ["msrvtt_choice"]
image_datasets = []
loss_names = _loss_names({"multiple_choice": 1})
batch_size = 512
max_epoch = 10
max_steps = None
warmup_steps = 0.1
draw_false_text = 5 # 5 choices
learning_rate = 1e-4
val_check_interval = 0.5
lr_mult = 10
max_text_len = 77
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt"
clip_type = "kc"
@ex.named_config
def clip_kc_contrastive_howto_cc3m_choice():
exp_name = "clip_kc_contrastive_howto_cc3m_choice"
video_datasets = ["howto100m"]
image_datasets = ["cc3m"]
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
loss_names = _loss_names({"contrastive": 1})
batch_size = 1024
max_epoch = 10
max_text_len = 77
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt"
clip_type = "kc"
vocab_size = 49408
draw_false_text = 5
val_datasets = ["msrvtt_choice", "lsmdc"]
val_loss_names = _loss_names({"multiple_choice": 1})
@ex.named_config
def clip_kc_contrastive_2plus3_choice():
exp_name = "clip_kc_contrastive_2plus3_choice"
video_datasets = ["webvid", "howto100m"]
image_datasets = ["cc3m", "cc12m", "yfcc15m"]
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
loss_names = _loss_names({"contrastive": 1})
batch_size = 1024
max_epoch = 10
max_text_len = 77
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt"
clip_type = "kc"
vocab_size = 49408
draw_false_text = 5
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
val_loss_names = _loss_names({"multiple_choice": 1})
@ex.named_config
def clip_kc_contrastive_3plus4_choice():
exp_name = "clip_kc_contrastive_3plus4_choice"
video_datasets = ["webvid", "howto100m", "webvid10m"]
image_datasets = ["cc3m", "cc12m", "yfcc15m", "laion400m"]
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
loss_names = _loss_names({"contrastive": 1})
batch_size = 1024
max_epoch = 10
max_text_len = 77
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt"
clip_type = "kc"
vocab_size = 49408
draw_false_text = 5
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
val_loss_names = _loss_names({"multiple_choice": 1})
@ex.named_config
def clip_kc_contrastive_cap_3plus4_choice():
exp_name = "clip_kc_contrastive_3plus4_choice"
video_datasets = ["webvid", "howto100m", "webvid10m"]
image_datasets = ["cc3m", "cc12m", "yfcc15m", "laion400m"]
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
loss_names = _loss_names({"contrastive": 1, "cap": 1})
batch_size = 1024
max_epoch = 10
max_text_len = 77
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt"
clip_type = "kc"
vocab_size = 49408
draw_false_text = 5
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
val_loss_names = _loss_names({"multiple_choice": 1})
@ex.named_config
def clip_kc_new_B16_vtc_cap_3plusM_choice():
exp_name = "clip_kc_new_L14_vtc_cap_3plusM_choice"
video_datasets = ["webvid", "howto100m", "webvid10m"]
image_datasets = ["mix100m"]
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
loss_names = _loss_names({"contrastive": 1, "cap": 1})
per_gpu_batchsize = 32
num_frames = 8
max_epoch = 10
max_text_len = 77
learning_rate = 1e-4
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt"
clip_type = "kc_new"
vocab_size = 49408
draw_false_text = 5
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
decay_power = "cosine"
clip_lr_mult = 0.1
weight_decay = 0.2
clip_evl_dropout = 0.0
clip_cap_decoder_n_layers = 6
warmup_steps = 4000
clip_alt_data = True
image_data_mult = 6
val_loss_names = _loss_names({"multiple_choice": 1})
@ex.named_config
def clip_kc_new_L14_vtc_cap_3plusM_choice():
exp_name = "clip_kc_new_L14_vtc_cap_3plusM_choice"
video_datasets = ["webvid", "howto100m", "webvid10m"]
image_datasets = ["mix100m"]
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
loss_names = _loss_names({"contrastive": 1, "cap": 1})
per_gpu_batchsize = 14
num_frames = 8
max_epoch = 10
max_text_len = 77
learning_rate = 8e-5
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-L-14.pt"
clip_type = "kc_new"
vocab_size = 49408
draw_false_text = 5
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
decay_power = "cosine"
clip_lr_mult = 0.1
weight_decay = 0.2
clip_evl_dropout = 0.0
clip_cap_decoder_n_layers = 6
warmup_steps = 4000
clip_alt_data = True
image_data_mult = 6
val_loss_names = _loss_names({"multiple_choice": 1})
@ex.named_config
def clip_kc_new_L14_336_vtc_cap_4plusM_choice():
exp_name = "clip_kc_new_L14_336_vtc_cap_4plusM_choice"
video_datasets = ["webvid", "howto100m", "webvid10m", "youtube"]
image_datasets = ["mix100m"]
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
loss_names = _loss_names({"contrastive": 1, "cap": 1})
image_size = 336
per_gpu_batchsize = 24
clip_use_checkpoint = True
clip_checkpoint_num = [23, 100, 100]
num_frames = 8
max_epoch = 2
max_steps = None
max_text_len = 77
learning_rate = 4e-6
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-L-14-336px.pt"
clip_type = "kc_new"
vocab_size = 49408
draw_false_text = 5
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
decay_power = "cosine"
weight_decay = 0.2
clip_evl_dropout = 0.0
clip_cap_decoder_n_layers = 6
warmup_steps = 2000
clip_alt_data = True
image_data_mult = 6
val_loss_names = _loss_names({"multiple_choice": 1})
# ============================== end: clip_kc pretrain ====================
@ex.named_config
def clip_finetune_msrvttqa():
exp_name = "clip_finetune_msrvtt_qa"
video_datasets = ["msrvttqa"]
image_datasets = []
loss_names = _loss_names({"openend_vqa": 1})
batch_size = 512
msrvttqa_label_size = 1501 # 1501 / 4540
max_epoch = 20
max_steps = None
warmup_steps = 0.1 # 0.1
draw_false_image = 1
draw_false_text = 1
learning_rate = 1e-4 # 1e-4 normal
val_check_interval = 1.0
lr_mult = 10
max_text_len = 77
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt"
clip_type = "ori"
@ex.named_config
def clip_finetune_tgifqa():
exp_name = "clip_finetune_tgif_qa"
video_datasets = ["tgif"]
image_datasets = []
loss_names = _loss_names({"openend_vqa": 1})
batch_size = 512
    msrvttqa_label_size = 1541 # vqa vocabulary length 1540 + 1 background
max_epoch = 20
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
learning_rate = 1e-4 # 1e-4
val_check_interval = 1.0
lr_mult = 10
max_text_len = 77
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt"
clip_type = "ori"
@ex.named_config
def clip_finetune_msvdqa():
exp_name = "clip_finetune_msvd_qa"
video_datasets = ["msvdqa"]
image_datasets = []
loss_names = _loss_names({"openend_vqa": 1}) # msvd have same number of answers with msrvtt
batch_size = 512
    msrvttqa_label_size = 1001 # vqa vocabulary length 1000 + 1 background
max_epoch = 20
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
learning_rate = 1e-4 # 1e-4
val_check_interval = 1.0
lr_mult = 10
max_text_len = 77
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt"
clip_type = "ori"
@ex.named_config
def clip_finetune_zs_k400():
exp_name = "clip_finetune_zs_k400"
video_datasets = ["k400_video"]
image_datasets = []
loss_names = _loss_names({"zs_classify": 1})
batch_size = 256
test_only = True
max_text_len = 77
clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt"
clip_type = "ori"
# ============================== end: clip_kc pretrain ====================
@ex.named_config
def clip_vtc_choice():
exp_name = "clip_vtc_choice"
video_datasets = ["webvid10m"]
image_datasets = []
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
loss_names = _loss_names({"contrastive": 1})
per_gpu_batchsize = 24
num_frames = 16
max_epoch = 10
max_text_len = 77
learning_rate = 1e-4
clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt"
clip_type = "ori"
vocab_size = 49408
draw_false_video = 0
draw_false_text = 5
decay_power = "cosine"
weight_decay = 0.2
warmup_steps = 4000
val_loss_names = _loss_names({"multiple_choice": 1})
@ex.named_config
def clip_vtc_mim_choice():
exp_name = "clip_vtc_mim_choice"
video_datasets = ["webvid10m"]
image_datasets = []
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
loss_names = _loss_names({"contrastive": 1, "mim": 1})
per_gpu_batchsize = 128
num_frames = 16
max_epoch = 10
max_text_len = 77
learning_rate = 1e-4
clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt"
clip_type = "ori"
vocab_size = 49408
draw_false_video = 0
draw_false_text = 5
decay_power = "cosine"
weight_decay = 0.2
warmup_steps = 4000
val_loss_names = _loss_names({"multiple_choice": 1})
mim_prob = 0.90
clip_mim_decoder_n_layers = 1
@ex.named_config
def clip_vtc_mim_mlm_choice():
exp_name = "clip_vtc_mim_mlm_choice"
video_datasets = ["webvid10m"]
image_datasets = []
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
loss_names = _loss_names({"contrastive": 1, "mim": 1, "mlm": 1})
per_gpu_batchsize = 128
num_frames = 16
max_epoch = 10
max_text_len = 77
learning_rate = 1e-4
clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt"
clip_type = "ori"
vocab_size = 49408
draw_false_video = 0
draw_false_text = 5
decay_power = "cosine"
weight_decay = 0.2
warmup_steps = 4000
val_loss_names = _loss_names({"multiple_choice": 1})
mim_prob = 0.90
clip_mim_decoder_n_layers = 1
@ex.named_config
def clip_vtc_mlm_choice():
exp_name = "clip_vtc_mlm_choice"
video_datasets = ["webvid10m"]
image_datasets = []
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
loss_names = _loss_names({"contrastive": 1, "mlm": 1})
per_gpu_batchsize = 128
num_frames = 16
max_epoch = 10
max_text_len = 77
learning_rate = 1e-4
clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt"
clip_type = "ori"
vocab_size = 49408
draw_false_video = 0
draw_false_text = 5
decay_power = "cosine"
weight_decay = 0.2
warmup_steps = 4000
val_loss_names = _loss_names({"multiple_choice": 1})
# = for msrvtt multiple choice
@ex.named_config
def clip_finetune_msrvttchoice():
exp_name = "clip_finetune_msrvtt_choice"
video_datasets = ["webvid10m"]
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
image_datasets = []
loss_names = _loss_names({"multiple_choice": 1})
num_frames = 16
batch_size = 512
max_epoch = 10
warmup_steps = 0.1
draw_false_text = 5 # 5 choices
learning_rate = 1e-4
val_check_interval = 0.5
max_text_len = 77
clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt"
clip_type = "ori"
# ============================== end: clip_kc new nc pretrain ====================
@ex.named_config
def clip_kc_nc_vtc_choice():
exp_name = "clip_kc_nc_vtc_choice"
video_datasets = ["webvid10m"]
image_datasets = []
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
loss_names = _loss_names({"contrastive": 1})
num_frames = 8
max_epoch = 10
max_text_len = 77
learning_rate = 1e-5
clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt"
clip_type = "kc_new"
vocab_size = 49408
draw_false_video = 0
draw_false_text = 5
decay_power = "cosine"
weight_decay = 0.2
warmup_steps = 4000
clip_freeze_text = True
val_loss_names = _loss_names({"multiple_choice": 1})
per_gpu_batchsize = 32
batch_size = 256
@ex.named_config
def clip_kc_nc_vtc_mim_nd_choice():
exp_name = "clip_kc_nc_vtc_mim_nd_choice"
video_datasets = ["webvid10m"]
image_datasets = []
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
loss_names = _loss_names({"contrastive": 1, "mim": 1})
num_frames = 8
max_epoch = 10
max_text_len = 77
learning_rate = 1e-5
clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt"
clip_type = "kc_new"
vocab_size = 49408
draw_false_video = 0
draw_false_text = 5
decay_power = "cosine"
weight_decay = 0.2
warmup_steps = 4000
clip_freeze_text = True
mim_prob = 0.90
val_loss_names = _loss_names({"multiple_choice": 1})
per_gpu_batchsize = 32
batch_size = 256
clip_mim_decoder_n_layers = 0
@ex.named_config
def clip_kc_nc_vtc_mlm_choice():
exp_name = "clip_kc_nc_vtc_mlm_choice"
video_datasets = ["webvid10m"]
image_datasets = []
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
loss_names = _loss_names({"contrastive": 1, "mlm": 1})
num_frames = 8
max_epoch = 10
max_text_len = 77
learning_rate = 1e-5
clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt"
clip_type = "kc_new"
vocab_size = 49408
draw_false_video = 0
draw_false_text = 5
decay_power = "cosine"
weight_decay = 0.2
warmup_steps = 4000
clip_freeze_text = True
val_loss_names = _loss_names({"multiple_choice": 1})
per_gpu_batchsize = 32
batch_size = 256
clip_mim_decoder_n_layers = 0
@ex.named_config
def clip_kc_nc_vtc_mim_nd_mlm_choice():
exp_name = "clip_kc_nc_vtc_mim_nd_mlm_choice"
video_datasets = ["webvid10m"]
image_datasets = []
train_transform_keys = ["open_clip"]
val_transform_keys = ["open_clip"]
val_datasets = ["msrvtt_choice", "lsmdc_choice"]
loss_names = _loss_names({"contrastive": 1, "mlm": 1, "mim": 1})
num_frames = 8
max_epoch = 10
max_text_len = 77
learning_rate = 1e-5
clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt"
clip_type = "kc_new"
vocab_size = 49408
draw_false_video = 0
draw_false_text = 5
decay_power = "cosine"
weight_decay = 0.2
warmup_steps = 4000
clip_freeze_text = True
val_loss_names = _loss_names({"multiple_choice": 1})
per_gpu_batchsize = 128
batch_size = 1024
clip_mim_decoder_n_layers = 0
# = for msrvtt multiple choice
@ex.named_config
def clip_kc_nc_finetune_msrvttchoice():
exp_name = "clip_kc_nc_finetune_msrvttchoice"
video_datasets=["msrvtt_choice", "lsmdc_choice"]
image_datasets = []
loss_names = _loss_names({"multiple_choice": 1})
num_frames = 8
batch_size = 512
max_epoch = 10
warmup_steps = 0.1
draw_false_text = 5 # 5 choices
learning_rate = 1e-4
val_check_interval = 0.5
max_text_len = 77
clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt"
clip_type = "kc_new"
test_only = True | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/config.py |
InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/__init__.py |
|
try:
from petrel_client.client import Client
client = Client()
# Disable boto logger
import logging
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
except Exception:
client = None
# == pretrain data
# = image
from .image.vg_caption_dataset import VisualGenomeCaptionDataset
from .image.coco_caption_karpathy_dataset import CocoCaptionKarpathyDataset
from .image.sbu_caption_dataset import SBUCaptionDataset
from .image.cc3m import CC3MDataset
from .image.cc12m import CC12MDataset
from .image.yfcc15m import YFCC15MDataset
from .image.laion400m import LAION400MDataset
from .image.conceptual_caption_dataset import ConceptualCaptionDataset
from .image.mix100m import MIX100MDataset
# = video
from .video.webvid import WEBVIDDataset
from .video.webvid10m import WEBVID10MDataset
from .video.howto100m import HT100MDataset
from .video.youtube import YOUTUBEDataset
from .video.yttemporal import YTTemporalDataset
# == downstream data
# = image
from .image.f30k_caption_karpathy_dataset import F30KCaptionKarpathyDataset
from .image.vqav2_dataset import VQAv2Dataset
from .image.nlvr2_dataset import NLVR2Dataset
from .image.vcr import VCRDataset
# = video
from .video.msrvtt import MSRVTTDataset
from .video.msrvttqa import MSRVTTQADataset
from .video.msrvtt_choice import MSRVTTChoiceDataset
from .video.msvd import MSVDDataset
from .video.lsmdc_dataset import LSMDCDataset
from .video.msvdqa import MSVDQADataset
from .video.ego4d import Ego4DDataset
from .video.tvqa import TVQADataset
from .video.lsmdc_choice import LSMDCChoiceDataset
from .video.ego4d_choice import EGO4DChoiceDataset
from .video.tgif import TGIFDataset
from .video.tgifqa import TGIFQADataset
from .video.didemo import DIDEMODataset
from .video.hmdb51 import HMDB51Dataset
from .video.ucf101 import UCF101Dataset
from .video.k400 import K400Dataset
from .video.activitynet import ActivityNetDataset
from .video.k400_video import K400VideoDataset | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/__init__.py |
from .video_base_dataset import BaseDataset, read_large_frames_decord, get_video_len
import os
import pandas as pd
class EGO4DChoiceDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if self.split == "train":
Exception("no train data provided")
self.metadata = None
self.ans_lab_dict = None
if split == "train":
names = ["ego4d_choice_train"]
elif split == "val":
names = ["ego4d_choice_val"]
elif split == "test":
names = ["ego4d_choice_test"] # vqav2_test-dev for test-dev
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="unknown",
remove_duplicate=False,
)
self._load_metadata()
def _load_metadata(self):
metadata_dir = './meta_data/ego4d'
split_files = {
'train': 'mc_val.csv', # no train and test available, only for zero-shot testing
'val': 'mc_val.csv',
'test': 'mc_val.csv'
}
target_split_fp = split_files[self.split]
self.metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep=',', header=0, on_bad_lines='skip')
def _get_video_path(self, sample):
rel_video_fp = eval(sample["question"])[0] + '.mp4'
full_video_fp = os.path.join(self.data_dir, 'videos', rel_video_fp)
        if not os.path.exists(full_video_fp):
            raise IOError(f"video not found: {full_video_fp}")
return full_video_fp, rel_video_fp
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_video_path(sample)
frame_loc = eval(sample["question"])[1]
frame_end = get_video_len(abs_fp)
imgs = read_large_frames_decord(abs_fp, frame_loc, frame_end, self.num_frames, mode=self.split)
if imgs is None:
raise Exception("Invalid video!", rel_fp)
else:
return imgs
def get_text(self, sample):
texts = []
for answer in eval(sample["answers"]):
text = answer[-1]
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
texts.append((text, encoding))
return texts
def get_answer_label(self, sample):
gt_text = eval(sample["question"])[-1]
answer_label = 0
for index, answer in enumerate(eval(sample["answers"])):
if answer[-1] == gt_text:
answer_label = index
return answer_label
def __getitem__(self, index):
sample = self.metadata.iloc[index]
# print(sample)
video_tensor = self.get_video(sample)
# index, question_index = self.index_mapper[index]
qid = index
answer = self.get_answer_label(sample)
ret = {
"video": video_tensor,
"vid_index": index,
"cap_index": index,
"raw_index": index,
'answer': answer
}
texts = self.get_text(sample)
ret["text"] = texts[0]
# print(len(texts))
for i in range(self.draw_false_text - 1):
ret.update({f"false_text_{i}": texts[i+1]})
return ret
def __len__(self):
return len(self.metadata) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d_choice.py |
import numpy as np
from .video_base_dataset import BaseDataset, read_frames_gif
import os
import json
import pandas as pd
import random
# 2022.1.28: reading GIFs is too slow; it may be worth speeding this up by converting
# gif -> video first (a sketch of such a conversion follows), see e.g.
# https://stackify.dev/833655-python-convert-gif-to-videomp4
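# A minimal sketch (not used by this dataset) of the gif -> mp4 conversion suggested
# above; it assumes ffmpeg-python is available, as in the other video loaders of this
# repo, and the output settings below are illustrative rather than tuned.
def _convert_gif_to_mp4(gif_path, mp4_path, fps=10):
    import ffmpeg  # local import keeps the dataset's own dependencies unchanged
    (
        ffmpeg
        .input(gif_path)
        .filter('fps', fps=fps)
        .filter('scale', 'trunc(iw/2)*2', 'trunc(ih/2)*2')  # even dimensions for yuv420p
        .output(mp4_path, pix_fmt='yuv420p', movflags='faststart')
        .run(quiet=True, overwrite_output=True)
    )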
class TGIFDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self.ans_lab_dict = None
if split == "train":
names = ["tgif_train"]
elif split == "val":
names = ["tgif_val"]
elif split == "test":
names = ["tgif_test"]
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
# self.num_frames = 4
self._load_metadata()
        self.data_dir = "/mnt/lustre/share_data/heyinan/data/tgif"  # TODO: remove this hard-coded path
def _load_metadata(self):
metadata_dir = './meta_data/tgif'
split_files = {
'train': 'frameqa_train.jsonl',
'val': 'frameqa_test.jsonl', # frameqa_val.jsonl
'test': 'frameqa_test.jsonl'
}
target_split_fp = split_files[self.split]
answer_fp = os.path.join(metadata_dir, 'frameqa_trainval_ans2label.json')
# answer_fp = os.path.join(metadata_dir, 'msrvtt_qa_ans2label.json')
with open(answer_fp, 'r') as JSON:
self.ans_lab_dict = json.load(JSON)
# path_or_buf=os.path.join(metadata_dir, target_split_fp)
metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
self.metadata = metadata
def _get_video_path(self, sample):
return os.path.join(self.data_dir, 'gifs', sample['gif_name']) + '.gif', sample['gif_name'] + '.gif'
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_video_path(sample)
imgs, idxs, vlen = read_frames_gif(abs_fp, self.num_frames, mode=self.split)
if imgs is None:
raise Exception("Invalid img!", rel_fp)
else:
return imgs
def get_text(self, sample):
text = sample['question']
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return (text, encoding)
def get_answer_label(self, sample):
text = sample['answer']
ans_total_len = len(self.ans_lab_dict) + 1 # one additional class
try:
ans_label = self.ans_lab_dict[text] #
except KeyError:
ans_label = -100 # ignore classes
# ans_label = 1500 # other classes
scores = np.zeros(ans_total_len).astype(int)
scores[ans_label] = 1
return text, ans_label, scores
# return text, ans_label_vector, scores
def __getitem__(self, index):
result = None
while result is None:
sample = self.metadata.iloc[index]
try:
video_tensor = self.get_video(sample)
text = self.get_text(sample)
# index, question_index = self.index_mapper[index]
qid = index
result = True
except Exception as e:
gif_name = sample["gif_name"]
print(f"Error while read file idx {gif_name}")
assert self.split != "test"
index = random.randint(0, len(self.metadata) - 1)
if self.split != "test":
answers, labels, scores = self.get_answer_label(sample)
else:
answers = list()
labels = list()
scores = list()
return {
"video": video_tensor,
"text": text,
"vqa_answer": answers,
"vqa_labels": labels,
"vqa_scores": scores,
"qid": qid,
}
def __len__(self):
return len(self.metadata) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/tgif.py |
from .video_base_dataset import BaseDataset, sample_frames, video_clip_reader, clean_subtitles, align_using_dtw
import torch as th
import pandas as pd
import os
import numpy as np
import random
import ffmpeg
import json
import ftfy
class YTTemporalDataset(BaseDataset):
"""YTTemporal Video-Text loader."""
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["yttemporal_train"]
elif split == "val":
names = ["yttemporal_val"]
elif split == "test":
names = ["yttemporal_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self.metadata = None
self._load_metadata()
self.min_time = 4.0
self.size = 224
self.fps = 2
self.num_sec = self.num_frames / float(self.fps)
self.crop_only = True
if self.split == 'train':
self.center_crop = False
else:
self.center_crop = True
self.benchmark = False
self.num_candidates = 1
self.random_flip = True
def _load_metadata(self):
metadata_dir = './meta_data/yttemporal'
split_files = {
'train': 'train_success_2000000.csv', # _1000000
'val': 'val_success.csv', # there is no test
'test': 'val_success.csv'
}
target_split_fp = split_files[self.split]
metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
self.metadata = metadata["Name"]
def read_frames_ffmpeg(self, video_path, start, end):
start_seek = start
cmd = (
ffmpeg
.input(video_path, ss=start_seek, t=end-start + 0.01)
.filter('fps', fps=self.fps)
)
if self.center_crop:
aw, ah = 0.5, 0.5
else:
aw, ah = random.uniform(0, 1), random.uniform(0, 1)
if self.crop_only:
cmd = (
cmd.crop('(iw - {})*{}'.format(self.size, aw),
'(ih - {})*{}'.format(self.size, ah),
str(self.size), str(self.size))
)
else:
cmd = (
cmd.crop('(iw - min(iw,ih))*{}'.format(aw),
'(ih - min(iw,ih))*{}'.format(ah),
'min(iw,ih)',
'min(iw,ih)')
.filter('scale', self.size, self.size)
)
if self.random_flip and random.uniform(0, 1) > 0.5:
cmd = cmd.hflip()
out, _ = (
cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')
.run(capture_stdout=True, quiet=True)
)
video = np.frombuffer(out, np.uint8).reshape([-1, self.size, self.size, 3])
video_tensor = th.from_numpy(np.copy(video))
video_tensor = video_tensor.permute(3, 0, 1, 2) + 0.01
# print(video_tensor.size())
# print(video_tensor)
if video_tensor.shape[1] < self.num_frames:
zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8)
video_tensor = th.cat((video_tensor, zeros), axis=1)
# # uniform n frames
# frame_indexs = sample_frames(self.num_frames, video_tensor.size(1))
# out_tensors = th.ones((3, self.num_frames, self.size, self.size), dtype=th.uint8)
# for i in range(self.num_frames):
# out_tensors[:, i] = video_tensor[:, frame_indexs[i]]
# print(out_tensors)
# return out_tensors
return video_tensor[:, :self.num_frames]
# # sample fix number of words
# def get_caption(self, caption_csv, words_len=32):
# with open(caption_csv, 'r') as f:
# cap = json.load(f)
# # random choice words_len words
# video_len = int(cap["info"]["duration"])
# all_text = cap["subtitles"] # [{'word': 'hey', 'time': 0.0}, {'word': 'guys', 'time': 0.149}]
# word_count = len(all_text)
#
# # clean noisy asr text
# all_text = clean_subtitles(all_text)
# vtt = pd.DataFrame(all_text)
# denoised_word_by_word = []
# for x in cap['denoised']:
# # Ftfy just in case
# cleanasr = ftfy.ftfy(x['cleanasr'])
# denoised_word_by_word += cleanasr.split(' ')
# # Align
# vtt['denoised'] = align_using_dtw(vtt['word'], denoised_word_by_word)
# max_word = min(word_count - 1, words_len)
# begin_word_index = random.randint(0, word_count - max_word)
# text = ""
# for i in range(max_word):
# text += vtt['denoised'][begin_word_index + i] + ' '
# start = float(all_text[begin_word_index]['time'])
# end = float(all_text[min(word_count-1, begin_word_index + max_word)]['time'])
# # print(text, start, end)
# return text, start, end, video_len
    # sample a clip of fixed (10-15 s) length
def get_caption(self, caption_csv):
with open(caption_csv, 'r') as f:
cap = json.load(f)
# random choice 10s-15s video clips
video_len = int(cap["info"]["duration"])
start = random.randint(0, max(1, video_len-15)) + random.random()
clip_len = random.randint(10, 15)
end = min(video_len-1, start + clip_len)
all_text = cap["subtitles"] # [{'word': 'hey', 'time': 0.0}, {'word': 'guys', 'time': 0.149}]
# clean noisy asr text
all_text = clean_subtitles(all_text)
vtt = pd.DataFrame(all_text)
denoised_word_by_word = []
for x in cap['denoised']:
# Ftfy just in case
cleanasr = ftfy.ftfy(x['cleanasr'])
denoised_word_by_word += cleanasr.split(' ')
# Align
vtt['denoised'] = align_using_dtw(vtt['word'], denoised_word_by_word)
text = ""
origin_text = ""
for index, item in enumerate(all_text):
if float(item['time']) > start and float(item['time']) < end:
text += vtt['denoised'][index] + " "
origin_text += item['word'] + " "
# print(text)
# print(origin_text)
        if len(text) < 10:
            raise IndexError("caption too short: {}".format(caption_csv))
if end - start < self.min_time:
diff = self.min_time - end + start
start = max(0, start - diff / 2)
end = start + self.min_time
return text, start, end, video_len
def get_text(self, sample):
caption_csv = self.get_caption_path(sample)
text, start, end, duration = self.get_caption(caption_csv)
# print(text, start, end)
# print(text)
# TODO: May need to be improved for edge cases.
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {"text": (text, encoding)}, start, end, duration
def get_caption_path(self, sample):
# YTTemporal/videos/subset_87/data/xx.mp4 -> YTTemporal/videos/subset_87/annotations/xx.csv
return os.path.join(self.data_dir, 'videos', sample.split('/')[0], 'annotations', sample.split('/')[-1][:-4] + '.json')
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
caption_csv = self.get_caption_path(sample)
        text, start, end, duration = self.get_caption(caption_csv)
encoding = self.tokenizer(
text,
# padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def _get_video_path(self, sample):
rel_video_fp = sample
full_video_fp = os.path.join(self.data_dir, 'videos', rel_video_fp)
return full_video_fp, rel_video_fp
def get_raw_video(self, sample, begin, end, duration):
abs_fp, rel_fp = self._get_video_path(sample)
# print(abs_fp, rel_fp)
# imgs = self.read_frames_ffmpeg(abs_fp, begin, end).permute(1, 0, 2, 3)
videos = video_clip_reader(abs_fp, begin, end, duration, self.num_frames)
if videos.size(0) != self.num_frames:
raise Exception("video length not enough!", rel_fp)
if videos is None:
raise Exception("Invalid img!", rel_fp)
else:
return videos
def get_video(self, sample, start, end, duration):
videos = self.get_raw_video(sample, start, end, duration)
videos_tensor = self.video_aug(videos, self.video_transform)
return videos_tensor
def get_false_video(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
caption_csv = self.get_caption_path(sample)
_, start, end, duration = self.get_caption(caption_csv)
videos = self.get_raw_video(sample, start, end, duration)
videos_tensor = self.video_aug(videos, self.video_transform)
return {f"false_video_{rep}": videos_tensor}
def get_suite(self, index):
result = None
max_try = 5
try_time = 0
while result is None:
try_time += 1
sample = self.metadata.iloc[index]
# try:
ret = dict()
text, start, end, duration = self.get_text(sample)
ret.update(text)
videos_tensor = self.get_video(sample, start, end, duration)
# print(imgs_tensor.size())
ret.update({
"video": videos_tensor,
"vid_index": index,
"cap_index": index,
"raw_index": index,
})
ret.update({"replica": True if ret["cap_index"] > 0 else False})
for i in range(self.draw_false_video):
ret.update(self.get_false_video(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
# except Exception as e:
# # print(f"Error while read file idx {sample} in {self.names[0]} -> {e}")
# index = random.randint(0, len(self.metadata) - 1)
if try_time > max_try:
print(f"Exceed max time Error while read file idx {sample} in {self.names[0]}")
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/yttemporal.py |
import numpy as np
from .video_base_dataset import BaseDataset
import os
import json
import pandas as pd
class MSRVTTQADataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
# if split == "test":
# split = "val"
self.split = split
self.metadata = None
self.ans_lab_dict = None
if split == "train":
names = ["msrvtt_qa_train"]
# names = ["msrvtt_qa_train", "msrvtt_qa_val"]
elif split == "val":
names = ["msrvtt_qa_test"] # ["msrvtt_qa_val"]
elif split == "test":
names = ["msrvtt_qa_test"] # vqav2_test-dev for test-dev
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
self.names = names
# self.num_frames = 4
self._load_metadata()
def _load_metadata(self):
metadata_dir = './meta_data/msrvtt'
split_files = {
'train': 'msrvtt_qa_train.jsonl',
'val': 'msrvtt_qa_test.jsonl',
'test': 'msrvtt_qa_test.jsonl'
}
answer_fp = os.path.join(metadata_dir, 'msrvtt_train_ans2label.json') # 1500 in total (all classes in train)
# answer_fp = os.path.join(metadata_dir, 'msrvtt_qa_ans2label.json') # 4539 in total (all classes in train+val+test)
answer_clip_id = os.path.join(metadata_dir, 'msrvtt_clip_id.json')
with open(answer_fp, 'r') as JSON:
self.ans_lab_dict = json.load(JSON)
with open(answer_clip_id, 'r') as JSON:
self.ans_clip_id = json.load(JSON)
for name in self.names:
split = name.split('_')[-1]
target_split_fp = split_files[split]
# path_or_buf=os.path.join(metadata_dir, target_split_fp)
metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
if self.metadata is None:
self.metadata = metadata
else:
self.metadata.update(metadata)
print("total {} samples for {}".format(len(self.metadata), self.names))
# data1.update(data2)
def get_text(self, sample):
text = sample['question']
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return (text, encoding)
def get_answer_label(self, sample):
text = sample['answer']
ans_total_len = len(self.ans_lab_dict) + 1 # one additional class
try:
ans_label = self.ans_lab_dict[text] #
except KeyError:
ans_label = -100 # ignore classes
# ans_label = 1500 # other classes
scores = np.zeros(ans_total_len).astype(int)
scores[ans_label] = 1
return text, ans_label, scores
# return text, ans_label_vector, scores
def __getitem__(self, index):
sample = self.metadata.iloc[index]
video_tensor = self.get_video(sample)
text = self.get_text(sample)
# index, question_index = self.index_mapper[index]
qid = index
        # answers are available for every split used here, so the earlier
        # test-split special case was dead code
        answers, labels, scores = self.get_answer_label(sample)
return {
"video": video_tensor,
"text": text,
"vqa_answer": answers,
"vqa_labels": labels,
"vqa_scores": scores,
"qid": qid,
"ans_clip_id": self.ans_clip_id,
}
def __len__(self):
return len(self.metadata) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvttqa.py |
from .video_base_dataset import BaseDataset
import torch as th
import pandas as pd
import os
import numpy as np
import random
import ffmpeg
import io
import decord
decord.bridge.set_bridge('torch')
from CoTrain.datasets import client
class HT100MDataset(BaseDataset):
"""HowTo100M Video-Text loader."""
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["howto100m_train"]
elif split == "val":
names = ["howto100m_val"]
elif split == "test":
names = ["howto100m_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self.metadata = None
self._load_metadata()
# for howto100
self.min_time = 4.0
self.size = 256
self.fps = 2
self.num_sec = self.num_frames / float(self.fps)
self.crop_only = True
if self.split == 'train':
self.center_crop = False
else:
self.center_crop = True
self.benchmark = False
self.num_candidates = 1
self.random_flip = True
if "s3://" in self.data_dir:
# Read from local
self.caption_dir = os.path.join("./meta_data/howto100m", 'howto100m_csv')
# self.caption_dir = os.path.join("./meta_data/howto100m", 'howto100m_csv')
else:
self.caption_dir = os.path.join(self.data_dir, 'howto100m_csv')
print(names, ": ", len(self.metadata), "samples in total.")
def _load_metadata(self):
metadata_dir = './meta_data/howto100m_our'
split_files = {
'train': 'ht100_videos_split.csv',
'val': 'ht100_videos_split_val.csv', # there is no test
'test': 'ht100_videos_split_val.csv'
}
target_split_fp = split_files[self.split]
metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
self.metadata = metadata["Name"]
def read_frames_ffmpeg(self, video_path, start, end):
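        # Decode a clip of roughly `num_sec` seconds starting at a random point inside
        # [start, end] directly with ffmpeg at `self.fps`, then take a random (train) or
        # center (eval) `size` x `size` crop and an optional horizontal flip.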
start_seek = random.randint(int(start), int(max(start, end - self.num_sec)))
cmd = (
ffmpeg
.input(video_path, ss=start_seek, t=self.num_sec + 0.01)
.filter('fps', fps=self.fps)
)
if self.center_crop:
aw, ah = 0.5, 0.5
else:
aw, ah = random.uniform(0, 1), random.uniform(0, 1)
if self.crop_only:
cmd = (
cmd.crop('(iw - {})*{}'.format(self.size, aw),
'(ih - {})*{}'.format(self.size, ah),
str(self.size), str(self.size))
)
else:
cmd = (
cmd.crop('(iw - min(iw,ih))*{}'.format(aw),
'(ih - min(iw,ih))*{}'.format(ah),
'min(iw,ih)',
'min(iw,ih)')
.filter('scale', self.size, self.size)
)
if self.random_flip and random.uniform(0, 1) > 0.5:
cmd = cmd.hflip()
out, _ = (
cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24').run(capture_stdout=True, quiet=True)
)
# print(np.frombuffer(out, np.uint8).shape)
video = np.frombuffer(out, np.uint8).reshape([-1, self.size, self.size, 3])
video_tensor = th.from_numpy(np.copy(video))
        video_tensor = video_tensor.permute(3, 0, 1, 2) + 0.01  # prevent an all-dark video
if video_tensor.shape[1] < self.num_frames:
zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8)
video_tensor = th.cat((video_tensor, zeros), axis=1)
return video_tensor[:, :self.num_frames]
def read_frames_decord(self, video_path, start, end):
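        # Same sampling policy as the ffmpeg path, but decoded with decord from a bytes
        # object fetched through `client`: pick `num_frames` evenly spaced frames from a
        # random window inside [start, end], then crop and optionally flip.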
        # Build the decord container
video_bytes = client.get(video_path)
assert video_bytes is not None, "Get video failed from {}".format(video_path)
if isinstance(video_bytes, bytes):
video_bytes = io.BytesIO(video_bytes)
container = decord.VideoReader(video_bytes, ctx=decord.cpu(0))
real_fps = container.get_avg_fps()
start_frame, end_frame = real_fps * start, real_fps * end
num_real_frames = self.num_frames / self.fps * real_fps
start_seek = random.randint(int(start_frame), int(max(start_frame, end_frame - num_real_frames)))
indexes = np.linspace(start_seek, start_seek + num_real_frames, self.num_frames, endpoint=False, dtype=int)
indexes = [x for x in indexes if x < len(container)]
assert len(indexes) > 0, "Failed to decode from {}".format(video_path)
frames = container.get_batch(indexes) # [T, H, W, C]
_, H, W, _ = frames.shape
assert self.crop_only
if self.center_crop:
aw, ah = 0.5, 0.5
else:
aw, ah = random.uniform(0, 1), random.uniform(0, 1)
top = int((H - self.size) * ah)
left = int((W - self.size) * aw)
bottom, right = top + self.size, left + self.size
frames = frames[:, top:bottom, left:right, :]
if self.random_flip and random.uniform(0, 1) > 0.5:
frames = frames.flip([-2])
        video_tensor = frames.permute(3, 0, 1, 2) + 0.01  # prevent an all-dark video
if video_tensor.shape[1] < self.num_frames:
zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8)
video_tensor = th.cat((video_tensor, zeros), axis=1)
# assert False, (video_tensor.min(), video_tensor.max())
# assert False, video_tensor.shape
return video_tensor[:, :self.num_frames]
    # NOTE: reading the caption csv can be more time-consuming than decoding the video;
    # the csv files were moved between storage backends (cfs -> ceph)
def get_caption(self, caption):
cap = pd.read_csv(caption)
ind = random.randint(0, len(cap) - 1)
text = cap['text'].values[ind]
start, end = cap['start'].values[ind], cap['end'].values[ind]
if end - start < self.min_time:
diff = self.min_time - end + start
start = max(0, start - diff / 2)
end = start + self.min_time
return text, start, end
def get_text(self, sample):
caption_csv = self.get_caption_path(sample)
text, start, end = self.get_caption(caption_csv)
# print(text)
# TODO: May need to be improved for edge cases.
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {"text": (text, encoding)}, int(start), int(end)
def get_caption_path(self, sample):
# example xx/xx/xx.mp4 -> xx.csv
return os.path.join(self.caption_dir, sample.split('/')[-1].split('.')[0] + '.csv')
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
caption_csv = self.get_caption_path(sample)
text, start, end = self.get_caption(caption_csv)
encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def _get_video_path(self, sample):
rel_video_fp = sample
if "s3://" in self.data_dir:
rel_video_fp = sample.split("/")[-1]
rel_video_fp = rel_video_fp.split(".")[0] + ".mp4" # All video on ceph is mp4
full_video_fp = os.path.join(self.data_dir, rel_video_fp)
return full_video_fp, rel_video_fp
def get_raw_video(self, sample, begin, end):
abs_fp, rel_fp = self._get_video_path(sample)
if "s3://" in abs_fp:
videos = self.read_frames_decord(abs_fp, begin, end).permute(1, 0, 2, 3)
else:
videos = self.read_frames_ffmpeg(abs_fp, begin, end).permute(1, 0, 2, 3)
if videos is None:
raise Exception("Invalid img!", rel_fp)
else:
return videos
def get_video(self, sample, start, end):
videos = self.get_raw_video(sample, start, end)
videos_tensor = self.video_aug(videos, self.video_transform, byte=True)
return videos_tensor
def get_false_video(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
caption_csv = self.get_caption_path(sample)
_, start, end = self.get_caption(caption_csv)
videos = self.get_raw_video(sample, start, end)
videos_tensor = self.video_aug(videos, self.video_transform, byte=True)
return {f"false_video_{rep}": videos_tensor}
def get_suite(self, index):
result = None
max_try = 10
try_time = 0
while result is None:
try_time += 1
sample = self.metadata.iloc[index]
try:
ret = dict()
text, start, end = self.get_text(sample)
ret.update(text)
videos_tensor = self.get_video(sample, start, end)
ret.update({
"video": videos_tensor,
"vid_index": index,
"cap_index": index,
"raw_index": index,
})
ret.update({"replica": True if ret["cap_index"] > 0 else False})
for i in range(self.draw_false_video):
ret.update(self.get_false_video(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
index = random.randint(0, len(self.metadata) - 1)
exc = e
if try_time > max_try:
print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}")
try_time = 0
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/howto100m.py |
from .video_base_dataset import BaseDataset
import os
import pandas as pd
import cv2
import torch
from CoTrain.datasets.video.video_base_dataset import sample_frames
# each sample: https://tvqa.cs.unc.edu/download_tvqa_plus.html
# {
# "answer_idx": "1",
# "qid": 134094,
# "ts": [5.99, 11.98],
# "a1": "Howard is talking to Raj and Leonard",
# "a0": "Howard is talking to Bernadette",
# "a3": "Howard is talking to Leonard and Penny",
# "a2": "Howard is talking to Sheldon , and Raj",
# "q": "Who is Howard talking to when he is in the lab room ?",
# "vid_name": "s05e02_seg02_clip_00",
# "a4": "Howard is talking to Penny and Bernadette",
# "bbox": {
# "14": [
# {
# "img_id": 14,
# "top": 153,
# "label": "Howard",
# "width": 180,
# "height": 207,
# "left": 339
# },
# {
# "img_id": 14,
# "top": 6,
# "label": "lab",
# "width": 637,
# "height": 354,
# "left": 3
# },
# ...
# ],
# "20": [ ... ],
# "26": [ ... ],
# "32": [ ... ],
# "38": [ ... ]
# }
# }
class TVQAPLUSDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self._load_metadata()
if split == "train":
names = ["tvqaplus_train"]
elif split == "val":
names = ["tvqaplus_val"]
elif split == "test":
names = ["tvqaplus_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
# for appear objects
self.only_use_relevant_dets = True
if self.only_use_relevant_dets:
self.relevant_dets = [] # resort the detection numbers
self.relevant_dets_classes = []
def _load_metadata(self):
# download specific
metadata_dir = './meta_data/tvqa'
split_files = {
'train': 'tvqa_plus_train.jsonl',
'val': 'tvqa_plus_val.jsonl',
'test': 'tvqa_plus_test_public.jsonl' # no GT label for test set
}
target_split_fp = split_files[self.split]
metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
self.metadata = metadata
def _get_image_path(self, sample):
rel_fp = sample['vid_name']
return os.path.join(self.data_dir, rel_fp), rel_fp
def _get_caption(self, sample):
return sample[0]
    # tvqaplus provides pre-sampled frames (3 fps)
    # TODO: consider sampling one frame together with its bounding box
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_image_path(sample)
[beg_time, end_time] = sample['ts']
clip_len = int((float(end_time) - float(beg_time)) * 3)
rel_frame_index = sample_frames(self.num_frames, clip_len)
# sample N frames here
frames = []
for index in rel_frame_index:
img = cv2.imread(abs_fp + '{}.jpg'.format(index))
frame = torch.from_numpy(img).byte()
frame = frame.permute(2, 0, 1)
frames.append(frame)
frames = torch.stack(frames).permute(1, 0, 2, 3)
return frames
def get_text(self, sample):
question = self.get_question(sample)
qa_texts = []
# 5 choices
for i in range(5):
raw_text = question + "[SEP]" + sample["a{}".format(i)]
qa_encoding = self.tokenizer(
raw_text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
qa_texts.append((raw_text, qa_encoding))
return qa_texts
def get_answer_label(self, sample):
answer = int(sample['answer_idx'])
return answer
def get_question(self, sample):
return sample["q"]
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
sample = self.metadata.iloc[index]
        self.relevant_dets = []  # initialize
self.relevant_dets_classes = []
answer = self.get_answer_label(sample)
ret = {
"vid_index": index,
"cap_index": index,
"raw_index": index,
'answer': answer
}
qa_texts = self.get_text(sample)
ret["text"] = qa_texts[0]
for i in range(self.draw_options_text - 1):
ret.update({f"options_text_{i}": qa_texts[i+1]})
video_tensor = self.get_video(sample)
ret["video"] = video_tensor
return ret
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/tvqaplus.py |
from .video_base_dataset import BaseDataset
import random
import os
import pandas as pd
class LSMDCDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
if split == "train":
names = ["lsmdc_train"]
elif split == "val":
names = ["lsmdc_val"]
elif split == "test":
names = ["lsmdc_test"]
self._load_metadata()
# self.num_frames = kwargs['num_frames']
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def _load_metadata(self):
metadata_dir = './meta_data/lsmdc'
split_files = {
'train': 'LSMDC16_annos_training.csv',
'val': 'LSMDC16_challenge_1000_publictect.csv', # LSMDC16_annos_val.csv
'test': 'LSMDC16_challenge_1000_publictect.csv'
}
target_split_fp = split_files[self.split]
        metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t', header=None, on_bad_lines='skip')
self.metadata = metadata
print("load split {}, {} samples".format(self.split, len(metadata)))
def _get_video_path(self, sample):
# e.g. 3009_BATTLE_LOS_ANGELES_00.03.07.170-00.03.09.675 -> 3009_BATTLE_LOS_ANGELES/3009_BATTLE_LOS_ANGELES_00.03.07.170-00.03.09.675
sub_dir = '_'.join(sample[0].split('_')[:-1])
rel_video_fp = sample[0] + '.avi'
full_video_fp = os.path.join(self.data_dir, sub_dir, rel_video_fp)
return full_video_fp, rel_video_fp
    def _get_caption(self, sample):
        # captions may contain several comma-separated variants; one is sampled at
        # random (the same strategy is currently used for every split)
        words = sample[0].split(',')
        num_word = len(words)
        index = random.randint(0, num_word - 1)
        caption = words[index]
        return caption
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/lsmdc_dataset.py |
from .video_base_dataset import BaseDataset
import torch as th
import pandas as pd
import os
import numpy as np
import random
import ffmpeg
import io
import decord
import re
decord.bridge.set_bridge('torch')
from CoTrain.datasets import client
class YOUTUBEDataset(BaseDataset):
"""Youtube Video-Text loader."""
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["youtube_train"]
elif split == "val":
names = ["youtube_val"]
elif split == "test":
names = ["youtube_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self.metadata = None
self._load_metadata()
# for howto100
self.min_time = 4.0
self.size = 256
self.fps = 2
self.num_sec = self.num_frames / float(self.fps)
self.crop_only = True
if self.split == 'train':
self.center_crop = False
else:
self.center_crop = True
self.benchmark = False
self.num_candidates = 1
self.random_flip = True
self.data_dir = "s3://youtubeBucket/videos/"
        self.caption_dir = "s3://liyizhuo/youtubeProcessed/"  # TODO: remove this hard-coded path
print(names, ": ", len(self.metadata), "samples in total.")
def _load_metadata(self):
metadata_dir = './meta_data/youtube'
split_files = {
'train': 'youtube_train.csv',
'val': 'youtube_val.csv', # there is no test
'test': 'youtube_val.csv'
}
target_split_fp = split_files[self.split]
metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
self.metadata = metadata["Name"]
def read_frames_ffmpeg(self, video_path, start, end):
start_seek = random.randint(int(start), int(max(start, end - self.num_sec)))
cmd = (
ffmpeg
.input(video_path, ss=start_seek, t=self.num_sec + 0.01)
.filter('fps', fps=self.fps)
)
if self.center_crop:
aw, ah = 0.5, 0.5
else:
aw, ah = random.uniform(0, 1), random.uniform(0, 1)
if self.crop_only:
cmd = (
cmd.crop('(iw - {})*{}'.format(self.size, aw),
'(ih - {})*{}'.format(self.size, ah),
str(self.size), str(self.size))
)
else:
cmd = (
cmd.crop('(iw - min(iw,ih))*{}'.format(aw),
'(ih - min(iw,ih))*{}'.format(ah),
'min(iw,ih)',
'min(iw,ih)')
.filter('scale', self.size, self.size)
)
if self.random_flip and random.uniform(0, 1) > 0.5:
cmd = cmd.hflip()
out, _ = (
cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24').run(capture_stdout=True, quiet=True)
)
# print(np.frombuffer(out, np.uint8).shape)
video = np.frombuffer(out, np.uint8).reshape([-1, self.size, self.size, 3])
video_tensor = th.from_numpy(np.copy(video))
        video_tensor = video_tensor.permute(3, 0, 1, 2) + 0.01  # prevent an all-dark video
if video_tensor.shape[1] < self.num_frames:
zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8)
video_tensor = th.cat((video_tensor, zeros), axis=1)
return video_tensor[:, :self.num_frames]
def read_frames_decord(self, video_path, start, end):
        # Build the decord container
video_bytes = client.get(video_path)
assert video_bytes is not None, "Get video failed from {}".format(video_path)
if isinstance(video_bytes, bytes):
video_bytes = io.BytesIO(video_bytes)
container = decord.VideoReader(video_bytes, ctx=decord.cpu(0))
real_fps = container.get_avg_fps()
start_frame, end_frame = real_fps * start, real_fps * end
num_real_frames = self.num_frames / self.fps * real_fps
start_seek = random.randint(int(start_frame), int(max(start_frame, end_frame - num_real_frames)))
indexes = np.linspace(start_seek, start_seek + num_real_frames, self.num_frames, endpoint=False, dtype=int)
indexes = [x for x in indexes if x < len(container)]
assert len(indexes) > 0, "Failed to decode from {}".format(video_path)
frames = container.get_batch(indexes) # [T, H, W, C]
_, H, W, _ = frames.shape
assert self.crop_only
if self.center_crop:
aw, ah = 0.5, 0.5
else:
aw, ah = random.uniform(0, 1), random.uniform(0, 1)
top = int((H - self.size) * ah)
left = int((W - self.size) * aw)
bottom, right = top + self.size, left + self.size
frames = frames[:, top:bottom, left:right, :]
if self.random_flip and random.uniform(0, 1) > 0.5:
frames = frames.flip([-2])
        video_tensor = frames.permute(3, 0, 1, 2) + 0.01  # prevent an all-dark video
if video_tensor.shape[1] < self.num_frames:
zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8)
video_tensor = th.cat((video_tensor, zeros), axis=1)
# assert False, (video_tensor.min(), video_tensor.max())
# assert False, video_tensor.shape
return video_tensor[:, :self.num_frames]
    # NOTE: reading the caption csv can be more time-consuming than decoding the video;
    # the csv files were moved between storage backends (cfs -> ceph)
def get_caption(self, caption):
        lines = client.get(caption).decode().split("\n")
        # the first line is assumed to be a header (as in the other caption csvs);
        # skip it and any trailing empty line before sampling
        lines = [l for l in lines[1:] if l.strip()]
        ind = random.randint(0, len(lines) - 1)
        line = lines[ind].split(",")
start, end = float(line[0]), float(line[1])
text = ",".join(line[2:])
# text = cap['text'].values[ind]
# start, end = cap['start'].values[ind], cap['end'].values[ind]
if end - start < self.min_time:
diff = self.min_time - end + start
start = max(0, start - diff / 2)
end = start + self.min_time
return text, start, end
def get_text(self, sample):
caption_csv = self.get_caption_path(sample)
text, start, end = self.get_caption(caption_csv)
# print(text)
# TODO: May need to be improved for edge cases.
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {"text": (text, encoding)}, int(start), int(end)
def get_caption_path(self, sample):
# example xx/xx/xx.mp4 -> xx.csv
return os.path.join(self.caption_dir, sample.split('.')[0] + '.csv')
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
caption_csv = self.get_caption_path(sample)
text, start, end = self.get_caption(caption_csv)
encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def _get_video_path(self, sample):
rel_video_fp = sample
if "s3://" in self.data_dir:
rel_video_fp = sample.strip()
full_video_fp = os.path.join(self.data_dir, rel_video_fp)
return full_video_fp, rel_video_fp
def get_raw_video(self, sample, begin, end):
abs_fp, rel_fp = self._get_video_path(sample)
if "s3://" in abs_fp:
videos = self.read_frames_decord(abs_fp, begin, end).permute(1, 0, 2, 3)
else:
videos = self.read_frames_ffmpeg(abs_fp, begin, end).permute(1, 0, 2, 3)
if videos is None:
raise Exception("Invalid img!", rel_fp)
else:
return videos
def get_video(self, sample, start, end):
videos = self.get_raw_video(sample, start, end)
videos_tensor = self.video_aug(videos, self.video_transform, byte=True)
return videos_tensor
def get_false_video(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
caption_csv = self.get_caption_path(sample)
_, start, end = self.get_caption(caption_csv)
videos = self.get_raw_video(sample, start, end)
videos_tensor = self.video_aug(videos, self.video_transform, byte=True)
return {f"false_video_{rep}": videos_tensor}
def get_suite(self, index):
result = None
max_try = 10
try_time = 0
while result is None:
try_time += 1
sample = self.metadata.iloc[index]
try:
ret = dict()
text, start, end = self.get_text(sample)
ret.update(text)
videos_tensor = self.get_video(sample, start, end)
ret.update({
"video": videos_tensor,
"vid_index": index,
"cap_index": index,
"raw_index": index,
})
ret.update({"replica": True if ret["cap_index"] > 0 else False})
for i in range(self.draw_false_video):
ret.update(self.get_false_video(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
index = random.randint(0, len(self.metadata) - 1)
exc = e
if try_time > max_try:
print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}")
try_time = 0
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/youtube.py |
import numpy as np
from .video_base_dataset import BaseDataset
import os
import json
import pandas as pd
class ACTIVITYNETQADataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
# if split == "test":
# split = "val"
self.split = split
self.metadata = None
self.ans_lab_dict = None
if split == "train":
names = ["activitynet_qa_train"]
# names = ["msrvtt_qa_train", "msrvtt_qa_val"]
elif split == "val":
names = ["activitynet_qa_test"] # ["msrvtt_qa_val"]
elif split == "test":
names = ["activitynet_qa_test"] # vqav2_test-dev for test-dev
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
self.names = names
# self.num_frames = 4
self._load_metadata()
def _load_metadata(self):
metadata_dir = './meta_data/activitynet_qa'
split_files = {
'train': 'msrvtt_qa_train.jsonl',
'val': 'msrvtt_qa_test.jsonl',
'test': 'msrvtt_qa_test.jsonl'
}
answer_fp = os.path.join(metadata_dir, 'msrvtt_train_ans2label.json') # 1500 in total (all classes in train)
# answer_fp = os.path.join(metadata_dir, 'msrvtt_qa_ans2label.json') # 4539 in total (all classes in train+val+test)
answer_clip_id = os.path.join(metadata_dir, 'msrvtt_clip_id.json')
with open(answer_fp, 'r') as JSON:
self.ans_lab_dict = json.load(JSON)
with open(answer_clip_id, 'r') as JSON:
self.ans_clip_id = json.load(JSON)
for name in self.names:
split = name.split('_')[-1]
target_split_fp = split_files[split]
# path_or_buf=os.path.join(metadata_dir, target_split_fp)
metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
if self.metadata is None:
self.metadata = metadata
else:
self.metadata.update(metadata)
print("total {} samples for {}".format(len(self.metadata), self.names))
# data1.update(data2)
def get_text(self, sample):
text = sample['question']
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return (text, encoding)
def get_answer_label(self, sample):
text = sample['answer']
ans_total_len = len(self.ans_lab_dict) + 1 # one additional class
try:
ans_label = self.ans_lab_dict[text] #
except KeyError:
ans_label = -100 # ignore classes
# ans_label = 1500 # other classes
scores = np.zeros(ans_total_len).astype(int)
scores[ans_label] = 1
return text, ans_label, scores
# return text, ans_label_vector, scores
def __getitem__(self, index):
sample = self.metadata.iloc[index]
video_tensor = self.get_video(sample)
text = self.get_text(sample)
# index, question_index = self.index_mapper[index]
qid = index
        # answers are available for every split used here, so the earlier
        # test-split special case was dead code
        answers, labels, scores = self.get_answer_label(sample)
return {
"video": video_tensor,
"text": text,
"vqa_answer": answers,
"vqa_labels": labels,
"vqa_scores": scores,
"qid": qid,
"ans_clip_id": self.ans_clip_id,
}
def __len__(self):
return len(self.metadata) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/activitynetqa.py |
from .video_base_dataset import BaseDataset, read_frames_decord
import random
import os
import pandas as pd
from .pack_meta import pack_metadata, unpack_metadata
class WEBVID10MDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self.cut = "jsfusion"
if split == "train":
names = ["webvid10m_train"]
elif split == "val":
names = ["webvid10m_val"]
elif split == "test":
names = ["webvid10m_val"]
self._load_metadata()
print(names, ": ", len(self.metadata), "samples in total.")
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
if "s3://" in self.data_dir:
self.data_dir = "s3://video_pub_new/WebVid10M/"
def _load_metadata(self):
metadata_dir = '/mnt/cache/share_data/DSK_datasets/webvid/'
split_files = {
'train': 'results_10M_train.csv',
'val': 'results_10M_val.csv', # there is no test
'test': 'results_10M_val.csv'
}
target_split_fp = split_files[self.split]
metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp))
metadata = metadata[["name", "page_dir", "videoid"]]
# metadata = metadata[:102400]
self.metadata = pack_metadata(self, metadata)
def _get_video_path(self, sample):
rel_video_fp = os.path.join(str(sample[1]), str(sample[2]) + '.mp4')
if "s3://" in self.data_dir:
full_video_fp = os.path.join(self.data_dir, rel_video_fp)
else:
full_video_fp = os.path.join(self.data_dir, self.split, rel_video_fp)
return full_video_fp, rel_video_fp
def _get_caption(self, sample):
return sample[0]
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_video_path(sample)
videos, idxs, vlen = read_frames_decord(abs_fp, self.num_frames, mode=self.split)
if videos is None:
raise Exception("Invalid video!", rel_fp)
else:
return videos
def get_video(self, index, sample):
videos = self.get_raw_video(sample)
videos_tensor = self.video_aug(videos, self.video_transform)
return {
"video": videos_tensor,
"vid_index": index,
"cap_index": index,
"raw_index": index,
}
def get_false_video(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = unpack_metadata(self, random_index)
# can be different augmentation
videos = self.get_raw_video(sample)
videos_tensor = self.video_aug(videos, self.video_transform)
return {f"false_video_{rep}": videos_tensor}
def get_text(self, raw_index, sample):
text = sample[0]
# print(text)
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
# print(encoding.size())
return {
"text": (text, encoding),
"vid_index": raw_index,
"cap_index": raw_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = unpack_metadata(self, random_index)
text = sample[0]
encoding = self.tokenizer(
text,
# padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
max_try = 10
try_time = 0
while result is None:
try_time += 1
sample = unpack_metadata(self, index)
try:
ret = dict()
ret.update(self.get_video(index, sample))
if not self.video_only:
txt = self.get_text(index, sample)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_video):
ret.update(self.get_false_video(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
index = random.randint(0, len(self.metadata) - 1)
exc = e
if try_time > max_try:
print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}")
try_time=0
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid10m.py |
from .video_base_dataset import BaseDataset
import random
import os
import pandas as pd
class MSVDDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
if split == "train":
names = ["msvd_train"]
elif split == "val":
names = ["msvd_val"]
elif split == "test":
names = ["msvd_test"]
self._load_metadata()
# self.num_frames = kwargs['num_frames']
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def _load_metadata(self):
metadata_dir = './meta_data/msvd'
split_files = {
'train': 'MSVD_train.tsv',
'val': 'MSVD_test.tsv', # MSVD_val.tsv
'test': 'MSVD_test.tsv'
}
target_split_fp = split_files[self.split]
metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
self.metadata = metadata
print("load split {}, {} samples".format(self.split, len(metadata)))
def _get_video_path(self, sample):
rel_video_fp = sample[1] + '.avi'
full_video_fp = os.path.join(self.data_dir, 'YouTubeClips', rel_video_fp)
return full_video_fp, rel_video_fp
    def _get_caption(self, sample):
        # captions may contain several comma-separated variants; one is sampled at
        # random (the same strategy is currently used for every split)
        words = sample[0].split(',')
        num_word = len(words)
        index = random.randint(0, num_word - 1)
        caption = words[index]
        return caption
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/msvd.py |
import pandas as pd
import numpy as np
from typing import Union
SEP = "<<<sep>>>"
class DummyMeta(object):
def __init__(self, l):
self._len = l
def __len__(self):
return self._len
def string_to_sequence(s: Union[str, list], dtype=np.int32) -> np.ndarray:
if isinstance(s, list):
assert not any(SEP in x for x in s)
s = SEP.join(s)
return np.array([ord(c) for c in s], dtype=dtype)
def sequence_to_string(seq: np.ndarray) -> Union[str, list]:
s = ''.join([chr(c) for c in seq])
if SEP in s:
return s.split(SEP)
return s
def pack_sequences(seqs: Union[np.ndarray, list]) -> (np.ndarray, np.ndarray):
values = np.concatenate(seqs, axis=0)
offsets = np.cumsum([len(s) for s in seqs])
return values, offsets
def unpack_sequence(values: np.ndarray, offsets: np.ndarray, index: int) -> np.ndarray:
off1 = offsets[index]
if index > 0:
off0 = offsets[index - 1]
elif index == 0:
off0 = 0
else:
raise ValueError(index)
return values[off0:off1]
def pack_metadata(obj: object, df: pd.DataFrame):
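    # Flatten the DataFrame onto `obj` as plain numpy arrays: string columns are packed
    # into one array of character codes plus an offsets array, other columns are stored
    # as-is.  The intent appears to be avoiding per-row Python objects (and the
    # copy-on-access memory growth they can cause in forked DataLoader workers); only a
    # lightweight DummyMeta carrying the length is returned.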
assert not hasattr(obj, "metadata_keys")
assert not hasattr(obj, "metadata_is_str")
df = df.dropna()
metadata_keys = list(df.columns)
metadata_is_str = {c: df[c].dtype == pd.StringDtype for c in df.columns}
for c in df.columns:
if df[c].dtype == pd.StringDtype:
assert not hasattr(obj, "metadata_{}_v".format(c))
assert not hasattr(obj, "metadata_{}_o".format(c))
seq_v, seq_o = pack_sequences([string_to_sequence(s) for s in df[c]])
setattr(obj, "metadata_{}_v".format(c), seq_v)
setattr(obj, "metadata_{}_o".format(c), seq_o)
else:
assert not hasattr(obj, "metadata_{}".format(c))
seq = df[c].to_numpy()
setattr(obj, "metadata_{}".format(c), seq)
setattr(obj, "metadata_keys", metadata_keys)
setattr(obj, "metadata_is_str", metadata_is_str)
return DummyMeta(len(df))
def unpack_metadata(obj: object, i: int):
ret = []
for c in getattr(obj, "metadata_keys"):
if not getattr(obj, "metadata_is_str")[c]:
ret.append(getattr(obj, "metadata_{}".format(c))[i])
else:
ret.append(
sequence_to_string(
unpack_sequence(
getattr(obj, "metadata_{}_v".format(c)),
getattr(obj, "metadata_{}_o".format(c)),
i,
)
)
)
return pd.Series(dict(zip(obj.metadata_keys, ret)))
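# Illustrative usage sketch (not called anywhere in this repo): pack a small DataFrame
# onto a plain holder object, then recover one row as a pandas Series.  `_Holder` is a
# hypothetical stand-in for a Dataset instance.
def _pack_meta_usage_example():
    class _Holder:
        pass
    holder = _Holder()
    df = pd.DataFrame({"name": ["a.mp4", "b.mp4"], "videoid": [1, 2]})
    holder.metadata = pack_metadata(holder, df)  # DummyMeta, len(holder.metadata) == 2
    return unpack_metadata(holder, 0)  # pd.Series with "name" == "a.mp4", "videoid" == 1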
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/pack_meta.py |
from .video_base_dataset import BaseDataset
import os
import pandas as pd
import random
from .pack_meta import pack_metadata, unpack_metadata
class LSMDCChoiceDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self.ans_lab_dict = None
if split == "train":
names = ["lsmdc_choice_train"]
elif split == "val":
names = ["lsmdc_choice_val"]
elif split == "test":
names = ["lsmdc_choice_test"] # vqav2_test-dev for test-dev
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="unknown",
remove_duplicate=False,
)
self._load_metadata()
if "s3://" in self.data_dir:
            # Strip the dataset dir name that BaseDataset appended automatically
self.data_dir = os.path.dirname(self.data_dir)
# Add the real path
self.data_dir = os.path.join(self.data_dir, "LSMDC")
def _load_metadata(self):
metadata_dir = './meta_data/lsmdc'
split_files = {
'train': 'LSMDC16_multiple_choice_train.csv',
'val': 'LSMDC16_multiple_choice_test_randomized.csv', # 'LSMDC16_multiple_choice_valid.csv',
'test': 'LSMDC16_multiple_choice_test_randomized.csv'
}
target_split_fp = split_files[self.split]
print(os.path.join(metadata_dir, target_split_fp))
metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t', header=None, on_bad_lines='skip')
datalist = []
data_to_ignore = [
"3056_PUBLIC_ENEMIES_01.24.29.351-01.24.32.274",
"3035_INSIDE_MAN_02.02.18.839-02.02.25.201",
"3064_SPARKLE_2012_00.14.12.000-00.14.22.429",
]
for raw_id in range(len(metadata)):
raw_d = metadata.iloc[raw_id]
video_fp = raw_d[0] # 3001_21_JUMP_STREET_00.03.07.077-00.03.07.559
if video_fp.strip() in data_to_ignore:
continue
sub_path = video_fp.split('.')[0] # 3001_21_JUMP_STREET_00
remove = sub_path.split('_')[-1] # 00
sub_path = sub_path.replace('_'+remove,'/') # 3001_21_JUMP_STREET/
rel_video_fp = sub_path + video_fp + '.avi' #
options = [raw_d[idx] for idx in range(5, 10)]
d = dict(
id=video_fp,
vid_id=rel_video_fp,
answer=raw_d[10] - 1 if self.split in ['val', 'test'] else 0,
options=options,
)
datalist.append(d)
self.metadata = pack_metadata(self, pd.DataFrame(datalist))
print("load split {}, {} samples".format(self.split, len(self.metadata)))
def _get_video_path(self, sample):
rel_video_fp = sample['vid_id']
full_video_fp = os.path.join(self.data_dir, rel_video_fp)
# print(full_video_fp)
# assert os.path.exists(full_video_fp)
return full_video_fp, rel_video_fp
def get_text(self, sample):
texts = []
for text in sample['options']:
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
texts.append((text, encoding))
return texts
def get_answer_label(self, sample):
answer = sample['answer']
return answer
def __getitem__(self, index):
result = False
while not result:
try:
sample = unpack_metadata(self, index)
video_tensor = self.get_video(sample)
qid = index
answer = self.get_answer_label(sample)
ret = {
"video": video_tensor,
"vid_index": index,
"cap_index": index,
"raw_index": index,
'answer': answer
}
texts = self.get_text(sample)
ret["text"] = texts[0]
for i in range(self.draw_false_text - 1):
ret.update({f"false_text_{i}": texts[i+1]})
result = True
except Exception as e:
print(f"Error while read file idx {sample['vid_id']} in {self.names[0]} -> {e}")
index = random.randint(0, len(self.metadata) - 1)
return ret
def __len__(self):
return len(self.metadata) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/lsmdc_choice.py |
import random
import torch
import io
import os
import cv2
import numpy as np
from PIL import Image
from CoTrain.transforms import keys_to_transforms
import decord
from decord import cpu
import imageio
# add for ytt asr clean
import ftfy
import regex as re
import demoji
import editdistance
import tslearn.metrics
import string
from CoTrain.transforms.video.videoaug import VideoTransform, video_aug
from CoTrain.datasets import client
import CoTrain.modules.InternVideo as internvideo
class BaseDataset(torch.utils.data.Dataset):
def __init__(
self,
data_dir: str,
transform_keys: list,
image_size: int,
names: list,
text_column_name: str = "",
remove_duplicate=True,
max_text_len=40,
draw_false_image=0,
draw_false_video=0,
draw_false_text=0,
image_only=False,
video_only=False,
num_frames=1,
draw_options_text=0,
backend='v100'
):
"""
data_dir : where dataset file *.arrow lives; existence should be guaranteed via DataModule.prepare_data
transform_keys : keys for generating augmented views of videos
text_column_name : pyarrow table column name that has list of strings as elements
"""
assert len(transform_keys) >= 1
super().__init__()
self.transforms = keys_to_transforms(transform_keys, size=image_size)
self.text_column_name = text_column_name
self.names = names
self.max_text_len = max_text_len
self.draw_false_video = draw_false_video
self.draw_false_text = draw_false_text
self.video_only = video_only
self.data_dir = data_dir
if len(names) != 0:
dataset_name = names[0].split('_')[0]
if dataset_name in ['tgif', 'tgifqa']:
dataset_name = 'tgif'
self.data_dir = os.path.join(self.data_dir, dataset_name) # e.g. webvid_train -> webvid
split_name = dataset_name
if torch.distributed.get_rank() == 0:
print('*'*100)
print("video datasets: {}".format(names))
self.draw_options_text = draw_options_text
self.num_frames = num_frames
if torch.distributed.get_rank() == 0:
print("# frames for base dataset is: {}".format(self.num_frames))
if split_name in ['msrvtt', 'cc3m', 'webvid', 'msvd', 'vcr', 'howto100m', 'ego4d', 'yttemporal', 'tgif', 'hmdb51', 'k400']:
if torch.distributed.get_rank() == 0:
print("no arrow available for {}, load from disk".format(names[0]))
else:
print("not support video dataset")
self.video_transform = VideoTransform(mode=self.split, crop_size=image_size, backend=backend)
self.video_aug = video_aug
@property
def corpus(self):
return [text for texts in self.all_texts for text in texts]
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index)
def get_raw_image(self, index, image_key="image"):
index, caption_index = self.index_mapper[index]
image_bytes = io.BytesIO(self.table[image_key][index].as_py())
image_bytes.seek(0)
return Image.open(image_bytes).convert("RGB")
def _get_video_path(self, sample):
if self.names[0] in ['msrvtt_train', 'msrvtt_test', 'msrvtt_val']:
return os.path.join(self.data_dir, 'videos', 'all', sample.name + '.mp4'), sample.name + '.mp4'
else:
return os.path.join(self.data_dir, 'videos', 'all', str(sample['video_id']) + '.mp4'), str(sample['video_id']) + '.mp4'
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_video_path(sample)
imgs, idxs, vlen = read_frames_decord(abs_fp, self.num_frames, mode=self.split)
if imgs is None:
raise Exception("Invalid img!", rel_fp)
else:
return imgs
def get_video(self, sample):
videos = self.get_raw_video(sample)
videos_tensor = self.video_aug(videos, self.video_transform)
return videos_tensor
def get_false_video(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
videos = self.get_raw_video(sample)
videos_tensor = self.video_aug(videos, self.video_transform)
return {f"false_video_{rep}": videos_tensor}
def _get_caption(self, sample):
if self.names[0] in ['msrvtt_train']:
caption = random.choice(sample['captions'])
else:
caption = sample['captions'][0]
return caption
def get_text(self, raw_index, sample):
text = self._get_caption(sample)
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
# print(encoding.size())
return {
"text": (text, encoding),
"vid_index": raw_index,
"cap_index": raw_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
text = self._get_caption(sample)
encoding = self.tokenizer(
text,
# padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
while result is None:
# retry_times += 1
sample = self.metadata.iloc[index]
# print(sample[1])
try:
video_tensor = self.get_video(sample)
ret = {
"video": video_tensor,
"vid_index": index,
"cap_index": index,
"raw_index": index,
}
if not self.video_only:
txt = self.get_text(index, sample)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_video):
ret.update(self.get_false_video(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
print(f"Error while read file idx {sample.name} in {self.names[0]} -> {e}")
index = random.randint(0, len(self.metadata) - 1)
return ret
def collate(self, batch, mlm_collator):
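        # Collate per-sample dicts into batched tensors:
        #   * "*video*" keys are stacked into a single (B, T, C, H, W) tensor; only the
        #     single-view, uniform-resolution case is handled (otherwise the assert fires),
        #   * "*text*" keys go through `mlm_collator` to build masked-LM ids/labels, plus
        #     padded plain input ids and attention masks,
        #   * the raw "text" strings are additionally tokenized with the InternVideo CLIP
        #     tokenizer for the contrastive branch.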
batch_size = len(batch)
keys = set([key for b in batch for key in b.keys()])
dict_batch = {k: [dic[k] if k in dic else None for dic in batch] for k in keys}
video_keys = [k for k in list(dict_batch.keys()) if "video" in k]
video_sizes = list()
# global & local video
for video_key in video_keys:
video_sizes += [ii.shape for i in dict_batch[video_key] if i is not None for ii in i]
# print(global_video_sizes, local_video_sizes)
for size in video_sizes:
# print(size)
            assert (
                len(size) == 4
            ), f"Collate error, a video should be in shape (T, C, H, W), instead of the given {size}"
if len(video_keys) != 0:
global_max_height = max([i[2] for i in video_sizes])
global_max_width = max([i[3] for i in video_sizes])
global_min_height = min([i[2] for i in video_sizes])
global_min_width = min([i[3] for i in video_sizes])
for video_key in video_keys:
video = dict_batch[video_key]
view_size = len(video[0])
if (view_size == 1 and
global_max_height == global_min_height and
global_max_width == global_min_width):
dict_batch[video_key] = [torch.stack([x[0] for x in video])]
continue
                # multi-view or unequal-size batches are not expected here; fail loudly
                # before falling back to the zero-padding path below
                assert False, (view_size, global_max_height, global_min_height,
                               global_max_width, global_min_width)
new_videos = [
torch.zeros(batch_size, self.num_frames, 3, global_max_height, global_max_width)
for _ in range(view_size)
]
for bi in range(batch_size):
orig_batch = video[bi]
for vi in range(view_size):
if orig_batch is None:
# new_videos[vi][bi] = None
# modify by alex
continue
else:
orig = video[bi][vi]
# print(orig.size())
new_videos[vi][bi, :, :, : orig.shape[-2], : orig.shape[-1]] = orig
dict_batch[video_key] = new_videos
txt_keys = [k for k in list(dict_batch.keys()) if "text" in k]
# print(txt_keys)
if len(txt_keys) != 0:
texts = [[d[0] for d in dict_batch[txt_key]] for txt_key in txt_keys]
encodings = [[d[1] for d in dict_batch[txt_key]] for txt_key in txt_keys]
draw_text_len = len(encodings)
flatten_encodings = [e for encoding in encodings for e in encoding]
flatten_mlms = mlm_collator(flatten_encodings)
for i, txt_key in enumerate(txt_keys):
texts, encodings = (
[d[0] for d in dict_batch[txt_key]],
[d[1] for d in dict_batch[txt_key]],
)
mlm_ids, mlm_labels = (
flatten_mlms["input_ids"][batch_size * (i) : batch_size * (i + 1)],
flatten_mlms["labels"][batch_size * (i) : batch_size * (i + 1)],
)
input_ids = torch.zeros_like(mlm_ids)
attention_mask = torch.zeros_like(mlm_ids)
for _i, encoding in enumerate(encodings):
_input_ids, _attention_mask = (
torch.tensor(encoding["input_ids"]),
torch.tensor(encoding["attention_mask"]),
)
input_ids[_i, : len(_input_ids)] = _input_ids
attention_mask[_i, : len(_attention_mask)] = _attention_mask
dict_batch[txt_key] = texts
dict_batch[f"{txt_key}_ids"] = input_ids
dict_batch[f"{txt_key}_labels"] = torch.full_like(input_ids, -100)
dict_batch[f"{txt_key}_ids_mlm"] = mlm_ids
dict_batch[f"{txt_key}_labels_mlm"] = mlm_labels
dict_batch[f"{txt_key}_masks"] = attention_mask
clip_text_ids, clip_special_tokens_mask = internvideo.tokenize(
dict_batch["text"], truncate=True, return_special_tokens_mask=True)
dict_batch["clip_text_ids"] = clip_text_ids
dict_batch["clip_special_tokens_mask"] = clip_special_tokens_mask
return dict_batch
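    # Rough layout of the collated batch above (single-view, equal-size videos):
    #   dict_batch["video"]        -> [Tensor(B, num_frames, C, H, W)]  (one stacked tensor per view)
    #   dict_batch["text"]         -> list of B raw caption strings
    #   dict_batch["text_ids"]     -> padded token ids, dict_batch["text_masks"] the matching attention mask
    #   dict_batch["text_labels"]  -> tensor filled with -100 (placeholder labels)
    #   dict_batch["text_ids_mlm"], dict_batch["text_labels_mlm"] -> masked ids / labels from mlm_collator
    #   dict_batch["clip_text_ids"], dict_batch["clip_special_tokens_mask"] -> CLIP-tokenized captions
    # The same *_ids / *_masks keys are also produced for every false_text_{i} entry.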
# def collate(self, batch, mlm_collator):
# batch_size = len(batch)
# keys = set([key for b in batch for key in b.keys()])
# dict_batch = {k: [dic[k] if k in dic else None for dic in batch] for k in keys}
#
# video_keys = [k for k in list(dict_batch.keys()) if "video" in k]
# video_sizes = list()
#
# # global & local video
# for video_key in video_keys:
# video_sizes += [ii.shape for i in dict_batch[video_key][0] if i is not None for ii in i]
# # print(global_video_sizes, local_video_sizes)
#
# for size in video_sizes:
# # print(size)
# assert (
# len(size) == 4
# ), f"Collate error, an video should be in shape of (T, N, H, W), instead of given {size}"
#
# if len(video_keys) != 0:
# global_max_height = max([i[2] for i in video_sizes])
# global_max_width = max([i[3] for i in video_sizes])
# local_max_height = min([i[2] for i in video_sizes])
# local_max_width = min([i[3] for i in video_sizes])
# for video_key in video_keys:
# video = dict_batch[video_key]
# global_view_size = len(dict_batch[video_key][0][0])
# local_view_size = len(dict_batch[video_key][0][1])
# # print(global_view_size, local_view_size)
#
# new_videos = [
# [
# torch.zeros(batch_size, self.num_frames, 3, global_max_height, global_max_width)
# for _ in range(global_view_size)
# ],
# [
# torch.zeros(batch_size, self.num_frames, 3, local_max_height, local_max_width)
# for _ in range(local_view_size)
# ]
# ]
# # print(len(img))
# for bi in range(batch_size):
# orig_batch = video[bi]
# for vi in range(global_view_size):
# if orig_batch is None:
# # new_videos[vi][bi] = None
# # modify by alex
# continue
# else:
# orig = video[bi][0][vi]
# # print(orig.size())
# new_videos[0][vi][bi, :, :, : orig.shape[-2], : orig.shape[-1]] = orig
#
# for bi in range(batch_size):
# orig_batch = video[bi]
# for vi in range(local_view_size):
# if orig_batch is None:
# # new_videos[vi][bi] = None
# # modify by alex
# continue
# else:
# orig = video[bi][1][vi]
# # print(orig.size())
# new_videos[1][vi][bi, :, :, : orig.shape[-2], : orig.shape[-1]] = orig
# dict_batch[video_key] = new_videos
#
# txt_keys = [k for k in list(dict_batch.keys()) if "text" in k]
# # print(txt_keys)
# if len(txt_keys) != 0:
# texts = [[d[0] for d in dict_batch[txt_key]] for txt_key in txt_keys]
# encodings = [[d[1] for d in dict_batch[txt_key]] for txt_key in txt_keys]
# draw_text_len = len(encodings)
# flatten_encodings = [e for encoding in encodings for e in encoding]
# flatten_mlms = mlm_collator(flatten_encodings)
#
# for i, txt_key in enumerate(txt_keys):
# texts, encodings = (
# [d[0] for d in dict_batch[txt_key]],
# [d[1] for d in dict_batch[txt_key]],
# )
#
# mlm_ids, mlm_labels = (
# flatten_mlms["input_ids"][batch_size * (i) : batch_size * (i + 1)],
# flatten_mlms["labels"][batch_size * (i) : batch_size * (i + 1)],
# )
#
# input_ids = torch.zeros_like(mlm_ids)
# attention_mask = torch.zeros_like(mlm_ids)
# for _i, encoding in enumerate(encodings):
# _input_ids, _attention_mask = (
# torch.tensor(encoding["input_ids"]),
# torch.tensor(encoding["attention_mask"]),
# )
# input_ids[_i, : len(_input_ids)] = _input_ids
# attention_mask[_i, : len(_attention_mask)] = _attention_mask
#
# dict_batch[txt_key] = texts
# dict_batch[f"{txt_key}_ids"] = input_ids
# dict_batch[f"{txt_key}_labels"] = torch.full_like(input_ids, -100)
# dict_batch[f"{txt_key}_ids_mlm"] = mlm_ids
# dict_batch[f"{txt_key}_labels_mlm"] = mlm_labels
# dict_batch[f"{txt_key}_masks"] = attention_mask
#
# return dict_batch
def sample_frames(num_frames, vlen, sample='rand', fix_start=None):
acc_samples = min(num_frames, vlen)
intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
ranges = []
for idx, interv in enumerate(intervals[:-1]):
ranges.append((interv, intervals[idx + 1] - 1))
if sample == 'rand':
        frame_idxs = [random.choice(range(x[0], x[1])) if x[1] > x[0] else x[0] for x in ranges]  # fall back to the range start when the range is empty (vlen <= num_frames)
elif fix_start is not None:
frame_idxs = [x[0] + fix_start for x in ranges]
elif sample == 'uniform':
frame_idxs = [(x[0] + x[1]) // 2 for x in ranges]
else:
raise NotImplementedError
return frame_idxs
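# Worked example, assuming num_frames=4 and vlen=12:
#   intervals  -> [0, 3, 6, 9, 12]
#   ranges     -> [(0, 2), (3, 5), (6, 8), (9, 11)]
#   'uniform'  -> frame_idxs = [1, 4, 7, 10]  (midpoint of each range)
#   'rand'     -> one index drawn at random from each range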
def read_frames_gif(video_path, num_frames, mode='train', fix_start=None):
if mode == 'train':
sample = 'rand'
else:
sample = 'uniform'
gif = imageio.get_reader(video_path)
vlen = len(gif)
frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
frames = []
for index, frame in enumerate(gif):
# for index in frame_idxs:
if index in frame_idxs:
frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
frame = torch.from_numpy(frame).byte()
# # (H x W x C) to (C x H x W)
frame = frame.permute(2, 0, 1)
# frame = Image.fromarray(frame)
frames.append(frame)
frames = torch.stack(frames) # .float() / 255
# print(frames.size())
return frames, frame_idxs, vlen
def read_frames_cv2(video_path, num_frames, sample='rand', fix_start=None):
# print(video_path)
cap = cv2.VideoCapture(video_path)
assert (cap.isOpened())
# for decord
# cap.set(3, 256)
# cap.set(4, 256)
vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# get indexes of sampled frames
frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
frames = []
success_idxs = []
for index in frame_idxs:
cap.set(cv2.CAP_PROP_POS_FRAMES, index - 1)
ret, frame = cap.read()
if ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = torch.from_numpy(frame).byte()
# # (H x W x C) to (C x H x W)
frame = frame.permute(2, 0, 1)
# frame = Image.fromarray(frame)
frames.append(frame)
success_idxs.append(index)
else:
pass
# print(frame_idxs, ' fail ', index, f' (vlen {vlen})')
# return frames tensor
# convert cv to PIL
# img = Image.fromarray(imgs[0])
frames = torch.stack(frames) # .float() / 255
# print(frames.size())
cap.release()
return frames, success_idxs, vlen
def read_frames_decord(video_path, num_frames, mode='train', fix_start=None):
if "s3://" in video_path:
video_bytes = client.get(video_path)
assert video_bytes is not None, "Get video failed from {}".format(video_path)
video_path = video_bytes
    if isinstance(video_path, bytes):  # decord needs a path or file-like object, so wrap raw bytes
video_path = io.BytesIO(video_bytes)
# print("video path: {}".format(video_path))
if mode in ['train']:
sample = 'rand'
else:
sample = 'uniform'
# video_reader = decord.VideoReader(video_path, width=512, height=512, num_threads=1, ctx=cpu(0))
video_reader = decord.VideoReader(video_path, width=256, height=256, num_threads=1, ctx=cpu(0))
# video_reader = decord.VideoReader(video_path, num_threads=1, ctx=cpu(0))
decord.bridge.set_bridge('torch')
vlen = len(video_reader)
frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
frames = video_reader.get_batch(frame_idxs).byte()
frames = frames.permute(0, 3, 1, 2).cpu()
return frames, frame_idxs, vlen
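# Usage sketch (the path below is just a placeholder):
#   frames, frame_idxs, vlen = read_frames_decord("/path/to/clip.mp4", num_frames=8, mode='train')
#   frames.shape -> torch.Size([8, 3, 256, 256]), dtype uint8, since frames are decoded at 256x256 above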
def read_frames_from_img_dir(video_path, num_frames, mode='train', fix_start=None, suffix='.jpg'):
if mode in ['train', 'val']:
sample = 'rand'
else:
sample = 'uniform'
vlen = len(os.listdir(video_path))
frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
frames = []
for idx in frame_idxs:
        frame = cv2.imread(os.path.join(video_path, str(idx).zfill(3) + suffix))
frame = torch.from_numpy(frame).byte()
frame = frame.permute(2, 0, 1)
frames.append(frame)
    frames = torch.stack(frames)  # frames were already permuted to (C, H, W) above, so stacking gives (T, C, H, W)
return frames, frame_idxs, vlen
def sample_frames_2(frame_loc, vlen, frame_end):
assert frame_loc <= frame_end
frame_idxs = [frame_loc]
return frame_idxs
def read_large_frames_decord(video_path, frame_loc, frame_end, num_frames, mode='train', fix_start=None):
# print('*'*100)
# print(mode)
if mode == 'train':
sample = 'rand'
else:
sample = 'uniform'
# video_reader = decord.VideoReader(video_path, width=256, height=256, num_threads=1, ctx=cpu(0))
video_reader = decord.VideoReader(video_path, width=512, height=512, num_threads=1, ctx=cpu(0))
decord.bridge.set_bridge('torch')
# vlen = len(video_reader)
frame_idxs = sample_frames(num_frames, 120, sample=sample, fix_start=fix_start)
for i in range(len(frame_idxs)):
# if random.random() < 0.5:
# frame_idxs[i] += frame_loc - 60
# else:
# frame_idxs[i] += frame_loc
frame_idxs[i] += frame_loc - 60
frame_idxs[i] = min(frame_idxs[i], frame_end-1)
frame_idxs[i] = max(0, frame_idxs[i])
# print(frame_loc, frame_end, frame_idxs)
frames = video_reader.get_batch(frame_idxs).byte()
frames = frames.permute(0, 3, 1, 2)
return frames
def video_clip_reader(video_path, begin_time, end_time, duration, num_frames):
cap = cv2.VideoCapture(video_path)
# assert (cap.isOpened())
vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# print(video_path, begin_time, end_time, duration, num_frames, vlen)
average_fps = vlen / duration
clip_len = (end_time - begin_time) * average_fps
frame_idxs = sample_frames(num_frames, int(clip_len), sample='rand')
frames = []
success_idxs = []
rel_index = int(begin_time * average_fps)
rel_index = max(rel_index, 0)
rel_index = min(rel_index, vlen-1)
for index in frame_idxs:
cap.set(cv2.CAP_PROP_POS_FRAMES, rel_index+index)
ret, frame = cap.read()
if ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = torch.from_numpy(frame).byte()
# # (H x W x C) to (C x H x W)
frame = frame.permute(2, 0, 1)
# frame = Image.fromarray(frame)
frames.append(frame)
success_idxs.append(index)
else:
pass
# print(frame_idxs, ' fail ', index, f' (vlen {vlen})')
frames = torch.stack(frames)
cap.release()
    if frames.size(0) < num_frames:
        pad = torch.ones((num_frames - frames.size(0), 3, frames.size(-2), frames.size(-1)), dtype=torch.uint8)
        frames = torch.cat((frames, pad), dim=0)  # pad along the frame dimension
    if frames.size(0) != num_frames:
        raise RuntimeError("video_clip_reader: expected {} frames, got {}".format(num_frames, frames.size(0)))
return frames
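# Example numbers: for a 10 s video with vlen=300 frames, average_fps = 30; a clip with
# begin_time=2.0 and end_time=4.0 gives clip_len = 60 candidate frames, read starting from
# rel_index = int(2.0 * 30) = 60, with num_frames of them randomly picked via sample_frames.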
def video_reader(video_path, frame_loc, frame_end, num_frames):
cap = cv2.VideoCapture(video_path)
assert (cap.isOpened())
vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# get indexes of sampled frames fps is 30, 4s
frame_idxs = sample_frames(num_frames, 120, sample='rand')
# frame_idxs = sample_frames_2(frame_loc, vlen, frame_end)
frames = []
success_idxs = []
for index in frame_idxs:
if random.random() < 0.5:
rel_index = index + frame_loc - 120
else:
rel_index = index + frame_loc
rel_index = max(rel_index, 0)
rel_index = min(rel_index, frame_end)
cap.set(cv2.CAP_PROP_POS_FRAMES, rel_index)
ret, frame = cap.read()
if ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = torch.from_numpy(frame).byte()
# # (H x W x C) to (C x H x W)
frame = frame.permute(2, 0, 1)
# frame = Image.fromarray(frame)
frames.append(frame)
success_idxs.append(index)
else:
pass
# print(frame_idxs, ' fail ', index, f' (vlen {vlen})')
frames = torch.stack(frames)
cap.release()
return frames
def fast_decode(video_path, num_frames, mode='train', fix_start=None, fps=30):
cap = cv2.VideoCapture(video_path)
vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.release()
    max_len = vlen / 30  # approximate duration in seconds, assuming ~30 fps
num_sec = num_frames / float(fps)
size = 224
crop_only = True
random_flip = True
    start_seek = random.randint(0, int(max(0, max_len - num_sec)))  # avoid seeking past the end of the clip
cmd = (
ffmpeg
.input(video_path, ss=start_seek, t=num_sec + 0.1)
.filter('fps', fps=fps)
)
if mode=='train':
aw, ah = random.uniform(0, 1), random.uniform(0, 1)
else:
aw, ah = 0.5, 0.5
if crop_only:
cmd = (
cmd.crop('(iw - {})*{}'.format(size, aw),
'(ih - {})*{}'.format(size, ah),
str(size), str(size))
)
else:
cmd = (
cmd.crop('(iw - min(iw,ih))*{}'.format(aw),
'(ih - min(iw,ih))*{}'.format(ah),
'min(iw,ih)',
'min(iw,ih)')
.filter('scale', size, size)
)
if random_flip and random.uniform(0, 1) > 0.5:
cmd = cmd.hflip()
out, _ = (
cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')
.run(capture_stdout=True, quiet=True)
)
video = np.frombuffer(out, np.uint8).reshape([-1, size, size, 3])
video = torch.from_numpy(video)
video = video.permute(3, 0, 1, 2)
return video, _, _
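# Note: the decoded output is a uint8 tensor of shape (3, T, 224, 224), where T is roughly
# num_frames (the extra 0.1 s of input can yield a few additional frames at the requested fps).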
# 21
colormaps = [(255, 0, 0), (0, 255, 0),
(0, 0, 255), (61, 145, 64), (127, 255, 212), (0, 201, 87),
(218, 112, 214), (255, 0, 255), (112, 128, 105), (250, 235, 215),
(240, 255, 255), (252, 230, 201), (255, 255, 0), (235, 142, 85),
(255, 97, 0), (176, 224, 230), (65, 106, 225,), (0, 255, 255),
(56, 94, 15), (8, 46, 84), (255, 192, 203)]
# only_use_relevant_dets ?
def color_img(im, object_meta, relevant_dets, only_use_relevant_dets=True):
# mask detected region
# only_use_relevant_dets: if true, we only mask regions that mentioned in question & answers
# print(relevant_dets)
if only_use_relevant_dets:
boxes = []
for index in relevant_dets:
boxes.append(object_meta['boxes'][index])
# print(object_meta['names'][index])
# object_index = relevant_dets
else:
boxes = object_meta['boxes']
# print(len(boxes))
# range(len(boxes))
for i in range(len(boxes)):
if i > 20:
break
bbox = boxes[i]
# white_rect = cv2.applyColorMap(white_rect, i)
# only considering bounding box here (wo fine-grained segmentation)
sub_img = im[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
white_rect = np.ones(sub_img.shape, dtype=np.uint8) * 255
white_rect[:, :, 0] = colormaps[i][0]
white_rect[:, :, 1] = colormaps[i][1]
white_rect[:, :, 2] = colormaps[i][2]
res = cv2.addWeighted(sub_img, 0.7, white_rect, 0.3, 1.0)
im[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])] = res
cv2.rectangle(im, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), colormaps[i], 3)
return im
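# Usage note: `im` is an H x W x 3 uint8 image and each box in object_meta['boxes'] is
# (x1, y1, x2, y2) in pixel coordinates; at most 21 boxes are tinted (0.3 alpha) and outlined,
# one color per box taken from `colormaps` above.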
def get_video_len(video_src):
cap = cv2.VideoCapture(video_src)
vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.release()
return vlen
def align_using_dtw(input_asr, grover_output, radius_perc=0.1, radius_abs=32):
"""
:param input_asr: List of words
:param grover_output: List of words also, could be different size
:param radius_perc: Percent of input ASR
:param radius_abs: Absolute ntokens
:return:
"""
max_radius = int(max(len(input_asr) * radius_perc, radius_abs))
# sometimes grover just keeps going
if len(grover_output) > len(input_asr):
grover_output = grover_output[:len(input_asr) + max_radius]
# DONT give the alignment freedom if it's at the end of a sequence to just "give up" by padding with zeros
# Default value is high
auto2other = np.zeros((len(input_asr), len(grover_output)), dtype=np.float32) + 9999.0
def _preprocess_text(x):
return x.translate(str.maketrans('', '', string.punctuation)).strip().lower()
input_asr_pre = [_preprocess_text(x) for x in input_asr]
input_gro_pre = [_preprocess_text(x) for x in grover_output]
for a_idx, a in enumerate(input_asr_pre):
start = max(a_idx - max_radius, 0)
end = min(a_idx + max_radius, len(input_gro_pre))
for o_idx in range(start, end):
o = input_gro_pre[o_idx]
auto2other[a_idx, o_idx] = editdistance.eval(a, o)
idxs, score = tslearn.metrics.dtw_path_from_metric(auto2other, metric='precomputed')
denoised_out = [[] for x in input_asr]
has_seen = -1
for idx1, idx2 in idxs:
if (idx1 >= len(input_asr)) or (idx2 >= len(grover_output)):
break
if idx2 > has_seen:
# Basically don't add if it's a duplicate -- a grover output that matches to 2 things
# This often leads to slightly weird results because we really should match the next thing, but we instead matched the first thing
# e.g.
# input_asr_pre = ['much', 'of', 'a', 'pancake', 'waffle', 'person', 'so', 'i', 'love', 'a']
# input_gro_pre = ['much', 'of', 'a', 'pancakewaffle', 'person', 'so', 'i', 'love', 'a', 'good']
# but we align pancakewaffle-> pancake and person -> waffle AND person -> person
denoised_out[idx1].append(grover_output[idx2])
has_seen = idx2
return [' '.join(x) for x in denoised_out]
def clean_subtitles(subtitle_dicts):
"""
:param subtitle_dicts: {'word': X, 'time': Y}
:return:
"""
# Remove >> maybe using ftfy or something
new_dicts = []
for x in subtitle_dicts:
if x['word'].startswith('&') or x['word'].endswith(';'):
continue
fixed_word = ftfy.ftfy(x['word'])
if len(fixed_word) == 0:
continue
x['word'] = fixed_word
new_dicts.append(x)
return new_dicts
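# Example:
#   clean_subtitles([{'word': '&gt;&gt;', 'time': 0.0}, {'word': 'Hello', 'time': 0.4}])
#   -> [{'word': 'Hello', 'time': 0.4}]   (tokens that look like HTML entities are dropped)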
def clean_description(text):
# Strip emojis first
all_emojis = demoji.findall(text)
for k, v in all_emojis.items():
text = text.replace(k, f'[{v}]'.replace(' ', ''))
text = text.strip()
# Remove URLs
# https://stackoverflow.com/questions/11331982/how-to-remove-any-url-within-a-string-in-python/11332580
text = re.sub(
r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''',
"%", text)
    text = re.sub(' +', ' ', text)  # collapse repeated spaces
    text = re.sub(r'\s*\n+', '\n', text)  # collapse blank lines
text = text.strip()
return text | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/video_base_dataset.py |
import numpy as np
from .video_base_dataset import BaseDataset
import os
import random
from CoTrain.transforms.video.videoaug import VideoTransform
class K400Dataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self.ans_lab_dict = dict()
if split == "train":
names = ["k400_train"]
elif split == "val":
names = ["k400_val"]
elif split == "test":
names = ["k400_test"]
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
        self.video_transform = VideoTransform(mode=self.split)  # train or val mode
self._load_metadata()
def _load_metadata(self):
metadata_dir = './meta_data/k400'
split_files = {
'train': 'k400_train_tsm.list',
'val': 'k400_test_tsm.list',
'test': 'k400_test_tsm.list'
}
target_split_fp = split_files[self.split]
with open(os.path.join(metadata_dir, target_split_fp)) as f:
self.metadata = f.readlines()
answer_fp = os.path.join(metadata_dir, 'kinetics_label_map.txt')
count = 0
with open(answer_fp, 'r') as f:
lines = f.readlines()
for line in lines:
self.ans_lab_dict[str(line.strip())] = count
count += 1
def _get_video_path(self, sample):
# find the name is os.listdir() e.g. abseiling/0wR5jVB-WPk.mp4
# /data/algceph/arcdata/Kinetics-400/train_zips/snowboarding/MCgJO4s1qBA_000129_000139.zip
# -> snowboarding/MCgJO4s1qBA_000129_000139.mp4
if self.split == 'train':
rel_path = sample[0][46:-4] + '.mp4'
else:
# val maybe mkv. webm etc.
fake_path = sample[0][44:-4]
sub_dir, video_name = fake_path.split('/')
rel_path = sub_dir
for video in os.listdir(os.path.join(self.data_dir, self.split, sub_dir)):
if video_name in video:
rel_path = os.path.join(rel_path, video)
break
full_path = os.path.join(self.data_dir, self.split, rel_path)
# print(full_path)
return full_path, rel_path
def get_text(self, sample):
text = "A persion is doing [MASK]"
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return (text, encoding)
def get_answer_label(self, sample):
text = "None"
# print(len(self.ans_lab_dict))
ans_total_len = len(self.ans_lab_dict) + 1 # one additional class
ans_label = int(sample[1])
scores = np.zeros(ans_total_len).astype(int)
scores[ans_label] = 1
return text, ans_label, scores
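    # Example: with the 400-class Kinetics label map, ans_total_len = 401; a sample whose
    # label field is '7' yields ans_label = 7 and a one-hot `scores` vector with scores[7] = 1.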
def __getitem__(self, index):
result = None
while result is None:
sample = self.metadata[index].split('\t')
try:
video_tensor = self.get_video(sample)
text = self.get_text(sample)
qid = index
if self.split != "test":
answers, labels, scores = self.get_answer_label(sample)
else:
answers = list()
labels = list()
scores = list()
result = True
except Exception as e:
print(f"Error while read file idx {sample[0]} -> {e}")
index = random.randint(0, len(self.metadata) - 1)
return {
"video": video_tensor,
"text": text,
"vqa_answer": answers,
"vqa_labels": labels,
"vqa_scores": scores,
"qid": qid,
}
def __len__(self):
return len(self.metadata) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/k400.py |
import numpy as np
from .video_base_dataset import BaseDataset
import os
import json
import pandas as pd
class MSVDQADataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self.ans_lab_dict = None
if split == "train":
names = ["msvd_qa_train"]
elif split == "val":
names = ["msvd_qa_test"] # test: directly output test result
# ["msvd_qa_val"]
elif split == "test":
names = ["msvd_qa_test"] # vqav2_test-dev for test-dev
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
self._load_metadata()
self.data_dir = "s3://video_pub/MSVD/" # TODO: Remove this piece of shit
def _load_metadata(self):
metadata_dir = './meta_data/msvd'
split_files = {
'train': 'msvd_train_qa_encode.json',
'val': 'msvd_val_qa_encode.json',
'test': 'msvd_test_qa_encode.json'
}
# read ans dict
self.ans_lab_dict = {}
answer_fp = os.path.join(metadata_dir, 'msvd_answer_set.txt')
answer_clip_id = os.path.join(metadata_dir, 'msvd_clip_id.json')
self.youtube_mapping_dict = dict()
with open(os.path.join(metadata_dir, 'msvd_youtube_mapping.txt')) as f:
lines = f.readlines()
for line in lines:
info = line.strip().split(' ')
self.youtube_mapping_dict[info[1]] = info[0]
with open(answer_fp, 'r') as f:
lines = f.readlines()
count = 0
for line in lines:
self.ans_lab_dict[str(line.strip())] = count
count += 1
with open(answer_clip_id, 'r') as JSON:
self.ans_clip_id = json.load(JSON)
for name in self.names:
split = name.split('_')[-1]
target_split_fp = split_files[split]
metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
if self.metadata is None:
self.metadata = metadata
else:
self.metadata.update(metadata)
print("total {} samples for {}".format(sum(1 for line in self.metadata), self.names))
def _get_video_path(self, sample):
rel_video_fp = self.youtube_mapping_dict['vid' + str(sample["video_id"])] + '.avi'
# print(rel_video_fp)
full_video_fp = os.path.join(self.data_dir, 'MSVD_Videos', rel_video_fp)
return full_video_fp, rel_video_fp
def get_text(self, sample):
text = sample['question']
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return (text, encoding)
def get_answer_label(self, sample):
text = sample['answer']
ans_total_len = len(self.ans_lab_dict) + 1 # one additional class
try:
ans_label = self.ans_lab_dict[text] #
except KeyError:
ans_label = -100 # ignore classes
# ans_label = 1500 # other classes
scores = np.zeros(ans_total_len).astype(int)
scores[ans_label] = 1
return text, ans_label, scores
# return text, ans_label_vector, scores
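    # Note: answers missing from msvd_answer_set.txt are mapped to -100, the usual
    # ignore_index convention, so they are skipped by the classification loss.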
def __getitem__(self, index):
sample = self.metadata[index].iloc[0]
video_tensor = self.get_video(sample)
text = self.get_text(sample)
# index, question_index = self.index_mapper[index]
qid = index
if self.split != "test":
answers, labels, scores = self.get_answer_label(sample)
else:
answers = list()
labels = list()
scores = list()
return {
"video": video_tensor,
"text": text,
"vqa_answer": answers,
"vqa_labels": labels,
"vqa_scores": scores,
"qid": qid,
"ans_clip_id": self.ans_clip_id,
}
def __len__(self):
return sum(1 for line in self.metadata) # count # json lines | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/msvdqa.py |
InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/__init__.py |
|
from .video_base_dataset import BaseDataset
import os
import pandas as pd
# some videos are missing; handle IO exceptions for better robustness
class DIDEMODataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
if split == "train":
names = ["didemo_train"]
elif split == "val":
names = ["didemo_val"]
elif split == "test":
names = ["didemo_val"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self._load_metadata()
def _load_metadata(self):
metadata_dir = './meta_data/didemo'
split_files = {
'train': 'DiDeMo_train.tsv',
'val': 'DiDeMo_val.tsv', # there is no test
'test': 'DiDeMo_test.tsv'
}
target_split_fp = split_files[self.split]
metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
self.metadata = metadata
print("load split {}, {} samples".format(self.split, len(metadata)))
def _get_video_path(self, sample):
rel_video_fp = sample[1]
full_video_fp = os.path.join(self.data_dir, '', rel_video_fp)
return full_video_fp, rel_video_fp
def _get_caption(self, sample):
return sample[0]
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/didemo.py |
from .video_base_dataset import BaseDataset, read_frames_gif
import random
import os
import pandas as pd
# action and transition: {
# "gif_name": "tumblr_nk172bbdPI1u1lr18o1_250",
# "question": "What does the butterfly do 10 or more than 10 times ?",
# "options": ["stuff marshmallow", "holds a phone towards face",
# "fall over", "talk", "flap wings"],
# "answer": 4
# }
class TGIFQADataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.data_split = "action" # transition/action
self.metadata = None
self._load_metadata()
if split == "train":
names = ["tgifqa_train"]
elif split == "val":
names = ["tgifqa_val"]
elif split == "test":
names = ["tgifqa_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
# for appear objects
self.only_use_relevant_dets = True
if self.only_use_relevant_dets:
self.relevant_dets = [] # resort the detection numbers
self.relevant_dets_classes = []
self.fps = 3 # tgif sample 3 frames per second
self.data_dir = "/mnt/lustre/share_data/heyinan/data/tgif" # TODO: Remove this piece of shit
def _load_metadata(self):
# download specific
metadata_dir = './meta_data/tgif'
if self.data_split == "action":
split_files = {
'train': 'action_train.jsonl',
'val': 'action_test.jsonl', # action_val.jsonl
'test': 'action_test.jsonl' # no GT label for test set
}
elif self.data_split == "transition":
split_files = {
'train': 'transition_train.jsonl',
'val': 'transition_test.jsonl', # transition_val.jsonl
'test': 'transition_test.jsonl' # no GT label for test set
}
else:
Exception("not support split")
target_split_fp = split_files[self.split]
metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
self.metadata = metadata
# def _get_image_path(self, sample):
# # for example: tvqa/frames/raw_frames/frames_hq/met_frames/met_s06e22_seg01_clip_02
# dir_name = sample['vid_name'].split('_')[0]
# if dir_name not in ['bbt', 'castle', 'friends', 'grey', 'house', 'met']:
# dir_name = 'bbt'
# rel_fp = os.path.join('frames/raw_frames/frames_hq/', dir_name + '_frames', sample['vid_name'])
# return os.path.join(self.data_dir, rel_fp), rel_fp
def _get_caption(self, sample):
return sample[0]
def _get_video_path(self, sample):
return os.path.join(self.data_dir, 'gifs', sample['gif_name']) + '.gif', sample['gif_name'] + '.gif'
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_video_path(sample)
imgs, idxs, vlen = read_frames_gif(abs_fp, self.num_frames, mode=self.split)
if imgs is None:
raise Exception("Invalid img!", rel_fp)
else:
return imgs
def get_text(self, sample):
question = self.get_question(sample)
qa_texts = []
# 5 choices # ClipBERT: " ", Ours: [SEP]
options = " ".join(sample["options"][i] for i in range(5))
for i in range(5):
raw_text = question + "Options: " + options + "Answer: " + sample["options"][i]
# raw_text = question + "[SEP]" + sample["options"][i]
# print(raw_text)
qa_encoding = self.tokenizer(
raw_text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
qa_texts.append((raw_text, qa_encoding))
return qa_texts
def get_answer_label(self, sample):
answer = int(sample['answer'])
return answer
def get_question(self, sample):
return sample["question"]
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
result = None
while result is None:
sample = self.metadata.iloc[index]
try:
                self.relevant_dets = []  # initialize
self.relevant_dets_classes = []
answer = self.get_answer_label(sample)
ret = {
"vid_index": index,
"cap_index": index,
"raw_index": index,
'answer': answer
}
qa_texts = self.get_text(sample)
ret["text"] = qa_texts[0]
for i in range(self.draw_options_text - 1):
ret.update({f"options_text_{i}": qa_texts[i+1]})
video_tensor = self.get_video(sample)
ret["image"] = video_tensor
result = True
except Exception as e:
print(f"Error while read file idx {sample.name} in {self.names[0]} -> {e}")
print("time stamp is: {}".format(sample['ts']))
index = random.randint(0, len(self.metadata) - 1)
return ret
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/tgifqa.py |
import numpy as np
from .video_base_dataset import BaseDataset
import os
class UCF101Dataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self.ans_lab_dict = dict()
if split == "train":
names = ["ucf101_train"]
elif split == "val":
names = ["ucf101_val"]
elif split == "test":
names = ["ucf101_test"]
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
self._load_metadata()
def _load_metadata(self):
metadata_dir = './meta_data/ucf101'
split_files = {
'train': 'hmdb51_rgb_train_split_1.txt',
'val': 'hmdb51_rgb_val_split_1.txt',
'test': 'hmdb51_rgb_val_split_1.txt'
}
target_split_fp = split_files[self.split]
self.metadata = [x.strip().split(' ') for x in open(os.path.join(metadata_dir, target_split_fp))]
answer_fp = os.path.join(metadata_dir, 'hmdb51_classInd.txt')
with open(answer_fp, 'r') as f:
lines = f.readlines()
for line in lines:
self.ans_lab_dict[str(int(line.strip().split(' ')[0]) - 1)] = line.strip().split(' ')[1]
def _get_video_path(self, sample):
# self.ans_lab_dict[sample[2]],
return os.path.join(self.data_dir, sample[0].split('/')[-1]) + '.avi', sample[0].split('/')[-1] + '.avi'
def get_text(self, sample):
text = "A person is doing [MASK]"
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return (text, encoding)
def get_answer_label(self, sample):
text = "None"
ans_total_len = len(self.ans_lab_dict) + 1 # one additional class
ans_label = int(sample[2])
scores = np.zeros(ans_total_len).astype(int)
scores[ans_label] = 1
return text, ans_label, scores
# return text, ans_label_vector, scores
def __getitem__(self, index):
sample = self.metadata[index] # .split(' ')
video_tensor = self.get_video(sample)
text = self.get_text(sample)
qid = index
if self.split != "test":
answers, labels, scores = self.get_answer_label(sample)
else:
answers = list()
labels = list()
scores = list()
return {
"video": video_tensor,
"text": text,
"vqa_answer": answers,
"vqa_labels": labels,
"vqa_scores": scores,
"qid": qid,
}
def __len__(self):
return len(self.metadata) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/ucf101.py |
from .video_base_dataset import BaseDataset
import random
import os
import pandas as pd
import cv2
import torch
from CoTrain.datasets.video.video_base_dataset import sample_frames
# each sample: https://tvqa.cs.unc.edu/download_tvqa.html
# {
# "a0": "A martini glass",
# "a1": "Nachos",
# "a2": "Her purse",
# "a3": "Marshall's book",
# "a4": "A beer bottle",
# "answer_idx": 4,
# "q": "What is Robin holding in her hand when she is talking to Ted about Zoey?",
# "qid": 7,
# "ts": "1.21-8.49",
# "vid_name": "met_s06e22_seg01_clip_02",
# "show_name":
# }
class TVQADataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self._load_metadata()
if split == "train":
names = ["tvqa_train"]
elif split == "val":
names = ["tvqa_val"]
elif split == "test":
names = ["tvqa_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
# for appear objects
self.only_use_relevant_dets = True
if self.only_use_relevant_dets:
self.relevant_dets = [] # resort the detection numbers
self.relevant_dets_classes = []
self.fps = 3 # tvqa sample 3 frames per second
def _load_metadata(self):
# download specific
metadata_dir = './meta_data/tvqa'
split_files = {
'train': 'tvqa_train.jsonl',
'val': 'tvqa_val.jsonl',
'test': 'tvqa_test_public.jsonl' # no GT label for test set
}
target_split_fp = split_files[self.split]
metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
self.metadata = metadata
def _get_image_path(self, sample):
# for example: tvqa/frames/raw_frames/frames_hq/met_frames/met_s06e22_seg01_clip_02
dir_name = sample['vid_name'].split('_')[0]
if dir_name not in ['bbt', 'castle', 'friends', 'grey', 'house', 'met']:
dir_name = 'bbt'
rel_fp = os.path.join('frames/raw_frames/frames_hq/', dir_name + '_frames', sample['vid_name'])
return os.path.join(self.data_dir, rel_fp), rel_fp
def _get_caption(self, sample):
return sample[0]
# need to speed up
def _get_video_len(self, dir):
return len(os.listdir(dir))
# tvqa provide sampled frames
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_image_path(sample)
[beg_time, end_time] = sample['ts'].split('-')
clip_len = int((float(end_time) - float(beg_time)) * self.fps)
# try:
# clip_len = int((float(end_time) - float(beg_time)) * self.fps)
# except ValueError:
# clip_len = 1
        # make sure the clip spans at least 2 * num_frames frames to sample from
clip_len = max(clip_len, 2*self.num_frames)
rel_frame_index = sample_frames(self.num_frames, clip_len)
begin_frame_index = max(1, int(float(beg_time) * self.fps))
video_len = self._get_video_len(abs_fp)
# sample N frames here
frames = []
for index in rel_frame_index:
abs_index = begin_frame_index + index
abs_index = min(video_len, abs_index)
image_rel_path = f'{abs_index:05}'
img = cv2.imread(os.path.join(abs_fp, '{}.jpg'.format(image_rel_path)))
# print(img)
# print(os.path.join(abs_fp, '{}.jpg'.format(image_rel_path)))
if img is None:
print(sample['vid_name'])
print(os.path.join(abs_fp, '{}.jpg'.format(image_rel_path)))
frame = torch.from_numpy(img).byte()
frame = frame.permute(2, 0, 1)
frames.append(frame)
frames = torch.stack(frames).permute(1, 0, 2, 3)
return frames
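    # Example numbers, using the sample ts "1.21-8.49" from the comment at the top of this file:
    #   clip_len = int((8.49 - 1.21) * 3) = 21 frames at 3 fps (clamped to at least 2 * num_frames),
    #   begin_frame_index = max(1, int(1.21 * 3)) = 3, and num_frames indices from that window are
    #   read as individual '{:05d}.jpg' files.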
def get_text(self, sample):
question = self.get_question(sample)
qa_texts = []
# 5 choices # ClipBERT: " ", Ours: [SEP]
        # note: the concatenated options may exceed max_text_len and get truncated
options = " ".join(sample["a{}".format(i)] for i in range(5))
for i in range(5):
raw_text = question + "Options: " + options + "Answer: " + sample["a{}".format(i)]
# raw_text = question + "[SEP]" + sample["a{}".format(i)]
# print(raw_text)
qa_encoding = self.tokenizer(
raw_text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
qa_texts.append((raw_text, qa_encoding))
return qa_texts
def get_answer_label(self, sample):
answer = int(sample['answer_idx'])
return answer
def get_question(self, sample):
return sample["q"]
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
result = None
while result is None:
sample = self.metadata.iloc[index]
try:
                self.relevant_dets = []  # initialize
self.relevant_dets_classes = []
answer = self.get_answer_label(sample)
ret = {
"vid_index": index,
"cap_index": index,
"raw_index": index,
'answer': answer
}
qa_texts = self.get_text(sample)
ret["text"] = qa_texts[0]
for i in range(self.draw_options_text - 1):
ret.update({f"options_text_{i}": qa_texts[i+1]})
video_tensor = self.get_video(sample)
ret["image"] = video_tensor
result = True
except Exception as e:
print(f"Error while read file idx {sample.name} in {self.names[0]} -> {e}")
print("time stamp is: {}".format(sample['ts']))
index = random.randint(0, len(self.metadata) - 1)
return ret
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/tvqa.py |
from .video_base_dataset import BaseDataset, read_frames_decord
import random
import os
import pandas as pd
class WEBVIDDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self.cut = "jsfusion"
if split == "train":
names = ["webvid_train"]
elif split == "val":
names = ["webvid_val"]
elif split == "test":
names = ["webvid_val"]
self._load_metadata()
print(names, ": ", len(self.metadata), "samples in total.")
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
if "s3://" in self.data_dir:
            # strip the split-specific directory that was appended automatically
self.data_dir = os.path.dirname(self.data_dir)
# Add the real path
self.data_dir = os.path.join(self.data_dir, "WebVid2M")
def _load_metadata(self):
metadata_dir = './meta_data/webvid'
split_files = {
'train': 'webvid_training_success_full.tsv',
'val': 'webvid_validation_success_full.tsv', # there is no test
'test': 'webvid_validation_success_full.tsv'
}
target_split_fp = split_files[self.split]
metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
self.metadata = metadata
def _get_video_path(self, sample):
rel_video_fp = sample[1] + '.mp4'
if "s3://" in self.data_dir:
full_video_fp = os.path.join(self.data_dir, rel_video_fp)
else:
full_video_fp = os.path.join(self.data_dir, self.split, rel_video_fp)
return full_video_fp, rel_video_fp
def _get_caption(self, sample):
return sample[0]
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_video_path(sample)
videos, idxs, vlen = read_frames_decord(abs_fp, self.num_frames, mode=self.split)
if videos is None:
raise Exception("Invalid video!", rel_fp)
else:
return videos
def get_video(self, index, sample):
videos = self.get_raw_video(sample)
videos_tensor = self.video_aug(videos, self.video_transform)
return {
"video": videos_tensor,
"vid_index": index,
"cap_index": index,
"raw_index": index,
}
def get_false_video(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
# can be different augmentation
videos = self.get_raw_video(sample)
videos_tensor = self.video_aug(videos, self.video_transform)
return {f"false_video_{rep}": videos_tensor}
def get_text(self, raw_index, sample):
text = sample[0]
# print(text)
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
# print(encoding.size())
return {
"text": (text, encoding),
"vid_index": raw_index,
"cap_index": raw_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
text = sample[0]
encoding = self.tokenizer(
text,
# padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
max_try = 10
try_time = 0
while result is None:
try_time += 1
sample = self.metadata.iloc[index]
try:
ret = dict()
ret.update(self.get_video(index, sample))
if not self.video_only:
txt = self.get_text(index, sample)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_video):
ret.update(self.get_false_video(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
index = random.randint(0, len(self.metadata) - 1)
exc = e
if try_time > max_try:
print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}")
try_time=0
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid_old.py |
from .video_base_dataset import BaseDataset
import torch as th
import os
import numpy as np
import random
import ffmpeg
import json
from CoTrain.transforms.video.videoaug import VideoTransform
import subprocess
# {'timestamp_sec': 221.29666, 'narration_text': '#C C walks on the ground'}
class Ego4DDataset(BaseDataset):
"""EGO4D Video-Text loader."""
def __init__(self, *args, split="", **kwargs):
# def __init__(
# self,
# csv,
# video_root='',
# caption_root='',
# min_time=4.0,
# fps=16,
# num_frames=16,
# size=224,
# crop_only=False,
# center_crop=True,
# benchmark=False,
# token_to_word_path='data/dict.npy',
# max_words=20,
# num_candidates=1,
# random_left_right_flip=False,
# ):
# """
# Args:
# """
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["ego4d_train"]
elif split == "val":
names = ["ego4d_val"]
elif split == "test":
names = ["ego4d_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self._load_metadata()
# for howto100
self.min_time = 2.0
self.size = 224
self.fps = 5
self.num_sec = self.video_num_frames / float(self.fps)
self.crop_only = False
self.center_crop = False
self.benchmark = False
self.num_candidates = 1
self.random_flip = True
# print(self.data_dir)
# for howto caption dir
self._load_metadata()
# print(kwargs)
# self.num_frames = kwargs['num_frames']
self.video_transform = VideoTransform(mode=self.split, num_frames=self.num_frames) # train or val model
def _load_metadata(self):
metadata_dir = './meta_data'
split_files = {
'train': 'ego4d/narration.json',
'val': 'ego4d/narration.json', # there is no test
'test': 'ego4d/narration.json'
}
target_split_fp = split_files[self.split]
with open(os.path.join(metadata_dir, target_split_fp), 'r') as jsonfile:
metadata = json.load(jsonfile)
# metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
self.metadata = metadata
self.meta_keys = list(metadata.keys())
def __len__(self):
return len(self.meta_keys)
def get_video_len(self, video_path):
duration = subprocess.check_output(
['ffprobe', '-i', video_path, '-show_entries', 'format=duration', '-v', 'quiet', '-of',
'csv=%s' % ("p=0")])
# print("ffmpeg duration is: {}".format(duration))
duration = float(str(duration)[2:-3]) # b'1027.806000\n' -> 1027.806
return duration
def read_frames_ffmpeg(self, video_path, center, video_len):
if center > video_len:
center = video_len - 2 * self.num_sec
start = int(max(0, center-self.min_time))
end = int(min(video_len, center+self.min_time))
start_seek = random.randint(start, int(max(start, end - self.num_sec)))
# video is too short
if video_len < 1:
start_seek = 0
if start_seek + self.num_sec + 0.1 > video_len:
start_seek = video_len - self.num_sec - 0.1
start_seek = max(start_seek, 0)
cmd = (
ffmpeg
.input(video_path, ss=start_seek, t=self.num_sec + 0.01)
.filter('fps', fps=self.fps)
)
if self.center_crop:
aw, ah = 0.5, 0.5
else:
aw, ah = random.uniform(0, 1), random.uniform(0, 1)
if self.crop_only:
cmd = (
cmd.crop('(iw - {})*{}'.format(self.size, aw),
'(ih - {})*{}'.format(self.size, ah),
str(self.size), str(self.size))
)
else:
cmd = (
cmd.crop('(iw - min(iw,ih))*{}'.format(aw),
'(ih - min(iw,ih))*{}'.format(ah),
'min(iw,ih)',
'min(iw,ih)')
.filter('scale', self.size, self.size)
)
if self.random_flip and random.uniform(0, 1) > 0.5:
cmd = cmd.hflip()
out, _ = (
cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')
.run(capture_stdout=True, quiet=True)
)
video = np.frombuffer(out, np.uint8).reshape([-1, self.size, self.size, 3])
video_tensor = th.from_numpy(np.copy(video))
video_tensor = video_tensor.permute(3, 0, 1, 2) + 0.01
if video_tensor.size()[1] != self.num_frames:
print(video_tensor.size(), start, end, start_seek, video_len)
# print("video length: {}".format(self.get_video_len_from_timestammp()))
        # pad with ones (plus the small +0.01 offset above) so a clip is never all zeros
if video_tensor.shape[1] < self.num_frames:
zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8)
video_tensor = th.cat((video_tensor, zeros), axis=1)
return video_tensor[:, :self.num_frames]
def _zero_pad_tensor_token(self, tensor, size):
if len(tensor) >= size:
return tensor[:size]
else:
zero = th.zeros(size - len(tensor)).long()
return th.cat((tensor, zero), dim=0)
def get_text(self, sample, index):
text = sample['narration_text']
# TODO: May need to be improved for edge cases.
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {
"text": (text, encoding),
"img_index": index,
"cap_index": index,
"raw_index": index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
# two annotations
if random.random() < 0.5:
meta = self.metadata[self.meta_keys[random_index]]['narration_pass_1']
else:
meta = self.metadata[self.meta_keys[random_index]]['narration_pass_2']
sample = meta[random.randint(0, len(meta) - 1)] # random choice one sample
text = sample['narration_text']
encoding = self.tokenizer(
text,
# padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def _get_video_path(self, sample):
rel_video_fp = sample["video_path"] + '.mp4'
full_video_fp = os.path.join(self.data_dir, rel_video_fp)
if not os.path.exists(full_video_fp):
            raise IOError("video does not exist: {}".format(full_video_fp))
return full_video_fp, rel_video_fp
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_video_path(sample)
# in four seconds
# print(sample)
sample["video_len"] = self.get_video_len(abs_fp)
center = sample['timestamp_sec']
imgs = self.read_frames_ffmpeg(abs_fp, center, sample["video_len"]).permute(1, 0, 2, 3)
# print(imgs.size())
if imgs is None:
raise Exception("Invalid video!", rel_fp)
else:
return imgs
def get_video(self, sample):
imgs_tensor = self.get_raw_video(sample)
return imgs_tensor
def get_false_video(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
# two annotations
if random.random() < 0.5:
meta = self.metadata[self.meta_keys[random_index]]['narration_pass_1']
else:
meta = self.metadata[self.meta_keys[random_index]]['narration_pass_2']
if len(meta) < 1:
return self.get_false_video(rep)
sample = meta[random.randint(0, len(meta) - 1)] # random choice one sample
sample["video_path"] = self.meta_keys[random_index] # video path
imgs_tensor = self.get_raw_video(sample)
return {f"false_image_{rep}": imgs_tensor}
def get_suite(self, index):
result = None
while result is None:
# two annotations
try:
if random.random() < 0.5:
meta = self.metadata[self.meta_keys[index]]['narration_pass_1']
else:
meta = self.metadata[self.meta_keys[index]]['narration_pass_2']
if len(meta) < 2:
random_index = random.randint(0, len(self.metadata) - 1)
return self.get_suite(random_index)
sample = meta[random.randint(0, len(meta)-1)] # random choice one sample
sample["video_path"] = self.meta_keys[index] # video path
# print(sample)
ret = dict()
text = self.get_text(sample, index)
ret.update({"replica": True if text["cap_index"] > 0 else False})
ret.update(text)
imgs_tensor = self.get_video(sample)
# print(imgs_tensor.size())
ret.update({
"image": imgs_tensor,
"img_index": index,
"cap_index": index,
"raw_index": index,
})
ret.update({"replica": True if ret["cap_index"] > 0 else False})
for i in range(self.draw_false_image):
ret.update(self.get_false_video(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
# print(e)
index = random.randint(0, len(self.metadata) - 1)
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d_v2.py |
from docutils import DataError
from importlib_metadata import metadata
from .video_base_dataset import BaseDataset, read_frames_decord
import os
import pandas as pd
class K400VideoDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if self.split == "train":
Exception("no train data provided")
self.metadata = None
self.ans_texts = dict()
if split == "train":
names = ["k400_video_train"]
elif split == "val":
names = ["k400_video_val"]
elif split == "test":
names = ["k400_video_test"] # vqav2_test-dev for test-dev
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="unknown",
remove_duplicate=False,
)
self._load_metadata()
self.data_dir = "s3://video_pub/K400_videos" # TODO: Remove this piece of shit
def _load_metadata(self):
metadata_dir = './meta_data/k400'
split_files = {
'train': 'k400_test_tsm.list',
'val': 'k400_test_tsm.list',
'test': 'k400_test_tsm.list',
}
target_split_fp = split_files[self.split]
with open(os.path.join(metadata_dir, target_split_fp)) as f:
self.metadata = f.readlines()
self.metadata = [x.strip() for x in self.metadata]
self.metadata = [x.split("\t") for x in self.metadata]
self.metadata = [[x[0].split("/")[-1][:11], int(x[1])] for x in self.metadata]
def _build_ans(self):
metadata_dir = './meta_data/k400'
answer_fp = os.path.join(metadata_dir, 'kinetics_label_map.txt')
ans_texts = open(answer_fp).readlines()
assert len(set(ans_texts)) == 400
ans_texts = [x.strip() for x in ans_texts]
ans_texts = ["A person is doing {}".format(x) for x in ans_texts]
ans_texts = [
(
x,
self.tokenizer(
x,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
),
)
for x in ans_texts
]
self.ans_texts = {"text": ans_texts[0]}
self.ans_texts.update(
{
"false_text_{}".format(i): ans_texts[i + 1]
for i in range(len(ans_texts) - 1)
}
)
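    # Sketch of the result: self.ans_texts maps "text" to the first "A person is doing ..." prompt
    # and "false_text_{i}" to each remaining prompt, every entry stored as (raw string, encoding),
    # matching the false_text_{i} keys produced by get_false_text in the base video dataset.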
@staticmethod
def classes():
metadata_dir = './meta_data/k400'
answer_fp = os.path.join(metadata_dir, 'kinetics_label_map.txt')
ans_texts = open(answer_fp).readlines()
assert len(set(ans_texts)) == 400
ans_texts = [x.strip() for x in ans_texts]
return ans_texts
def _get_video_path(self, sample):
rel_video_fp = sample[0] + '.mp4'
if "s3://" in self.data_dir:
full_video_fp = os.path.join(self.data_dir, rel_video_fp)
else:
full_video_fp = os.path.join(self.data_dir, self.split, rel_video_fp)
return full_video_fp, rel_video_fp
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_video_path(sample)
videos, idxs, vlen = read_frames_decord(abs_fp, self.num_frames, mode=self.split)
if videos is None:
raise Exception("Invalid img!", rel_fp)
else:
return videos
def get_video(self, sample):
videos = self.get_raw_video(sample)
videos_tensor = self.video_aug(videos, self.video_transform)
return videos_tensor
def __getitem__(self, index):
ret = None
max_try = 10
try_time = 0
while ret is None:
try_time += 1
try:
sample = self.metadata[index]
image_tensor = self.get_video(sample)
answer = sample[1]
ret = {
"video": image_tensor,
"img_index": index,
'answer': answer,
}
except Exception as e:
index = (index + 1) % len(self.metadata)
exc = e
if try_time > max_try:
raise DataError(
f"Exceed max time Error while read file idx {sample[0]} with error {exc}"
)
return ret
def __len__(self):
return len(self.metadata) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/k400_video.py |
from .video_base_dataset import BaseDataset
import random
import os
import pandas as pd
import json
import numpy as np
class MSRVTTDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self.cut = "jsfusion"
if split == "train":
names = ["msrvtt_train"]
elif split == "val":
names = ["msrvtt_val"]
elif split == "test":
names = ["msrvtt_val"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self._load_metadata()
def _load_metadata(self):
json_fp = os.path.join(self.data_dir, 'annotation', 'MSR_VTT.json')
with open(json_fp, 'r') as fid:
data = json.load(fid)
df = pd.DataFrame(data['annotations'])
split_dir = os.path.join(self.data_dir, 'high-quality', 'structured-symlinks')
js_test_cap_idx_path = None
challenge_splits = {"val", "public_server_val", "public_server_test"}
if self.cut == "miech": # 7k
train_list_path = "train_list_miech.txt"
test_list_path = "test_list_miech.txt" # 1k
elif self.cut == "jsfusion": # 9k
train_list_path = "train_list_jsfusion.txt"
test_list_path = "val_list_jsfusion.txt" # 1k
js_test_cap_idx_path = "jsfusion_val_caption_idx.pkl"
elif self.cut in {"full-val", "full-test"}:
train_list_path = "train_list_full.txt"
if self.cut == "full-val":
test_list_path = "val_list_full.txt"
else:
test_list_path = "test_list_full.txt"
elif self.cut in challenge_splits:
train_list_path = "train_list.txt"
if self.cut == "val":
test_list_path = f"{self.cut}_list.txt"
else:
test_list_path = f"{self.cut}.txt"
else:
msg = "unrecognised MSRVTT split: {}"
raise ValueError(msg.format(self.cut))
train_df = pd.read_csv(os.path.join(split_dir, train_list_path), names=['videoid'])
test_df = pd.read_csv(os.path.join(split_dir, test_list_path), names=['videoid'])
self.split_sizes = {'train': len(train_df), 'val': len(test_df), 'test': len(test_df)}
if self.split == 'train':
df = df[df['image_id'].isin(train_df['videoid'])]
else:
df = df[df['image_id'].isin(test_df['videoid'])]
self.metadata = df.groupby(['image_id'])['caption'].apply(list)
if js_test_cap_idx_path is not None and self.split != 'train':
caps = pd.Series(np.load(os.path.join(split_dir, js_test_cap_idx_path), allow_pickle=True))
new_res = pd.DataFrame({'caps': self.metadata, 'cap_idx': caps})
new_res['test_caps'] = new_res.apply(lambda x: [x['caps'][x['cap_idx']]], axis=1)
self.metadata = new_res['test_caps']
self.metadata = pd.DataFrame({'captions': self.metadata})
print("load split {}, {} samples".format(self.split, len(self.metadata)))
# random choice or fixed?
def _get_caption(self, sample):
caption_sample = "rand"
if self.split in ['train', 'val'] and caption_sample == "rand":
caption = random.choice(sample['captions'])
else:
caption = sample['captions'][0]
return caption
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvtt.py |
import numpy as np
from .video_base_dataset import BaseDataset
import os
class HMDB51Dataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self.ans_lab_dict = dict()
if split == "train":
names = ["hmdb51_train"]
elif split == "val":
names = ["hmdb51_val"]
elif split == "test":
names = ["hmdb51_test"]
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
self._load_metadata()
def _load_metadata(self):
metadata_dir = './meta_data/hmdb51'
split_files = {
'train': 'hmdb51_rgb_train_split_1.txt',
'val': 'hmdb51_rgb_val_split_1.txt',
'test': 'hmdb51_rgb_val_split_1.txt'
}
target_split_fp = split_files[self.split]
self.metadata = [x.strip().split(' ') for x in open(os.path.join(metadata_dir, target_split_fp))]
answer_fp = os.path.join(metadata_dir, 'hmdb51_classInd.txt')
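# hmdb51_classInd.txt maps "<1-based id> <class name>" per line, e.g. "1 brush_hair";
# ids are shifted to start from 0 below, so ans_lab_dict["0"] == "brush_hair".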
with open(answer_fp, 'r') as f:
lines = f.readlines()
for line in lines:
self.ans_lab_dict[str(int(line.strip().split(' ')[0]) - 1)] = line.strip().split(' ')[1]
def _get_video_path(self, sample):
# self.ans_lab_dict[sample[2]],
return os.path.join(self.data_dir, sample[0].split('/')[-1]) + '.avi', sample[0].split('/')[-1] + '.avi'
def get_text(self, sample):
text = "A person is doing [MASK]"
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return (text, encoding)
def get_answer_label(self, sample):
text = "None"
ans_total_len = len(self.ans_lab_dict) + 1 # one additional class
ans_label = int(sample[2])
scores = np.zeros(ans_total_len).astype(int)
scores[ans_label] = 1
return text, ans_label, scores
# return text, ans_label_vector, scores
def __getitem__(self, index):
sample = self.metadata[index] # .split(' ')
video_tensor = self.get_video(sample)
text = self.get_text(sample)
qid = index
if self.split != "test":
answers, labels, scores = self.get_answer_label(sample)
else:
answers = list()
labels = list()
scores = list()
return {
"video": video_tensor,
"text": text,
"vqa_answer": answers,
"vqa_labels": labels,
"vqa_scores": scores,
"qid": qid,
}
def __len__(self):
return len(self.metadata) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/hmdb51.py |
from .video_base_dataset import BaseDataset
import os
import pandas as pd
from .pack_meta import pack_metadata, unpack_metadata
class MSRVTTChoiceDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if self.split == "train":
Exception("no train data provided")
self.metadata = None
self.ans_lab_dict = None
if split == "train":
names = ["msrvtt_choice_train"]
elif split == "val":
names = ["msrvtt_choice_val"]
elif split == "test":
names = ["msrvtt_choice_test"] # vqav2_test-dev for test-dev
# Since the data is scattered across different locations,
# we manually override data_dir here
args = ("./meta_data", *args[1:])
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="unknown",
remove_duplicate=False,
)
self._load_metadata()
def _load_metadata(self):
metadata_dir = './meta_data/msrvtt'
split_files = {
'train': 'msrvtt_mc_test.jsonl', # no train and test available, only for zero-shot
'val': 'msrvtt_mc_test.jsonl',
'test': 'msrvtt_mc_test.jsonl'
}
target_split_fp = split_files[self.split]
metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
self.metadata = pack_metadata(self, metadata)
def _get_video_path(self, sample):
return os.path.join(self.data_dir, 'videos', 'all', sample['clip_name'] + '.mp4'), sample['clip_name'] + '.mp4'
def get_text(self, sample):
texts = []
for text in sample['options']:
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
texts.append((text, encoding))
return texts
def get_answer_label(self, sample):
answer = sample['answer']
return answer
def __getitem__(self, index):
sample = unpack_metadata(self, index)
video_tensor = self.get_video(sample)
# index, question_index = self.index_mapper[index]
qid = index
answer = self.get_answer_label(sample)
ret = {
"video": video_tensor,
"vid_index": index,
"cap_index": index,
"raw_index": index,
'answer': answer
}
texts = self.get_text(sample)
ret["text"] = texts[0]
# print(len(texts))
for i in range(self.draw_false_text - 1):
ret.update({f"false_text_{i}": texts[i+1]})
# for i in range(self.draw_false_text-1):
# ret[f"false_text_{i}"] = texts[i+1]
# print(ret.keys())
return ret
def __len__(self):
return len(self.metadata) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvtt_choice.py |
from .video_base_dataset import BaseDataset, read_large_frames_decord
import pandas as pd
import os
# {'timestamp_sec': 221.29666, 'narration_text': '#C C walks on the ground'}
class Ego4DDataset(BaseDataset):
"""EGO4D Video-Text loader."""
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["ego4d_train"]
elif split == "val":
names = ["ego4d_val"]
elif split == "test":
names = ["ego4d_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self._load_metadata()
def _load_metadata(self):
metadata_dir = './meta_data/ego4d'
split_files = {
'train': 'ego4d_train_subset.csv',
'val': 'ego4d_val_ts_clean.csv',
'test': 'ego4d_val_ts_clean.csv' # there is no test
}
target_split_fp = split_files[self.split]
self.metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t', header=None, error_bad_lines=False)
def _get_video_path(self, sample):
rel_video_fp = sample[0] + '.mp4'
full_video_fp = os.path.join(self.data_dir, 'videos', rel_video_fp)
if not os.path.exists(full_video_fp):
raise IOError(f"Video file not found: {full_video_fp}")
return full_video_fp, rel_video_fp
def _get_caption(self, sample):
return sample[6]
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_video_path(sample)
# if int(sample[2]) > 600:
# raise Exception("Video is longer than 10m!", rel_fp)
frame_end, frame_loc = int(sample[3]), int(sample[5])
# imgs = video_reader(abs_fp, frame_loc, frame_end, self.num_frames)
imgs = read_large_frames_decord(abs_fp, frame_loc, frame_end, self.num_frames)
if imgs is None:
raise Exception("Invalid video!", rel_fp)
else:
return imgs
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d.py |
from .video_base_dataset import BaseDataset, read_frames_decord
import random
import os
import pandas as pd
from .pack_meta import pack_metadata, unpack_metadata
class WEBVIDDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self.cut = "jsfusion"
if split == "train":
names = ["webvid_train"]
elif split == "val":
names = ["webvid_val"]
elif split == "test":
names = ["webvid_val"]
self._load_metadata()
print(names, ": ", len(self.metadata), "samples in total.")
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
if "s3://" in self.data_dir:
self.data_dir = "s3://video_pub/WebVid2M/"
def _load_metadata(self):
metadata_dir = '/mnt/cache/share_data/DSK_datasets/webvid/'
split_files = {
'train': 'results_2M_train.csv',
'val': 'results_2M_val.csv', # there is no test
'test': 'results_2M_val.csv'
}
target_split_fp = split_files[self.split]
metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp))
metadata = metadata[["name", "page_dir", "videoid"]]
self.metadata = pack_metadata(self, metadata)
def _get_video_path(self, sample):
rel_video_fp = os.path.join(str(sample[1]), str(sample[2]) + '.mp4')
if "s3://" in self.data_dir:
full_video_fp = os.path.join(self.data_dir, rel_video_fp)
else:
full_video_fp = os.path.join(self.data_dir, self.split, rel_video_fp)
return full_video_fp, rel_video_fp
def _get_caption(self, sample):
return sample[0]
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_video_path(sample)
videos, idxs, vlen = read_frames_decord(abs_fp, self.num_frames, mode=self.split)
if videos is None:
raise Exception("Invalid video!", rel_fp)
else:
return videos
def get_video(self, index, sample):
videos = self.get_raw_video(sample)
videos_tensor = self.video_aug(videos, self.video_transform)
return {
"video": videos_tensor,
"vid_index": index,
"cap_index": index,
"raw_index": index,
}
def get_false_video(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = unpack_metadata(self, random_index)
# can be different augmentation
videos = self.get_raw_video(sample)
videos_tensor = self.video_aug(videos, self.video_transform)
return {f"false_video_{rep}": videos_tensor}
def get_text(self, raw_index, sample):
text = sample[0]
# print(text)
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
# print(encoding.size())
return {
"text": (text, encoding),
"vid_index": raw_index,
"cap_index": raw_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = unpack_metadata(self, random_index)
text = sample[0]
encoding = self.tokenizer(
text,
# padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
max_try = 10
try_time = 0
while result is None:
try_time += 1
sample = unpack_metadata(self, index)
try:
ret = dict()
ret.update(self.get_video(index, sample))
if not self.video_only:
txt = self.get_text(index, sample)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_video):
ret.update(self.get_false_video(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
index = random.randint(0, len(self.metadata) - 1)
exc = e
if try_time > max_try:
print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}")
try_time=0
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid.py |
from .video_base_dataset import BaseDataset, read_frames_from_img_dir
import random
import os
import pandas as pd
class ActivityNetDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
if split == "train":
names = ["activitynet_train"]
elif split == "val":
names = ["activitynet_val"]
elif split == "test":
names = ["activitynet_val"]
self._load_metadata()
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def _load_metadata(self):
metadata_dir = './meta_data/activitynet'
split_files = {
'train': 'train.jsonl',
'val': 'val1.jsonl',
'test': 'val2.jsonl'
}
target_split_fp = split_files[self.split]
metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
self.metadata = metadata
def _get_video_path(self, sample):
rel_video_fp = sample['clip_name']
full_video_fp = os.path.join(self.data_dir, 'activitynet_frames', rel_video_fp)
return full_video_fp, rel_video_fp
def get_raw_video(self, sample):
abs_fp, rel_fp = self._get_video_path(sample)
imgs, idxs, vlen = read_frames_from_img_dir(abs_fp, self.num_frames, mode=self.split)
if imgs is None:
raise Exception("Invalid img!", rel_fp)
else:
return imgs
def get_video(self, index, sample, image_key="image"):
videos = self.get_raw_video(sample)
videos_tensor = self.video_aug(videos, self.video_transform)
return {
"video": videos_tensor,
"vid_index": index,
"cap_index": index,
"raw_index": index,
}
def get_false_video(self, rep, image_key="image"):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
videos = self.get_raw_video(sample)
videos_tensor = self.video_aug(videos, self.video_transform)
return {f"false_video_{rep}": videos_tensor}
def get_text(self, raw_index, sample):
text = sample['caption']
# print(text)
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
# print(encoding.size())
return {
"text": (text, encoding),
"img_index": raw_index,
"cap_index": raw_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
text = sample['caption']
encoding = self.tokenizer(
text,
# padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
while result is None:
sample = self.metadata.iloc[index]
try:
ret = dict()
ret.update(self.get_video(index, sample))
if not self.image_only:
txt = self.get_text(index, sample)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_image):
ret.update(self.get_false_video(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
print(f"Error while read file idx {sample.name} in {self.names[0]} -> {e}")
index = random.randint(0, len(self.metadata) - 1)
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/video/activitynet.py |
from .base_dataset import BaseDataset
class F30KCaptionKarpathyDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
if split == "train":
names = ["f30k_caption_karpathy_train", "f30k_caption_karpathy_val"]
elif split == "val":
names = ["f30k_caption_karpathy_test"]
elif split == "test":
names = ["f30k_caption_karpathy_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/f30k_caption_karpathy_dataset.py |
from .base_dataset import BaseDataset
class VisualGenomeCaptionDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "test":
split = "val"
if split == "train":
names = ["vg_train"]
elif split == "val":
names = []
elif split == "test":
names = []
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/vg_caption_dataset.py |
import json
from .base_dataset import BaseDataset
import random
import os
import pandas as pd
import io
from PIL import Image
from CoTrain.datasets import client
class LAION400MDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self._load_metadata()
if split == "train":
names = ["laion400m_train"]
elif split == "val":
names = ["laion400m_val"]
elif split == "test":
names = ["laion400m_val"]
print(names, ": ", len(self.metadata), "samples in total.")
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self.data_dir = ""
def _load_metadata(self):
file_path = "/mnt/lustre/share_data/liyizhuo.vendor/datasets/LAION-400M-partial-meta.json"
if self.split == "train":
self.metadata = [json.loads(x) for x in open(file_path).readlines()[:-10]]
else:
self.metadata = [json.loads(x) for x in open(file_path).readlines()[-10:]]
self.metadata = [(x['caption'], x['filename']) for x in self.metadata]
def _get_image_path(self, sample):
# print(sample[1])
# rel_fp = str(sample[1]).split('/')[-1]
# print(os.path.join(self.data_dir, rel_fp))
rel_fp = sample[1]
return os.path.join(self.data_dir, rel_fp), rel_fp
def _get_caption(self, sample):
return sample[0]
def get_raw_image(self, sample):
# print(sample)
abs_fp, rel_fp = self._get_image_path(sample)
if "s3://" in abs_fp:
img_bytes = client.get(abs_fp)
assert img_bytes is not None, "Get image failed from {}".format(abs_fp)
img = Image.open(io.BytesIO(img_bytes)).convert("RGB")
else:
img = Image.open(abs_fp).convert("RGB")
if img is None:
raise Exception("Invalid img!", rel_fp)
else:
return img
def _get_object_path(self, sample):
"""
get the object npy path
Args:
sample (dict):
Returns:
abs path
"""
rel_object_fp = os.path.join(sample[1], '1.npz')
full_object_fp = os.path.join(self.object_dir, self.split, rel_object_fp)
return os.path.join(self.split, rel_object_fp), full_object_fp
def get_image(self, index, sample, image_key="image"):
image = self.get_raw_image(sample)
image_tensor = self.image_aug(image, self.transforms)
# image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms]
return {
"video": image_tensor,
"vid_index": sample[1],
"cap_index": index,
"raw_index": index,
}
def get_false_image(self, rep, image_key="image"):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata[random_index]
image = self.get_raw_image(sample)
#image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms]
image_tensor = self.image_aug(image, self.transforms)
return {f"false_video_{rep}": image_tensor}
def get_text(self, raw_index, sample):
text = sample[0]
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {
"text": (text, encoding),
"vid_index": sample[1],
"cap_index": raw_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata[random_index]
text = sample[0]
encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
# print(self.draw_false_image) # 1
while result is None:
sample = self.metadata[index]
# print(sample)
try:
ret = dict()
ret.update(self.get_image(index, sample))
if not self.image_only:
txt = self.get_text(index, sample)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_image):
ret.update(self.get_false_image(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
print(f"Error while read file idx {sample[1]} in {self.names[0]} -> {e}")
index = random.randint(0, len(self.metadata) - 1)
# ret["image"] = ret["image"].unsqueeze(1)
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/laion400m.py |
import random
import torch
import io
import pyarrow as pa
import os
import cv2
import numpy as np
from PIL import Image
from CoTrain.transforms import keys_to_transforms
import decord
from CoTrain.transforms.image.imageaug import image_aug
import CoTrain.modules.InternVideo as internvideo
class BaseDataset(torch.utils.data.Dataset):
def __init__(
self,
data_dir: str,
transform_keys: list,
image_size: int,
names: list,
text_column_name: str = "",
remove_duplicate=True,
max_text_len=40,
draw_false_image=0,
draw_false_video=0,
draw_false_text=0,
image_only=False,
num_frames=1,
draw_options_text=0,
backend='v100'
):
"""
data_dir : where dataset file *.arrow lives; existence should be guaranteed via DataModule.prepare_data
transform_keys : keys for generating augmented views of images
text_column_name : pyarrow table column name that has list of strings as elements
"""
assert len(transform_keys) >= 1
super().__init__()
self.transforms = keys_to_transforms(transform_keys, size=image_size, mode=self.split)
self.image_aug = image_aug
self.text_column_name = text_column_name
self.names = names
self.max_text_len = max_text_len
self.draw_false_image = draw_false_image
self.draw_false_text = draw_false_text
self.image_only = image_only
self.data_dir = data_dir
self.draw_options_text = draw_options_text
if torch.distributed.get_rank() == 0:
print('*'*100)
print("image sub datasets: {}".format(names))
# print(names)
split_name = None
if len(names) != 0:
self.data_dir = os.path.join(self.data_dir, names[0].split('_')[0]) # e.g. coco_train -> coco
split_name = names[0].split('_')[0]
if torch.distributed.get_rank() == 0:
print(self.data_dir)
if split_name and split_name in ['msrvtt', 'cc3m', 'vcr', 'cc12m', 'yfcc15m', 'laion400m', 'mix100m']:
if torch.distributed.get_rank() == 0:
print("no arrow available for {}, load from disk".format(names[0]))
else:
if len(names) != 0:
tables = [
pa.ipc.RecordBatchFileReader(
pa.memory_map(f"{self.data_dir}/{name}.arrow", "r")
).read_all()
for name in names
if os.path.isfile(f"{self.data_dir}/{name}.arrow")
]
# print(names, tables)
self.table_names = list()
for i, name in enumerate(names):
self.table_names += [name] * len(tables[i])
self.table = pa.concat_tables(tables, promote=True)
if text_column_name != "":
self.text_column_name = text_column_name
self.all_texts = self.table[text_column_name].to_pandas().tolist()
self.all_texts = (
[list(set(texts)) for texts in self.all_texts]
if remove_duplicate
else self.all_texts
)
else:
self.all_texts = list()
else:
self.all_texts = list()
self.index_mapper = dict()
if text_column_name != "" and not self.image_only:
j = 0
for i, texts in enumerate(self.all_texts):
for _j in range(len(texts)):
self.index_mapper[j] = (i, _j)
j += 1
else:
for i in range(len(self.table)):
self.index_mapper[i] = (i, None)
#
# if len(names) != 0:
# tables = [
# pa.ipc.RecordBatchFileReader(
# pa.memory_map(f"{data_dir}/{name}.arrow", "r")
# ).read_all()
# for name in names
# if os.path.isfile(f"{data_dir}/{name}.arrow")
# ]
#
# self.table_names = list()
# for i, name in enumerate(names):
# self.table_names += [name] * len(tables[i])
#
# self.table = pa.concat_tables(tables, promote=True)
# if text_column_name != "":
# self.text_column_name = text_column_name
# self.all_texts = self.table[text_column_name].to_pandas().tolist()
# self.all_texts = (
# [list(set(texts)) for texts in self.all_texts]
# if remove_duplicate
# else self.all_texts
# )
# else:
# self.all_texts = list()
# else:
# self.all_texts = list()
#
# self.index_mapper = dict()
#
# if text_column_name != "" and not self.image_only:
# j = 0
# for i, texts in enumerate(self.all_texts):
# for _j in range(len(texts)):
# self.index_mapper[j] = (i, _j)
# j += 1
# else:
# for i in range(len(self.table)):
# self.index_mapper[i] = (i, None)
@property
def corpus(self):
return [text for texts in self.all_texts for text in texts]
def __len__(self):
return len(self.index_mapper)
def get_raw_image(self, index, image_key="image"):
index, caption_index = self.index_mapper[index]
image_bytes = io.BytesIO(self.table[image_key][index].as_py())
image_bytes.seek(0)
return Image.open(image_bytes).convert("RGB")
def get_image(self, index, image_key="image"):
image = self.get_raw_image(index, image_key=image_key)
# image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms]
image_tensor = self.image_aug(image, self.transforms)
return {
"video": image_tensor,
"vid_index": self.index_mapper[index][0],
"cap_index": self.index_mapper[index][1],
"raw_index": index,
}
def get_false_image(self, rep, image_key="image"):
random_index = random.randint(0, len(self.index_mapper) - 1)
image = self.get_raw_image(random_index, image_key=image_key)
# image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms]
image_tensor = self.image_aug(image, self.transforms)
return {f"false_video_{rep}": image_tensor}
def get_text(self, raw_index):
index, caption_index = self.index_mapper[raw_index]
text = self.all_texts[index][caption_index]
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {
"text": (text, encoding),
"vid_index": index,
"cap_index": caption_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.index_mapper) - 1)
index, caption_index = self.index_mapper[random_index]
text = self.all_texts[index][caption_index]
encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
while result is None:
try:
ret = dict()
ret.update(self.get_image(index))
if not self.image_only:
txt = self.get_text(index)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_image):
ret.update(self.get_false_image(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
print(f"Error while read file idx {index} in {self.names[0]} -> {e}")
index = random.randint(0, len(self.index_mapper) - 1)
return ret
def collate(self, batch, mlm_collator):
batch_size = len(batch)
keys = set([key for b in batch for key in b.keys()])
dict_batch = {k: [dic[k] if k in dic else None for dic in batch] for k in keys}
# print(dict_batch)
img_keys = [k for k in list(dict_batch.keys()) if "video" in k]
img_sizes = list()
for img_key in img_keys:
img_sizes += [ii.shape for i in dict_batch[img_key] if i is not None for ii in i]
for size in img_sizes:
assert (
len(size) == 4
), f"Collate error, an image should be in shape of (N, 3, H, W), instead of given {size}"
if len(img_keys) != 0:
global_max_height = max([i[2] for i in img_sizes])
global_max_width = max([i[3] for i in img_sizes])
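# zero-pad every view to the largest (H, W) in the batch so the per-view tensors can be stacked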
for img_key in img_keys:
img = dict_batch[img_key]
view_size = len(dict_batch[img_key][0])
new_images = [
torch.zeros(batch_size, 1, 3, global_max_height, global_max_width)
for _ in range(view_size)
]
# print(len(img))
for bi in range(batch_size):
orig_batch = img[bi]
for vi in range(view_size):
if orig_batch is None:
continue
else:
orig = img[bi][vi]
new_images[vi][bi, :, :, : orig.shape[2], : orig.shape[3]] = orig
dict_batch[img_key] = new_images
txt_keys = [k for k in list(dict_batch.keys()) if "text" in k]
if len(txt_keys) != 0:
texts = [[d[0] for d in dict_batch[txt_key]] for txt_key in txt_keys]
encodings = [[d[1] for d in dict_batch[txt_key]] for txt_key in txt_keys]
draw_text_len = len(encodings)
flatten_encodings = [e for encoding in encodings for e in encoding]
flatten_mlms = mlm_collator(flatten_encodings)
for i, txt_key in enumerate(txt_keys):
texts, encodings = (
[d[0] for d in dict_batch[txt_key]],
[d[1] for d in dict_batch[txt_key]],
)
mlm_ids, mlm_labels = (
flatten_mlms["input_ids"][batch_size * (i) : batch_size * (i + 1)],
flatten_mlms["labels"][batch_size * (i) : batch_size * (i + 1)],
)
input_ids = torch.zeros_like(mlm_ids)
attention_mask = torch.zeros_like(mlm_ids)
for _i, encoding in enumerate(encodings):
_input_ids, _attention_mask = (
torch.tensor(encoding["input_ids"]),
torch.tensor(encoding["attention_mask"]),
)
input_ids[_i, : len(_input_ids)] = _input_ids
attention_mask[_i, : len(_attention_mask)] = _attention_mask
dict_batch[txt_key] = texts
dict_batch[f"{txt_key}_ids"] = input_ids
dict_batch[f"{txt_key}_labels"] = torch.full_like(input_ids, -100)
dict_batch[f"{txt_key}_ids_mlm"] = mlm_ids
dict_batch[f"{txt_key}_labels_mlm"] = mlm_labels
dict_batch[f"{txt_key}_masks"] = attention_mask
clip_text_ids, clip_special_tokens_mask = internvideo.tokenize(
dict_batch["text"], truncate=True, return_special_tokens_mask=True)
dict_batch["clip_text_ids"] = clip_text_ids
dict_batch["clip_special_tokens_mask"] = clip_special_tokens_mask
return dict_batch
# def collate(self, batch, mlm_collator):
# batch_size = len(batch)
# keys = set([key for b in batch for key in b.keys()])
# dict_batch = {k: [dic[k] if k in dic else None for dic in batch] for k in keys}
# # print(dict_batch)
#
# img_keys = [k for k in list(dict_batch.keys()) if "video" in k]
# img_sizes = list()
#
# for img_key in img_keys:
# img_sizes += [ii.shape for i in dict_batch[img_key][0] if i is not None for ii in i]
#
# for size in img_sizes:
# assert (
# len(size) == 4
# ), f"Collate error, an image should be in shape of (N, 3, H, W), instead of given {size}"
#
# if len(img_keys) != 0:
# global_max_height = max([i[2] for i in img_sizes])
# global_max_width = max([i[3] for i in img_sizes])
# local_max_height = min([i[2] for i in img_sizes])
# local_max_width = min([i[3] for i in img_sizes])
# for img_key in img_keys:
# img = dict_batch[img_key]
# global_view_size = len(dict_batch[img_key][0][0])
# local_view_size = len(dict_batch[img_key][0][1])
# # for image, padding one time dimension
# new_images = [
# [
# torch.zeros(batch_size, 1, 3, global_max_height, global_max_width)
# for _ in range(global_view_size)
# ],
# [
# torch.zeros(batch_size, 1, 3, local_max_height, local_max_width)
# for _ in range(local_view_size)
# ]
# ]
# # print(len(img))
# for bi in range(batch_size):
# orig_batch = img[bi]
# for vi in range(global_view_size):
# if orig_batch is None:
# continue
# else:
# orig = img[bi][0][vi]
# new_images[0][vi][bi, :, :, : orig.shape[2], : orig.shape[3]] = orig
#
# for bi in range(batch_size):
# orig_batch = img[bi]
# for vi in range(local_view_size):
# if orig_batch is None:
# continue
# else:
# orig = img[bi][1][vi]
# new_images[1][vi][bi, :, :, : orig.shape[2], : orig.shape[3]] = orig
#
# dict_batch[img_key] = new_images
#
# txt_keys = [k for k in list(dict_batch.keys()) if "text" in k]
#
# if len(txt_keys) != 0:
# texts = [[d[0] for d in dict_batch[txt_key]] for txt_key in txt_keys]
# encodings = [[d[1] for d in dict_batch[txt_key]] for txt_key in txt_keys]
# draw_text_len = len(encodings)
# flatten_encodings = [e for encoding in encodings for e in encoding]
# flatten_mlms = mlm_collator(flatten_encodings)
#
# for i, txt_key in enumerate(txt_keys):
# texts, encodings = (
# [d[0] for d in dict_batch[txt_key]],
# [d[1] for d in dict_batch[txt_key]],
# )
#
# mlm_ids, mlm_labels = (
# flatten_mlms["input_ids"][batch_size * (i) : batch_size * (i + 1)],
# flatten_mlms["labels"][batch_size * (i) : batch_size * (i + 1)],
# )
#
# input_ids = torch.zeros_like(mlm_ids)
# attention_mask = torch.zeros_like(mlm_ids)
# for _i, encoding in enumerate(encodings):
# _input_ids, _attention_mask = (
# torch.tensor(encoding["input_ids"]),
# torch.tensor(encoding["attention_mask"]),
# )
# input_ids[_i, : len(_input_ids)] = _input_ids
# attention_mask[_i, : len(_attention_mask)] = _attention_mask
#
# dict_batch[txt_key] = texts
# dict_batch[f"{txt_key}_ids"] = input_ids
# dict_batch[f"{txt_key}_labels"] = torch.full_like(input_ids, -100)
# dict_batch[f"{txt_key}_ids_mlm"] = mlm_ids
# dict_batch[f"{txt_key}_labels_mlm"] = mlm_labels
# dict_batch[f"{txt_key}_masks"] = attention_mask
#
# return dict_batch
def sample_frames(num_frames, vlen, sample='rand', fix_start=None):
acc_samples = min(num_frames, vlen)
intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
ranges = []
for idx, interv in enumerate(intervals[:-1]):
ranges.append((interv, intervals[idx + 1] - 1))
if sample == 'rand':
frame_idxs = [random.choice(range(x[0], x[1])) for x in ranges]
elif fix_start is not None:
frame_idxs = [x[0] + fix_start for x in ranges]
elif sample == 'uniform':
frame_idxs = [(x[0] + x[1]) // 2 for x in ranges]
else:
raise NotImplementedError
return frame_idxs
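# Illustrative trace (not part of the original file): sample_frames(4, 16, sample='uniform')
# builds intervals [0, 4, 8, 12, 16] -> ranges [(0, 3), (4, 7), (8, 11), (12, 15)] and returns
# the midpoints [1, 5, 9, 13]; with sample='rand', one index is drawn from range(x[0], x[1])
# for each range instead.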
def read_frames_cv2(video_path, num_frames, sample='rand', fix_start=None):
cap = cv2.VideoCapture(video_path)
assert (cap.isOpened())
vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# get indexes of sampled frames
frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
frames = []
success_idxs = []
for index in frame_idxs:
cap.set(cv2.CAP_PROP_POS_FRAMES, index - 1)
ret, frame = cap.read()
if ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = torch.from_numpy(frame).byte()
# # (H x W x C) to (C x H x W)
frame = frame.permute(2, 0, 1)
frames.append(frame)
success_idxs.append(index)
else:
pass
# print(frame_idxs, ' fail ', index, f' (vlen {vlen})')
# return frames tensor
frames = torch.stack(frames) # .float() / 255
# print(frames.size())
cap.release()
return frames, success_idxs, vlen
def read_frames_decord(video_path, num_frames, sample='rand', fix_start=None):
video_reader = decord.VideoReader(video_path, num_threads=1)
vlen = len(video_reader)
frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
frames = video_reader.get_batch(frame_idxs)
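# Assumption: calling .float() / .permute() below implies decord's torch bridge is active
# (decord.bridge.set_bridge('torch')) or that get_batch already yields a torch tensor;
# otherwise the batch would need torch.from_numpy(frames.asnumpy()) first.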
frames = frames.float() / 255
frames = frames.permute(0, 3, 1, 2)
return frames, frame_idxs, vlen | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/base_dataset.py |
from .base_dataset import BaseDataset
class CocoCaptionKarpathyDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["coco_caption_karpathy_train"] # , "coco_caption_karpathy_restval"
elif split == "val":
names = ["coco_caption_karpathy_val"]
# names = ["coco_caption_karpathy_test"]
# names = [] # for fast train
elif split == "test":
names = ["coco_caption_karpathy_test"]
# names = []
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
suite = self.get_suite(index)
if "test" in self.split:
_index, _question_index = self.index_mapper[index]
iid = self.table["image_id"][_index].as_py()
iid = int(iid.split(".")[0].split("_")[-1])
suite.update({"iid": iid})
return suite
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/coco_caption_karpathy_dataset.py |
import json
from .base_dataset import BaseDataset
import random
import os
import pandas as pd
import io
from PIL import Image
from CoTrain.datasets import client
class YFCC15MDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self._load_metadata()
if split == "train":
names = ["yfcc15m_train"]
elif split == "val":
names = ["yfcc15m_val"]
elif split == "test":
names = ["yfcc15m_val"]
print(names, ": ", len(self.metadata), "samples in total.")
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self.data_dir = "s3://GCC/yfcc100m-part/data/"
def _load_metadata(self):
file_path = "/mnt/cache/share_data/DSK_datasets/yfcc15m/yfcc15m_clean.json"
if self.split == "train":
self.metadata = [json.loads(x) for x in open(file_path).readlines()[:-10]]
else:
self.metadata = [json.loads(x) for x in open(file_path).readlines()[-10:]]
self.metadata = [(x['caption'], x['filename']) for x in self.metadata]
def _get_image_path(self, sample):
# print(sample[1])
# rel_fp = str(sample[1]).split('/')[-1]
# print(os.path.join(self.data_dir, rel_fp))
rel_fp = sample[1]
return os.path.join(self.data_dir, rel_fp), rel_fp
def _get_caption(self, sample):
return sample[0]
def get_raw_image(self, sample):
# print(sample)
abs_fp, rel_fp = self._get_image_path(sample)
if "s3://" in abs_fp:
img_bytes = client.get(abs_fp)
assert img_bytes is not None, "Get image failed from {}".format(abs_fp)
img = Image.open(io.BytesIO(img_bytes)).convert("RGB")
else:
img = Image.open(abs_fp).convert("RGB")
if img is None:
raise Exception("Invalid img!", rel_fp)
else:
return img
def _get_object_path(self, sample):
"""
get the object npy path
Args:
sample (dict):
Returns:
abs path
"""
rel_object_fp = os.path.join(sample[1], '1.npz')
full_object_fp = os.path.join(self.object_dir, self.split, rel_object_fp)
return os.path.join(self.split, rel_object_fp), full_object_fp
def get_image(self, index, sample, image_key="image"):
image = self.get_raw_image(sample)
image_tensor = self.image_aug(image, self.transforms)
# image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms]
return {
"video": image_tensor,
"vid_index": sample[1],
"cap_index": index,
"raw_index": index,
}
def get_false_image(self, rep, image_key="image"):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata[random_index]
image = self.get_raw_image(sample)
#image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms]
image_tensor = self.image_aug(image, self.transforms)
return {f"false_video_{rep}": image_tensor}
def get_text(self, raw_index, sample):
text = sample[0]
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {
"text": (text, encoding),
"vid_index": sample[1],
"cap_index": raw_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata[random_index]
text = sample[0]
encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
# print(self.draw_false_image) # 1
while result is None:
sample = self.metadata[index]
# print(sample)
try:
ret = dict()
ret.update(self.get_image(index, sample))
if not self.image_only:
txt = self.get_text(index, sample)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_image):
ret.update(self.get_false_image(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
print(f"Error while read file idx {sample[1]} in {self.names[0]} -> {e}")
index = random.randint(0, len(self.metadata) - 1)
# ret["image"] = ret["image"].unsqueeze(1)
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/yfcc15m.py |
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/__init__.py |
from glob import glob
from .base_dataset import BaseDataset
class SBUCaptionDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "test":
split = "val"
if split == "train":
names = [f"sbu_{i}" for i in range(9)]
elif split == "val":
names = []
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/sbu_caption_dataset.py |
from CoTrain.datasets.video.video_base_dataset import BaseDataset, color_img
import random
import os
import pandas as pd
import cv2
from CoTrain.transforms.video.videoaug import VideoTransform
import torch
## from https://github.com/rowanz/r2c/blob/master/dataloaders/vcr.py
# Here's an example jsonl
# {
# "movie": "3015_CHARLIE_ST_CLOUD",
# "objects": ["person", "person", "person", "car"],
# "interesting_scores": [0],
# "answer_likelihood": "possible",
# "img_fn": "lsmdc_3015_CHARLIE_ST_CLOUD/[email protected]",
# "metadata_fn": "lsmdc_3015_CHARLIE_ST_CLOUD/[email protected]",
# "answer_orig": "No she does not",
# "question_orig": "Does 3 feel comfortable?",
# "rationale_orig": "She is standing with her arms crossed and looks disturbed",
# "question": ["Does", [2], "feel", "comfortable", "?"],
# "answer_match_iter": [3, 0, 2, 1],
# "answer_sources": [3287, 0, 10184, 2260],
# "answer_choices": [
# ["Yes", "because", "the", "person", "sitting", "next", "to", "her", "is", "smiling", "."],
# ["No", "she", "does", "not", "."],
# ["Yes", ",", "she", "is", "wearing", "something", "with", "thin", "straps", "."],
# ["Yes", ",", "she", "is", "cold", "."]],
# "answer_label": 1,
# "rationale_choices": [
# ["There", "is", "snow", "on", "the", "ground", ",", "and",
# "she", "is", "wearing", "a", "coat", "and", "hate", "."],
# ["She", "is", "standing", "with", "her", "arms", "crossed", "and", "looks", "disturbed", "."],
# ["She", "is", "sitting", "very", "rigidly", "and", "tensely", "on", "the", "edge", "of", "the",
# "bed", ".", "her", "posture", "is", "not", "relaxed", "and", "her", "face", "looks", "serious", "."],
# [[2], "is", "laying", "in", "bed", "but", "not", "sleeping", ".",
# "she", "looks", "sad", "and", "is", "curled", "into", "a", "ball", "."]],
# "rationale_sources": [1921, 0, 9750, 25743],
# "rationale_match_iter": [3, 0, 2, 1],
# "rationale_label": 1,
# "img_id": "train-0",
# "question_number": 0,
# "annot_id": "train-0",
# "match_fold": "train-0",
# "match_index": 0,
# }
class VCRDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self._load_metadata()
if split == "train":
names = ["vcr_train"]
elif split == "val":
names = ["vcr_val"]
elif split == "test":
names = ["vcr_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self.video_transform = VideoTransform(mode=split, num_frames=self.num_frames) # train or val mode
# for objects that appear in the image
self.only_use_relevant_dets = True
if self.only_use_relevant_dets:
self.relevant_dets = [] # resort the detection numbers
self.relevant_dets_classes = []
def _load_metadata(self):
# download specific
metadata_dir = './meta_data/vcr1annots'
split_files = {
'train': 'train.jsonl',
'val': 'val.jsonl', # there is no test
'test': 'test.jsonl'
}
target_split_fp = split_files[self.split]
metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
self.metadata = metadata
def _get_image_path(self, sample):
# print(sample.keys())
# print(sample['img_fn'])
# VCR/vcr1images
rel_fp = sample['img_fn']
return os.path.join(self.data_dir, 'vcr1images', rel_fp), rel_fp
def get_objects(self, sample):
metadata2 = pd.read_json(os.path.join(self.data_dir, 'vcr1images',
sample['metadata_fn']), lines=True)
object_meta = metadata2.iloc[0]
return object_meta
def _get_caption(self, sample):
return sample[0]
# def _get_objects(self, sample):
# metadata2 = pd.read_json(os.path.join(self.data_dir,
# sample['metadata_fn']), lines=True)
# sample = metadata2.iloc[0]
# return sample['boxes']
def get_raw_image(self, sample, object_meta, img_color_mask=True):
# print(sample)
abs_fp, rel_fp = self._get_image_path(sample)
# img = Image.open(abs_fp)
img = cv2.imread(abs_fp)
# add bbox annotation here
if img_color_mask:
img = color_img(img, object_meta, self.relevant_dets, self.only_use_relevant_dets)
if img is None:
raise Exception("Invalid img!", rel_fp)
else:
return img
def get_image(self, index, sample, object_meta, image_key="image"):
frames = []
image = self.get_raw_image(sample, object_meta)
frame = torch.from_numpy(image).byte()
frame = frame.permute(2, 0, 1)
frames.append(frame)
frames = torch.stack(frames).permute(1, 0, 2, 3)
# print(frames.size())
image_tensor = [self.video_transform(frames).permute(1, 0, 2, 3)] # to tchw
# image = self.get_raw_image(sample)
# image_tensor = [tr(image) for tr in self.transforms]
# print(image_tensor.size())
# image_tensor.unsqueeze(0)
return image_tensor
def get_false_image(self, rep, image_key="image"):
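# Note: this helper looks stale here; get_raw_image above now requires an object_meta
# argument and VCR images go through self.video_transform, so it would need updating before use.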
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
image = self.get_raw_image(sample)
image_tensor = [tr(image) for tr in self.transforms]
return {f"false_image_{rep}": image_tensor}
# def get_text(self, sample, object_meta):
# question = self.get_question(sample, object_meta)
# texts = []
# for answer in sample['answer_choices']:
# raw_text = question + '[SEP]'
# for word in answer:
# if isinstance(word, list):
# for object_idx in word:
# self.relevant_dets.add(object_idx)
# rel_object_idx = object_idx
# if self.only_use_relevant_dets:
# rel_object_idx = len(self.relevant_dets) - 1 # begin from 0
# raw_text += ' ' + object_meta['names'][object_idx] + ' ' + str(rel_object_idx)
# else:
# raw_text += ' ' + word
# # for index in range(len(answer)):
# # raw_text += ' ' + str(answer[index])
# print(raw_text)
# encoding = self.tokenizer(
# raw_text,
# padding="max_length",
# truncation=True,
# max_length=self.max_text_len,
# return_special_tokens_mask=True,
# )
# texts.append((raw_text, encoding))
# return texts
def update_rele_det(self, sample, object_meta, index):
text = []
for i in range(len(sample['question'])):
text.append(sample['question'][i])
for i in range(len(sample['answer_choices'])):
for j in range(len(sample['answer_choices'][i])):
text.append(sample['answer_choices'][i][j])
for i in range(len(sample['rationale_choices'])):
for j in range(len(sample['rationale_choices'][i])):
text.append(sample['rationale_choices'][i][j])
# update relevant detes
for word in text:
if isinstance(word, list):
for object_idx in word:
# self.relevant_dets.add(object_idx)
if object_idx not in self.relevant_dets:
self.relevant_dets.append(object_idx)
for object in self.relevant_dets:
self.relevant_dets_classes.append(object_meta['names'][object])
# print(index, text)
# print(index, self.relevant_dets)
# print(index, self.relevant_dets_classes)
#
return text
def get_text(self, sample, object_meta, index):
# detect all object index and sort these items
if self.only_use_relevant_dets:
self.update_rele_det(sample, object_meta, index)
question = self.get_question(sample, object_meta)
qa_texts = []
# image size: 384 x 384
# prompt: [START] + "answer_question:"
# prompt: [START] + ' provide rationale:'),
# add all text tokens into this model.
for answer in sample['answer_choices']:
raw_text = question + 'answer question: '
for word in answer:
if isinstance(word, list):
for object_idx in word:
raw_text += ' ' + object_meta['names'][object_idx] + ' '
# rename the object index, for example
if self.only_use_relevant_dets:
raw_text += str(self.relevant_dets.index(object_idx))
else:
raw_text += str(object_idx)
else:
raw_text += ' ' + word
raw_text += '[END]'
# print(index, raw_text)
qa_encoding = self.tokenizer(
raw_text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
qa_texts.append((raw_text, qa_encoding))
gt_ans = sample['answer_choices'][sample['answer_label']]
gt_ans_text = ""
for word in gt_ans:
if isinstance(word, list):
for object_idx in word:
gt_ans_text += ' ' + object_meta['names'][object_idx] + ' '
# rename the object index, for example
if self.only_use_relevant_dets:
gt_ans_text += str(self.relevant_dets.index(object_idx))
else:
gt_ans_text += str(object_idx)
else:
gt_ans_text += ' ' + word
qar_texts = []
for reason in sample['rationale_choices']:
raw_text = question + gt_ans_text + 'provide rationale: '
for word in reason:
if isinstance(word, list):
for object_idx in word:
raw_text += ' ' + object_meta['names'][object_idx] + ' '
if self.only_use_relevant_dets:
raw_text += str(self.relevant_dets.index(object_idx))
else:
raw_text += str(object_idx)
else:
raw_text += ' ' + word
# print(index, raw_text)
raw_text += '[END]'
encoding = self.tokenizer(
raw_text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
qar_texts.append((raw_text, encoding))
return [qa_texts, qar_texts]
#
# def get_qar(self, sample, object_meta):
# question = self.get_question(sample, object_meta) + '[SEP]' # '[MIDDLE]'
# gt_ans = sample['answer_choices'][sample['answer_label']]
# gt_ans_text = ""
# # for index in range(len(gt_ans)):
# # gt_ans_text += ' ' + str(gt_ans[index])
# for word in gt_ans:
# if isinstance(word, list):
# for object_idx in word:
# self.relevant_dets.add(object_idx)
# rel_object_idx = object_idx
# if self.only_use_relevant_dets:
# rel_object_idx = len(self.relevant_dets) - 1 # begin from 0
# print(object_idx, rel_object_idx)
# gt_ans_text += ' ' + object_meta['names'][object_idx] + ' ' + str(rel_object_idx)
# else:
# gt_ans_text += ' ' + word
# texts = []
# for reason in sample['rationale_choices']:
# raw_text = question + gt_ans_text + '[SEP]'
# for word in reason:
# if isinstance(word, list):
# for object_idx in word:
# self.relevant_dets.add(object_idx)
# rel_object_idx = object_idx
# if self.only_use_relevant_dets:
# rel_object_idx = len(self.relevant_dets) - 1 # begin from 0
# raw_text += ' ' + object_meta['names'][object_idx] + ' ' + str(rel_object_idx)
# else:
# raw_text += ' ' + word
# print(raw_text)
# # for index in range(len(reason)):
# # raw_text += ' ' + str(reason[index])
# # self.relevant_dets.append(object_idx)
# # print(raw_text)
# encoding = self.tokenizer(
# raw_text,
# padding="max_length",
# truncation=True,
# max_length=self.max_text_len,
# return_special_tokens_mask=True,
# )
# texts.append((raw_text, encoding))
# return texts
def get_answer_label(self, sample):
answer = int(sample['answer_label'])
return answer
def get_reason_answer_label(self, sample):
answer = int(sample['rationale_label'])
return answer
def get_question(self, sample, object_meta):
raw_text = ""
for index in range(len(sample['question'])):
if isinstance(sample['question'][index], list):
for object_idx in sample['question'][index]:
raw_text += ' ' + object_meta['names'][object_idx] + ' '
if self.only_use_relevant_dets:
raw_text += str(self.relevant_dets.index(object_idx))
else:
raw_text += str(object_idx)
else:
raw_text += ' ' + str(sample['question'][index])
return raw_text
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
sample = self.metadata.iloc[index]
object_meta = self.get_objects(sample)
self.relevant_dets = [] # initialize
self.relevant_dets_classes = []
answer = self.get_answer_label(sample)
reason_answer = self.get_reason_answer_label(sample)
ret = {
"img_index": index,
"cap_index": index,
"raw_index": index,
'answer': answer,
'reason_answer': reason_answer
}
# texts = self.get_text(sample, object_meta)
# qar_texts = self.get_qar(sample, object_meta)
[qa_texts, qar_texts] = self.get_text(sample, object_meta, index)
ret["text"] = qa_texts[0]
# print(texts[0])
# update other answers as false text
for i in range(self.draw_options_text - 1):
ret.update({f"options_text_{i}": qa_texts[i+1]})
for j in range(self.draw_options_text):
ret.update({f"qar_text_{j}": qar_texts[j]})
# print(ret.keys())
image_tensor = self.get_image(index, sample, object_meta)
ret["image"] = image_tensor
return ret
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/vcr.py |
from .base_dataset import BaseDataset
import sys
import random
class NLVR2Dataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["nlvr2_train"]
elif split == "val":
names = ["nlvr2_dev", "nlvr2_test1"]
elif split == "test":
names = ["nlvr2_dev", "nlvr2_test1"]
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
def __getitem__(self, index):
result = None
while result is None:
try:
# BaseDataset.get_image stores the augmented tensor under the "video" key
image_tensor_0 = self.get_image(index, image_key="image_0")["video"]
image_tensor_1 = self.get_image(index, image_key="image_1")["video"]
text = self.get_text(index)["text"]
result = True
except:
print(
f"error while read file idx {index} in {self.names[0]}",
file=sys.stderr,
)
index = random.randint(0, len(self.index_mapper) - 1)
index, question_index = self.index_mapper[index]
answers = self.table["answers"][index][question_index].as_py()
answers = answers == "True"
return {
"image_0": image_tensor_0,
"image_1": image_tensor_1,
"text": text,
"answers": answers,
"table_name": self.table_names[index],
}
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/nlvr2_dataset.py |
import json
from .base_dataset import BaseDataset
import random
import os
import pandas as pd
import io
from PIL import Image
from CoTrain.datasets import client
class CC12MDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self._load_metadata()
if split == "train":
names = ["cc12m_train"]
elif split == "val":
names = ["cc12m_val"]
elif split == "test":
names = ["cc12m_val"]
print(names, ": ", len(self.metadata), "samples in total.")
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self.data_dir = "s3://GCC/GCC12m/"
def _load_metadata(self):
# download specific
# metadata_dir = './meta_data/cc12m'
# split_files = {
# 'train': 'train.tsv',
# 'val': 'val.tsv',
# 'test': 'test.tsv'
# }
# target_split_fp = split_files[self.split]
# metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
# self.metadata = metadata
file_path = "/mnt/cache/share_data/DSK_datasets/cc12m/cc12m_clean.json"
if self.split == "train":
self.metadata = [json.loads(x) for x in open(file_path).readlines()[:-10]]
else:
self.metadata = [json.loads(x) for x in open(file_path).readlines()[-10:]]
self.metadata = [(x['caption'], x['filename']) for x in self.metadata]
def _get_image_path(self, sample):
# print(sample[1])
# rel_fp = str(sample[1]).split('/')[-1]
# print(os.path.join(self.data_dir, rel_fp))
rel_fp = sample[1]
return os.path.join(self.data_dir, rel_fp), rel_fp
def _get_caption(self, sample):
return sample[0]
def get_raw_image(self, sample):
# print(sample)
abs_fp, rel_fp = self._get_image_path(sample)
if "s3://" in abs_fp:
img_bytes = client.get(abs_fp)
assert img_bytes is not None, "Get image failed from {}".format(abs_fp)
img = Image.open(io.BytesIO(img_bytes)).convert("RGB")
else:
img = Image.open(abs_fp).convert("RGB")
if img is None:
raise Exception("Invalid img!", rel_fp)
else:
return img
def _get_object_path(self, sample):
"""
get the object npy path
Args:
sample (dict):
Returns:
abs path
"""
rel_object_fp = os.path.join(sample[1], '1.npz')
full_object_fp = os.path.join(self.object_dir, self.split, rel_object_fp)
return os.path.join(self.split, rel_object_fp), full_object_fp
def get_image(self, index, sample, image_key="image"):
image = self.get_raw_image(sample)
image_tensor = self.image_aug(image, self.transforms)
# image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms]
return {
"video": image_tensor,
"vid_index": sample[1],
"cap_index": index,
"raw_index": index,
}
def get_false_image(self, rep, image_key="image"):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata[random_index]
image = self.get_raw_image(sample)
#image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms]
image_tensor = self.image_aug(image, self.transforms)
return {f"false_video_{rep}": image_tensor}
def get_text(self, raw_index, sample):
text = sample[0]
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {
"text": (text, encoding),
"vid_index": sample[1],
"cap_index": raw_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata[random_index]
text = sample[0]
encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
# print(self.draw_false_image) # 1
while result is None:
sample = self.metadata[index]
# print(sample)
try:
ret = dict()
ret.update(self.get_image(index, sample))
if not self.image_only:
txt = self.get_text(index, sample)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_image):
ret.update(self.get_false_image(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
print(f"Error while read file idx {sample[1]} in {self.names[0]} -> {e}")
index = random.randint(0, len(self.metadata) - 1)
# ret["image"] = ret["image"].unsqueeze(1)
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/cc12m.py |
from .base_dataset import BaseDataset
class VQAv2Dataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["vqav2_train", "vqav2_trainable_val"]
elif split == "val":
names = ["vqav2_rest_val"]
elif split == "test":
names = ["vqav2_test"] # vqav2_test-dev for test-dev
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
def __getitem__(self, index):
image_tensor = self.get_image(index)["image"]
text = self.get_text(index)["text"]
index, question_index = self.index_mapper[index]
qid = self.table["question_id"][index][question_index].as_py()
if self.split != "test":
answers = self.table["answers"][index][question_index].as_py()
labels = self.table["answer_labels"][index][question_index].as_py()
scores = self.table["answer_scores"][index][question_index].as_py()
else:
answers = list()
labels = list()
scores = list()
return {
"image": image_tensor,
"text": text,
"vqa_answer": answers,
"vqa_labels": labels,
"vqa_scores": scores,
"qid": qid,
}
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/vqav2_dataset.py |
import json
from .base_dataset import BaseDataset
import random
import os
import pandas as pd
import numpy as np
import io
import torch
from PIL import Image
from CoTrain.datasets import client
import CoTrain.modules.dist_utils as du
class MIX100MDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self.world_size = du.get_world_size()
self.rank = du.get_rank()
self._load_metadata()
if split == "train":
names = ["mix100m_train"]
elif split == "val":
names = ["mix100m_val"]
elif split == "test":
names = ["mix100m_val"]
print(names, ": ", len(self.metadata), "samples in total.")
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self.data_dir = ""
def _load_metadata(self):
if self.split != "train":
file_path = "/mnt/lustre/share_data/liyizhuo/datasets/fake_mix100m_val.json"
self.metadata = [json.loads(x) for x in open(file_path).readlines()]
self.metadata = [
(" ".join(x["caption"]), os.path.join(x["image_root"], x["filename"]))
for x in self.metadata
]
return None
meta_root = (
"s3://liyizhuo/datasets/shlab_softmax_100m_10000/"
)
file_list = [os.path.join(meta_root, f"{i}".zfill(5) + ".json") for i in range(10)]
ranked_meta = [[] for _ in range(self.world_size)]
ranked_num = [0 for _ in range(self.world_size)]
import time
start_time = time.time()
count = 0
for seed_num, each_meta_file in enumerate(file_list):
f = client.get(each_meta_file).decode().strip().split("\n")
np.random.seed(seed_num)
random_ranks = np.random.randint(0, self.world_size, size=(len(f), ))
for i, line in enumerate(f):
count += 1
if self.rank == random_ranks[i]:
info = json.loads(line.encode("UTF-8"))
info = (
" ".join(info["caption"]),
os.path.join(info["image_root"], info["filename"]),
)
ranked_meta[self.rank].append(info)
ranked_num[self.rank] += 1
if count % 1000000 == 0 and self.rank == 0:
print(
"-------------------------------------------------------------- every 1M time:",
(time.time() - start_time),
"{}M".format(count / 1000000),
)
del f
self.metadata = ranked_meta[self.rank]
num = ranked_num[self.rank]
# balance data length in each subprocess
ranked_num = du.all_gather(num)
du.synchronize()
max_num = max(ranked_num)
if max_num > num:
diff = max_num - num
self.metadata.extend(random.sample(self.metadata, diff))
num = len(self.metadata)
assert num == max_num
def _get_image_path(self, sample):
# print(sample[1])
# rel_fp = str(sample[1]).split('/')[-1]
# print(os.path.join(self.data_dir, rel_fp))
rel_fp = sample[1]
return os.path.join(self.data_dir, rel_fp), rel_fp
def _get_caption(self, sample):
return sample[0]
def get_raw_image(self, sample):
# print(sample)
abs_fp, rel_fp = self._get_image_path(sample)
if "s3://" in abs_fp:
img_bytes = client.get(abs_fp)
            assert img_bytes is not None, "Get image failed from {}".format(abs_fp)
img = Image.open(io.BytesIO(img_bytes)).convert("RGB")
else:
img = Image.open(abs_fp).convert("RGB")
if img is None:
raise Exception("Invalid img!", rel_fp)
else:
return img
def _get_object_path(self, sample):
"""
get the object npy path
Args:
sample (dict):
Returns:
abs path
"""
rel_object_fp = os.path.join(sample[1], "1.npz")
full_object_fp = os.path.join(self.object_dir, self.split, rel_object_fp)
return os.path.join(self.split, rel_object_fp), full_object_fp
def get_image(self, index, sample, image_key="image"):
image = self.get_raw_image(sample)
image_tensor = self.image_aug(image, self.transforms)
# image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms]
return {
"video": image_tensor,
"vid_index": sample[1],
"cap_index": index,
"raw_index": index,
}
def get_false_image(self, rep, image_key="image"):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata[random_index]
image = self.get_raw_image(sample)
# image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms]
image_tensor = self.image_aug(image, self.transforms)
return {f"false_video_{rep}": image_tensor}
def get_text(self, raw_index, sample):
text = sample[0]
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {
"text": (text, encoding),
"vid_index": sample[1],
"cap_index": raw_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata[random_index]
text = sample[0]
encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
index %= len(self.metadata)
result = None
# print(self.draw_false_image) # 1
while result is None:
sample = self.metadata[index]
# print(sample)
try:
ret = dict()
ret.update(self.get_image(index, sample))
if not self.image_only:
txt = self.get_text(index, sample)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_image):
ret.update(self.get_false_image(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
                print(
                    f"Error while reading file idx {sample[1]} in {self.names[0]} -> {e}"
                )
index = random.randint(0, len(self.metadata) - 1)
# ret["image"] = ret["image"].unsqueeze(1)
return ret
def __len__(self):
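        # Each rank keeps only its own shard of the metadata, so report the global
        # size here; get_suite() wraps the incoming index with index %= len(self.metadata).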
return len(self.metadata) * self.world_size
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/mix100m.py |
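`MIX100MDataset._load_metadata` shards the web-scale metadata while streaming it: every line is assigned to exactly one rank by a seeded random draw, and shorter shards are padded by resampling so that all ranks end up with equal length (which is why `__len__` multiplies by `world_size`). Below is a reduced sketch of that idea, illustrative only: the all-gather of shard sizes is replaced by an explicit `target_len` argument, and padding uses sampling with replacement for simplicity.

import random
import numpy as np

def shard_for_rank(lines, rank, world_size, seed=0):
    """Deterministically assign each metadata line to exactly one rank."""
    np.random.seed(seed)
    owners = np.random.randint(0, world_size, size=len(lines))
    return [line for line, owner in zip(lines, owners) if owner == rank]

def pad_shard(shard, target_len):
    """Oversample the shard so every rank reports the same length."""
    if len(shard) < target_len:
        shard = shard + random.choices(shard, k=target_len - len(shard))
    return shard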
from glob import glob
from .base_dataset import BaseDataset
class ConceptualCaptionDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
if split == "test":
split = "val"
if split == "train":
names = [f"conceptual_caption_train_{i}" for i in range(30)]
elif split == "val":
names = ["conceptual_caption_val_0"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
return self.get_suite(index)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/conceptual_caption_dataset.py |