python_code | repo_name | file_path |
---|---|---|
import torch
from setuptools import setup, Extension
from torch.utils.cpp_extension import BuildExtension, CppExtension
setup(
name='nms_1d_cpu',
ext_modules=[
CppExtension(
name = 'nms_1d_cpu',
sources = ['./csrc/nms_cpu.cpp'],
extra_compile_args=['-fopenmp']
)
],
cmdclass={
'build_ext': BuildExtension
}
)
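# Build/usage sketch (assumption, not part of the original script): the extension is
# typically compiled before training, e.g.
#   python setup.py install --user
# or, for an in-place build,
#   python setup.py build_ext --inplace
# after which the compiled 1D NMS op can be imported as `nms_1d_cpu`.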
| InternVideo-main | Downstream/Temporal-Action-Localization/libs/utils/setup.py |
import os
import shutil
import time
import json
import pickle
from typing import Dict
import numpy as np
import pdb
import torch
from scipy.special import softmax
from .metrics import ANETdetection
# def load_results_from_pkl(filename):
# # load from pickle file
# assert os.path.isfile(filename)
# with open(filename, "rb") as f:
# results = pickle.load(f)
# return results
def load_results_from_json(filename):
assert os.path.isfile(filename)
with open(filename, "r") as f:
results = json.load(f)
# for activity net external classification scores
if 'results' in results:
results = results['results']
return results
def results_to_dict(results):
"""convert result arrays into dict used by json files"""
# video ids and allocate the dict
vidxs = sorted(list(set(results['video-id'])))
results_dict = {}
for vidx in vidxs:
results_dict[vidx] = []
# fill in the dict
for vidx, start, end, label, score in zip(
results['video-id'],
results['t-start'],
results['t-end'],
results['label'],
results['score']
):
results_dict[vidx].append(
{
"label" : int(label),
"score" : float(score),
"segment": [float(start), float(end)],
}
)
return results_dict
def results_to_array(results, num_pred):
# video ids and allocate the dict
vidxs = sorted(list(set(results['video-id'])))
results_dict = {}
for vidx in vidxs:
results_dict[vidx] = {
'label' : [],
'score' : [],
'segment' : [],
}
# fill in the dict
for vidx, start, end, label, score in zip(
results['video-id'],
results['t-start'],
results['t-end'],
results['label'],
results['score']
):
results_dict[vidx]['label'].append(int(label))
results_dict[vidx]['score'].append(float(score))
results_dict[vidx]['segment'].append(
[float(start), float(end)]
)
for vidx in vidxs:
label = np.asarray(results_dict[vidx]['label'])
score = np.asarray(results_dict[vidx]['score'])
segment = np.asarray(results_dict[vidx]['segment'])
# the score should be already sorted, just for safety
inds = np.argsort(score)[::-1][:num_pred]
label, score, segment = label[inds], score[inds], segment[inds]
results_dict[vidx]['label'] = label
results_dict[vidx]['score'] = score
results_dict[vidx]['segment'] = segment
return results_dict
def postprocess_results(results, cls_score_file, num_pred=200, topk=2):
# load results and convert to dict
# if isinstance(results, str):
# results = load_results_from_pkl(results)
# array -> dict
results = results_to_array(results, num_pred)
# load external classification scores
if '.json' in cls_score_file:
cls_scores = load_results_from_json(cls_score_file)
# else:
# cls_scores = load_results_from_pkl(cls_score_file)
# dict for processed results
processed_results = {
'video-id': [],
't-start' : [],
't-end': [],
'label': [],
'score': []
}
# process each video
for vid, result in results.items():
# pick top k cls scores and idx
if len(cls_scores[vid])==1:
curr_cls_scores = np.asarray(cls_scores[vid][0])
else:
curr_cls_scores = np.asarray(cls_scores[vid])
if max(curr_cls_scores)>1 or min(curr_cls_scores)<0:
curr_cls_scores=softmax(curr_cls_scores)
topk_cls_idx = np.argsort(curr_cls_scores)[::-1][:topk]
topk_cls_score = curr_cls_scores[topk_cls_idx]
# model outputs
pred_score, pred_segment, pred_label = \
result['score'], result['segment'], result['label']
num_segs = min(num_pred, len(pred_score))
# duplicate all segments and assign the topk labels
# K x 1 @ 1 x N -> K x N -> KN
# multiply the scores
# temp = np.abs(topk_cls_score[:, None] @ pred_score[None, :])
# new_pred_score = np.sqrt(temp).flatten()
new_pred_score = np.sqrt(topk_cls_score[:, None] @ pred_score[None, :]).flatten()
new_pred_segment = np.tile(pred_segment, (topk, 1))
new_pred_label = np.tile(topk_cls_idx[:, None], (1, num_segs)).flatten()
# add to result
processed_results['video-id'].extend([vid]*num_segs*topk)
processed_results['t-start'].append(new_pred_segment[:, 0])
processed_results['t-end'].append(new_pred_segment[:, 1])
processed_results['label'].append(new_pred_label)
processed_results['score'].append(new_pred_score)
# pdb.set_trace()
processed_results['t-start'] = np.concatenate(
processed_results['t-start'], axis=0)
processed_results['t-end'] = np.concatenate(
processed_results['t-end'], axis=0)
processed_results['label'] = np.concatenate(
processed_results['label'],axis=0)
processed_results['score'] = np.concatenate(
processed_results['score'], axis=0)
return processed_results
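# Minimal usage sketch (file name and arguments are hypothetical):
#   processed = postprocess_results(
#       raw_results,                      # dict with 'video-id', 't-start', 't-end', 'label', 'score'
#       'data/anet_cls_scores.json',      # external per-video classification scores
#       num_pred=200, topk=2
#   )
# Each detected segment is duplicated for the top-k video-level classes and its score is
# fused as sqrt(cls_score * det_score); the output can then be evaluated with ANETdetection.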
| InternVideo-main | Downstream/Temporal-Action-Localization/libs/utils/postprocessing.py |
import os
# backbone (e.g., conv / transformer)
backbones = {}
def register_backbone(name):
def decorator(cls):
backbones[name] = cls
return cls
return decorator
# neck (e.g., FPN)
necks = {}
def register_neck(name):
def decorator(cls):
necks[name] = cls
return cls
return decorator
# location generator (point, segment, etc)
generators = {}
def register_generator(name):
def decorator(cls):
generators[name] = cls
return cls
return decorator
# meta arch (the actual implementation of each model)
meta_archs = {}
def register_meta_arch(name):
def decorator(cls):
meta_archs[name] = cls
return cls
return decorator
# builder functions
def make_backbone(name, **kwargs):
backbone = backbones[name](**kwargs)
return backbone
def make_neck(name, **kwargs):
neck = necks[name](**kwargs)
return neck
def make_meta_arch(name, **kwargs):
meta_arch = meta_archs[name](**kwargs)
return meta_arch
def make_generator(name, **kwargs):
generator = generators[name](**kwargs)
return generator
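# Usage sketch for the registry pattern (illustrative names only):
#
#   @register_backbone("my_backbone")
#   class MyBackbone(nn.Module):
#       ...
#
#   backbone = make_backbone("my_backbone", n_in=2304, n_embd=512)
#
# Classes register themselves by name at import time; the make_* builders then look the
# name up and instantiate the class with the supplied keyword arguments.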
| InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/models.py |
import math
import torch
from torch import nn
from torch.nn import functional as F
from .models import register_meta_arch, make_backbone, make_neck, make_generator
from .blocks import MaskedConv1D, Scale, LayerNorm
from .losses import ctr_diou_loss_1d, sigmoid_focal_loss
from ..utils import batched_nms
class PtTransformerClsHead(nn.Module):
"""
1D Conv heads for classification
"""
def __init__(
self,
input_dim,
feat_dim,
num_classes,
prior_prob=0.01,
num_layers=3,
kernel_size=3,
act_layer=nn.ReLU,
with_ln=False,
empty_cls = []
):
super().__init__()
self.act = act_layer()
# build the head
self.head = nn.ModuleList()
self.norm = nn.ModuleList()
for idx in range(num_layers-1):
if idx == 0:
in_dim = input_dim
out_dim = feat_dim
else:
in_dim = feat_dim
out_dim = feat_dim
self.head.append(
MaskedConv1D(
in_dim, out_dim, kernel_size,
stride=1,
padding=kernel_size//2,
bias=(not with_ln)
)
)
if with_ln:
self.norm.append(
LayerNorm(out_dim)
)
else:
self.norm.append(nn.Identity())
# classifier
self.cls_head = MaskedConv1D(
feat_dim, num_classes, kernel_size,
stride=1, padding=kernel_size//2
)
# use prior in model initialization to improve stability
# this will overwrite other weight init
bias_value = -(math.log((1 - prior_prob) / prior_prob))
torch.nn.init.constant_(self.cls_head.conv.bias, bias_value)
# a quick fix to empty categories:
# the weights associated with these categories will remain unchanged
# we set their bias to a large negative value to suppress their outputs
if len(empty_cls) > 0:
bias_value = -(math.log((1 - 1e-6) / 1e-6))
for idx in empty_cls:
torch.nn.init.constant_(self.cls_head.conv.bias[idx], bias_value)
def forward(self, fpn_feats, fpn_masks):
assert len(fpn_feats) == len(fpn_masks)
# apply the classifier for each pyramid level
out_logits = tuple()
for _, (cur_feat, cur_mask) in enumerate(zip(fpn_feats, fpn_masks)):
cur_out = cur_feat
for idx in range(len(self.head)):
cur_out, _ = self.head[idx](cur_out, cur_mask)
cur_out = self.act(self.norm[idx](cur_out))
cur_logits, _ = self.cls_head(cur_out, cur_mask)
out_logits += (cur_logits, )
# fpn_masks remains the same
return out_logits
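# Note (worked example, not from the original comments): with the default
# prior_prob = 0.01 the classifier bias is set to -log((1 - 0.01) / 0.01) ≈ -4.595,
# so sigmoid(bias) ≈ 0.01 and every location starts out predicting ~1% foreground,
# which keeps the focal loss small and stable at the beginning of training.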
class PtTransformerRegHead(nn.Module):
"""
Shared 1D Conv heads for regression
Similar logic to PtTransformerClsHead, with a separate implementation for clarity
"""
def __init__(
self,
input_dim,
feat_dim,
fpn_levels,
num_layers=3,
kernel_size=3,
act_layer=nn.ReLU,
with_ln=False
):
super().__init__()
self.fpn_levels = fpn_levels
self.act = act_layer()
# build the conv head
self.head = nn.ModuleList()
self.norm = nn.ModuleList()
for idx in range(num_layers-1):
if idx == 0:
in_dim = input_dim
out_dim = feat_dim
else:
in_dim = feat_dim
out_dim = feat_dim
self.head.append(
MaskedConv1D(
in_dim, out_dim, kernel_size,
stride=1,
padding=kernel_size//2,
bias=(not with_ln)
)
)
if with_ln:
self.norm.append(
LayerNorm(out_dim)
)
else:
self.norm.append(nn.Identity())
self.scale = nn.ModuleList()
for idx in range(fpn_levels):
self.scale.append(Scale())
# segment regression
self.offset_head = MaskedConv1D(
feat_dim, 2, kernel_size,
stride=1, padding=kernel_size//2
)
def forward(self, fpn_feats, fpn_masks):
assert len(fpn_feats) == len(fpn_masks)
assert len(fpn_feats) == self.fpn_levels
# apply the regression head for each pyramid level
out_offsets = tuple()
for l, (cur_feat, cur_mask) in enumerate(zip(fpn_feats, fpn_masks)):
cur_out = cur_feat
for idx in range(len(self.head)):
cur_out, _ = self.head[idx](cur_out, cur_mask)
cur_out = self.act(self.norm[idx](cur_out))
cur_offsets, _ = self.offset_head(cur_out, cur_mask)
out_offsets += (F.relu(self.scale[l](cur_offsets)), )
# fpn_masks remains the same
return out_offsets
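# Note (descriptive, added for clarity): each FPN level gets its own learnable Scale,
# and the trailing ReLU keeps the predicted (left, right) offsets non-negative, matching
# the offset parameterization assumed by the 1D IoU losses.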
@register_meta_arch("LocPointTransformer")
class PtTransformer(nn.Module):
"""
Transformer based model for single stage action localization
"""
def __init__(
self,
backbone_type, # a string defines which backbone we use
fpn_type, # a string defines which fpn we use
backbone_arch, # a tuple defines # layers in embed / stem / branch
scale_factor, # scale factor between branch layers
input_dim, # input feat dim
max_seq_len, # max sequence length (used for training)
max_buffer_len_factor, # max buffer size (defined a factor of max_seq_len)
n_head, # number of heads for self-attention in transformer
n_mha_win_size, # window size for self attention; -1 to use full seq
embd_kernel_size, # kernel size of the embedding network
embd_dim, # output feat channel of the embedding network
embd_with_ln, # attach layernorm to embedding network
fpn_dim, # feature dim on FPN
fpn_with_ln, # if to apply layer norm at the end of fpn
head_dim, # feature dim for head
regression_range, # regression range on each level of FPN
head_num_layers, # number of layers in the head (including the classifier)
head_kernel_size, # kernel size for reg/cls heads
head_with_ln, # attach layernorm to reg/cls heads
use_abs_pe, # if to use abs position encoding
use_rel_pe, # if to use rel position encoding
num_classes, # number of action classes
train_cfg, # other cfg for training
test_cfg # other cfg for testing
):
super().__init__()
# re-distribute params to backbone / neck / head
self.fpn_strides = [scale_factor**i for i in range(backbone_arch[-1]+1)]
self.reg_range = regression_range
assert len(self.fpn_strides) == len(self.reg_range)
self.scale_factor = scale_factor
# #classes = num_classes + 1 (background) with last category as background
# e.g., num_classes = 10 -> 0, 1, ..., 9 as actions, 10 as background
self.num_classes = num_classes
# check the feature pyramid and local attention window size
self.max_seq_len = max_seq_len
if isinstance(n_mha_win_size, int):
self.mha_win_size = [n_mha_win_size]*len(self.fpn_strides)
else:
assert len(n_mha_win_size) == len(self.fpn_strides)
self.mha_win_size = n_mha_win_size
max_div_factor = 1
for l, (s, w) in enumerate(zip(self.fpn_strides, self.mha_win_size)):
stride = s * (w // 2) * 2 if w > 1 else s
# print(stride)
assert max_seq_len % stride == 0, "max_seq_len must be divisible by fpn stride and window size"
if max_div_factor < stride:
max_div_factor = stride
self.max_div_factor = max_div_factor
# training time config
self.train_center_sample = train_cfg['center_sample']
assert self.train_center_sample in ['radius', 'none']
self.train_center_sample_radius = train_cfg['center_sample_radius']
self.train_loss_weight = train_cfg['loss_weight']
self.train_cls_prior_prob = train_cfg['cls_prior_prob']
self.train_dropout = train_cfg['dropout']
self.train_droppath = train_cfg['droppath']
self.train_label_smoothing = train_cfg['label_smoothing']
# test time config
self.test_pre_nms_thresh = test_cfg['pre_nms_thresh']
self.test_pre_nms_topk = test_cfg['pre_nms_topk']
self.test_iou_threshold = test_cfg['iou_threshold']
self.test_min_score = test_cfg['min_score']
self.test_max_seg_num = test_cfg['max_seg_num']
self.test_nms_method = test_cfg['nms_method']
assert self.test_nms_method in ['soft', 'hard', 'none']
self.test_duration_thresh = test_cfg['duration_thresh']
self.test_multiclass_nms = test_cfg['multiclass_nms']
self.test_nms_sigma = test_cfg['nms_sigma']
self.test_voting_thresh = test_cfg['voting_thresh']
# we will need a better way to dispatch the params to backbones / necks
# backbone network: conv + transformer
assert backbone_type in ['convTransformer', 'conv']
if backbone_type == 'convTransformer':
self.backbone = make_backbone(
'convTransformer',
**{
'n_in' : input_dim,
'n_embd' : embd_dim,
'n_head': n_head,
'n_embd_ks': embd_kernel_size,
'max_len': max_seq_len,
'arch' : backbone_arch,
'mha_win_size': self.mha_win_size,
'scale_factor' : scale_factor,
'with_ln' : embd_with_ln,
'attn_pdrop' : 0.0,
'proj_pdrop' : self.train_dropout,
'path_pdrop' : self.train_droppath,
'use_abs_pe' : use_abs_pe,
'use_rel_pe' : use_rel_pe
}
)
else:
self.backbone = make_backbone(
'conv',
**{
'n_in': input_dim,
'n_embd': embd_dim,
'n_embd_ks': embd_kernel_size,
'arch': backbone_arch,
'scale_factor': scale_factor,
'with_ln' : embd_with_ln
}
)
# fpn network: convs
assert fpn_type in ['fpn', 'identity']
self.neck = make_neck(
fpn_type,
**{
'in_channels' : [embd_dim] * (backbone_arch[-1] + 1),
'out_channel' : fpn_dim,
'scale_factor' : scale_factor,
'with_ln' : fpn_with_ln
}
)
# location generator: points
self.point_generator = make_generator(
'point',
**{
'max_seq_len' : max_seq_len * max_buffer_len_factor,
'fpn_levels' : len(self.fpn_strides),
'scale_factor' : scale_factor,
'regression_range' : self.reg_range
}
)
# classification and regression heads
self.cls_head = PtTransformerClsHead(
fpn_dim, head_dim, self.num_classes,
kernel_size=head_kernel_size,
prior_prob=self.train_cls_prior_prob,
with_ln=head_with_ln,
num_layers=head_num_layers,
empty_cls=train_cfg['head_empty_cls']
)
self.reg_head = PtTransformerRegHead(
fpn_dim, head_dim, len(self.fpn_strides),
kernel_size=head_kernel_size,
num_layers=head_num_layers,
with_ln=head_with_ln
)
# maintain an EMA of #foreground to stabilize the loss normalizer
# useful for small mini-batch training
self.loss_normalizer = train_cfg['init_loss_norm']
self.loss_normalizer_momentum = 0.9
@property
def device(self):
# a hacky way to get the device type
# will throw an error if parameters are on different devices
return list(set(p.device for p in self.parameters()))[0]
def forward(self, video_list):
# batch the video list into feats (B, C, T) and masks (B, 1, T)
batched_inputs, batched_masks = self.preprocessing(video_list)
# forward the network (backbone -> neck -> heads)
feats, masks = self.backbone(batched_inputs, batched_masks)
fpn_feats, fpn_masks = self.neck(feats, masks)
# fpn_feats [16, 256, 768] ..[16, 256, 384]..[16, 256, 24]
# compute the point coordinate along the FPN
# this is used for computing the GT or decode the final results
# points: List[T x 4] with length = # fpn levels
# (shared across all samples in the mini-batch)
points = self.point_generator(fpn_feats)
# out_cls: List[B, #cls + 1, T_i]
out_cls_logits = self.cls_head(fpn_feats, fpn_masks)
# out_offset: List[B, 2, T_i]
out_offsets = self.reg_head(fpn_feats, fpn_masks)
# permute the outputs
# out_cls: F List[B, #cls, T_i] -> F List[B, T_i, #cls]
out_cls_logits = [x.permute(0, 2, 1) for x in out_cls_logits]
# out_offset: F List[B, 2 (xC), T_i] -> F List[B, T_i, 2 (xC)]
out_offsets = [x.permute(0, 2, 1) for x in out_offsets]
# fpn_masks: F list[B, 1, T_i] -> F List[B, T_i]
fpn_masks = [x.squeeze(1) for x in fpn_masks]
# return loss during training
if self.training:
# generate segment/label List[N x 2] / List[N] with length = B
assert video_list[0]['segments'] is not None, "GT action segments do not exist"
assert video_list[0]['labels'] is not None, "GT action labels do not exist"
# print(video_list)
gt_segments = [x['segments'].to(self.device) for x in video_list]
gt_labels = [x['labels'].to(self.device) for x in video_list]
# compute the gt labels for cls & reg
# list of prediction targets
gt_cls_labels, gt_offsets = self.label_points(
points, gt_segments, gt_labels)
# compute the loss and return
losses = self.losses(
fpn_masks,
out_cls_logits, out_offsets,
gt_cls_labels, gt_offsets
)
return losses
else:
# decode the actions (sigmoid / stride, etc)
results = self.inference(
video_list, points, fpn_masks,
out_cls_logits, out_offsets
)
return results
@torch.no_grad()
def preprocessing(self, video_list, padding_val=0.0):
"""
Generate batched features and masks from a list of dict items
"""
feats = [x['feats'] for x in video_list]
feats_lens = torch.as_tensor([feat.shape[-1] for feat in feats])
max_len = feats_lens.max(0).values.item()
if self.training:
assert max_len <= self.max_seq_len, "Input length must not exceed max_seq_len during training"
# set max_len to self.max_seq_len
max_len = self.max_seq_len
# batch input shape B, C, T
batch_shape = [len(feats), feats[0].shape[0], max_len]
batched_inputs = feats[0].new_full(batch_shape, padding_val)
for feat, pad_feat in zip(feats, batched_inputs):
pad_feat[..., :feat.shape[-1]].copy_(feat)
else:
assert len(video_list) == 1, "Only support batch_size = 1 during inference"
# input length < self.max_seq_len, pad to max_seq_len
if max_len <= self.max_seq_len:
max_len = self.max_seq_len
else:
# pad the input to the next divisible size
stride = self.max_div_factor
max_len = (max_len + (stride - 1)) // stride * stride
padding_size = [0, max_len - feats_lens[0]]
batched_inputs = F.pad(
feats[0], padding_size, value=padding_val).unsqueeze(0)
# generate the mask
batched_masks = torch.arange(max_len)[None, :] < feats_lens[:, None]
# push to device
batched_inputs = batched_inputs.to(self.device)
batched_masks = batched_masks.unsqueeze(1).to(self.device)
return batched_inputs, batched_masks
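# Padding note (illustrative numbers): if max_div_factor were 128, an inference clip of
# length 1000 would be padded to ((1000 + 127) // 128) * 128 = 1024 so that every FPN
# stride and local attention window divides the padded length evenly.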
@torch.no_grad()
def label_points(self, points, gt_segments, gt_labels):
# concat points on all fpn levels List[T x 4] -> F T x 4
# This is shared for all samples in the mini-batch
num_levels = len(points)
concat_points = torch.cat(points, dim=0)
gt_cls, gt_offset = [], []
# loop over each video sample
for gt_segment, gt_label in zip(gt_segments, gt_labels):
cls_targets, reg_targets = self.label_points_single_video(
concat_points, gt_segment, gt_label
)
# append to list (len = # images, each of size FT x C)
gt_cls.append(cls_targets)
gt_offset.append(reg_targets)
return gt_cls, gt_offset
@torch.no_grad()
def label_points_single_video(self, concat_points, gt_segment, gt_label):
# concat_points : F T x 4 (t, regression range, stride)
# gt_segment : N (#Events) x 2
# gt_label : N (#Events) x 1
num_pts = concat_points.shape[0]
num_gts = gt_segment.shape[0]
# corner case where current sample does not have actions
if num_gts == 0:
cls_targets = gt_segment.new_full((num_pts, self.num_classes), 0)
reg_targets = gt_segment.new_zeros((num_pts, 2))
return cls_targets, reg_targets
# compute the lengths of all segments -> F T x N
lens = gt_segment[:, 1] - gt_segment[:, 0]
lens = lens[None, :].repeat(num_pts, 1)
# compute the distance of every point to each segment boundary
# auto broadcasting for all reg target-> F T x N x2
gt_segs = gt_segment[None].expand(num_pts, num_gts, 2)
left = concat_points[:, 0, None] - gt_segs[:, :, 0]
right = gt_segs[:, :, 1] - concat_points[:, 0, None]
reg_targets = torch.stack((left, right), dim=-1)
if self.train_center_sample == 'radius':
# center of all segments F T x N
center_pts = 0.5 * (gt_segs[:, :, 0] + gt_segs[:, :, 1])
# center sampling based on stride radius
# compute the new boundaries:
# concat_points[:, 3] stores the stride
t_mins = \
center_pts - concat_points[:, 3, None] * self.train_center_sample_radius
t_maxs = \
center_pts + concat_points[:, 3, None] * self.train_center_sample_radius
# prevent t_mins / maxs from over-running the action boundary
# left: torch.maximum(t_mins, gt_segs[:, :, 0])
# right: torch.minimum(t_maxs, gt_segs[:, :, 1])
# F T x N (distance to the new boundary)
cb_dist_left = concat_points[:, 0, None] \
- torch.maximum(t_mins, gt_segs[:, :, 0])
cb_dist_right = torch.minimum(t_maxs, gt_segs[:, :, 1]) \
- concat_points[:, 0, None]
# F T x N x 2
center_seg = torch.stack(
(cb_dist_left, cb_dist_right), -1)
# F T x N
inside_gt_seg_mask = center_seg.min(-1)[0] > 0
else:
# inside a gt action
inside_gt_seg_mask = reg_targets.min(-1)[0] > 0
# limit the regression range for each location
max_regress_distance = reg_targets.max(-1)[0]
# F T x N
inside_regress_range = torch.logical_and(
(max_regress_distance >= concat_points[:, 1, None]),
(max_regress_distance <= concat_points[:, 2, None])
)
# if there are still more than one actions for one moment
# pick the one with the shortest duration (easiest to regress)
lens.masked_fill_(inside_gt_seg_mask==0, float('inf'))
lens.masked_fill_(inside_regress_range==0, float('inf'))
# F T x N -> F T
min_len, min_len_inds = lens.min(dim=1)
# corner case: multiple actions with very similar durations (e.g., THUMOS14)
min_len_mask = torch.logical_and(
(lens <= (min_len[:, None] + 1e-3)), (lens < float('inf'))
).to(reg_targets.dtype)
# cls_targets: F T x C; reg_targets F T x 2
gt_label_one_hot = F.one_hot(
gt_label, self.num_classes
).to(reg_targets.dtype)
cls_targets = min_len_mask @ gt_label_one_hot
# to prevent multiple GT actions with the same label and boundaries
cls_targets.clamp_(min=0.0, max=1.0)
# OK to use min_len_inds
reg_targets = reg_targets[range(num_pts), min_len_inds]
# normalization based on stride
reg_targets /= concat_points[:, 3, None]
return cls_targets, reg_targets
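# Worked example (hypothetical numbers): a point at t = 100 on a level with stride 4,
# matched to a GT segment [92, 120], gets raw offsets (100 - 92, 120 - 100) = (8, 20),
# which are normalized by the stride to the regression targets (2.0, 5.0).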
def losses(
self, fpn_masks,
out_cls_logits, out_offsets,
gt_cls_labels, gt_offsets
):
# self.criterion_KL = nn.KLDivLoss(reduction='batchmean', log_target=True)
# kl_loss=[]
# for ii in range(len(out_cls_logits)-1):
# outputs=out_cls_logits[ii][:,::2,:]
# targets=out_cls_logits[ii+1]
# outputs = F.softmax(outputs, dim=2)
# targets = F.softmax(targets, dim=2)
# loss1 = self.criterion_KL(outputs, targets)
# loss2 = self.criterion_KL(targets, outputs)
# kl_loss.append(loss1*0.2 + loss2*0.2)
# fpn_masks, out_*: F (List) [B, T_i, C]
# gt_* : B (list) [F T, C]
# fpn_masks -> (B, FT)
valid_mask = torch.cat(fpn_masks, dim=1)
# 1. classification loss
# stack the list -> (B, FT) -> (# Valid, )
gt_cls = torch.stack(gt_cls_labels)
pos_mask = torch.logical_and((gt_cls.sum(-1) > 0), valid_mask)
# cat the predicted offsets -> (B, FT, 2 (xC)) -> # (#Pos, 2 (xC))
pred_offsets = torch.cat(out_offsets, dim=1)[pos_mask]
gt_offsets = torch.stack(gt_offsets)[pos_mask]
# update the loss normalizer
num_pos = pos_mask.sum().item()
self.loss_normalizer = self.loss_normalizer_momentum * self.loss_normalizer + (
1 - self.loss_normalizer_momentum
) * max(num_pos, 1)
# gt_cls is already one hot encoded now, simply masking out
gt_target = gt_cls[valid_mask]
# optional label smoothing
gt_target *= 1 - self.train_label_smoothing
gt_target += self.train_label_smoothing / (self.num_classes + 1)
# focal loss
cls_loss = sigmoid_focal_loss(
torch.cat(out_cls_logits, dim=1)[valid_mask],
gt_target,
reduction='sum'
)
cls_loss /= self.loss_normalizer
# 2. regression using IoU/GIoU loss (defined on positive samples)
if num_pos == 0:
reg_loss = 0 * pred_offsets.sum()
else:
# giou loss defined on positive samples
reg_loss = ctr_diou_loss_1d(
pred_offsets,
gt_offsets,
reduction='sum'
)
reg_loss /= self.loss_normalizer
if self.train_loss_weight > 0:
loss_weight = self.train_loss_weight
else:
loss_weight = cls_loss.detach() / max(reg_loss.item(), 0.01)
# return a dict of losses
final_loss = cls_loss + reg_loss * loss_weight
# for kl in kl_loss:
# final_loss=final_loss+kl
return {'cls_loss' : cls_loss,
'reg_loss' : reg_loss,
'final_loss' : final_loss}
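# EMA example (illustrative): with momentum 0.9, a running normalizer of 100 and 60
# positive points in the current mini-batch, the update gives 0.9 * 100 + 0.1 * 60 = 96;
# both cls_loss and reg_loss are divided by this smoothed count rather than the raw #pos.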
@torch.no_grad()
def inference(
self,
video_list,
points, fpn_masks,
out_cls_logits, out_offsets
):
# video_list B (list) [dict]
# points F (list) [T_i, 4]
# fpn_masks, out_*: F (List) [B, T_i, C]
results = []
# 1: gather video meta information
vid_idxs = [x['video_id'] for x in video_list]
vid_fps = [x['fps'] for x in video_list]
vid_lens = [x['duration'] for x in video_list]
vid_ft_stride = [x['feat_stride'] for x in video_list]
vid_ft_nframes = [x['feat_num_frames'] for x in video_list]
# 2: inference on each single video and gather the results
# up to this point, all results use timestamps defined on feature grids
for idx, (vidx, fps, vlen, stride, nframes) in enumerate(
zip(vid_idxs, vid_fps, vid_lens, vid_ft_stride, vid_ft_nframes)
):
# gather per-video outputs
cls_logits_per_vid = [x[idx] for x in out_cls_logits]
offsets_per_vid = [x[idx] for x in out_offsets]
fpn_masks_per_vid = [x[idx] for x in fpn_masks]
# inference on a single video (should always be the case)
results_per_vid = self.inference_single_video(
points, fpn_masks_per_vid,
cls_logits_per_vid, offsets_per_vid
)
# pass through video meta info
results_per_vid['video_id'] = vidx
results_per_vid['fps'] = fps
results_per_vid['duration'] = vlen
results_per_vid['feat_stride'] = stride
results_per_vid['feat_num_frames'] = nframes
results.append(results_per_vid)
# step 3: postprocessing
results = self.postprocessing(results)
return results
@torch.no_grad()
def inference_single_video(
self,
points,
fpn_masks,
out_cls_logits,
out_offsets,
):
# points F (list) [T_i, 4]
# fpn_masks, out_*: F (List) [T_i, C]
segs_all = []
scores_all = []
cls_idxs_all = []
# loop over fpn levels
for cls_i, offsets_i, pts_i, mask_i in zip(
out_cls_logits, out_offsets, points, fpn_masks
):
# sigmoid normalization for output logits
pred_prob = (cls_i.sigmoid() * mask_i.unsqueeze(-1)).flatten()
# Apply filtering to make NMS faster following detectron2
# 1. Keep seg with confidence score > a threshold
keep_idxs1 = (pred_prob > self.test_pre_nms_thresh)
pred_prob = pred_prob[keep_idxs1]
topk_idxs = keep_idxs1.nonzero(as_tuple=True)[0]
# 2. Keep top k top scoring boxes only
num_topk = min(self.test_pre_nms_topk, topk_idxs.size(0))
pred_prob, idxs = pred_prob.sort(descending=True)
pred_prob = pred_prob[:num_topk].clone()
topk_idxs = topk_idxs[idxs[:num_topk]].clone()
# fix a warning in pytorch 1.9
pt_idxs = torch.div(
topk_idxs, self.num_classes, rounding_mode='floor'
)
cls_idxs = torch.fmod(topk_idxs, self.num_classes)
# 3. gather predicted offsets
offsets = offsets_i[pt_idxs]
pts = pts_i[pt_idxs]
# 4. compute predicted segments (denorm by stride for output offsets)
seg_left = pts[:, 0] - offsets[:, 0] * pts[:, 3]
seg_right = pts[:, 0] + offsets[:, 1] * pts[:, 3]
pred_segs = torch.stack((seg_left, seg_right), -1)
# 5. Keep seg with duration > a threshold (relative to feature grids)
seg_areas = seg_right - seg_left
keep_idxs2 = seg_areas > self.test_duration_thresh
# *_all : N (filtered # of segments) x 2 / 1
segs_all.append(pred_segs[keep_idxs2])
scores_all.append(pred_prob[keep_idxs2])
cls_idxs_all.append(cls_idxs[keep_idxs2])
# cat along the FPN levels (F N_i, C)
segs_all, scores_all, cls_idxs_all = [
torch.cat(x) for x in [segs_all, scores_all, cls_idxs_all]
]
results = {'segments' : segs_all,
'scores' : scores_all,
'labels' : cls_idxs_all}
return results
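# Index decoding example (illustrative): pred_prob is flattened over (T_i, num_classes),
# so a kept index of 53 with num_classes = 20 maps to point 53 // 20 = 2 and class
# 53 % 20 = 13, which is exactly what the floor-div / fmod above recover.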
@torch.no_grad()
def postprocessing(self, results):
# input : list of dictionary items
# (1) push to CPU; (2) NMS; (3) convert to actual time stamps
processed_results = []
for results_per_vid in results:
# unpack the meta info
vidx = results_per_vid['video_id']
fps = results_per_vid['fps']
vlen = float(results_per_vid['duration'])
stride = results_per_vid['feat_stride']
nframes = results_per_vid['feat_num_frames']
# 1: unpack the results and move to CPU
segs = results_per_vid['segments'].detach().cpu()
scores = results_per_vid['scores'].detach().cpu()
labels = results_per_vid['labels'].detach().cpu()
if self.test_nms_method != 'none':
# 2: batched nms (only implemented on CPU)
segs, scores, labels = batched_nms(
segs, scores, labels,
self.test_iou_threshold,
self.test_min_score,
self.test_max_seg_num,
use_soft_nms = (self.test_nms_method == 'soft'),
multiclass = self.test_multiclass_nms,
sigma = self.test_nms_sigma,
voting_thresh = self.test_voting_thresh
)
# 3: convert from feature grids to seconds
if segs.shape[0] > 0:
segs = (segs * stride + 0.5 * nframes) / fps
# truncate all boundaries within [0, duration]
segs[segs<=0.0] *= 0.0
segs[segs>=vlen] = segs[segs>=vlen] * 0.0 + vlen
# 4: repack the results
processed_results.append(
{'video_id' : vidx,
'segments' : segs,
'scores' : scores,
'labels' : labels}
)
return processed_results
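# Time conversion example (illustrative): a boundary at feature-grid index 30 with
# feat_stride = 4, feat_num_frames = 16 and fps = 30 maps to
# (30 * 4 + 0.5 * 16) / 30 = 128 / 30 ≈ 4.27 seconds before clipping to [0, duration].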
| InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/meta_archs.py |
from .blocks import (MaskedConv1D, MaskedMHCA, MaskedMHA, LayerNorm,
TransformerBlock, ConvBlock, Scale, AffineDropPath)
from .models import make_backbone, make_neck, make_meta_arch, make_generator
from . import backbones # backbones
from . import necks # necks
from . import loc_generators # location generators
from . import meta_archs # full models
__all__ = ['MaskedConv1D', 'MaskedMHCA', 'MaskedMHA', 'LayerNorm',
'TransformerBlock', 'ConvBlock', 'Scale', 'AffineDropPath',
'make_backbone', 'make_neck', 'make_meta_arch', 'make_generator']
| InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/__init__.py |
import torch
from torch import nn
from torch.nn import functional as F
from .models import register_backbone
from .blocks import (get_sinusoid_encoding, TransformerBlock, MaskedConv1D,
ConvBlock, LayerNorm)
@register_backbone("convTransformer")
class ConvTransformerBackbone(nn.Module):
"""
A backbone that combines convolutions with transformers
"""
def __init__(
self,
n_in, # input feature dimension
n_embd, # embedding dimension (after convolution)
n_head, # number of head for self-attention in transformers
n_embd_ks, # conv kernel size of the embedding network
max_len, # max sequence length
arch = (2, 2, 5), # (#convs, #stem transformers, #branch transformers)
mha_win_size = [-1]*6, # size of local window for mha
scale_factor = 2, # downsampling rate for the branch
with_ln = False, # if to attach layernorm after conv
attn_pdrop = 0.0, # dropout rate for the attention map
proj_pdrop = 0.0, # dropout rate for the projection / MLP
path_pdrop = 0.0, # dropout rate for drop path
use_abs_pe = False, # use absolute position embedding
use_rel_pe = False, # use relative position embedding
):
super().__init__()
assert len(arch) == 3
assert len(mha_win_size) == (1 + arch[2])
self.arch = arch
self.mha_win_size = mha_win_size
self.max_len = max_len
self.relu = nn.ReLU(inplace=True)
self.scale_factor = scale_factor
self.use_abs_pe = use_abs_pe
self.use_rel_pe = use_rel_pe
# position embedding (1, C, T), rescaled by 1/sqrt(n_embd)
if self.use_abs_pe:
pos_embd = get_sinusoid_encoding(self.max_len, n_embd) / (n_embd**0.5)
self.register_buffer("pos_embd", pos_embd, persistent=False)
# embedding network using convs
self.embd = nn.ModuleList()
self.embd_norm = nn.ModuleList()
for idx in range(arch[0]):
if idx == 0:
in_channels = n_in
else:
in_channels = n_embd
self.embd.append(MaskedConv1D(
in_channels, n_embd, n_embd_ks,
stride=1, padding=n_embd_ks//2, bias=(not with_ln)
)
)
if with_ln:
self.embd_norm.append(
LayerNorm(n_embd)
)
else:
self.embd_norm.append(nn.Identity())
# stem network using (vanilla) transformer
self.stem = nn.ModuleList()
for idx in range(arch[1]):
self.stem.append(TransformerBlock(
n_embd, n_head,
n_ds_strides=(1, 1),
attn_pdrop=attn_pdrop,
proj_pdrop=proj_pdrop,
path_pdrop=path_pdrop,
mha_win_size=self.mha_win_size[0],
use_rel_pe=self.use_rel_pe
)
)
# main branch using transformer with pooling
self.branch = nn.ModuleList()
for idx in range(arch[2]):
self.branch.append(TransformerBlock(
n_embd, n_head,
n_ds_strides=(self.scale_factor, self.scale_factor),
attn_pdrop=attn_pdrop,
proj_pdrop=proj_pdrop,
path_pdrop=path_pdrop,
mha_win_size=self.mha_win_size[1+idx],
use_rel_pe=self.use_rel_pe
)
)
# init weights
self.apply(self.__init_weights__)
def __init_weights__(self, module):
# set nn.Linear/nn.Conv1d bias term to 0
if isinstance(module, (nn.Linear, nn.Conv1d)):
if module.bias is not None:
torch.nn.init.constant_(module.bias, 0.)
def forward(self, x, mask):
# x: batch size, feature channel, sequence length,
# mask: batch size, 1, sequence length (bool)
B, C, T = x.size()
# embedding network
for idx in range(len(self.embd)):
x, mask = self.embd[idx](x, mask)
x = self.relu(self.embd_norm[idx](x))
# training: using fixed length position embeddings
if self.use_abs_pe and self.training:
assert T <= self.max_len, "Reached max length."
pe = self.pos_embd
# add pe to x
x = x + pe[:, :, :T] * mask.to(x.dtype)
# inference: re-interpolate position embeddings for over-length sequences
if self.use_abs_pe and (not self.training):
if T >= self.max_len:
pe = F.interpolate(
self.pos_embd, T, mode='linear', align_corners=False)
else:
pe = self.pos_embd
# add pe to x
x = x + pe[:, :, :T] * mask.to(x.dtype)
# stem transformer
for idx in range(len(self.stem)):
x, mask = self.stem[idx](x, mask)
# prep for outputs
out_feats = tuple()
out_masks = tuple()
# 1x resolution
out_feats += (x, )
out_masks += (mask, )
# main branch with downsampling
for idx in range(len(self.branch)):
x, mask = self.branch[idx](x, mask)
out_feats += (x, )
out_masks += (mask, )
return out_feats, out_masks
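# Illustrative instantiation (argument values are assumptions, not the repo's config):
#   backbone = make_backbone(
#       'convTransformer',
#       n_in=2304, n_embd=512, n_head=4, n_embd_ks=3, max_len=2304,
#       arch=(2, 2, 5), mha_win_size=[19] * 6, scale_factor=2, with_ln=True,
#       attn_pdrop=0.0, proj_pdrop=0.1, path_pdrop=0.1,
#       use_abs_pe=False, use_rel_pe=False
#   )
# With arch = (2, 2, 5) the forward pass returns a 6-level pyramid: the 1x stem output
# plus 5 branch outputs, each downsampled by scale_factor relative to the previous one.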
@register_backbone("conv")
class ConvBackbone(nn.Module):
"""
A backbone with only convs
"""
def __init__(
self,
n_in, # input feature dimension
n_embd, # embedding dimension (after convolution)
n_embd_ks, # conv kernel size of the embedding network
arch = (2, 2, 5), # (#convs, #stem convs, #branch convs)
scale_factor = 2, # downsampling rate for the branch
with_ln=False, # if to use layernorm
):
super().__init__()
assert len(arch) == 3
self.arch = arch
self.relu = nn.ReLU(inplace=True)
self.scale_factor = scale_factor
# embedding network using convs
self.embd = nn.ModuleList()
self.embd_norm = nn.ModuleList()
for idx in range(arch[0]):
if idx == 0:
in_channels = n_in
else:
in_channels = n_embd
self.embd.append(MaskedConv1D(
in_channels, n_embd, n_embd_ks,
stride=1, padding=n_embd_ks//2, bias=(not with_ln)
)
)
if with_ln:
self.embd_norm.append(
LayerNorm(n_embd)
)
else:
self.embd_norm.append(nn.Identity())
# stem network using convs
self.stem = nn.ModuleList()
for idx in range(arch[1]):
self.stem.append(ConvBlock(n_embd, 3, 1))
# main branch using convs with downsampling
self.branch = nn.ModuleList()
for idx in range(arch[2]):
self.branch.append(ConvBlock(n_embd, 3, self.scale_factor))
# init weights
self.apply(self.__init_weights__)
def __init_weights__(self, module):
# set nn.Linear bias term to 0
if isinstance(module, (nn.Linear, nn.Conv1d)):
if module.bias is not None:
torch.nn.init.constant_(module.bias, 0.)
def forward(self, x, mask):
# x: batch size, feature channel, sequence length,
# mask: batch size, 1, sequence length (bool)
B, C, T = x.size()
# embedding network
for idx in range(len(self.embd)):
x, mask = self.embd[idx](x, mask)
x = self.relu(self.embd_norm[idx](x))
# stem conv
for idx in range(len(self.stem)):
x, mask = self.stem[idx](x, mask)
# prep for outputs
out_feats = tuple()
out_masks = tuple()
# 1x resolution
out_feats += (x, )
out_masks += (mask, )
# main branch with downsampling
for idx in range(len(self.branch)):
x, mask = self.branch[idx](x, mask)
out_feats += (x, )
out_masks += (mask, )
return out_feats, out_masks
| InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/backbones.py |
import torch
from torch import nn
from torch.nn import functional as F
from .models import register_neck
from .blocks import MaskedConv1D, LayerNorm
@register_neck("fpn")
class FPN1D(nn.Module):
"""
Feature pyramid network
"""
def __init__(
self,
in_channels, # input feature channels, len(in_channels) = # levels
out_channel, # output feature channel
scale_factor=2.0, # downsampling rate between two fpn levels
start_level=0, # start fpn level
end_level=-1, # end fpn level
with_ln=True # if to apply layer norm at the end
):
super().__init__()
assert isinstance(in_channels, list) or isinstance(in_channels, tuple)
self.in_channels = in_channels
self.out_channel = out_channel
self.scale_factor = scale_factor
self.start_level = start_level
if end_level == -1:
self.end_level = len(in_channels)
else:
self.end_level = end_level
assert self.end_level <= len(in_channels)
assert (self.start_level >= 0) and (self.start_level < self.end_level)
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
self.fpn_norms = nn.ModuleList()
for i in range(self.start_level, self.end_level):
# disable bias if using layer norm
l_conv = MaskedConv1D(
in_channels[i], out_channel, 1, bias=(not with_ln))
# use depthwise conv here for efficiency
fpn_conv = MaskedConv1D(
out_channel, out_channel, 3,
padding=1, bias=(not with_ln), groups=out_channel
)
# layer norm for order (B C T)
if with_ln:
fpn_norm = LayerNorm(out_channel)
else:
fpn_norm = nn.Identity()
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
self.fpn_norms.append(fpn_norm)
def forward(self, inputs, fpn_masks):
# inputs must be a list / tuple
assert len(inputs) == len(self.in_channels)
assert len(fpn_masks) == len(self.in_channels)
# build laterals, fpn_masks will remain the same with 1x1 convs
laterals = []
for i in range(len(self.lateral_convs)):
x, _ = self.lateral_convs[i](
inputs[i + self.start_level], fpn_masks[i + self.start_level]
)
laterals.append(x)
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
laterals[i-1] += F.interpolate(
laterals[i],
scale_factor=self.scale_factor,
mode='nearest'
)
# fpn conv / norm -> outputs
# mask will remain the same
fpn_feats = tuple()
for i in range(used_backbone_levels):
x, _ = self.fpn_convs[i](
laterals[i], fpn_masks[i + self.start_level])
x = self.fpn_norms[i](x)
fpn_feats += (x, )
return fpn_feats, fpn_masks
@register_neck('identity')
class FPNIdentity(nn.Module):
def __init__(
self,
in_channels, # input feature channels, len(in_channels) = # levels
out_channel, # output feature channel
scale_factor=2.0, # downsampling rate between two fpn levels
start_level=0, # start fpn level
end_level=-1, # end fpn level
with_ln=True # if to apply layer norm at the end
):
super().__init__()
self.in_channels = in_channels
self.out_channel = out_channel
self.scale_factor = scale_factor
self.start_level = start_level
if end_level == -1:
self.end_level = len(in_channels)
else:
self.end_level = end_level
assert self.end_level <= len(in_channels)
assert (self.start_level >= 0) and (self.start_level < self.end_level)
self.fpn_norms = nn.ModuleList()
for i in range(self.start_level, self.end_level):
# check feat dims
assert self.in_channels[i] == self.out_channel # i already starts at start_level
# layer norm for order (B C T)
if with_ln:
fpn_norm = LayerNorm(out_channel)
else:
fpn_norm = nn.Identity()
self.fpn_norms.append(fpn_norm)
def forward(self, inputs, fpn_masks):
# inputs must be a list / tuple
assert len(inputs) == len(self.in_channels)
assert len(fpn_masks) == len(self.in_channels)
# apply norms; fpn_masks remain the same (identity neck, no convs here)
fpn_feats = tuple()
for i in range(len(self.fpn_norms)):
x = self.fpn_norms[i](inputs[i + self.start_level])
fpn_feats += (x, )
return fpn_feats, fpn_masks
| InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/necks.py |
import torch
from torch.nn import functional as F
@torch.jit.script
def sigmoid_focal_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
alpha: float = 0.25,
gamma: float = 2.0,
reduction: str = "none",
) -> torch.Tensor:
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Taken from
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
alpha: (optional) Weighting factor in range (0,1) to balance
positive vs negative examples. Default = 0.25; set alpha < 0 to disable weighting.
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
Returns:
Loss tensor with the reduction option applied.
"""
inputs = inputs.float()
targets = targets.float()
p = torch.sigmoid(inputs)
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = p * targets + (1 - p) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
if reduction == "mean":
loss = loss.mean()
elif reduction == "sum":
loss = loss.sum()
return loss
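# Worked example (illustrative): for a positive target with predicted p = 0.9,
# ce = -log(0.9) ≈ 0.105, the modulator (1 - p_t)^gamma = 0.1^2 = 0.01, and with
# alpha = 0.25 the per-element loss is ≈ 0.25 * 0.105 * 0.01 ≈ 2.6e-4, i.e. easy
# examples are strongly down-weighted relative to plain BCE.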
@torch.jit.script
def ctr_giou_loss_1d(
input_offsets: torch.Tensor,
target_offsets: torch.Tensor,
reduction: str = 'none',
eps: float = 1e-8,
) -> torch.Tensor:
"""
Generalized Intersection over Union Loss (Hamid Rezatofighi et al.)
https://arxiv.org/abs/1902.09630
This is an implementation that assumes a 1D event is represented using
the same center point with different offsets, e.g.,
(t1, t2) = (c - o_1, c + o_2) with o_i >= 0
Reference code from
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/giou_loss.py
Args:
input/target_offsets (Tensor): 1D offsets of size (N, 2)
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
input_offsets = input_offsets.float()
target_offsets = target_offsets.float()
# check all 1D events are valid
assert (input_offsets >= 0.0).all(), "predicted offsets must be non-negative"
assert (target_offsets >= 0.0).all(), "GT offsets must be non-negative"
lp, rp = input_offsets[:, 0], input_offsets[:, 1]
lg, rg = target_offsets[:, 0], target_offsets[:, 1]
# intersection key points
lkis = torch.min(lp, lg)
rkis = torch.min(rp, rg)
# iou
intsctk = rkis + lkis
unionk = (lp + rp) + (lg + rg) - intsctk
iouk = intsctk / unionk.clamp(min=eps)
# giou is reduced to iou in our setting, skip unnecessary steps
loss = 1.0 - iouk
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
@torch.jit.script
def ctr_diou_loss_1d(
input_offsets: torch.Tensor,
target_offsets: torch.Tensor,
reduction: str = 'none',
eps: float = 1e-8,
) -> torch.Tensor:
"""
Distance-IoU Loss (Zheng et al.)
https://arxiv.org/abs/1911.08287
This is an implementation that assumes a 1D event is represented using
the same center point with different offsets, e.g.,
(t1, t2) = (c - o_1, c + o_2) with o_i >= 0
Reference code from
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/giou_loss.py
Args:
input/target_offsets (Tensor): 1D offsets of size (N, 2)
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
input_offsets = input_offsets.float()
target_offsets = target_offsets.float()
# check all 1D events are valid
assert (input_offsets >= 0.0).all(), "predicted offsets must be non-negative"
assert (target_offsets >= 0.0).all(), "GT offsets must be non-negative"
lp, rp = input_offsets[:, 0], input_offsets[:, 1]
lg, rg = target_offsets[:, 0], target_offsets[:, 1]
# intersection key points
lkis = torch.min(lp, lg)
rkis = torch.min(rp, rg)
# iou
intsctk = rkis + lkis
unionk = (lp + rp) + (lg + rg) - intsctk
iouk = intsctk / unionk.clamp(min=eps)
# smallest enclosing box
lc = torch.max(lp, lg)
rc = torch.max(rp, rg)
len_c = lc + rc
# offset between centers
rho = 0.5 * (rp - lp - rg + lg)
# diou
loss = 1.0 - iouk + torch.square(rho / len_c.clamp(min=eps))
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
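# Worked example (illustrative): predicted offsets (2, 3) vs. GT offsets (3, 4) give
# intersection 2 + 3 = 5, union 5 + 7 - 5 = 7, IoU = 5/7 ≈ 0.714; the enclosing length
# is 3 + 4 = 7 and the center offset rho = 0.5 * (3 - 2 - 4 + 3) = 0, so the DIoU loss
# is 1 - 0.714 + 0 ≈ 0.286.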
| InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/losses.py |
import torch
from torch import nn
from torch.nn import functional as F
from .models import register_generator
class BufferList(nn.Module):
"""
Similar to nn.ParameterList, but for buffers
Taken from https://github.com/facebookresearch/detectron2/blob/master/detectron2/modeling/anchor_generator.py
"""
def __init__(self, buffers):
super().__init__()
for i, buffer in enumerate(buffers):
# Use non-persistent buffer so the values are not saved in checkpoint
self.register_buffer(str(i), buffer, persistent=False)
def __len__(self):
return len(self._buffers)
def __iter__(self):
return iter(self._buffers.values())
@register_generator('point')
class PointGenerator(nn.Module):
"""
A generator for temporal "points"
max_seq_len can be much larger than the actual seq length
"""
def __init__(
self,
max_seq_len, # max sequence length that the generator will buffer
fpn_levels, # number of fpn levels
scale_factor, # scale factor between two fpn levels
regression_range, # regression range (on feature grids)
use_offset=False # if to align the points at grid centers
):
super().__init__()
# sanity check, # fpn levels and length divisible
assert len(regression_range) == fpn_levels
assert max_seq_len % scale_factor**(fpn_levels - 1) == 0
# save params
self.max_seq_len = max_seq_len
self.fpn_levels = fpn_levels
self.scale_factor = scale_factor
self.regression_range = regression_range
self.use_offset = use_offset
# generate all points and buffer the list
self.buffer_points = self._generate_points()
def _generate_points(self):
points_list = []
# loop over all points at each pyramid level
for l in range(self.fpn_levels):
stride = self.scale_factor ** l
reg_range = torch.as_tensor(
self.regression_range[l], dtype=torch.float)
fpn_stride = torch.as_tensor(stride, dtype=torch.float)
points = torch.arange(0, self.max_seq_len, stride)[:, None]
# add offset if necessary (not in our current model)
if self.use_offset:
points += 0.5 * stride
# pad the time stamp with additional regression range / stride
reg_range = reg_range[None].repeat(points.shape[0], 1)
fpn_stride = fpn_stride[None].repeat(points.shape[0], 1)
# size: T x 4 (ts, reg_range, stride)
points_list.append(torch.cat((points, reg_range, fpn_stride), dim=1))
return BufferList(points_list)
def forward(self, feats):
# feats will be a list of torch tensors
assert len(feats) == self.fpn_levels
pts_list = []
feat_lens = [feat.shape[-1] for feat in feats]
for feat_len, buffer_pts in zip(feat_lens, self.buffer_points):
assert feat_len <= buffer_pts.shape[0], "Reached max buffer length for point generator"
pts = buffer_pts[:feat_len, :]
pts_list.append(pts)
return pts_list
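# Layout example (illustrative): with scale_factor = 2, level 0 (stride 1) places points
# at t = 0, 1, 2, ..., level 3 (stride 8) at t = 0, 8, 16, ...; each buffered row is
# [t, reg_range_min, reg_range_max, stride], which is the 4-vector consumed downstream
# as concat_points.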
| InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/loc_generators.py |
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from .weight_init import trunc_normal_
class MaskedConv1D(nn.Module):
"""
Masked 1D convolution. Interface remains the same as Conv1d.
Only supports a subset of 1D convs
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros'
):
super().__init__()
# element must be aligned
assert (kernel_size % 2 == 1) and (kernel_size // 2 == padding)
# stride
self.stride = stride
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias, padding_mode)
# zero out the bias term if it exists
if bias:
torch.nn.init.constant_(self.conv.bias, 0.)
def forward(self, x, mask):
# x: batch size, feature channel, sequence length,
# mask: batch size, 1, sequence length (bool)
B, C, T = x.size()
# input length must be divisible by stride
assert T % self.stride == 0
# conv
out_conv = self.conv(x)
# compute the mask
if self.stride > 1:
# downsample the mask using nearest neighbor
out_mask = F.interpolate(
mask.to(x.dtype),
size=T//self.stride,
mode='nearest'
)
else:
# masking out the features
out_mask = mask.to(x.dtype)
# masking the output, stop grad to mask
out_conv = out_conv * out_mask.detach()
out_mask = out_mask.bool()
return out_conv, out_mask
class LayerNorm(nn.Module):
"""
LayerNorm that supports inputs of size B, C, T
"""
def __init__(
self,
num_channels,
eps = 1e-5,
affine = True,
device = None,
dtype = None,
):
super().__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = nn.Parameter(
torch.ones([1, num_channels, 1], **factory_kwargs))
self.bias = nn.Parameter(
torch.zeros([1, num_channels, 1], **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
assert x.dim() == 3
assert x.shape[1] == self.num_channels
# normalization along C channels
mu = torch.mean(x, dim=1, keepdim=True)
res_x = x - mu
sigma = torch.mean(res_x**2, dim=1, keepdim=True)
out = res_x / torch.sqrt(sigma + self.eps)
# apply weight and bias
if self.affine:
out *= self.weight
out += self.bias
return out
# helper functions for Transformer blocks
def get_sinusoid_encoding(n_position, d_hid):
''' Sinusoid position encoding table '''
def get_position_angle_vec(position):
return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
# return a tensor of size 1 C T
return torch.FloatTensor(sinusoid_table).unsqueeze(0).transpose(1, 2)
# attention / transformers
class MaskedMHA(nn.Module):
"""
Multi Head Attention with mask
Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
def __init__(
self,
n_embd, # dimension of the input embedding
n_head, # number of heads in multi-head self-attention
attn_pdrop=0.0, # dropout rate for the attention map
proj_pdrop=0.0 # dropout rate for projection op
):
super().__init__()
assert n_embd % n_head == 0
self.n_embd = n_embd
self.n_head = n_head
self.n_channels = n_embd // n_head
self.scale = 1.0 / math.sqrt(self.n_channels)
# key, query, value projections for all heads
# it is OK to ignore masking, as the mask will be attached on the attention
self.key = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.query = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.value = nn.Conv1d(self.n_embd, self.n_embd, 1)
# regularization
self.attn_drop = nn.Dropout(attn_pdrop)
self.proj_drop = nn.Dropout(proj_pdrop)
# output projection
self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1)
def forward(self, x, mask):
# x: batch size, feature channel, sequence length,
# mask: batch size, 1, sequence length (bool)
B, C, T = x.size()
# calculate query, key, values for all heads in batch
# (B, nh * hs, T)
k = self.key(x)
q = self.query(x)
v = self.value(x)
# move head forward to be the batch dim
# (B, nh * hs, T) -> (B, nh, T, hs)
k = k.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
q = q.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
v = v.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
# self-attention: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q * self.scale) @ k.transpose(-2, -1)
# prevent q from attending to invalid tokens
att = att.masked_fill(torch.logical_not(mask[:, :, None, :]), float('-inf'))
# softmax attn
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
# (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
out = att @ (v * mask[:, :, :, None].to(v.dtype))
# re-assemble all head outputs side by side
out = out.transpose(2, 3).contiguous().view(B, C, -1)
# output projection + skip connection
out = self.proj_drop(self.proj(out)) * mask.to(out.dtype)
return out, mask
class MaskedMHCA(nn.Module):
"""
Multi Head Conv Attention with mask
Add a depthwise convolution within a standard MHA
The extra conv op can be used to
(1) encode relative position information (replacing position encoding);
(2) downsample the features if needed;
(3) match the feature channels
Note: With current implementation, the downsampled feature will be aligned
to every s+1 time step, where s is the downsampling stride. This allows us
to easily interpolate the corresponding positional embeddings.
Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
def __init__(
self,
n_embd, # dimension of the output features
n_head, # number of heads in multi-head self-attention
n_qx_stride=1, # downsampling stride for query and input
n_kv_stride=1, # downsampling stride for key and value
attn_pdrop=0.0, # dropout rate for the attention map
proj_pdrop=0.0, # dropout rate for projection op
):
super().__init__()
assert n_embd % n_head == 0
self.n_embd = n_embd
self.n_head = n_head
self.n_channels = n_embd // n_head
self.scale = 1.0 / math.sqrt(self.n_channels)
# conv/pooling operations
assert (n_qx_stride == 1) or (n_qx_stride % 2 == 0)
assert (n_kv_stride == 1) or (n_kv_stride % 2 == 0)
self.n_qx_stride = n_qx_stride
self.n_kv_stride = n_kv_stride
# query conv (depthwise)
kernel_size = self.n_qx_stride + 1 if self.n_qx_stride > 1 else 3
stride, padding = self.n_kv_stride, kernel_size // 2
# 1d depthwise conv
self.query_conv = MaskedConv1D(
self.n_embd, self.n_embd, kernel_size,
stride=stride, padding=padding, groups=self.n_embd, bias=False
)
# layernorm
self.query_norm = LayerNorm(self.n_embd)
# key, value conv (depthwise)
kernel_size = self.n_kv_stride + 1 if self.n_kv_stride > 1 else 3
stride, padding = self.n_kv_stride, kernel_size // 2
# 1d depthwise conv
self.key_conv = MaskedConv1D(
self.n_embd, self.n_embd, kernel_size,
stride=stride, padding=padding, groups=self.n_embd, bias=False
)
self.key_norm = LayerNorm(self.n_embd)
self.value_conv = MaskedConv1D(
self.n_embd, self.n_embd, kernel_size,
stride=stride, padding=padding, groups=self.n_embd, bias=False
)
# layernorm
self.value_norm = LayerNorm(self.n_embd)
# key, query, value projections for all heads
# it is OK to ignore masking, as the mask will be attached on the attention
self.key = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.query = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.value = nn.Conv1d(self.n_embd, self.n_embd, 1)
# regularization
self.attn_drop = nn.Dropout(attn_pdrop)
self.proj_drop = nn.Dropout(proj_pdrop)
# output projection
self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1)
def forward(self, x, mask):
# x: batch size, feature channel, sequence length,
# mask: batch size, 1, sequence length (bool)
B, C, T = x.size()
# query conv -> (B, nh * hs, T')
q, qx_mask = self.query_conv(x, mask)
q = self.query_norm(q)
# key, value conv -> (B, nh * hs, T'')
k, kv_mask = self.key_conv(x, mask)
k = self.key_norm(k)
v, _ = self.value_conv(x, mask)
v = self.value_norm(v)
# projections
q = self.query(q)
k = self.key(k)
v = self.value(v)
# move head forward to be the batch dim
# (B, nh * hs, T'/T'') -> (B, nh, T'/T'', hs)
k = k.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
q = q.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
v = v.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
# self-attention: (B, nh, T', hs) x (B, nh, hs, T'') -> (B, nh, T', T'')
att = (q * self.scale) @ k.transpose(-2, -1)
# prevent q from attending to invalid tokens
att = att.masked_fill(torch.logical_not(kv_mask[:, :, None, :]), float('-inf'))
# softmax attn
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
# (B, nh, T', T'') x (B, nh, T'', hs) -> (B, nh, T', hs)
out = att @ (v * kv_mask[:, :, :, None].to(v.dtype))
# re-assemble all head outputs side by side
out = out.transpose(2, 3).contiguous().view(B, C, -1)
# output projection + skip connection
out = self.proj_drop(self.proj(out)) * qx_mask.to(out.dtype)
return out, qx_mask
class LocalMaskedMHCA(nn.Module):
"""
Local Multi Head Conv Attention with mask
Add a depthwise convolution within a standard MHA
The extra conv op can be used to
(1) encode relative position information (replacing position encoding);
(2) downsample the features if needed;
(3) match the feature channels
Note: With current implementation, the downsampled feature will be aligned
to every s+1 time step, where s is the downsampling stride. This allows us
to easily interpolate the corresponding positional embeddings.
The implementation is fairly tricky, code reference from
https://github.com/huggingface/transformers/blob/master/src/transformers/models/longformer/modeling_longformer.py
"""
def __init__(
self,
n_embd, # dimension of the output features
n_head, # number of heads in multi-head self-attention
window_size, # size of the local attention window
        n_qx_stride=1,      # downsampling stride for query and input
n_kv_stride=1, # downsampling stride for key and value
attn_pdrop=0.0, # dropout rate for the attention map
proj_pdrop=0.0, # dropout rate for projection op
use_rel_pe=False # use relative position encoding
):
super().__init__()
assert n_embd % n_head == 0
self.n_embd = n_embd
self.n_head = n_head
self.n_channels = n_embd // n_head
self.scale = 1.0 / math.sqrt(self.n_channels)
self.window_size = window_size
self.window_overlap = window_size // 2
        # must use an odd window size
        assert self.window_size > 1 and self.window_size % 2 == 1 and self.n_head >= 1
self.use_rel_pe = use_rel_pe
# conv/pooling operations
assert (n_qx_stride == 1) or (n_qx_stride % 2 == 0)
assert (n_kv_stride == 1) or (n_kv_stride % 2 == 0)
self.n_qx_stride = n_qx_stride
self.n_kv_stride = n_kv_stride
# query conv (depthwise)
        kernel_size = self.n_qx_stride + 1 if self.n_qx_stride > 1 else 3
        stride, padding = self.n_qx_stride, kernel_size // 2
# 1d depthwise conv
self.query_conv = MaskedConv1D(
self.n_embd, self.n_embd, kernel_size,
stride=stride, padding=padding, groups=self.n_embd, bias=False
)
# layernorm
self.query_norm = LayerNorm(self.n_embd)
# key, value conv (depthwise)
kernel_size = self.n_kv_stride + 1 if self.n_kv_stride > 1 else 3
stride, padding = self.n_kv_stride, kernel_size // 2
# 1d depthwise conv
self.key_conv = MaskedConv1D(
self.n_embd, self.n_embd, kernel_size,
stride=stride, padding=padding, groups=self.n_embd, bias=False
)
self.key_norm = LayerNorm(self.n_embd)
self.value_conv = MaskedConv1D(
self.n_embd, self.n_embd, kernel_size,
stride=stride, padding=padding, groups=self.n_embd, bias=False
)
# layernorm
self.value_norm = LayerNorm(self.n_embd)
# key, query, value projections for all heads
# it is OK to ignore masking, as the mask will be attached on the attention
self.key = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.query = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.value = nn.Conv1d(self.n_embd, self.n_embd, 1)
# regularization
self.attn_drop = nn.Dropout(attn_pdrop)
self.proj_drop = nn.Dropout(proj_pdrop)
# output projection
self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1)
# relative position encoding
if self.use_rel_pe:
self.rel_pe = nn.Parameter(
torch.zeros(1, 1, self.n_head, self.window_size))
trunc_normal_(self.rel_pe, std=(2.0 / self.n_embd)**0.5)
@staticmethod
def _chunk(x, window_overlap):
"""convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
# x: B x nh, T, hs
# non-overlapping chunks of size = 2w -> B x nh, T//2w, 2w, hs
x = x.view(
x.size(0),
x.size(1) // (window_overlap * 2),
window_overlap * 2,
x.size(2),
)
# use `as_strided` to make the chunks overlap with an overlap size = window_overlap
chunk_size = list(x.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(x.stride())
chunk_stride[1] = chunk_stride[1] // 2
# B x nh, #chunks = T//w - 1, 2w, hs
return x.as_strided(size=chunk_size, stride=chunk_stride)
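    # --- illustrative sketch (editor addition, not part of the original class) ---
    # What _chunk produces on a toy tensor: with T=8 and window_overlap=2 the
    # strided view yields 3 chunks of length 2w=4 that overlap by w=2, i.e.
    # [0,1,2,3], [2,3,4,5], [4,5,6,7]. Local to this sketch only.
    @staticmethod
    def _chunk_sketch():
        w = 2
        x = torch.arange(8.0).view(1, 8, 1)      # (B*nh=1, T=8, hs=1)
        x = x.view(x.size(0), x.size(1) // (2 * w), 2 * w, x.size(2))
        size = list(x.size())
        size[1] = size[1] * 2 - 1
        stride = list(x.stride())
        stride[1] = stride[1] // 2
        chunks = x.as_strided(size=size, stride=stride)   # (1, 3, 4, 1)
        return chunks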
@staticmethod
def _pad_and_transpose_last_two_dims(x, padding):
"""pads rows and then flips rows and columns"""
# padding value is not important because it will be overwritten
x = nn.functional.pad(x, padding)
x = x.view(*x.size()[:-2], x.size(-1), x.size(-2))
return x
@staticmethod
def _mask_invalid_locations(input_tensor, affected_seq_len):
beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0])
beginning_mask = beginning_mask_2d[None, :, None, :]
ending_mask = beginning_mask.flip(dims=(1, 3))
beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]
beginning_mask = beginning_mask.expand(beginning_input.size())
# `== 1` converts to bool or uint8
beginning_input.masked_fill_(beginning_mask == 1, -float("inf"))
ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :]
ending_mask = ending_mask.expand(ending_input.size())
# `== 1` converts to bool or uint8
ending_input.masked_fill_(ending_mask == 1, -float("inf"))
@staticmethod
def _pad_and_diagonalize(x):
"""
shift every row 1 step right, converting columns into diagonals.
Example::
chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492,
-1.8348, 0.7672, 0.2986, 0.0285,
-0.7584, 0.4206, -0.0405, 0.1599,
2.0514, -1.1600, 0.5372, 0.2629 ]
window_overlap = num_rows = 4
(pad & diagonalize) =>
[ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000
0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000
0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000
0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]
"""
total_num_heads, num_chunks, window_overlap, hidden_dim = x.size()
# total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1).
x = nn.functional.pad(
x, (0, window_overlap + 1)
)
# total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap
x = x.view(total_num_heads, num_chunks, -1)
# total_num_heads x num_chunks x window_overlap*window_overlap
x = x[:, :, :-window_overlap]
x = x.view(
total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
)
x = x[:, :, :, :-1]
return x
def _sliding_chunks_query_key_matmul(
self, query, key, num_heads, window_overlap
):
"""
        Matrix multiplication of query and key tensors using a sliding window attention pattern. This implementation splits the input into overlapping chunks of size 2w with an overlap of size w (window_overlap).
"""
# query / key: B*nh, T, hs
bnh, seq_len, head_dim = query.size()
batch_size = bnh // num_heads
assert seq_len % (window_overlap * 2) == 0
assert query.size() == key.size()
chunks_count = seq_len // window_overlap - 1
        # B * num_heads, #chunks = T//w - 1, 2w, head_dim
chunk_query = self._chunk(query, window_overlap)
chunk_key = self._chunk(key, window_overlap)
# matrix multiplication
# bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
diagonal_chunked_attention_scores = torch.einsum(
"bcxd,bcyd->bcxy", (chunk_query, chunk_key))
# convert diagonals into columns
# B * num_heads, #chunks, 2w, 2w+1
diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
diagonal_chunked_attention_scores, padding=(0, 0, 0, 1)
)
# allocate space for the overall attention matrix where the chunks are combined. The last dimension
# has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
# window_overlap previous words). The following column is attention score from each word to itself, then
# followed by window_overlap columns for the upper triangle.
diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty(
(batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
)
# copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
# - copying the main diagonal and the upper triangle
diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, :, :window_overlap, : window_overlap + 1
]
diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, -1, window_overlap:, : window_overlap + 1
]
# - copying the lower triangle
diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
:, :, -(window_overlap + 1) : -1, window_overlap + 1 :
]
diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
:, 0, : window_overlap - 1, 1 - window_overlap :
]
# separate batch_size and num_heads dimensions again
diagonal_attention_scores = diagonal_attention_scores.view(
batch_size, num_heads, seq_len, 2 * window_overlap + 1
).transpose(2, 1)
self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
return diagonal_attention_scores
def _sliding_chunks_matmul_attn_probs_value(
self, attn_probs, value, num_heads, window_overlap
):
"""
Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the
same shape as `attn_probs`
"""
bnh, seq_len, head_dim = value.size()
batch_size = bnh // num_heads
assert seq_len % (window_overlap * 2) == 0
assert attn_probs.size(3) == 2 * window_overlap + 1
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1
)
# pad seq_len with w at the beginning of the sequence and another window overlap at the end
padded_value = nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1)
# chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
chunked_value_stride = padded_value.stride()
chunked_value_stride = (
chunked_value_stride[0],
window_overlap * chunked_value_stride[1],
chunked_value_stride[1],
chunked_value_stride[2],
)
chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)
chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
return context.view(batch_size, num_heads, seq_len, head_dim)
def forward(self, x, mask):
# x: batch size, feature channel, sequence length,
# mask: batch size, 1, sequence length (bool)
B, C, T = x.size()
# step 1: depth convolutions
# query conv -> (B, nh * hs, T')
q, qx_mask = self.query_conv(x, mask)
q = self.query_norm(q)
# key, value conv -> (B, nh * hs, T'')
k, kv_mask = self.key_conv(x, mask)
k = self.key_norm(k)
v, _ = self.value_conv(x, mask)
v = self.value_norm(v)
# step 2: query, key, value transforms & reshape
# projections
q = self.query(q)
k = self.key(k)
v = self.value(v)
# (B, nh * hs, T) -> (B, nh, T, hs)
q = q.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
k = k.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
v = v.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
# view as (B * nh, T, hs)
q = q.view(B * self.n_head, -1, self.n_channels).contiguous()
k = k.view(B * self.n_head, -1, self.n_channels).contiguous()
v = v.view(B * self.n_head, -1, self.n_channels).contiguous()
# step 3: compute local self-attention with rel pe and masking
q *= self.scale
# chunked query key attention -> B, T, nh, 2w+1 = window_size
att = self._sliding_chunks_query_key_matmul(
q, k, self.n_head, self.window_overlap)
# rel pe
if self.use_rel_pe:
att += self.rel_pe
# kv_mask -> B, T'', 1
inverse_kv_mask = torch.logical_not(
kv_mask[:, :, :, None].view(B, -1, 1))
# 0 for valid slot, -inf for masked ones
float_inverse_kv_mask = inverse_kv_mask.type_as(q).masked_fill(
inverse_kv_mask, -1e4)
# compute the diagonal mask (for each local window)
diagonal_mask = self._sliding_chunks_query_key_matmul(
float_inverse_kv_mask.new_ones(size=float_inverse_kv_mask.size()),
float_inverse_kv_mask,
1,
self.window_overlap
)
att += diagonal_mask
# ignore input masking for now
att = nn.functional.softmax(att, dim=-1)
# softmax sometimes inserts NaN if all positions are masked, replace them with 0
att = att.masked_fill(
torch.logical_not(kv_mask.squeeze(1)[:, :, None, None]), 0.0)
att = self.attn_drop(att)
# step 4: compute attention value product + output projection
# chunked attn value product -> B, nh, T, hs
out = self._sliding_chunks_matmul_attn_probs_value(
att, v, self.n_head, self.window_overlap)
# transpose to B, nh, hs, T -> B, nh*hs, T
out = out.transpose(2, 3).contiguous().view(B, C, -1)
# output projection + skip connection
out = self.proj_drop(self.proj(out)) * qx_mask.to(out.dtype)
return out, qx_mask
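# --- usage sketch (editor addition, not part of the original module) ---
# A minimal forward pass through LocalMaskedMHCA, assuming MaskedConv1D and
# LayerNorm are defined earlier in this file. Note that the (downsampled)
# sequence length must be divisible by 2 * window_overlap (see the asserts in
# _sliding_chunks_query_key_matmul), hence T=64 for window_size=9 here.
def _local_mhca_sketch():
    B, C, T = 2, 32, 64
    x = torch.randn(B, C, T)
    mask = torch.ones(B, 1, T, dtype=torch.bool)
    attn = LocalMaskedMHCA(n_embd=C, n_head=4, window_size=9)
    out, out_mask = attn(x, mask)   # out: (B, C, T), out_mask: (B, 1, T)
    return out.shape, out_mask.shape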
class TransformerBlock(nn.Module):
"""
    A simple (pre layer norm) Transformer block
Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
def __init__(
self,
n_embd, # dimension of the input features
n_head, # number of attention heads
n_ds_strides=(1, 1), # downsampling strides for q & x, k & v
n_out=None, # output dimension, if None, set to input dim
n_hidden=None, # dimension of the hidden layer in MLP
act_layer=nn.GELU, # nonlinear activation used in MLP, default GELU
attn_pdrop=0.0, # dropout rate for the attention map
proj_pdrop=0.0, # dropout rate for the projection / MLP
path_pdrop=0.0, # drop path rate
        mha_win_size=-1,     # > 1 to use window mha, otherwise full attention
        use_rel_pe=False     # whether to add rel position encoding to attention
):
super().__init__()
assert len(n_ds_strides) == 2
# layer norm for order (B C T)
self.ln1 = LayerNorm(n_embd)
self.ln2 = LayerNorm(n_embd)
# specify the attention module
if mha_win_size > 1:
self.attn = LocalMaskedMHCA(
n_embd,
n_head,
window_size=mha_win_size,
n_qx_stride=n_ds_strides[0],
n_kv_stride=n_ds_strides[1],
attn_pdrop=attn_pdrop,
proj_pdrop=proj_pdrop,
use_rel_pe=use_rel_pe # only valid for local attention
)
else:
self.attn = MaskedMHCA(
n_embd,
n_head,
n_qx_stride=n_ds_strides[0],
n_kv_stride=n_ds_strides[1],
attn_pdrop=attn_pdrop,
proj_pdrop=proj_pdrop
)
        # pooling on the skip-connection path to match the downsampled length
if n_ds_strides[0] > 1:
kernel_size, stride, padding = \
n_ds_strides[0] + 1, n_ds_strides[0], (n_ds_strides[0] + 1)//2
self.pool_skip = nn.MaxPool1d(
kernel_size, stride=stride, padding=padding)
else:
self.pool_skip = nn.Identity()
# two layer mlp
if n_hidden is None:
n_hidden = 4 * n_embd # default
if n_out is None:
n_out = n_embd
# ok to use conv1d here with stride=1
self.mlp = nn.Sequential(
nn.Conv1d(n_embd, n_hidden, 1),
act_layer(),
nn.Dropout(proj_pdrop, inplace=True),
nn.Conv1d(n_hidden, n_out, 1),
nn.Dropout(proj_pdrop, inplace=True),
)
# drop path
if path_pdrop > 0.0:
self.drop_path_attn = AffineDropPath(n_embd, drop_prob = path_pdrop)
self.drop_path_mlp = AffineDropPath(n_out, drop_prob = path_pdrop)
else:
self.drop_path_attn = nn.Identity()
self.drop_path_mlp = nn.Identity()
def forward(self, x, mask, pos_embd=None):
# pre-LN transformer: https://arxiv.org/pdf/2002.04745.pdf
out, out_mask = self.attn(self.ln1(x), mask)
out_mask_float = out_mask.to(out.dtype)
out = self.pool_skip(x) * out_mask_float + self.drop_path_attn(out)
# FFN
out = out + self.drop_path_mlp(self.mlp(self.ln2(out)) * out_mask_float)
# optionally add pos_embd to the output
if pos_embd is not None:
out += pos_embd * out_mask_float
return out, out_mask
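# --- usage sketch (editor addition, not part of the original module) ---
# A TransformerBlock with 2x downsampling on both the query/input and the
# key/value paths halves the temporal length of its output; with the default
# n_ds_strides=(1, 1) the length is preserved. Assumes the helper layers
# defined elsewhere in this file (MaskedConv1D, LayerNorm, AffineDropPath).
def _transformer_block_sketch():
    B, C, T = 2, 32, 64
    x = torch.randn(B, C, T)
    mask = torch.ones(B, 1, T, dtype=torch.bool)
    blk = TransformerBlock(n_embd=C, n_head=4, n_ds_strides=(2, 2), mha_win_size=9)
    out, out_mask = blk(x, mask)    # out: (B, C, T // 2), out_mask: (B, 1, T // 2)
    return out.shape, out_mask.shape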
class ConvBlock(nn.Module):
"""
A simple conv block similar to the basic block used in ResNet
"""
def __init__(
self,
n_embd, # dimension of the input features
kernel_size=3, # conv kernel size
n_ds_stride=1, # downsampling stride for the current layer
expansion_factor=2, # expansion factor of feat dims
n_out=None, # output dimension, if None, set to input dim
act_layer=nn.ReLU, # nonlinear activation used after conv, default ReLU
):
super().__init__()
# must use odd sized kernel
assert (kernel_size % 2 == 1) and (kernel_size > 1)
padding = kernel_size // 2
if n_out is None:
n_out = n_embd
# 1x3 (strided) -> 1x3 (basic block in resnet)
width = n_embd * expansion_factor
self.conv1 = MaskedConv1D(
n_embd, width, kernel_size, n_ds_stride, padding=padding)
self.conv2 = MaskedConv1D(
width, n_out, kernel_size, 1, padding=padding)
# attach downsampling conv op
if n_ds_stride > 1:
# 1x1 strided conv (same as resnet)
self.downsample = MaskedConv1D(n_embd, n_out, 1, n_ds_stride)
else:
self.downsample = None
self.act = act_layer()
def forward(self, x, mask, pos_embd=None):
identity = x
out, out_mask = self.conv1(x, mask)
out = self.act(out)
out, out_mask = self.conv2(out, out_mask)
# downsampling
if self.downsample is not None:
identity, _ = self.downsample(x, mask)
# residual connection
out += identity
out = self.act(out)
return out, out_mask
# drop path: from https://github.com/facebookresearch/SlowFast/blob/master/slowfast/models/common.py
class Scale(nn.Module):
"""
Multiply the output regression range by a learnable constant value
"""
def __init__(self, init_value=1.0):
"""
init_value : initial value for the scalar
"""
super().__init__()
self.scale = nn.Parameter(
torch.tensor(init_value, dtype=torch.float32),
requires_grad=True
)
def forward(self, x):
"""
input -> scale * input
"""
return x * self.scale
# The following code is modified from
# https://github.com/facebookresearch/SlowFast/blob/master/slowfast/models/common.py
def drop_path(x, drop_prob=0.0, training=False):
"""
Stochastic Depth per sample.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
mask.floor_() # binarize
output = x.div(keep_prob) * mask
return output
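# --- illustrative sketch (editor addition, not part of the original module) ---
# drop_path zeroes entire samples with probability drop_prob and rescales the
# survivors by 1 / keep_prob, so the expected value of the output matches the
# input during training. Local to this sketch only.
def _drop_path_sketch():
    torch.manual_seed(0)
    x = torch.ones(10000, 8)
    y = drop_path(x, drop_prob=0.25, training=True)
    kept = y.sum(dim=1) > 0                               # surviving samples
    assert torch.allclose(y[kept], torch.full_like(y[kept], 1.0 / 0.75))
    return y.mean().item(), kept.float().mean().item()    # close to 1.0 and 0.75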
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class AffineDropPath(nn.Module):
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks) with a per channel scaling factor (and zero init)
See: https://arxiv.org/pdf/2103.17239.pdf
"""
def __init__(self, num_dim, drop_prob=0.0, init_scale_value=1e-4):
super().__init__()
self.scale = nn.Parameter(
init_scale_value * torch.ones((1, num_dim, 1)),
requires_grad=True
)
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(self.scale * x, self.drop_prob, self.training)
| InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/blocks.py |
# from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
import torch
import math
import warnings
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
| InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/weight_init.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
import os
import sys
from typing import Iterable, Optional
import numpy as np
import torch
from scipy.special import softmax
from timm.data import Mixup
from timm.utils import ModelEma, accuracy
import utils
def train_class_batch(model, samples, target, criterion):
outputs = model(samples)
loss = criterion(outputs, target)
return loss, outputs
def get_loss_scale_for_deepspeed(model):
optimizer = model.optimizer
return optimizer.loss_scale if hasattr(
optimizer, "loss_scale") else optimizer.cur_scale
def train_one_epoch(model: torch.nn.Module,
criterion: torch.nn.Module,
data_loader: Iterable,
optimizer: torch.optim.Optimizer,
device: torch.device,
epoch: int,
loss_scaler,
args,
max_norm: float = 0,
model_ema: Optional[ModelEma] = None,
mixup_fn: Optional[Mixup] = None,
log_writer=None,
start_steps=None,
lr_schedule_values=None,
wd_schedule_values=None,
num_training_steps_per_epoch=None,
update_freq=None):
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter(
'lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter(
'min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
if loss_scaler is None:
model.zero_grad()
model.micro_steps = 0
else:
optimizer.zero_grad()
for data_iter_step, (samples, targets, _, _) in enumerate(
metric_logger.log_every(data_loader, print_freq, header)):
step = data_iter_step // update_freq
if step >= num_training_steps_per_epoch:
continue
it = start_steps + step # global training iteration
# Update LR & WD for the first acc
        if (lr_schedule_values is not None or wd_schedule_values is not None) and data_iter_step % update_freq == 0:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[it] * param_group[
"lr_scale"]
if wd_schedule_values is not None and param_group[
"weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if args.num_segment > 1:
samples = samples.view((-1, ) + samples.size()[2:])
if mixup_fn is not None:
B, C, T, H, W = samples.shape
samples = samples.view(B, C * T, H, W)
samples, targets = mixup_fn(samples, targets)
samples = samples.view(B, C, T, H, W)
if loss_scaler is None:
samples = samples.half()
loss, output = train_class_batch(model, samples, targets,
criterion)
else:
with torch.cuda.amp.autocast():
loss, output = train_class_batch(model, samples, targets,
criterion)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
if loss_scaler is None:
loss /= update_freq
model.backward(loss)
grad_norm = model.get_global_grad_norm()
# if utils.is_main_process():
# import json
# all_grad = {}
# for name, param in model.module.named_parameters():
# print(name)
# if hasattr(param, 'grad') and param.grad is not None:
# all_grad[name] = torch.norm(param.grad.detach(), 2).item()
# with open('pfs_work_dir/grad_test/temp.json', 'w') as f:
# json.dump(all_grad, f)
# torch.distributed.barrier()
# sys.exit(1)
model.step()
if (data_iter_step + 1) % update_freq == 0:
# model.zero_grad()
                # Deepspeed will call step() & model.zero_grad() automatically
if model_ema is not None:
model_ema.update(model)
loss_scale_value = get_loss_scale_for_deepspeed(model)
else:
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(
optimizer, 'is_second_order') and optimizer.is_second_order
loss /= update_freq
grad_norm = loss_scaler(loss,
optimizer,
clip_grad=max_norm,
parameters=model.parameters(),
create_graph=is_second_order,
update_grad=(data_iter_step + 1) %
update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
loss_scale_value = loss_scaler.state_dict()["scale"]
torch.cuda.synchronize()
if mixup_fn is None:
class_acc = (output.max(-1)[-1] == targets).float().mean()
else:
class_acc = None
metric_logger.update(loss=loss_value)
metric_logger.update(class_acc=class_acc)
metric_logger.update(loss_scale=loss_scale_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(class_acc=class_acc, head="loss")
log_writer.update(loss_scale=loss_scale_value, head="opt")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def validation_one_epoch(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Val:'
# switch to evaluation mode
model.eval()
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[1]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print(
'* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1,
top5=metric_logger.acc5,
losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def final_test(data_loader, model, device, file):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
final_result = []
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[1]
ids = batch[2]
chunk_nb = batch[3]
split_nb = batch[4]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
for i in range(output.size(0)):
string = "{} {} {} {} {}\n".format(ids[i], \
str(output.data[i].cpu().numpy().tolist()), \
str(int(target[i].cpu().numpy())), \
str(int(chunk_nb[i].cpu().numpy())), \
str(int(split_nb[i].cpu().numpy())))
final_result.append(string)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
if not os.path.exists(file):
os.mknod(file)
with open(file, 'w') as f:
f.write("{}, {}\n".format(acc1, acc5))
for line in final_result:
f.write(line)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print(
'* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1,
top5=metric_logger.acc5,
losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def merge(eval_path, num_tasks, method='prob'):
assert method in ['prob', 'score']
dict_feats = {}
dict_label = {}
dict_pos = {}
print("Reading individual output files")
for x in range(num_tasks):
file = os.path.join(eval_path, str(x) + '.txt')
lines = open(file, 'r').readlines()[1:]
for line in lines:
line = line.strip()
name = line.split('[')[0]
label = line.split(']')[1].split(' ')[1]
chunk_nb = line.split(']')[1].split(' ')[2]
split_nb = line.split(']')[1].split(' ')[3]
            data = np.fromstring(line.split('[')[1].split(']')[0],
                                 dtype=np.float64,
                                 sep=',')
if not name in dict_feats:
dict_feats[name] = []
dict_label[name] = 0
dict_pos[name] = []
if chunk_nb + split_nb in dict_pos[name]:
continue
if method == 'prob':
dict_feats[name].append(softmax(data))
else:
dict_feats[name].append(data)
dict_pos[name].append(chunk_nb + split_nb)
dict_label[name] = label
print("Computing final results")
input_lst = []
print(len(dict_feats))
for i, item in enumerate(dict_feats):
input_lst.append([i, item, dict_feats[item], dict_label[item]])
from multiprocessing import Pool
p = Pool(64)
ans = p.map(compute_video, input_lst)
top1 = [x[1] for x in ans]
top5 = [x[2] for x in ans]
pred = [x[0] for x in ans]
label = [x[3] for x in ans]
final_top1, final_top5 = np.mean(top1), np.mean(top5)
# print(final_top1*100 ,final_top5*100)
return final_top1 * 100, final_top5 * 100
def compute_video(lst):
i, video_id, data, label = lst
feat = [x for x in data]
feat = np.mean(feat, axis=0)
pred = np.argmax(feat)
top1 = (int(pred) == int(label)) * 1.0
top5 = (int(label) in np.argsort(-feat)[:5]) * 1.0
return [pred, top1, top5, int(label)]
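# --- illustrative sketch (editor addition, not part of the original module) ---
# The per-clip text format written by final_test() and consumed by merge():
# "<video_id> [<logits>] <label> <chunk_nb> <split_nb>". The parsing below
# mirrors the string handling inside merge(); names are local to this sketch.
def _result_line_sketch():
    line = "video_0001 [0.1, 2.3, -0.5] 1 0 2"
    name = line.split('[')[0]                     # "video_0001 "
    data = np.fromstring(line.split('[')[1].split(']')[0], dtype=np.float64, sep=',')
    label = line.split(']')[1].split(' ')[1]      # "1"
    chunk_nb = line.split(']')[1].split(' ')[2]   # "0"
    split_nb = line.split(']')[1].split(' ')[3]   # "2"
    return name, data, label, chunk_nb, split_nb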
| InternVideo-main | Pretrain/VideoMAE/engine_for_finetuning.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
import random
import numpy as np
import torch
from einops import rearrange
def topk(matrix, K, axis=1):
if axis == 0:
row_index = np.arange(matrix.shape[1 - axis])
topk_index = np.argpartition(-matrix, K, axis=axis)[0:K, :]
topk_data = matrix[topk_index, row_index]
topk_index_sort = np.argsort(-topk_data, axis=axis)
topk_data_sort = topk_data[topk_index_sort, row_index]
topk_index_sort = topk_index[0:K, :][topk_index_sort, row_index]
else:
column_index = np.arange(matrix.shape[1 - axis])[:, None]
topk_index = np.argpartition(-matrix, K, axis=axis)[:, 0:K]
topk_data = matrix[column_index, topk_index]
topk_index_sort = np.argsort(-topk_data, axis=axis)
topk_data_sort = topk_data[column_index, topk_index_sort]
topk_index_sort = topk_index[:, 0:K][column_index, topk_index_sort]
return (topk_data_sort, topk_index_sort)
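# --- illustrative sketch (editor addition, not part of the original module) ---
# topk() returns the K largest entries per row (axis=1) in descending order,
# together with their column indices. Local to this sketch only.
def _topk_sketch():
    m = np.array([[1.0, 9.0, 3.0, 7.0],
                  [4.0, 2.0, 8.0, 6.0]])
    values, indices = topk(m, K=2, axis=1)
    # values == [[9., 7.], [8., 6.]]; indices == [[1, 3], [2, 3]]
    return values, indices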
class MaskingGenerator:
def update_state(self, epoch):
pass
class RandomMaskingGenerator(MaskingGenerator):
def __init__(self, input_size, mask_ratio):
if not isinstance(input_size, tuple):
input_size = (input_size, ) * 3
self.frames, self.height, self.width = input_size
self.num_patches = self.frames * self.height * self.width # 8x14x14
self.num_mask = int(mask_ratio * self.num_patches)
def __repr__(self):
repr_str = "Mask: total patches {}, mask patches {}".format(
self.num_patches, self.num_mask)
return repr_str
def __call__(self):
mask = np.hstack([
np.zeros(self.num_patches - self.num_mask),
np.ones(self.num_mask),
])
np.random.shuffle(mask)
return mask # [196*8]
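# --- illustrative sketch (editor addition, not part of the original module) ---
# With an 8x14x14 token grid the generator returns a flat 0/1 vector of length
# 1568 with exactly num_mask = int(mask_ratio * 1568) ones (masked positions),
# shuffled uniformly at random. Local to this sketch only.
def _random_mask_sketch():
    gen = RandomMaskingGenerator(input_size=(8, 14, 14), mask_ratio=0.9)
    mask = gen()
    assert mask.shape == (8 * 14 * 14,) and int(mask.sum()) == gen.num_mask
    return mask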
class Cell():
def __init__(self, num_masks, num_patches):
self.num_masks = num_masks
self.num_patches = num_patches
self.size = num_masks + num_patches
self.queue = np.hstack([np.ones(num_masks), np.zeros(num_patches)])
self.queue_ptr = 0
def set_ptr(self, pos=-1):
self.queue_ptr = np.random.randint(self.size) if pos < 0 else pos
def get_cell(self):
cell_idx = (np.arange(self.size) + self.queue_ptr) % self.size
return self.queue[cell_idx]
def run_cell(self):
self.queue_ptr += 1
class CellRunningMaskingGenerator(MaskingGenerator):
def __init__(self, input_size, mask_ratio=0.5, is_train=True):
self.frames, self.height, self.width = input_size
self.mask_ratio = mask_ratio
self.ptr_pos = -1 if is_train else 0
num_masks_per_cell = int(4 * self.mask_ratio)
assert 0 < num_masks_per_cell < 4
num_patches_per_cell = 4 - num_masks_per_cell
self.cell = Cell(num_masks_per_cell, num_patches_per_cell)
self.cell_size = self.cell.size
mask_list = []
for ptr_pos in range(self.cell_size):
self.cell.set_ptr(ptr_pos)
mask = []
for _ in range(self.frames):
self.cell.run_cell()
mask_unit = self.cell.get_cell().reshape(2, 2)
mask_map = np.tile(mask_unit,
[self.height // 2, self.width // 2])
mask.append(mask_map)
mask = np.stack(mask, axis=0).flatten()
mask_list.append(mask)
self.all_mask_maps = np.stack(mask_list, axis=0)
def __repr__(self):
repr_str = f"Cell Running Mask with mask ratio {self.mask_ratio}"
return repr_str
def __call__(self, batch_size):
mask_idx_list = np.random.randint(self.cell_size, size=(batch_size))
return torch.as_tensor(self.all_mask_maps[mask_idx_list])
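# --- illustrative sketch (editor addition, not part of the original module) ---
# The running-cell scheme tiles a rotating 2x2 pattern over each 14x14 frame,
# so every frame masks exactly mask_ratio of its patches while the masked
# positions shift from frame to frame. Local to this sketch only.
def _cell_mask_sketch():
    gen = CellRunningMaskingGenerator(input_size=(8, 14, 14), mask_ratio=0.5)
    masks = gen(batch_size=4)                          # (4, 8 * 14 * 14)
    per_frame = masks.view(4, 8, 14 * 14).sum(dim=-1)
    assert bool((per_frame == 14 * 14 // 2).all())     # 98 masked patches per frame
    return masks.shape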
class RandomDecodeMaskingGenerator(MaskingGenerator):
def __init__(self, input_size, mask_ratio=0.5):
self.frame, self.height, self.width = input_size
self.mask_ratio = mask_ratio
self.num_patches = self.frame * self.height * self.width # 8x14x14
self.num_mask = int(mask_ratio * self.num_patches)
def __repr__(self):
repr_str = "Mask: total patches {}, mask patches {}".format(
self.num_patches, self.num_mask)
return repr_str
def __call__(self, batch_size):
rand = torch.as_tensor(np.random.randn(batch_size, self.num_patches))
mask_idx = torch.topk(rand, self.num_mask, dim=-1,
sorted=False).indices
mask = torch.zeros(batch_size,
self.num_patches).scatter_(-1, mask_idx, 1)
return mask
class TemporalConsistencyMaskingGenerator(MaskingGenerator):
def __init__(self, input_size, mask_ratio):
self.frames, self.height, self.width = input_size
self.num_patches_per_frame = self.height * self.width # 14x14
self.total_patches = self.frames * self.num_patches_per_frame
self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame)
self.total_masks = self.frames * self.num_masks_per_frame
def __repr__(self):
repr_str = "Mask: total patches {}, mask patches {}".format(
self.total_patches, self.total_masks)
return repr_str
def __call__(self):
mask_per_frame = np.hstack([
np.zeros(self.num_patches_per_frame - self.num_masks_per_frame),
np.ones(self.num_masks_per_frame),
])
np.random.shuffle(mask_per_frame)
mask = np.tile(mask_per_frame, (self.frames, 1)).flatten()
return mask # [196*8]
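# --- illustrative sketch (editor addition, not part of the original module) ---
# "Tube" masking: the same spatial pattern is repeated for every frame, so
# reshaping the flat mask to (frames, H * W) yields identical rows. Local to
# this sketch only.
def _tube_mask_sketch():
    gen = TemporalConsistencyMaskingGenerator(input_size=(8, 14, 14), mask_ratio=0.9)
    mask = gen().reshape(8, 14 * 14)
    assert (mask == mask[0]).all()
    return mask.shape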
class TemporalProgressiveMaskingGenerator(MaskingGenerator):
def __init__(self, input_size, mask_ratio):
self.frames, self.height, self.width = input_size
self.num_patches_per_frame = self.height * self.width # 14x14
self.total_patches = self.frames * self.num_patches_per_frame # 8x14x14
max_keep_patch = int(
(1 - mask_ratio) * self.num_patches_per_frame) # 1 - 0.75 = 0.25
min_keep_patch = int(0.05 * self.num_patches_per_frame)
self.keep_patches_list = np.linspace(max_keep_patch, min_keep_patch,
self.frames).astype(int)
self.total_masks = self.total_patches - self.keep_patches_list.sum()
def __repr__(self):
repr_str = "Mask: total patches {}, mask patches {}".format(
self.total_patches, self.total_masks)
return repr_str
def __call__(self):
rand = np.random.randn(1, self.num_patches_per_frame)
mask = np.zeros((self.frames, self.num_patches_per_frame),
                        dtype=bool)
for i in range(self.frames):
top_k, _ = topk(rand, self.keep_patches_list[i])
the_topk = top_k[0][-1]
mask[i] = rand <= the_topk
mask = mask.flatten().astype(int)
return mask # [196*8]
class TemporalCenteringProgressiveMaskingGenerator(MaskingGenerator):
def __init__(self, input_size, mask_ratio):
self.num_frames, self.height, self.width = input_size
self.num_patches_per_frame = self.height * self.width # 14x14
self.total_patches = self.num_frames * self.num_patches_per_frame # 8x14x14
min_mask_ratio = mask_ratio # 0.9 -> keep 19 token
# 0.979 -> keep 4 token 0.95 -> keep 9 token
max_mask_ratio = 0.95
max_keep_patch = int(
(1 - min_mask_ratio) * self.num_patches_per_frame) # 1 - 0.9 = 0.1
min_keep_patch = int((1 - max_mask_ratio) *
self.num_patches_per_frame) # 1 - 0.95 = 0.05
patches_list = np.linspace(max_keep_patch, min_keep_patch,
self.num_frames // 2).astype(int).tolist()
self.keep_patches_list = patches_list.copy()
patches_list.reverse()
self.keep_patches_list = patches_list + self.keep_patches_list
self.total_masks = self.total_patches - sum(self.keep_patches_list)
def __repr__(self):
repr_str = "Mask: total patches {}, mask patches {}".format(
self.total_patches, self.total_masks)
return repr_str
def __call__(self):
rand = np.random.randn(1, self.num_patches_per_frame)
mask = np.zeros((self.num_frames, self.num_patches_per_frame),
                        dtype=bool)
for i in range(self.num_frames):
top_k, _ = topk(rand, self.keep_patches_list[i])
the_topk = top_k[0][-1]
mask[i] = rand <= the_topk
mask = mask.flatten().astype(int)
return mask # [196*8]
| InternVideo-main | Pretrain/VideoMAE/masking_generator.py |
# -*- coding: utf-8 -*-
import argparse
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import os
from pathlib import Path
# import datetime
import numpy as np
# import time
import torch
import torch.backends.cudnn as cudnn
from decord import VideoReader, cpu
from einops import rearrange
from petrel_client.client import Client
from PIL import Image
from timm.data import create_transform
from timm.data.constants import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
IMAGENET_INCEPTION_MEAN,
IMAGENET_INCEPTION_STD,
)
from timm.models import create_model
from torchvision import datasets, transforms
from torchvision.transforms import ToPILImage
import modeling_pretrain
import utils
from datasets import DataAugmentationForMAE
from kinetics import VideoClsDataset
from mae import VideoMAE
from masking_generator import (
RandomMaskingGenerator,
TemporalCenteringProgressiveMaskingGenerator,
TemporalConsistencyMaskingGenerator,
TemporalProgressiveMaskingGenerator,
)
from transforms import *
class DataAugmentationForMAE(object):
def __init__(self, args):
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
self.input_mean = [0.485, 0.456, 0.406]
self.input_std = [0.229, 0.224, 0.225]
div = True
roll = False
normalize = GroupNormalize(self.input_mean, self.input_std)
self.train_augmentation = GroupCenterCrop(args.input_size)
# self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66])
self.transform = transforms.Compose([
# GroupScale((240,320)),
self.train_augmentation,
Stack(roll=roll),
ToTorchFormatTensor(div=div),
normalize,
])
if args.mask_type == 'random':
self.masked_position_generator = RandomMaskingGenerator(
args.window_size, args.mask_ratio)
elif args.mask_type == 't_consist':
self.masked_position_generator = TemporalConsistencyMaskingGenerator(
args.window_size, args.mask_ratio)
def __call__(self, images):
process_data, _ = self.transform(images)
return process_data, self.masked_position_generator()
def __repr__(self):
repr = "(DataAugmentationForBEiT,\n"
repr += " transform = %s,\n" % str(self.transform)
repr += " Masked position generator = %s,\n" % str(
self.masked_position_generator)
repr += ")"
return repr
def get_args():
parser = argparse.ArgumentParser('MAE visualization reconstruction script',
add_help=False)
parser.add_argument('img_path', type=str, help='input image path')
parser.add_argument('save_path', type=str, help='save image path')
parser.add_argument('model_path',
type=str,
help='checkpoint path of model')
parser.add_argument(
'--mask_type',
default='random',
choices=['random', 't_consist', 't_progressive', 't_center_prog'],
type=str,
help='masked strategy of visual tokens/patches')
parser.add_argument('--num_frames', type=int, default=16)
parser.add_argument('--sampling_rate', type=int, default=4)
parser.add_argument('--decoder_depth',
default=4,
type=int,
help='depth of decoder')
parser.add_argument('--input_size',
default=224,
type=int,
help='images input size for backbone')
parser.add_argument('--device',
default='cuda:0',
help='device to use for training / testing')
parser.add_argument('--imagenet_default_mean_and_std',
default=True,
action='store_true')
parser.add_argument(
'--mask_ratio',
default=0.75,
type=float,
help='ratio of the visual tokens/patches need be masked')
# Model parameters
parser.add_argument('--model',
default='pretrain_mae_base_patch16_224',
type=str,
metavar='MODEL',
help='Name of model to vis')
parser.add_argument('--drop_path',
type=float,
default=0.0,
metavar='PCT',
                        help='Drop path rate (default: 0.0)')
return parser.parse_args()
def get_model(args):
print(f"Creating model: {args.model}")
model = create_model(args.model,
pretrained=False,
drop_path_rate=args.drop_path,
drop_block_rate=None,
decoder_depth=args.decoder_depth)
return model
def main(args):
print(args)
device = torch.device(args.device)
cudnn.benchmark = True
model = get_model(args)
patch_size = model.encoder.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
args.window_size = (args.num_frames // 2, args.input_size // patch_size[0],
args.input_size // patch_size[1])
args.patch_size = patch_size
model.to(device)
checkpoint = torch.load(args.model_path, map_location='cpu')
model.load_state_dict(checkpoint['model'])
model.eval()
if args.save_path:
Path(args.save_path).mkdir(parents=True, exist_ok=True)
tmp = np.arange(0, 32, 2) + 60
frame_id_list = tmp.tolist()
if args.img_path.startswith("s3:"):
client = Client()
video_bytes = client.get(args.img_path)
vr = VideoReader(memoryview(video_bytes), mc=True, ctx=cpu(0))
else:
with open(args.img_path, 'rb') as f:
vr = VideoReader(f, ctx=cpu(0))
duration = len(vr)
new_length = 1
new_step = 1
skip_length = new_length * new_step
video_data = vr.get_batch(frame_id_list).asnumpy()
print(video_data.shape)
img = [
Image.fromarray(video_data[vid, :, :, :]).convert('RGB')
for vid, _ in enumerate(frame_id_list)
]
transforms = DataAugmentationForMAE(args)
img, bool_masked_pos = transforms((img, None)) # T*C,H,W
# print(img.shape)
img = img.view((args.num_frames, 3) + img.size()[-2:]).transpose(
0, 1) # T*C,H,W -> T,C,H,W -> C,T,H,W
# img = img.view(( -1 , args.num_frames) + img.size()[-2:])
bool_masked_pos = torch.from_numpy(bool_masked_pos)
with torch.no_grad():
# img = img[None, :]
# bool_masked_pos = bool_masked_pos[None, :]
img = img.unsqueeze(0)
print(img.shape)
bool_masked_pos = bool_masked_pos.unsqueeze(0)
img = img.to(device, non_blocking=True)
bool_masked_pos = bool_masked_pos.to(
device, non_blocking=True).flatten(1).to(torch.bool)
outputs = model(img, bool_masked_pos)
#save original img
mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device)[None, :, None,
None, None]
std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device)[None, :, None,
None, None]
# unnorm_images = images * std + mean # in [0, 1]
print(img.shape)
ori_img = img * std + mean # in [0, 1]
imgs = [
ToPILImage()(ori_img[0, :, vid, :, :].cpu())
for vid, _ in enumerate(frame_id_list)
]
for id, im in enumerate(imgs):
im.save(f"{args.save_path}/ori_img{id}.jpg")
img_squeeze = rearrange(
ori_img,
'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c',
p0=2,
p1=patch_size[0],
p2=patch_size[0])
img_norm = (img_squeeze - img_squeeze.mean(dim=-2, keepdim=True)) / (
img_squeeze.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6)
img_patch = rearrange(img_norm, 'b n p c -> b n (p c)')
img_patch[bool_masked_pos] = outputs
#make mask
mask = torch.ones_like(img_patch)
mask[bool_masked_pos] = 0
mask = rearrange(mask, 'b n (p c) -> b n p c', c=3)
mask = rearrange(mask,
'b (t h w) (p0 p1 p2) c -> b c (t p0) (h p1) (w p2) ',
p0=2,
p1=patch_size[0],
p2=patch_size[1],
h=14,
w=14)
#save reconstruction img
rec_img = rearrange(img_patch, 'b n (p c) -> b n p c', c=3)
        # Notice: to visualize the reconstructed image, we add the original per-patch mean and variance back to the prediction. See Issue #40.
rec_img = rec_img * (
img_squeeze.var(dim=-2, unbiased=True, keepdim=True).sqrt() +
1e-6) + img_squeeze.mean(dim=-2, keepdim=True)
rec_img = rearrange(
rec_img,
'b (t h w) (p0 p1 p2) c -> b c (t p0) (h p1) (w p2)',
p0=2,
p1=patch_size[0],
p2=patch_size[1],
h=14,
w=14)
imgs = [
ToPILImage()(rec_img[0, :, vid, :, :].cpu().clamp(0, 0.996))
for vid, _ in enumerate(frame_id_list)
]
# imgs = [ ToPILImage()(rec_img[0, :, vid, :, :].cpu().clip(0,0.996)) for vid, _ in enumerate(frame_id_list) ]
for id, im in enumerate(imgs):
im.save(f"{args.save_path}/rec_img{id}.jpg")
#save random mask img
img_mask = rec_img * mask
imgs = [
ToPILImage()(img_mask[0, :, vid, :, :].cpu())
for vid, _ in enumerate(frame_id_list)
]
for id, im in enumerate(imgs):
im.save(f"{args.save_path}/mask_img{id}.jpg")
if __name__ == '__main__':
opts = get_args()
main(opts)
| InternVideo-main | Pretrain/VideoMAE/run_mae_vis.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
import numbers
import random
import warnings
import numpy as np
import torch
import torchvision
import torchvision.transforms.functional as F
from PIL import Image, ImageOps
class ToNumpy:
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return np_img
class ToTensor:
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return torch.from_numpy(np_img).to(dtype=self.dtype)
_pil_interpolation_to_str = {
Image.NEAREST: 'PIL.Image.NEAREST',
Image.BILINEAR: 'PIL.Image.BILINEAR',
Image.BICUBIC: 'PIL.Image.BICUBIC',
Image.LANCZOS: 'PIL.Image.LANCZOS',
Image.HAMMING: 'PIL.Image.HAMMING',
Image.BOX: 'PIL.Image.BOX',
}
def _pil_interp(method):
if method == 'bicubic':
return Image.BICUBIC
elif method == 'lanczos':
return Image.LANCZOS
elif method == 'hamming':
return Image.HAMMING
else:
# default bilinear, do we want to allow nearest?
return Image.BILINEAR
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
class RandomResizedCropAndInterpolationWithTwoPic:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, second_size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
interpolation='bilinear', second_interpolation='lanczos'):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if second_size is not None:
if isinstance(second_size, tuple):
self.second_size = second_size
else:
self.second_size = (second_size, second_size)
else:
self.second_size = None
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
if interpolation == 'random':
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = _pil_interp(interpolation)
self.second_interpolation = _pil_interp(second_interpolation)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
if self.second_size is None:
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
else:
return F.resized_crop(img, i, j, h, w, self.size, interpolation), \
F.resized_crop(img, i, j, h, w, self.second_size, self.second_interpolation)
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation])
else:
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0}'.format(interpolate_str)
if self.second_size is not None:
format_string += ', second_size={0}'.format(self.second_size)
format_string += ', second_interpolation={0}'.format(_pil_interpolation_to_str[self.second_interpolation])
format_string += ')'
return format_string
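# --- usage sketch (editor addition, not part of the original module) ---
# With a second_size the transform returns two crops of the same region at two
# resolutions / interpolations (e.g. a pixel branch and a tokenizer branch).
# Local to this sketch only.
def _two_pic_crop_sketch():
    img = Image.fromarray(np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8))
    t = RandomResizedCropAndInterpolationWithTwoPic(
        size=224, second_size=112,
        interpolation='bicubic', second_interpolation='lanczos')
    big, small = t(img)
    assert big.size == (224, 224) and small.size == (112, 112)
    return big, small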
class GroupRandomCrop(object):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img_tuple):
img_group, label = img_tuple
w, h = img_group[0].size
th, tw = self.size
out_images = list()
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
for img in img_group:
assert(img.size[0] == w and img.size[1] == h)
if w == tw and h == th:
out_images.append(img)
else:
out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
return (out_images, label)
class GroupCenterCrop(object):
def __init__(self, size):
self.worker = torchvision.transforms.CenterCrop(size)
def __call__(self, img_tuple):
img_group, label = img_tuple
return ([self.worker(img) for img in img_group], label)
class GroupRandomHorizontalFlip(object):
"""Randomly horizontally flips the given PIL.Image with a probability of 0.5
"""
def __init__(self, selective_flip=True, is_flow=False):
self.is_flow = is_flow
self.class_LeftRight = [86,87,93,94,166,167] if selective_flip else []
def __call__(self, img_tuple, is_flow=False):
img_group, label = img_tuple
v = random.random()
if (label not in self.class_LeftRight) and v < 0.5:
ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group]
if self.is_flow:
for i in range(0, len(ret), 2):
ret[i] = ImageOps.invert(ret[i]) # invert flow pixel values when flipping
return (ret, label)
else:
return img_tuple
class GroupNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor_tuple):
tensor, label = tensor_tuple
rep_mean = self.mean * (tensor.size()[0]//len(self.mean))
rep_std = self.std * (tensor.size()[0]//len(self.std))
# TODO: make efficient
for t, m, s in zip(tensor, rep_mean, rep_std):
t.sub_(m).div_(s)
return (tensor,label)
class GroupGrayScale(object):
def __init__(self, size):
self.worker = torchvision.transforms.Grayscale(size)
def __call__(self, img_tuple):
img_group, label = img_tuple
return ([self.worker(img) for img in img_group], label)
class GroupScale(object):
""" Rescales the input PIL.Image to the given 'size'.
'size' will be the size of the smaller edge.
For example, if height > width, then image will be
rescaled to (size * height / width, size)
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.worker = torchvision.transforms.Resize(size, interpolation)
def __call__(self, img_tuple):
img_group, label = img_tuple
return ([self.worker(img) for img in img_group], label)
class GroupOverSample(object):
def __init__(self, crop_size, scale_size=None):
self.crop_size = crop_size if not isinstance(crop_size, int) else (crop_size, crop_size)
if scale_size is not None:
self.scale_worker = GroupScale(scale_size)
else:
self.scale_worker = None
def __call__(self, img_tuple):
if self.scale_worker is not None:
img_tuple = self.scale_worker(img_tuple)
img_group, label = img_tuple
image_w, image_h = img_group[0].size
crop_w, crop_h = self.crop_size
offsets = GroupMultiScaleCrop.fill_fix_offset(False, image_w, image_h, crop_w, crop_h)
oversample_group = list()
for o_w, o_h in offsets:
normal_group = list()
flip_group = list()
for i, img in enumerate(img_group):
crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
normal_group.append(crop)
flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
if img.mode == 'L' and i % 2 == 0:
flip_group.append(ImageOps.invert(flip_crop))
else:
flip_group.append(flip_crop)
oversample_group.extend(normal_group)
oversample_group.extend(flip_group)
return (oversample_group, label)
class GroupFullResSample(object):
def __init__(self, crop_size, scale_size=None, flip=True):
self.crop_size = crop_size if not isinstance(crop_size, int) else (crop_size, crop_size)
if scale_size is not None:
self.scale_worker = GroupScale(scale_size)
else:
self.scale_worker = None
self.flip = flip
def __call__(self, img_tuple):
if self.scale_worker is not None:
img_tuple = self.scale_worker(img_tuple)
img_group, label = img_tuple
image_w, image_h = img_group[0].size
crop_w, crop_h = self.crop_size
w_step = (image_w - crop_w) // 4
h_step = (image_h - crop_h) // 4
offsets = list()
offsets.append((0 * w_step, 2 * h_step)) # left
offsets.append((4 * w_step, 2 * h_step)) # right
offsets.append((2 * w_step, 2 * h_step)) # center
oversample_group = list()
for o_w, o_h in offsets:
normal_group = list()
flip_group = list()
for i, img in enumerate(img_group):
crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
normal_group.append(crop)
if self.flip:
flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
if img.mode == 'L' and i % 2 == 0:
flip_group.append(ImageOps.invert(flip_crop))
else:
flip_group.append(flip_crop)
oversample_group.extend(normal_group)
oversample_group.extend(flip_group)
return (oversample_group, label)
class GroupMultiScaleCrop(object):
def __init__(self, input_size, scales=None, max_distort=1, fix_crop=True, more_fix_crop=True):
        self.scales = scales if scales is not None else [1, .875, .75, .66]
self.max_distort = max_distort
self.fix_crop = fix_crop
self.more_fix_crop = more_fix_crop
self.input_size = input_size if not isinstance(input_size, int) else [input_size, input_size]
self.interpolation = Image.BILINEAR
def __call__(self, img_tuple):
img_group, label = img_tuple
im_size = img_group[0].size
crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size)
crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h)) for img in img_group]
ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation) for img in crop_img_group]
return (ret_img_group, label)
def _sample_crop_size(self, im_size):
image_w, image_h = im_size[0], im_size[1]
# find a crop size
base_size = min(image_w, image_h)
crop_sizes = [int(base_size * x) for x in self.scales]
crop_h = [self.input_size[1] if abs(x - self.input_size[1]) < 3 else x for x in crop_sizes]
crop_w = [self.input_size[0] if abs(x - self.input_size[0]) < 3 else x for x in crop_sizes]
pairs = []
for i, h in enumerate(crop_h):
for j, w in enumerate(crop_w):
if abs(i - j) <= self.max_distort:
pairs.append((w, h))
crop_pair = random.choice(pairs)
if not self.fix_crop:
w_offset = random.randint(0, image_w - crop_pair[0])
h_offset = random.randint(0, image_h - crop_pair[1])
else:
w_offset, h_offset = self._sample_fix_offset(image_w, image_h, crop_pair[0], crop_pair[1])
return crop_pair[0], crop_pair[1], w_offset, h_offset
def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h):
offsets = self.fill_fix_offset(self.more_fix_crop, image_w, image_h, crop_w, crop_h)
return random.choice(offsets)
@staticmethod
def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
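# Builds candidate (w_offset, h_offset) crop positions on a 4-step grid:
# the four corners plus the center, and 8 extra edge/quarter positions
# (13 in total) when more_fix_crop is enabled.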
w_step = (image_w - crop_w) // 4
h_step = (image_h - crop_h) // 4
ret = list()
ret.append((0, 0)) # upper left
ret.append((4 * w_step, 0)) # upper right
ret.append((0, 4 * h_step)) # lower left
ret.append((4 * w_step, 4 * h_step)) # lower right
ret.append((2 * w_step, 2 * h_step)) # center
if more_fix_crop:
ret.append((0, 2 * h_step)) # center left
ret.append((4 * w_step, 2 * h_step)) # center right
ret.append((2 * w_step, 4 * h_step)) # lower center
ret.append((2 * w_step, 0 * h_step)) # upper center
ret.append((1 * w_step, 1 * h_step)) # upper left quarter
ret.append((3 * w_step, 1 * h_step)) # upper right quarter
ret.append((1 * w_step, 3 * h_step)) # lower left quarter
ret.append((3 * w_step, 3 * h_step)) # lower right quarter
return ret
class GroupRandomSizedCrop(object):
"""Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size
and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio.
This is popularly used to train the Inception networks
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, img_tuple):
img_group, label = img_tuple
for attempt in range(10):
area = img_group[0].size[0] * img_group[0].size[1]
target_area = random.uniform(0.08, 1.0) * area
aspect_ratio = random.uniform(3. / 4, 4. / 3)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img_group[0].size[0] and h <= img_group[0].size[1]:
x1 = random.randint(0, img_group[0].size[0] - w)
y1 = random.randint(0, img_group[0].size[1] - h)
found = True
break
else:
found = False
x1 = 0
y1 = 0
if found:
out_group = list()
for img in img_group:
img = img.crop((x1, y1, x1 + w, y1 + h))
assert(img.size == (w, h))
out_group.append(img.resize((self.size, self.size), self.interpolation))
return (out_group, label)
else:
# Fallback
scale = GroupScale(self.size, interpolation=self.interpolation)
crop = GroupRandomCrop(self.size)
return crop(scale((img_group, label)))
class Stack(object):
def __init__(self, roll=False):
self.roll = roll
def __call__(self, img_tuple):
img_group, label = img_tuple
if img_group[0].mode == 'L':
return (np.concatenate([np.expand_dims(x, 2) for x in img_group], axis=2), label)
elif img_group[0].mode == 'RGB':
if self.roll:
return (np.concatenate([np.array(x)[:, :, ::-1] for x in img_group], axis=2), label)
else:
return (np.concatenate(img_group, axis=2), label)
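# Stack concatenates the clip along the channel axis: e.g. 16 RGB frames of
# 224x224 become one (224, 224, 48) array, which ToTorchFormatTensor below
# turns into a (48, 224, 224) float tensor.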
class ToTorchFormatTensor(object):
""" Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255]
to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """
def __init__(self, div=True):
self.div = div
def __call__(self, pic_tuple):
pic, label = pic_tuple
if isinstance(pic, np.ndarray):
# handle numpy array
img = torch.from_numpy(pic).permute(2, 0, 1).contiguous()
else:
# handle PIL Image
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
img = img.view(pic.size[1], pic.size[0], len(pic.mode))
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
return (img.float().div(255.) if self.div else img.float(), label)
class IdentityTransform(object):
def __call__(self, data):
return data
# if __name__ == "__main__":
# trans = torchvision.transforms.Compose([
# GroupScale(256),
# GroupRandomCrop(224),
# Stack(),
# ToTorchFormatTensor(),
# GroupNormalize(
# mean=[.485, .456, .406],
# std=[.229, .224, .225]
# )]
# )
# im = Image.open('../tensorflow-model-zoo.torch/lena_299.png')
# color_group = [im] * 3
# rst = trans(color_group)
# gray_group = [im.convert('L')] * 9
# gray_rst = trans(gray_group)
# trans2 = torchvision.transforms.Compose([
# GroupRandomSizedCrop(256),
# Stack(),
# ToTorchFormatTensor(),
# GroupNormalize(
# mean=[.485, .456, .406],
# std=[.229, .224, .225])
# ])
# print(trans2(color_group)) | InternVideo-main | Pretrain/VideoMAE/transforms.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
import sys
from typing import Iterable
import torch
import torch.nn as nn
from einops import rearrange
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
import utils
def train_one_epoch(model: torch.nn.Module,
data_loader: Iterable,
optimizer: torch.optim.Optimizer,
device: torch.device,
epoch: int,
loss_scaler,
max_norm: float = 0,
patch_size: int = 16,
normlize_target: bool = True,
log_writer=None,
lr_scheduler=None,
start_steps=None,
lr_schedule_values=None,
wd_schedule_values=None):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter(
'lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter(
'min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
loss_func = nn.MSELoss()
data_loader.dataset.transform.masked_position_generator.update_state(epoch)
for step, batch in enumerate(
metric_logger.log_every(data_loader, print_freq, header)):
# assign learning rate & weight decay for each step
it = start_steps + step # global training iteration
if lr_schedule_values is not None or wd_schedule_values is not None:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group[
"lr"] = lr_schedule_values[it] * param_group["lr_scale"]
if wd_schedule_values is not None and param_group[
"weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
images, bool_masked_pos = batch
images = images.to(device, non_blocking=True)
bool_masked_pos = bool_masked_pos.to(
device, non_blocking=True).flatten(1).to(torch.bool)
# print(images.shape)
# import pdb; pdb.set_trace()
with torch.no_grad():
# calculate the predict label
mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device)[None, :,
None, None,
None]
std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device)[None, :,
None, None,
None]
unnorm_images = images * std + mean # in [0, 1]
if normlize_target:
images_squeeze = rearrange(
unnorm_images,
'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c',
p0=2,
p1=patch_size,
p2=patch_size)
images_norm = (images_squeeze - images_squeeze.mean(
dim=-2, keepdim=True)) / (
images_squeeze.var(dim=-2, unbiased=True,
keepdim=True).sqrt() + 1e-6)
# we find that the mean is about 0.48 and standard deviation is about 0.08.
images_patch = rearrange(images_norm, 'b n p c -> b n (p c)')
else:
images_patch = rearrange(
unnorm_images,
'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2 c)',
p0=2,
p1=patch_size,
p2=patch_size)
B, N, C = images_patch.shape
labels = images_patch[bool_masked_pos].reshape(B, -1, C)
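# Shape note (assuming the default 16x224x224 clip, patch_size=16 and tubelet
# size 2): images_patch is [B, 1568, 1536] (8*14*14 tubelets, 2*16*16*3 values
# each) and labels keeps only the masked tubelets as regression targets.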
if loss_scaler is None:
outputs = model(images, bool_masked_pos)
loss = loss_func(input=outputs, target=labels)
else:
with torch.cuda.amp.autocast():
outputs = model(images, bool_masked_pos)
loss = loss_func(input=outputs, target=labels)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(2)
optimizer.zero_grad()
if loss_scaler is None:
loss.backward()
if max_norm is None:
grad_norm = utils.get_grad_norm_(model.parameters())
else:
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(), max_norm)
optimizer.step()
loss_scale_value = 0
else:
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(
optimizer, 'is_second_order') and optimizer.is_second_order
grad_norm = loss_scaler(
loss,
optimizer,
clip_grad=max_norm,
parameters=model.parameters(),
create_graph=is_second_order)
loss_scale_value = loss_scaler.state_dict()["scale"]
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
metric_logger.update(loss_scale=loss_scale_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(loss_scale=loss_scale_value, head="opt")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
if lr_scheduler is not None:
lr_scheduler.step_update(start_steps + step)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
| InternVideo-main | Pretrain/VideoMAE/engine_for_pretraining.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from timm.models.layers import trunc_normal_ as __call_trunc_normal_
from timm.models.registry import register_model
from modeling_finetune import Block, PatchEmbed, _cfg, get_sinusoid_encoding_table
def trunc_normal_(tensor, mean=0., std=1.):
__call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std)
__all__ = [
'pretrain_mae_small_patch16_224',
'pretrain_mae_base_patch16_224',
'pretrain_mae_large_patch16_224',
'pretrain_mae_huge_patch16_224',
'pretrain_mae_giant_patch16_224',
'pretrain_mae_giant_patch14_224',
'pretrain_mae_gigantic_patch14_224',
]
class PretrainVisionTransformerEncoder(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=0,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=nn.LayerNorm,
init_values=None,
tubelet_size=2,
use_learnable_pos_emb=False,
with_cp=False):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
tubelet_size=tubelet_size)
num_patches = self.patch_embed.num_patches
self.with_cp = with_cp
# TODO: Add the cls token
# self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_learnable_pos_emb:
self.pos_embed = nn.Parameter(
torch.zeros(1, num_patches + 1, embed_dim))
else:
# sine-cosine positional embeddings
self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim)
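# Fixed (non-learnable) sine-cosine table; it is cast to the input's dtype
# and device inside forward_features() rather than being registered as a buffer.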
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
init_values=init_values) for i in range(depth)
])
self.norm = norm_layer(embed_dim)
self.head = nn.Linear(
embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if use_learnable_pos_emb:
trunc_normal_(self.pos_embed, std=.02)
# trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(
self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x, mask):
_, _, T, _, _ = x.shape
x = self.patch_embed(x)
# cls_tokens = self.cls_token.expand(batch_size, -1, -1)
# x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed.type_as(x).to(x.device).clone().detach()
B, _, C = x.shape
x_vis = x[~mask].reshape(B, -1, C) # ~mask means visible
for blk in self.blocks:
if self.with_cp:
x_vis = cp.checkpoint(blk, x_vis)
else:
x_vis = blk(x_vis)
x_vis = self.norm(x_vis)
return x_vis
def forward_check_variance(self, x, mask):
_, _, T, _, _ = x.shape
x = self.patch_embed(x)
# cls_tokens = self.cls_token.expand(batch_size, -1, -1)
# x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed.type_as(x).to(x.device).clone().detach()
B, _, C = x.shape
x_vis = x[~mask].reshape(B, -1, C) # ~mask means visible
# x [B, N, C]
avg_var_list = []
for blk in self.blocks:
x_vis = blk(x_vis)
avg_var = torch.mean(torch.var(x_vis, dim=-1))
avg_var_list.append(avg_var)
for i, avg_var in enumerate(avg_var_list):
print(f'avg variance of block {i}: {avg_var}', flush=True)
x_vis = self.norm(x_vis)
return x_vis
def forward(self, x, mask):
# x = self.forward_check_variance(x, mask)
x = self.forward_features(x, mask)
x = self.head(x)
return x
class PretrainVisionTransformerDecoder(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self,
patch_size=16,
num_classes=768,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=nn.LayerNorm,
init_values=None,
num_patches=196,
tubelet_size=2,
with_cp=False,
with_fp16=True):
super().__init__()
self.num_classes = num_classes
assert num_classes == 3 * tubelet_size * patch_size**2
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_size = patch_size
self.with_cp = with_cp
self.with_fp16 = with_fp16
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
init_values=init_values) for i in range(depth)
])
self.norm = norm_layer(embed_dim)
self.head = nn.Linear(
embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(
self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward(self, x, return_token_num):
with torch.cuda.amp.autocast_mode.autocast(enabled=self.with_fp16):
for blk in self.blocks:
if self.with_cp:
x = cp.checkpoint(blk, x)
else:
x = blk(x)
if return_token_num > 0:
# only return the mask tokens predict pixels
x = self.head(self.norm(x[:, -return_token_num:]))
else:
# [B, N, 3*16^2]
x = self.head(self.norm(x))
return x
class PretrainVisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(
self,
img_size=224,
patch_size=16,
encoder_in_chans=3,
encoder_num_classes=0,
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
decoder_num_classes=1536, # decoder_num_classes=768,
decoder_embed_dim=512,
decoder_depth=8,
decoder_num_heads=8,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=nn.LayerNorm,
init_values=0.,
use_learnable_pos_emb=False,
tubelet_size=2,
num_classes=0, # avoid the error from create_fn in timm
in_chans=0, # avoid the error from create_fn in timm
with_cp=False,
):
super().__init__()
self.encoder = PretrainVisionTransformerEncoder(
img_size=img_size,
patch_size=patch_size,
in_chans=encoder_in_chans,
num_classes=encoder_num_classes,
embed_dim=encoder_embed_dim,
depth=encoder_depth,
num_heads=encoder_num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=drop_path_rate,
norm_layer=norm_layer,
init_values=init_values,
tubelet_size=tubelet_size,
use_learnable_pos_emb=use_learnable_pos_emb,
with_cp=with_cp)
self.decoder = PretrainVisionTransformerDecoder(
patch_size=patch_size,
num_patches=self.encoder.patch_embed.num_patches,
num_classes=decoder_num_classes,
embed_dim=decoder_embed_dim,
depth=decoder_depth,
num_heads=decoder_num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=drop_path_rate,
norm_layer=norm_layer,
init_values=init_values,
tubelet_size=tubelet_size,
with_cp=with_cp,
with_fp16=True)
self.encoder_to_decoder = nn.Linear(
encoder_embed_dim, decoder_embed_dim, bias=False)
self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))
self.pos_embed = get_sinusoid_encoding_table(
self.encoder.patch_embed.num_patches, decoder_embed_dim)
trunc_normal_(self.mask_token, std=.02)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_num_layers(self):
return self.encoder.get_num_layers()
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token', 'mask_token'}
def forward(self, x, mask):
_, _, T, _, _ = x.shape
x_vis = self.encoder(x, mask) # [B, N_vis, C_e]
x_vis = self.encoder_to_decoder(x_vis) # [B, N_vis, C_d]
B, N_vis, C = x_vis.shape
# we do not unshuffle the visible tokens back into their original order;
# instead we gather the positional embeddings in the same (shuffled) order.
expand_pos_embed = self.pos_embed.expand(B, -1, -1).type_as(x).to(
x.device).clone().detach()
pos_emd_vis = expand_pos_embed[~mask].reshape(B, -1, C)
pos_emd_mask = expand_pos_embed[mask].reshape(B, -1, C)
x_full = torch.cat(
[x_vis + pos_emd_vis, self.mask_token + pos_emd_mask],
dim=1) # [B, N, C_d]
# note: if N_mask == 0, the decoder returns predictions for all tokens, i.e. [B, N, 3 * 16 * 16]
x = self.decoder(x_full,
pos_emd_mask.shape[1]) # [B, N_mask, 3 * 16 * 16]
return x
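# Rough shape walk-through (a sketch assuming a 16x224x224 clip, patch 16,
# tubelet 2 and a 90% mask ratio): the encoder processes ~157 visible tokens,
# encoder_to_decoder projects them to the decoder width, mask tokens plus
# positional embeddings are appended, and the decoder predicts the 1536-dim
# pixel targets for the masked positions only.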
@register_model
def pretrain_mae_small_patch16_224(pretrained=False, **kwargs):
model = PretrainVisionTransformer(
img_size=224,
patch_size=16,
encoder_embed_dim=384,
encoder_depth=12,
encoder_num_heads=6,
encoder_num_classes=0,
decoder_num_classes=1536, # decoder_num_classes=768,
decoder_embed_dim=192, #decoder_depth=4,
decoder_num_heads=3,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def pretrain_mae_base_patch16_224(pretrained=False, **kwargs):
model = PretrainVisionTransformer(
img_size=224,
patch_size=16,
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_num_classes=0,
decoder_num_classes=1536, # decoder_num_classes=768,
decoder_embed_dim=384, #decoder_depth=4,
decoder_num_heads=6,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def pretrain_mae_large_patch16_224(pretrained=False, **kwargs):
model = PretrainVisionTransformer(
img_size=224,
patch_size=16,
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_num_classes=0,
decoder_num_classes=1536, # decoder_num_classes=768,
decoder_embed_dim=512, #decoder_depth=8,
decoder_num_heads=8,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def pretrain_mae_huge_patch16_224(pretrained=False, **kwargs):
model = PretrainVisionTransformer(
img_size=224,
patch_size=16,
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_num_classes=0,
decoder_num_classes=1536, # decoder_num_classes=768,
decoder_embed_dim=512, #decoder_depth=8,
decoder_num_heads=8,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def pretrain_mae_giant_patch16_224(pretrained=False, **kwargs):
model = PretrainVisionTransformer(
img_size=224,
patch_size=16,
encoder_embed_dim=1408,
encoder_depth=40,
encoder_num_heads=16,
encoder_num_classes=0,
decoder_num_classes=1536,
decoder_embed_dim=512,
decoder_num_heads=8,
mlp_ratio=48 / 11,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def pretrain_mae_giant_patch14_224(pretrained=False, **kwargs):
model = PretrainVisionTransformer(
img_size=224,
patch_size=14,
encoder_embed_dim=1408,
encoder_depth=40,
encoder_num_heads=16,
encoder_num_classes=0,
decoder_num_classes=1176,
decoder_embed_dim=512,
decoder_num_heads=8,
mlp_ratio=48 / 11,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def pretrain_mae_gigantic_patch14_224(pretrained=False, **kwargs):
model = PretrainVisionTransformer(
img_size=224,
patch_size=14,
encoder_embed_dim=1664,
encoder_depth=48,
encoder_num_heads=16,
encoder_num_classes=0,
decoder_num_classes=1176, # decoder_num_classes=768,
decoder_embed_dim=512, #decoder_depth=8,
decoder_num_heads=8,
mlp_ratio=64 / 13,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
| InternVideo-main | Pretrain/VideoMAE/modeling_pretrain.py |
import io
import os
import random
import cv2
import decord
import numpy as np
import torch
from decord import VideoReader, cpu
from petrel_client.client import Client
from PIL import Image
class HybridVideoMAE(torch.utils.data.Dataset):
"""Load your own video classification dataset.
Parameters
----------
root : str, required.
Path to the root folder storing the dataset.
setting : str, required.
A text file describing the dataset, each line per video sample.
Each line has four space-separated items: (1) video path; (2) start frame index; (3) total number of frames (a negative value means the path is a video file to decode directly) and (4) video label.
train : bool, default True.
Whether to load the training or validation set.
test_mode : bool, default False.
Whether to perform evaluation on the test set.
Usually a three-crop or ten-crop evaluation strategy is involved.
name_pattern : str, default None.
The naming pattern of the decoded video frames.
For example, img_00012.jpg.
video_ext : str, default 'mp4'.
If video_loader is set to True, please specify the video format accordingly.
is_color : bool, default True.
Whether the loaded image is color or grayscale.
modality : str, default 'rgb'.
Input modalities, we support only rgb video frames for now.
Will add support for rgb difference image and optical flow image later.
num_segments : int, default 1.
Number of segments to evenly divide the video into clips.
A useful technique to obtain global video-level information.
Limin Wang, et al., Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.
num_crop : int, default 1.
Number of crops for each image. default is 1.
Common choices are three crops and ten crops during evaluation.
new_length : int, default 1.
The length of input video clip. Default is a single image, but it can be multiple video frames.
For example, new_length=16 means we will extract a video clip of consecutive 16 frames.
new_step : int, default 1.
Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.
new_step=2 means we will extract a video clip of every other frame.
temporal_jitter : bool, default False.
Whether to temporally jitter if new_step > 1.
video_loader : bool, default False.
Whether to use video loader to load data.
use_decord : bool, default True.
Whether to use Decord video loader to load data. Otherwise use mmcv video loader.
transform : function, default None.
A function that takes data and label and transforms them.
data_aug : str, default 'v1'.
Different types of data augmentation. Supports v1, v2, v3 and v4.
lazy_init : bool, default False.
If set to True, build a dataset instance without loading any dataset.
"""
def __init__(self,
root,
setting,
train=True,
test_mode=False,
name_pattern='img_{:05}.jpg',
video_ext='mp4',
is_color=True,
modality='rgb',
num_segments=1,
num_crop=1,
new_length=1,
new_step=1,
transform=None,
temporal_jitter=False,
video_loader=False,
use_decord=False,
lazy_init=False,
num_sample=1):
super(HybridVideoMAE, self).__init__()
self.root = root
self.setting = setting
self.train = train
self.test_mode = test_mode
self.is_color = is_color
self.modality = modality
self.num_segments = num_segments
self.num_crop = num_crop
self.new_length = new_length
self.new_step = new_step
self.skip_length = self.new_length * self.new_step
self.temporal_jitter = temporal_jitter
self.name_pattern = name_pattern
self.video_loader = video_loader
self.video_ext = video_ext
self.use_decord = use_decord
self.transform = transform
self.lazy_init = lazy_init
self.num_sample = num_sample
# temp for hybrid train
self.ava_fname_tmpl = 'image_{:06}.jpg'
self.ssv2_fname_tmpl = 'img_{:05}.jpg'
self.decord = True
self.client = Client()
if not self.lazy_init:
self.clips = self._make_dataset(root, setting)
if len(self.clips) == 0:
raise (
RuntimeError("Found 0 video clips in subfolders of: " +
root + "\n"
"Check your data directory (opt.data-dir)."))
def __getitem__(self, index):
while True:
try:
video_name, start_idx, total_frame, target = self.clips[index]
if total_frame < 0:
self.new_step = 4
self.skip_length = self.new_length * self.new_step
video_bytes = self.client.get(video_name)
decord_vr = VideoReader(io.BytesIO(video_bytes),
num_threads=1,
ctx=cpu(0))
duration = len(decord_vr)
segment_indices, skip_offsets = self._sample_train_indices(
duration)
frame_id_list = self.get_frame_id_list(
duration, segment_indices, skip_offsets)
video_data = decord_vr.get_batch(frame_id_list).asnumpy()
images = [
Image.fromarray(
video_data[vid, :, :, :]).convert('RGB')
for vid, _ in enumerate(frame_id_list)
]
break
else:
# ssv2 & ava
if 'SomethingV2' in video_name:
self.new_step = 2
self.skip_length = self.new_length * self.new_step
fname_tmpl = self.ssv2_fname_tmpl
elif 'AVA' in video_name:
self.new_step = 4
self.skip_length = self.new_length * self.new_step
fname_tmpl = self.ava_fname_tmpl
else:
self.new_step = 4
self.skip_length = self.new_length * self.new_step
fname_tmpl = self.name_pattern
segment_indices, skip_offsets = self._sample_train_indices(
total_frame)
frame_id_list = self.get_frame_id_list(
total_frame, segment_indices, skip_offsets)
images = []
for idx in frame_id_list:
frame_fname = os.path.join(
video_name, fname_tmpl.format(idx + start_idx))
img_bytes = self.client.get(frame_fname)
img_np = np.frombuffer(img_bytes, np.uint8)
img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
images.append(Image.fromarray(img))
break
break
except Exception as e:
print("Failed to load video from {} with error {}".format(
video_name, e))
index = random.randint(0, len(self.clips) - 1)
if self.num_sample > 1:
process_data_list = []
mask_list = []
for _ in range(self.num_sample):
process_data, mask = self.transform((images, None))
process_data = process_data.view(
(self.new_length, 3) + process_data.size()[-2:]).transpose(
0, 1)
process_data_list.append(process_data)
mask_list.append(mask)
return process_data_list, mask_list
else:
process_data, mask = self.transform((images, None))
# T*C,H,W -> T,C,H,W -> C,T,H,W
process_data = process_data.view(
(self.new_length, 3) + process_data.size()[-2:]).transpose(
0, 1)
return process_data, mask
def __len__(self):
return len(self.clips)
def _make_dataset(self, root, setting):
if not os.path.exists(setting):
raise (RuntimeError(
"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. "
% (setting)))
clips = []
with open(setting) as split_f:
data = split_f.readlines()
for line in data:
line_info = line.split(' ')
# line format: video_path, video_duration, video_label
if len(line_info) < 2:
raise (RuntimeError(
'Video input format is not correct, missing one or more elements. %s'
% line))
clip_path = os.path.join(root, line_info[0])
start_idx = int(line_info[1])
total_frame = int(line_info[2])
target = int(line_info[-1])  # the last column is the class label
item = (clip_path, start_idx, total_frame, target)
clips.append(item)
return clips
def _sample_train_indices(self, num_frames):
average_duration = (num_frames - self.skip_length +
1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)),
average_duration)
offsets = offsets + np.random.randint(average_duration,
size=self.num_segments)
elif num_frames > max(self.num_segments, self.skip_length):
offsets = np.sort(
np.random.randint(num_frames - self.skip_length + 1,
size=self.num_segments))
else:
offsets = np.zeros((self.num_segments, ))
if self.temporal_jitter:
skip_offsets = np.random.randint(self.new_step,
size=self.skip_length //
self.new_step)
else:
skip_offsets = np.zeros(self.skip_length // self.new_step,
dtype=int)
return offsets + 1, skip_offsets
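# Worked example (assuming num_segments=1, new_length=16, new_step=4 and a
# 300-frame video): skip_length=64, average_duration=237, so one random
# offset in [1, 237] is drawn and get_frame_id_list() then reads 16 frames
# spaced 4 apart starting from that offset.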
def get_frame_id_list(self, duration, indices, skip_offsets):
frame_id_list = []
for seg_ind in indices:
offset = int(seg_ind)
for i, _ in enumerate(range(0, self.skip_length, self.new_step)):
if offset + skip_offsets[i] <= duration:
frame_id = offset + skip_offsets[i] - 1
else:
frame_id = offset - 1
frame_id_list.append(frame_id)
if offset + self.new_step < duration:
offset += self.new_step
return frame_id_list
class VideoMAE(torch.utils.data.Dataset):
"""Load your own video classification dataset.
Parameters
----------
root : str, required.
Path to the root folder storing the dataset.
setting : str, required.
A text file describing the dataset, each line per video sample.
Each line has four space-separated items: (1) video path; (2) start frame index; (3) total number of frames (a negative value means the path is a video file to decode directly) and (4) video label.
train : bool, default True.
Whether to load the training or validation set.
test_mode : bool, default False.
Whether to perform evaluation on the test set.
Usually a three-crop or ten-crop evaluation strategy is involved.
name_pattern : str, default None.
The naming pattern of the decoded video frames.
For example, img_00012.jpg.
video_ext : str, default 'mp4'.
If video_loader is set to True, please specify the video format accordingly.
is_color : bool, default True.
Whether the loaded image is color or grayscale.
modality : str, default 'rgb'.
Input modalities, we support only rgb video frames for now.
Will add support for rgb difference image and optical flow image later.
num_segments : int, default 1.
Number of segments to evenly divide the video into clips.
A useful technique to obtain global video-level information.
Limin Wang, et al., Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.
num_crop : int, default 1.
Number of crops for each image. default is 1.
Common choices are three crops and ten crops during evaluation.
new_length : int, default 1.
The length of input video clip. Default is a single image, but it can be multiple video frames.
For example, new_length=16 means we will extract a video clip of consecutive 16 frames.
new_step : int, default 1.
Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.
new_step=2 means we will extract a video clip of every other frame.
temporal_jitter : bool, default False.
Whether to temporally jitter if new_step > 1.
video_loader : bool, default False.
Whether to use video loader to load data.
use_decord : bool, default True.
Whether to use Decord video loader to load data. Otherwise use mmcv video loader.
transform : function, default None.
A function that takes data and label and transforms them.
data_aug : str, default 'v1'.
Different types of data augmentation. Supports v1, v2, v3 and v4.
lazy_init : bool, default False.
If set to True, build a dataset instance without loading any dataset.
"""
def __init__(self,
root,
setting,
train=True,
test_mode=False,
name_pattern='img_{:05}.jpg',
video_ext='mp4',
is_color=True,
modality='rgb',
num_segments=1,
num_crop=1,
new_length=1,
new_step=1,
transform=None,
temporal_jitter=False,
video_loader=False,
use_decord=False,
lazy_init=False,
num_sample=1):
super(VideoMAE, self).__init__()
self.root = root
self.setting = setting
self.train = train
self.test_mode = test_mode
self.is_color = is_color
self.modality = modality
self.num_segments = num_segments
self.num_crop = num_crop
self.new_length = new_length
self.new_step = new_step
self.skip_length = self.new_length * self.new_step
self.temporal_jitter = temporal_jitter
self.name_pattern = name_pattern
self.video_loader = video_loader
self.video_ext = video_ext
self.use_decord = use_decord
self.transform = transform
self.lazy_init = lazy_init
self.num_sample = num_sample
self.decord = True
self.client = Client()
if not self.lazy_init:
self.clips = self._make_dataset(root, setting)
if len(self.clips) == 0:
raise (
RuntimeError("Found 0 video clips in subfolders of: " +
root + "\n"
"Check your data directory (opt.data-dir)."))
def __getitem__(self, index):
while True:
try:
video_name, start_idx, total_frame, target = self.clips[index]
if total_frame < 0: # load video
if video_name.startswith('s3:'):
video_bytes = self.client.get(video_name)
decord_vr = VideoReader(io.BytesIO(video_bytes),
num_threads=1,
ctx=cpu(0))
else:
decord_vr = VideoReader(video_name,
num_threads=1,
ctx=cpu(0))
duration = len(decord_vr)
segment_indices, skip_offsets = self._sample_train_indices(
duration)
frame_id_list = self.get_frame_id_list(
duration, segment_indices, skip_offsets)
video_data = decord_vr.get_batch(frame_id_list).asnumpy()
images = [
Image.fromarray(
video_data[vid, :, :, :]).convert('RGB')
for vid, _ in enumerate(frame_id_list)
]
else: # load frames
segment_indices, skip_offsets = self._sample_train_indices(
total_frame)
frame_id_list = self.get_frame_id_list(
total_frame, segment_indices, skip_offsets)
images = []
for idx in frame_id_list:
frame_fname = os.path.join(
video_name,
self.name_pattern.format(idx + start_idx))
if frame_fname.startswith('s3:'):
img_bytes = self.client.get(frame_fname)
img_np = np.frombuffer(img_bytes, np.uint8)
img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
img = Image.fromarray(img)
else:
with open(frame_fname, 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
images.append(img)
# both branches completed without error,
# so break out of the while-True loop
break
except Exception as e:
print("Failed to load video from {} with error {}".format(
video_name, e))
index = random.randint(0, len(self.clips) - 1)
if self.num_sample > 1:
process_data_list = []
mask_list = []
for _ in range(self.num_sample):
process_data, mask = self.transform((images, None))
process_data = process_data.view(
(self.new_length, 3) + process_data.size()[-2:]).transpose(
0, 1)
process_data_list.append(process_data)
mask_list.append(mask)
return process_data_list, mask_list
else:
process_data, mask = self.transform((images, None))
# T*C,H,W -> T,C,H,W -> C,T,H,W
process_data = process_data.view(
(self.new_length, 3) + process_data.size()[-2:]).transpose(
0, 1)
return process_data, mask
def __len__(self):
return len(self.clips)
def _make_dataset(self, root, setting):
if not os.path.exists(setting):
raise (RuntimeError(
"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. "
% (setting)))
clips = []
with open(setting) as split_f:
data = split_f.readlines()
for line in data:
line_info = line.split(' ')
# line format: video_path, video_duration, video_label
if len(line_info) < 2:
raise (RuntimeError(
'Video input format is not correct, missing one or more elements. %s'
% line))
clip_path = os.path.join(root, line_info[0])
start_idx = int(line_info[1])
total_frame = int(line_info[2])
target = int(line_info[-1])
item = (clip_path, start_idx, total_frame, target)
clips.append(item)
return clips
def _sample_train_indices(self, num_frames):
average_duration = (num_frames - self.skip_length +
1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)),
average_duration)
offsets = offsets + np.random.randint(average_duration,
size=self.num_segments)
elif num_frames > max(self.num_segments, self.skip_length):
offsets = np.sort(
np.random.randint(num_frames - self.skip_length + 1,
size=self.num_segments))
else:
offsets = np.zeros((self.num_segments, ))
if self.temporal_jitter:
skip_offsets = np.random.randint(self.new_step,
size=self.skip_length //
self.new_step)
else:
skip_offsets = np.zeros(self.skip_length // self.new_step,
dtype=int)
return offsets + 1, skip_offsets
def get_frame_id_list(self, duration, indices, skip_offsets):
frame_id_list = []
for seg_ind in indices:
offset = int(seg_ind)
for i, _ in enumerate(range(0, self.skip_length, self.new_step)):
if offset + skip_offsets[i] <= duration:
frame_id = offset + skip_offsets[i] - 1
else:
frame_id = offset - 1
frame_id_list.append(frame_id)
if offset + self.new_step < duration:
offset += self.new_step
return frame_id_list
class OldVideoMAE(torch.utils.data.Dataset):
"""Load your own video classification dataset.
Parameters
----------
root : str, required.
Path to the root folder storing the dataset.
setting : str, required.
A text file describing the dataset, each line per video sample.
There are three items in each line: (1) video path; (2) video length and (3) video label.
train : bool, default True.
Whether to load the training or validation set.
test_mode : bool, default False.
Whether to perform evaluation on the test set.
Usually a three-crop or ten-crop evaluation strategy is involved.
name_pattern : str, default None.
The naming pattern of the decoded video frames.
For example, img_00012.jpg.
video_ext : str, default 'mp4'.
If video_loader is set to True, please specify the video format accordingly.
is_color : bool, default True.
Whether the loaded image is color or grayscale.
modality : str, default 'rgb'.
Input modalities, we support only rgb video frames for now.
Will add support for rgb difference image and optical flow image later.
num_segments : int, default 1.
Number of segments to evenly divide the video into clips.
A useful technique to obtain global video-level information.
Limin Wang, et al., Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.
num_crop : int, default 1.
Number of crops for each image. default is 1.
Common choices are three crops and ten crops during evaluation.
new_length : int, default 1.
The length of input video clip. Default is a single image, but it can be multiple video frames.
For example, new_length=16 means we will extract a video clip of consecutive 16 frames.
new_step : int, default 1.
Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.
new_step=2 means we will extract a video clip of every other frame.
temporal_jitter : bool, default False.
Whether to temporally jitter if new_step > 1.
video_loader : bool, default False.
Whether to use video loader to load data.
use_decord : bool, default True.
Whether to use Decord video loader to load data. Otherwise use mmcv video loader.
transform : function, default None.
A function that takes data and label and transforms them.
data_aug : str, default 'v1'.
Different types of data augmentation. Supports v1, v2, v3 and v4.
lazy_init : bool, default False.
If set to True, build a dataset instance without loading any dataset.
"""
def __init__(self,
root,
setting,
train=True,
test_mode=False,
name_pattern='img_%05d.jpg',
video_ext='mp4',
is_color=True,
modality='rgb',
num_segments=1,
num_crop=1,
new_length=1,
new_step=1,
transform=None,
temporal_jitter=False,
video_loader=False,
use_decord=False,
lazy_init=False,
num_sample=1):
super(OldVideoMAE, self).__init__()
self.root = root
self.setting = setting
self.train = train
self.test_mode = test_mode
self.is_color = is_color
self.modality = modality
self.num_segments = num_segments
self.num_crop = num_crop
self.new_length = new_length
self.new_step = new_step
self.skip_length = self.new_length * self.new_step
self.temporal_jitter = temporal_jitter
self.name_pattern = name_pattern
self.video_loader = video_loader
self.video_ext = video_ext
self.use_decord = use_decord
self.transform = transform
self.lazy_init = lazy_init
self.num_sample = num_sample
self.decord = True
conf_path = '~/petreloss.conf'
client = Client(conf_path)
self.client = client
if not self.lazy_init:
self.clips = self._make_dataset(root, setting)
if len(self.clips) == 0:
raise (
RuntimeError("Found 0 video clips in subfolders of: " +
root + "\n"
"Check your data directory (opt.data-dir)."))
def __getitem__(self, index):
while True:
try:
directory, target = self.clips[index]
if self.video_loader:
if '.' in directory.split('/')[-1]:
video_name = directory
else:
video_name = '{}.{}'.format(directory, self.video_ext)
if video_name.startswith('s3'):
video_bytes = self.client.get(video_name)
decord_vr = VideoReader(io.BytesIO(video_bytes),
num_threads=1,
ctx=cpu(0))
else:
decord_vr = VideoReader(video_name,
num_threads=1,
ctx=cpu(0))
duration = len(decord_vr)
if duration > 0:
break
except Exception as e:
print("Failed to load video from {} with error {}".format(
video_name, e))
index = random.randint(0, len(self.clips) - 1)
if self.num_sample > 1:
process_data_list = []
mask_list = []
for _ in range(self.num_sample):
process_data, mask = self.get_one_sample(
decord_vr, duration, directory)
process_data_list.append(process_data)
mask_list.append(mask)
return process_data_list, mask_list
else:
return self.get_one_sample(decord_vr, duration, directory)
def get_one_sample(self, decord_vr, duration, directory):
decord_vr.seek(0)
segment_indices, skip_offsets = self._sample_train_indices(duration)
images = self._video_TSN_decord_batch_loader(directory, decord_vr,
duration, segment_indices,
skip_offsets)
# T*C,H,W -> T,C,H,W -> C,T,H,W
process_data, mask = self.transform((images, None))
process_data = process_data.view((self.new_length, 3) +
process_data.size()[-2:]).transpose(
0, 1)
return process_data, mask
def __len__(self):
return len(self.clips)
def _make_dataset(self, directory, setting):
if not os.path.exists(setting):
raise (RuntimeError(
"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. "
% (setting)))
clips = []
with open(setting) as split_f:
data = split_f.readlines()
for line in data:
line_info = line.split(' ')
# line format: video_path, video_duration, video_label
if len(line_info) < 2:
raise (RuntimeError(
'Video input format is not correct, missing one or more elements. %s'
% line))
clip_path = os.path.join(line_info[0])
target = int(line_info[-1])
item = (clip_path, target)
clips.append(item)
return clips
def _sample_train_indices(self, num_frames):
average_duration = (num_frames - self.skip_length +
1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)),
average_duration)
offsets = offsets + np.random.randint(average_duration,
size=self.num_segments)
elif num_frames > max(self.num_segments, self.skip_length):
offsets = np.sort(
np.random.randint(num_frames - self.skip_length + 1,
size=self.num_segments))
else:
offsets = np.zeros((self.num_segments, ))
if self.temporal_jitter:
skip_offsets = np.random.randint(self.new_step,
size=self.skip_length //
self.new_step)
else:
skip_offsets = np.zeros(self.skip_length // self.new_step,
dtype=int)
return offsets + 1, skip_offsets
def _video_TSN_decord_batch_loader(self, directory, video_reader, duration,
indices, skip_offsets):
sampled_list = []
frame_id_list = []
for seg_ind in indices:
offset = int(seg_ind)
for i, _ in enumerate(range(0, self.skip_length, self.new_step)):
if offset + skip_offsets[i] <= duration:
frame_id = offset + skip_offsets[i] - 1
else:
frame_id = offset - 1
frame_id_list.append(frame_id)
if offset + self.new_step < duration:
offset += self.new_step
try:
video_data = video_reader.get_batch(frame_id_list).asnumpy()
sampled_list = [
Image.fromarray(video_data[vid, :, :, :]).convert('RGB')
for vid, _ in enumerate(frame_id_list)
]
except:
raise RuntimeError(
'Error occurred in reading frames {} from video {} of duration {}.'
.format(frame_id_list, directory, duration))
return sampled_list
| InternVideo-main | Pretrain/VideoMAE/mae.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import os
import torch
from timm.data import create_transform
from timm.data.constants import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
IMAGENET_INCEPTION_MEAN,
IMAGENET_INCEPTION_STD,
)
from torchvision import datasets, transforms
from anet import ANetClsDataset
from kinetics import RawFrameDataset, VideoClsDataset
from mae import HybridVideoMAE, VideoMAE
from masking_generator import (
CellRunningMaskingGenerator,
RandomMaskingGenerator,
TemporalCenteringProgressiveMaskingGenerator,
TemporalConsistencyMaskingGenerator,
TemporalProgressiveMaskingGenerator,
)
from ssv2 import SSRawFrameClsDataset
from transforms import *
class DataAugmentationForMAE(object):
def __init__(self, args):
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
self.input_mean = [0.485, 0.456, 0.406]
self.input_std = [0.229, 0.224, 0.225]
div = True
roll = False
normalize = GroupNormalize(self.input_mean, self.input_std)
self.train_augmentation = GroupMultiScaleCrop(args.input_size,
[1, .875, .75, .66])
self.transform = transforms.Compose([
# GroupScale((240,320)),
self.train_augmentation,
Stack(roll=roll),
ToTorchFormatTensor(div=div),
normalize,
])
if args.mask_type == 'random':
self.masked_position_generator = RandomMaskingGenerator(
args.window_size, args.mask_ratio)
elif args.mask_type == 't_consist':
self.masked_position_generator = TemporalConsistencyMaskingGenerator(
args.window_size, args.mask_ratio)
elif args.mask_type == 't_progressive':
self.masked_position_generator = TemporalProgressiveMaskingGenerator(
args.window_size, args.mask_ratio)
elif args.mask_type == 't_center_prog':
self.masked_position_generator = TemporalCenteringProgressiveMaskingGenerator(
args.window_size, args.mask_ratio)
def __call__(self, images):
process_data, _ = self.transform(images)
return process_data, self.masked_position_generator()
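# The pretraining transform returns a flattened (T*C, H, W) clip tensor
# (reshaped to (C, T, H, W) inside the dataset classes) together with a
# per-tubelet boolean mask produced by the configured masking generator.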
def __repr__(self):
repr = "(DataAugmentationForBEiT,\n"
repr += " transform = %s,\n" % str(self.transform)
repr += " Masked position generator = %s,\n" % str(
self.masked_position_generator)
repr += ")"
return repr
def build_pretraining_dataset(args):
transform = DataAugmentationForMAE(args)
dataset = HybridVideoMAE(
root=args.data_root,
# dataset = VideoMAE(root=args.data_root,
setting=args.data_path,
train=True,
test_mode=False,
video_ext='mp4',
is_color=True,
modality='rgb',
num_segments=1,
num_crop=1,
new_length=args.num_frames,
new_step=args.sampling_rate,
transform=transform,
temporal_jitter=False,
video_loader=True,
use_decord=True,
lazy_init=False,
num_sample=args.num_sample)
print("Data Aug = %s" % str(transform))
return dataset
def build_dataset(is_train, test_mode, args):
if args.data_set == 'Kinetics-400':
mode = None
anno_path = None
if is_train == True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode == True:
mode = 'test'
anno_path = os.path.join(args.data_path, 'val.csv')
else:
mode = 'validation'
anno_path = os.path.join(args.data_path, 'val.csv')
if not args.sparse_sample:
dataset = VideoClsDataset(
anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=args.num_frames,
frame_sample_rate=args.sampling_rate,
num_segment=1,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
sparse_sample=False,
v4d_segment=args.num_segment if is_train else 1,
args=args)
else:
dataset = VideoClsDataset(
anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=1,
frame_sample_rate=1,
num_segment=args.num_frames,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
sparse_sample=True,
v4d_segment=args.num_segment if is_train else 1,
args=args)
nb_classes = 400
elif args.data_set == 'Kinetics-600':
mode = None
anno_path = None
if is_train == True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode == True:
mode = 'test'
anno_path = os.path.join(args.data_path, 'val.csv')
else:
mode = 'validation'
anno_path = os.path.join(args.data_path, 'val.csv')
dataset = VideoClsDataset(anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=args.num_frames,
frame_sample_rate=args.sampling_rate,
num_segment=1,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
args=args)
nb_classes = 600
elif args.data_set == 'Kinetics-700':
mode = None
anno_path = None
if is_train == True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode == True:
mode = 'test'
anno_path = os.path.join(args.data_path, 'val.csv')
else:
mode = 'validation'
anno_path = os.path.join(args.data_path, 'val.csv')
dataset = VideoClsDataset(anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=args.num_frames,
frame_sample_rate=args.sampling_rate,
num_segment=1,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
args=args)
nb_classes = 700
elif args.data_set == 'MixKinetics':
mode = None
anno_path = None
if is_train == True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode == True:
mode = 'test'
anno_path = os.path.join(args.data_path, 'val.csv')
else:
mode = 'validation'
anno_path = os.path.join(args.data_path, 'val.csv')
dataset = VideoClsDataset(anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=args.num_frames,
frame_sample_rate=args.sampling_rate,
num_segment=1,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
args=args)
nb_classes = 710
elif args.data_set == 'SSV2':
mode = None
anno_path = None
if is_train == True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode == True:
mode = 'test'
anno_path = os.path.join(args.data_path, 'val.csv')
else:
mode = 'validation'
anno_path = os.path.join(args.data_path, 'val.csv')
dataset = SSRawFrameClsDataset(anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=1,
num_segment=args.num_frames,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
filename_tmpl='img_{:05}.jpg',
args=args)
# dataset = RawFrameDataset(anno_path=anno_path,
# data_path='/',
# mode=mode,
# clip_len=args.num_frames,
# frame_sample_rate=args.sampling_rate,
# num_segment=1,
# test_num_segment=args.test_num_segment,
# test_num_crop=args.test_num_crop,
# num_crop=1 if not test_mode else 3,
# keep_aspect_ratio=True,
# crop_size=args.input_size,
# short_side_size=args.short_side_size,
# new_height=256,
# new_width=320,
# filename_tmpl='img_{:05}.jpg',
# args=args)
nb_classes = 174
elif args.data_set == 'UCF101':
mode = None
anno_path = None
if is_train == True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode == True:
mode = 'test'
anno_path = os.path.join(args.data_path, 'val.csv')
else:
mode = 'validation'
anno_path = os.path.join(args.data_path, 'val.csv')
dataset = VideoClsDataset(anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=args.num_frames,
frame_sample_rate=args.sampling_rate,
num_segment=1,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
args=args)
nb_classes = 101
elif args.data_set == 'HMDB51':
mode = None
anno_path = None
if is_train == True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode == True:
mode = 'test'
anno_path = os.path.join(args.data_path, 'val.csv')
else:
mode = 'validation'
anno_path = os.path.join(args.data_path, 'val.csv')
dataset = VideoClsDataset(anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=args.num_frames,
frame_sample_rate=args.sampling_rate,
num_segment=1,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
args=args)
nb_classes = 51
elif args.data_set == 'Diving48':
mode = None
anno_path = None
if is_train == True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode == True:
mode = 'test'
anno_path = os.path.join(args.data_path, 'val.csv')
else:
mode = 'validation'
anno_path = os.path.join(args.data_path, 'val.csv')
dataset = VideoClsDataset(anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=args.num_frames,
frame_sample_rate=args.sampling_rate,
num_segment=1,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
args=args)
nb_classes = 48
elif args.data_set == 'ANet':
mode = None
anno_path = None
if is_train == True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode == True:
mode = 'test'
anno_path = os.path.join(args.data_path, 'test.csv')
else:
mode = 'validation'
anno_path = os.path.join(args.data_path, 'val.csv')
dataset = ANetClsDataset(anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=args.num_frames,
frame_sample_rate=args.sampling_rate,
num_segment=1,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
args=args)
nb_classes = 200
else:
raise NotImplementedError()
assert nb_classes == args.nb_classes
print("Number of the class = %d" % args.nb_classes)
return dataset, nb_classes
def build_transform(is_train, test_mode, args):
resize_im = args.input_size > 32
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
mean=mean,
std=std,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(args.input_size,
padding=4)
return transform
t = []
if resize_im:
if args.crop_pct is None:
if args.input_size < 384:
args.crop_pct = 224 / 256
else:
args.crop_pct = 1.0
size = int(args.input_size / args.crop_pct)
t.append(
transforms.Resize(
size,
interpolation=3), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
return transforms.Compose(t)
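# Minimal sanity-check sketch (added; not part of the original file). It only
# exercises the eval branch of build_transform(); the Namespace holds just the
# attributes that branch reads, with assumed example values.
if __name__ == '__main__':
    from argparse import Namespace
    _example_args = Namespace(
        input_size=224,
        imagenet_default_mean_and_std=True,
        crop_pct=None,
    )
    # Expected pipeline: Resize -> CenterCrop -> ToTensor -> Normalize.
    print(build_transform(is_train=False, test_mode=False, args=_example_args))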
| InternVideo-main | Pretrain/VideoMAE/datasets.py |
import os
import warnings
import cv2
import numpy as np
import torch
from decord import VideoReader, cpu
from petrel_client.client import Client
from torch.utils.data import Dataset
from torchvision import transforms
import video_transforms as video_transforms
import volume_transforms as volume_transforms
from random_erasing import RandomErasing
class SSRawFrameClsDataset(Dataset):
"""Load your own raw frame classification dataset."""
def __init__(self,
anno_path,
data_path,
mode='train',
clip_len=8,
crop_size=224,
short_side_size=256,
new_height=256,
new_width=340,
keep_aspect_ratio=True,
num_segment=1,
num_crop=1,
test_num_segment=10,
test_num_crop=3,
filename_tmpl='img_{:05}.jpg',
args=None):
self.anno_path = anno_path
self.data_path = data_path
self.mode = mode
self.clip_len = clip_len
self.crop_size = crop_size
self.short_side_size = short_side_size
self.new_height = new_height
self.new_width = new_width
self.keep_aspect_ratio = keep_aspect_ratio
self.num_segment = num_segment
self.test_num_segment = test_num_segment
self.num_crop = num_crop
self.test_num_crop = test_num_crop
self.filename_tmpl = filename_tmpl
self.args = args
self.aug = False
self.rand_erase = False
if self.mode in ['train']:
self.aug = True
if self.args.reprob > 0:
self.rand_erase = True
if VideoReader is None:
raise ImportError(
"Unable to import `decord` which is required to read videos.")
import pandas as pd
cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')
self.dataset_samples = list(cleaned.values[:, 0])
self.total_frames = list(cleaned.values[:, 1])
self.label_array = list(cleaned.values[:, -1])
self.client = Client()
if (mode == 'train'):
pass
elif (mode == 'validation'):
self.data_transform = video_transforms.Compose([
video_transforms.Resize(self.short_side_size,
interpolation='bilinear'),
video_transforms.CenterCrop(size=(self.crop_size,
self.crop_size)),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
elif mode == 'test':
self.data_resize = video_transforms.Compose([
video_transforms.Resize(size=(short_side_size),
interpolation='bilinear')
])
self.data_transform = video_transforms.Compose([
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
self.test_seg = []
self.test_dataset = []
self.test_total_frames = []
self.test_label_array = []
for ck in range(self.test_num_segment):
for cp in range(self.test_num_crop):
for idx in range(len(self.label_array)):
self.test_seg.append((ck, cp))
self.test_dataset.append(self.dataset_samples[idx])
self.test_total_frames.append(self.total_frames[idx])
self.test_label_array.append(self.label_array[idx])
def __getitem__(self, index):
if self.mode == 'train':
args = self.args
scale_t = 1
sample = self.dataset_samples[index]
total_frame = self.total_frames[index]
buffer = self.load_frame(sample,
total_frame,
sample_rate_scale=scale_t) # T H W C
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn(
"video {} not correctly loaded during training".format(
sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
total_frame = self.total_frames[index]
buffer = self.load_frame(sample,
total_frame,
sample_rate_scale=scale_t)
if args.num_sample > 1:
frame_list = []
label_list = []
index_list = []
for _ in range(args.num_sample):
new_frames = self._aug_frame(buffer, args)
label = self.label_array[index]
frame_list.append(new_frames)
label_list.append(label)
index_list.append(index)
return frame_list, label_list, index_list, {}
else:
buffer = self._aug_frame(buffer, args)
return buffer, self.label_array[index], index, {}
elif self.mode == 'validation':
sample = self.dataset_samples[index]
total_frame = self.total_frames[index]
buffer = self.load_frame(sample, total_frame)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn(
"video {} not correctly loaded during validation".
format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.load_frame(sample, total_frame)
buffer = self.data_transform(buffer)
return buffer, self.label_array[index], sample.split(
"/")[-1].split(".")[0]
elif self.mode == 'test':
sample = self.test_dataset[index]
total_frame = self.test_total_frames[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.load_frame(sample, total_frame)
while len(buffer) == 0:
warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\
str(self.test_dataset[index]), chunk_nb, split_nb))
index = np.random.randint(self.__len__())
sample = self.test_dataset[index]
total_frame = self.test_total_frames[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.load_frame(sample, total_frame)
buffer = self.data_resize(buffer)
if isinstance(buffer, list):
buffer = np.stack(buffer, 0)
spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
/ (self.test_num_crop - 1)
temporal_start = chunk_nb
spatial_start = int(split_nb * spatial_step)
if buffer.shape[1] >= buffer.shape[2]:
buffer = buffer[temporal_start::self.test_num_segment, \
spatial_start:spatial_start + self.short_side_size, :, :]
else:
buffer = buffer[temporal_start::self.test_num_segment, \
:, spatial_start:spatial_start + self.short_side_size, :]
buffer = self.data_transform(buffer)
return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \
chunk_nb, split_nb
else:
raise NameError('mode {} unknown'.format(self.mode))
def _aug_frame(
self,
buffer,
args,
):
aug_transform = video_transforms.create_random_augment(
input_size=(self.crop_size, self.crop_size),
auto_augment=args.aa,
interpolation=args.train_interpolation,
)
buffer = [transforms.ToPILImage()(frame) for frame in buffer]
buffer = aug_transform(buffer)
buffer = [transforms.ToTensor()(img) for img in buffer]
buffer = torch.stack(buffer) # T C H W
buffer = buffer.permute(0, 2, 3, 1) # T H W C
# T H W C
buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
# T H W C -> C T H W.
buffer = buffer.permute(3, 0, 1, 2)
# Perform data augmentation.
scl, asp = (
[0.08, 1.0],
[0.75, 1.3333],
)
buffer = spatial_sampling(
buffer,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=self.crop_size,
random_horizontal_flip=False if args.data_set == 'SSV2' else True,
inverse_uniform_sampling=False,
aspect_ratio=asp,
scale=scl,
motion_shift=False)
if self.rand_erase:
erase_transform = RandomErasing(
args.reprob,
mode=args.remode,
max_count=args.recount,
num_splits=args.recount,
device="cpu",
)
buffer = buffer.permute(1, 0, 2, 3)
buffer = erase_transform(buffer)
buffer = buffer.permute(1, 0, 2, 3)
return buffer
def load_frame(self, sample, num_frames, sample_rate_scale=1):
"""Load video content using Decord"""
fname = sample
if self.mode == 'test':
tick = num_frames / float(self.num_segment)
all_index = []
for t_seg in range(self.test_num_segment):
tmp_index = [
int(t_seg * tick / self.test_num_segment + tick * x)
for x in range(self.num_segment)
]
all_index.extend(tmp_index)
all_index = list(np.sort(np.array(all_index)))
imgs = []
for idx in all_index:
frame_fname = os.path.join(fname,
self.filename_tmpl.format(idx + 1))
img_bytes = self.client.get(frame_fname)
img_np = np.frombuffer(img_bytes, np.uint8)
img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
imgs.append(img)
buffer = np.array(imgs)
return buffer
# handle temporal segments
average_duration = num_frames // self.num_segment
all_index = []
if average_duration > 0:
if self.mode == 'validation':
all_index = list(
np.multiply(list(range(self.num_segment)),
average_duration) +
np.ones(self.num_segment, dtype=int) *
(average_duration // 2))
else:
all_index = list(
np.multiply(list(range(self.num_segment)),
average_duration) +
np.random.randint(average_duration, size=self.num_segment))
elif num_frames > self.num_segment:
if self.mode == 'validation':
all_index = list(range(self.num_segment))
else:
all_index = list(
np.sort(
np.random.randint(num_frames, size=self.num_segment)))
else:
all_index = [0] * (self.num_segment - num_frames) + list(
range(num_frames))
all_index = list(np.array(all_index))
imgs = []
for idx in all_index:
frame_fname = os.path.join(fname,
self.filename_tmpl.format(idx + 1))
img_bytes = self.client.get(frame_fname)
img_np = np.frombuffer(img_bytes, np.uint8)
img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
imgs.append(img)
buffer = np.array(imgs)
return buffer
def __len__(self):
if self.mode != 'test':
return len(self.dataset_samples)
else:
return len(self.test_dataset)
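# Added note (not in the original file): the annotation CSV consumed above is
# space-delimited with one row per video, parsed as
#     <frame_directory> <total_num_frames> <label>
# where <frame_directory> contains frames named by `filename_tmpl`
# (default 'img_{:05}.jpg', 1-indexed), as read in load_frame().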
def spatial_sampling(
frames,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=224,
random_horizontal_flip=True,
inverse_uniform_sampling=False,
aspect_ratio=None,
scale=None,
motion_shift=False,
):
"""
Perform spatial sampling on the given video frames. If spatial_idx is
-1, perform random scale, random crop, and random flip on the given
frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
with the given spatial_idx.
Args:
frames (tensor): frames of images sampled from the video. The
dimension is `num frames` x `height` x `width` x `channel`.
spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
or 2, perform left, center, right crop if width is larger than
height, and perform top, center, bottom crop if height is larger
than width.
min_scale (int): the minimal size of scaling.
max_scale (int): the maximal size of scaling.
crop_size (int): the size of height and width used to crop the
frames.
inverse_uniform_sampling (bool): if True, sample uniformly in
[1 / max_scale, 1 / min_scale] and take a reciprocal to get the
scale. If False, take a uniform sample from [min_scale,
max_scale].
aspect_ratio (list): Aspect ratio range for resizing.
scale (list): Scale range for resizing.
motion_shift (bool): Whether to apply motion shift for resizing.
Returns:
frames (tensor): spatially sampled frames.
"""
assert spatial_idx in [-1, 0, 1, 2]
if spatial_idx == -1:
if aspect_ratio is None and scale is None:
frames, _ = video_transforms.random_short_side_scale_jitter(
images=frames,
min_size=min_scale,
max_size=max_scale,
inverse_uniform_sampling=inverse_uniform_sampling,
)
frames, _ = video_transforms.random_crop(frames, crop_size)
else:
transform_func = (video_transforms.random_resized_crop_with_shift
if motion_shift else
video_transforms.random_resized_crop)
frames = transform_func(
images=frames,
target_height=crop_size,
target_width=crop_size,
scale=scale,
ratio=aspect_ratio,
)
if random_horizontal_flip:
frames, _ = video_transforms.horizontal_flip(0.5, frames)
else:
# The testing is deterministic and no jitter should be performed.
# min_scale, max_scale, and crop_size are expected to be the same.
assert len({min_scale, max_scale, crop_size}) == 1
frames, _ = video_transforms.random_short_side_scale_jitter(
frames, min_scale, max_scale)
frames, _ = video_transforms.uniform_crop(frames, crop_size,
spatial_idx)
return frames
def tensor_normalize(tensor, mean, std):
"""
Normalize a given tensor by subtracting the mean and dividing the std.
Args:
tensor (tensor): tensor to normalize.
mean (tensor or list): mean value to subtract.
std (tensor or list): std to divide.
"""
if tensor.dtype == torch.uint8:
tensor = tensor.float()
tensor = tensor / 255.0
if type(mean) == list:
mean = torch.tensor(mean)
if type(std) == list:
std = torch.tensor(std)
tensor = tensor - mean
tensor = tensor / std
return tensor
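# Illustrative sketch (added; not part of the original file): how the training
# path in _aug_frame chains tensor_normalize and spatial_sampling. The random
# uint8 tensor stands in for a decoded 16-frame clip.
if __name__ == '__main__':
    clip = torch.randint(0, 256, (16, 256, 320, 3), dtype=torch.uint8)  # T H W C
    clip = tensor_normalize(clip, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    clip = clip.permute(3, 0, 1, 2)  # -> C T H W, the layout spatial_sampling expects
    clip = spatial_sampling(
        clip,
        spatial_idx=-1,
        min_scale=256,
        max_scale=320,
        crop_size=224,
        random_horizontal_flip=False,  # SSV2 disables horizontal flips
        aspect_ratio=[0.75, 1.3333],
        scale=[0.08, 1.0],
        motion_shift=False)
    print(clip.shape)  # expected: torch.Size([3, 16, 224, 224])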
| InternVideo-main | Pretrain/VideoMAE/ssv2.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py
published under an Apache 2.0 license.
COMMENT FROM ORIGINAL:
AutoAugment, RandAugment, and AugMix for PyTorch
This code implements the searched ImageNet policies with various tweaks and
improvements and does not include any of the search code. AA and RA
Implementation adapted from:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
AugMix adapted from:
https://github.com/google-research/augmix
Papers:
AutoAugment: Learning Augmentation Policies from Data
https://arxiv.org/abs/1805.09501
Learning Data Augmentation Strategies for Object Detection
https://arxiv.org/abs/1906.11172
RandAugment: Practical automated data augmentation...
https://arxiv.org/abs/1909.13719
AugMix: A Simple Data Processing Method to Improve Robustness and
Uncertainty https://arxiv.org/abs/1912.02781
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import random
import re
import numpy as np
import PIL
from PIL import Image, ImageEnhance, ImageOps
_PIL_VER = tuple([int(x) for x in PIL.__version__.split(".")[:2]])
_FILL = (128, 128, 128)
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.0
_HPARAMS_DEFAULT = {
"translate_const": 250,
"img_mean": _FILL,
}
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
def _interpolation(kwargs):
interpolation = kwargs.pop("resample", Image.BILINEAR)
if isinstance(interpolation, (list, tuple)):
return random.choice(interpolation)
else:
return interpolation
def _check_args_tf(kwargs):
if "fillcolor" in kwargs and _PIL_VER < (5, 0):
kwargs.pop("fillcolor")
kwargs["resample"] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs
)
def shear_y(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs
)
def translate_x_rel(img, pct, **kwargs):
pixels = pct * img.size[0]
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs
)
def translate_y_rel(img, pct, **kwargs):
pixels = pct * img.size[1]
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs
)
def translate_x_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs
)
def translate_y_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs
)
def rotate(img, degrees, **kwargs):
_check_args_tf(kwargs)
if _PIL_VER >= (5, 2):
return img.rotate(degrees, **kwargs)
elif _PIL_VER >= (5, 0):
w, h = img.size
post_trans = (0, 0)
rotn_center = (w / 2.0, h / 2.0)
angle = -math.radians(degrees)
matrix = [
round(math.cos(angle), 15),
round(math.sin(angle), 15),
0.0,
round(-math.sin(angle), 15),
round(math.cos(angle), 15),
0.0,
]
def transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return a * x + b * y + c, d * x + e * y + f
matrix[2], matrix[5] = transform(
-rotn_center[0] - post_trans[0],
-rotn_center[1] - post_trans[1],
matrix,
)
matrix[2] += rotn_center[0]
matrix[5] += rotn_center[1]
return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
else:
return img.rotate(degrees, resample=kwargs["resample"])
def auto_contrast(img, **__):
return ImageOps.autocontrast(img)
def invert(img, **__):
return ImageOps.invert(img)
def equalize(img, **__):
return ImageOps.equalize(img)
def solarize(img, thresh, **__):
return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
lut = []
for i in range(256):
if i < thresh:
lut.append(min(255, i + add))
else:
lut.append(i)
if img.mode in ("L", "RGB"):
if img.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return img.point(lut)
else:
return img
def posterize(img, bits_to_keep, **__):
if bits_to_keep >= 8:
return img
return ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **__):
return ImageEnhance.Contrast(img).enhance(factor)
def color(img, factor, **__):
return ImageEnhance.Color(img).enhance(factor)
def brightness(img, factor, **__):
return ImageEnhance.Brightness(img).enhance(factor)
def sharpness(img, factor, **__):
return ImageEnhance.Sharpness(img).enhance(factor)
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level, _hparams):
# range [-30, 30]
level = (level / _MAX_LEVEL) * 30.0
level = _randomly_negate(level)
return (level,)
def _enhance_level_to_arg(level, _hparams):
# range [0.1, 1.9]
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _enhance_increasing_level_to_arg(level, _hparams):
# the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend
# range [0.1, 1.9]
level = (level / _MAX_LEVEL) * 0.9
level = 1.0 + _randomly_negate(level)
return (level,)
def _shear_level_to_arg(level, _hparams):
# range [-0.3, 0.3]
level = (level / _MAX_LEVEL) * 0.3
level = _randomly_negate(level)
return (level,)
def _translate_abs_level_to_arg(level, hparams):
translate_const = hparams["translate_const"]
level = (level / _MAX_LEVEL) * float(translate_const)
level = _randomly_negate(level)
return (level,)
def _translate_rel_level_to_arg(level, hparams):
# default range [-0.45, 0.45]
translate_pct = hparams.get("translate_pct", 0.45)
level = (level / _MAX_LEVEL) * translate_pct
level = _randomly_negate(level)
return (level,)
def _posterize_level_to_arg(level, _hparams):
# As per Tensorflow TPU EfficientNet impl
# range [0, 4], 'keep 0 up to 4 MSB of original image'
# intensity/severity of augmentation decreases with level
return (int((level / _MAX_LEVEL) * 4),)
def _posterize_increasing_level_to_arg(level, hparams):
# As per Tensorflow models research and UDA impl
# range [4, 0], 'keep 4 down to 0 MSB of original image',
# intensity/severity of augmentation increases with level
return (4 - _posterize_level_to_arg(level, hparams)[0],)
def _posterize_original_level_to_arg(level, _hparams):
# As per original AutoAugment paper description
# range [4, 8], 'keep 4 up to 8 MSB of image'
# intensity/severity of augmentation decreases with level
return (int((level / _MAX_LEVEL) * 4) + 4,)
def _solarize_level_to_arg(level, _hparams):
# range [0, 256]
# intensity/severity of augmentation decreases with level
return (int((level / _MAX_LEVEL) * 256),)
def _solarize_increasing_level_to_arg(level, _hparams):
# range [0, 256]
# intensity/severity of augmentation increases with level
return (256 - _solarize_level_to_arg(level, _hparams)[0],)
def _solarize_add_level_to_arg(level, _hparams):
# range [0, 110]
return (int((level / _MAX_LEVEL) * 110),)
LEVEL_TO_ARG = {
"AutoContrast": None,
"Equalize": None,
"Invert": None,
"Rotate": _rotate_level_to_arg,
# There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers
"Posterize": _posterize_level_to_arg,
"PosterizeIncreasing": _posterize_increasing_level_to_arg,
"PosterizeOriginal": _posterize_original_level_to_arg,
"Solarize": _solarize_level_to_arg,
"SolarizeIncreasing": _solarize_increasing_level_to_arg,
"SolarizeAdd": _solarize_add_level_to_arg,
"Color": _enhance_level_to_arg,
"ColorIncreasing": _enhance_increasing_level_to_arg,
"Contrast": _enhance_level_to_arg,
"ContrastIncreasing": _enhance_increasing_level_to_arg,
"Brightness": _enhance_level_to_arg,
"BrightnessIncreasing": _enhance_increasing_level_to_arg,
"Sharpness": _enhance_level_to_arg,
"SharpnessIncreasing": _enhance_increasing_level_to_arg,
"ShearX": _shear_level_to_arg,
"ShearY": _shear_level_to_arg,
"TranslateX": _translate_abs_level_to_arg,
"TranslateY": _translate_abs_level_to_arg,
"TranslateXRel": _translate_rel_level_to_arg,
"TranslateYRel": _translate_rel_level_to_arg,
}
NAME_TO_OP = {
"AutoContrast": auto_contrast,
"Equalize": equalize,
"Invert": invert,
"Rotate": rotate,
"Posterize": posterize,
"PosterizeIncreasing": posterize,
"PosterizeOriginal": posterize,
"Solarize": solarize,
"SolarizeIncreasing": solarize,
"SolarizeAdd": solarize_add,
"Color": color,
"ColorIncreasing": color,
"Contrast": contrast,
"ContrastIncreasing": contrast,
"Brightness": brightness,
"BrightnessIncreasing": brightness,
"Sharpness": sharpness,
"SharpnessIncreasing": sharpness,
"ShearX": shear_x,
"ShearY": shear_y,
"TranslateX": translate_x_abs,
"TranslateY": translate_y_abs,
"TranslateXRel": translate_x_rel,
"TranslateYRel": translate_y_rel,
}
class AugmentOp:
"""
Apply for video.
"""
def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
hparams = hparams or _HPARAMS_DEFAULT
self.aug_fn = NAME_TO_OP[name]
self.level_fn = LEVEL_TO_ARG[name]
self.prob = prob
self.magnitude = magnitude
self.hparams = hparams.copy()
self.kwargs = {
"fillcolor": hparams["img_mean"]
if "img_mean" in hparams
else _FILL,
"resample": hparams["interpolation"]
if "interpolation" in hparams
else _RANDOM_INTERPOLATION,
}
# If magnitude_std is > 0, we introduce some randomness
# in the usually fixed policy and sample magnitude from a normal distribution
# with mean `magnitude` and std-dev of `magnitude_std`.
# NOTE This is my own hack, being tested, not in papers or reference impls.
self.magnitude_std = self.hparams.get("magnitude_std", 0)
def __call__(self, img_list):
if self.prob < 1.0 and random.random() > self.prob:
return img_list
magnitude = self.magnitude
if self.magnitude_std and self.magnitude_std > 0:
magnitude = random.gauss(magnitude, self.magnitude_std)
magnitude = min(_MAX_LEVEL, max(0, magnitude)) # clip to valid range
level_args = (
self.level_fn(magnitude, self.hparams)
if self.level_fn is not None
else ()
)
if isinstance(img_list, list):
return [
self.aug_fn(img, *level_args, **self.kwargs) for img in img_list
]
else:
return self.aug_fn(img_list, *level_args, **self.kwargs)
_RAND_TRANSFORMS = [
"AutoContrast",
"Equalize",
"Invert",
"Rotate",
"Posterize",
"Solarize",
"SolarizeAdd",
"Color",
"Contrast",
"Brightness",
"Sharpness",
"ShearX",
"ShearY",
"TranslateXRel",
"TranslateYRel",
]
_RAND_INCREASING_TRANSFORMS = [
"AutoContrast",
"Equalize",
"Invert",
"Rotate",
"PosterizeIncreasing",
"SolarizeIncreasing",
"SolarizeAdd",
"ColorIncreasing",
"ContrastIncreasing",
"BrightnessIncreasing",
"SharpnessIncreasing",
"ShearX",
"ShearY",
"TranslateXRel",
"TranslateYRel",
]
# These experimental weights are based loosely on the relative improvements mentioned in paper.
# They may not result in increased performance, but could likely be tuned to do so.
_RAND_CHOICE_WEIGHTS_0 = {
"Rotate": 0.3,
"ShearX": 0.2,
"ShearY": 0.2,
"TranslateXRel": 0.1,
"TranslateYRel": 0.1,
"Color": 0.025,
"Sharpness": 0.025,
"AutoContrast": 0.025,
"Solarize": 0.005,
"SolarizeAdd": 0.005,
"Contrast": 0.005,
"Brightness": 0.005,
"Equalize": 0.005,
"Posterize": 0,
"Invert": 0,
}
def _select_rand_weights(weight_idx=0, transforms=None):
transforms = transforms or _RAND_TRANSFORMS
assert weight_idx == 0 # only one set of weights currently
rand_weights = _RAND_CHOICE_WEIGHTS_0
probs = [rand_weights[k] for k in transforms]
probs /= np.sum(probs)
return probs
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
hparams = hparams or _HPARAMS_DEFAULT
transforms = transforms or _RAND_TRANSFORMS
return [
AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams)
for name in transforms
]
class RandAugment:
def __init__(self, ops, num_layers=2, choice_weights=None):
self.ops = ops
self.num_layers = num_layers
self.choice_weights = choice_weights
def __call__(self, img):
# no replacement when using weighted choice
ops = np.random.choice(
self.ops,
self.num_layers,
replace=self.choice_weights is None,
p=self.choice_weights,
)
for op in ops:
img = op(img)
return img
def rand_augment_transform(config_str, hparams):
"""
RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719
Create a RandAugment transform
:param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining
sections, which are not order specific, determine
'm' - integer magnitude of rand augment
'n' - integer num layers (number of transform ops selected per image)
'w' - integer probability weight index (index of a set of weights to influence choice of op)
'mstd' - float std deviation of magnitude noise applied
'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
:param hparams: Other hparams (kwargs) for the RandAugmentation scheme
:return: A PyTorch compatible Transform
"""
magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10)
num_layers = 2 # default to 2 ops per image
weight_idx = None # default to no probability weights for op choice
transforms = _RAND_TRANSFORMS
config = config_str.split("-")
assert config[0] == "rand"
config = config[1:]
for c in config:
cs = re.split(r"(\d.*)", c)
if len(cs) < 2:
continue
key, val = cs[:2]
if key == "mstd":
# noise param injected via hparams for now
hparams.setdefault("magnitude_std", float(val))
elif key == "inc":
if bool(val):
transforms = _RAND_INCREASING_TRANSFORMS
elif key == "m":
magnitude = int(val)
elif key == "n":
num_layers = int(val)
elif key == "w":
weight_idx = int(val)
else:
raise NotImplementedError('unknown RandAugment config section: {}'.format(key))
ra_ops = rand_augment_ops(
magnitude=magnitude, hparams=hparams, transforms=transforms
)
choice_weights = (
None if weight_idx is None else _select_rand_weights(weight_idx)
)
return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
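# Usage sketch (added; not part of the original file): build a clip-level
# RandAugment from a config string and apply it to a list of PIL frames,
# mirroring how video_transforms.create_random_augment uses this module.
# The hparams values below are example assumptions.
if __name__ == '__main__':
    _example_hparams = {'translate_const': 250, 'img_mean': _FILL}
    _aug = rand_augment_transform('rand-m7-n4-mstd0.5-inc1', _example_hparams)
    _frames = [Image.new('RGB', (224, 224)) for _ in range(8)]
    _augmented = _aug(_frames)  # list of 8 augmented PIL images
    print(len(_augmented), _augmented[0].size)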
| InternVideo-main | Pretrain/VideoMAE/rand_augment.py |
import numpy as np
import torch
from PIL import Image
def convert_img(img):
"""Converts (H, W, C) numpy.ndarray to (C, W, H) format
"""
if len(img.shape) == 3:
img = img.transpose(2, 0, 1)
if len(img.shape) == 2:
img = np.expand_dims(img, 0)
return img
class ClipToTensor(object):
"""Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0]
"""
def __init__(self, channel_nb=3, div_255=True, numpy=False):
self.channel_nb = channel_nb
self.div_255 = div_255
self.numpy = numpy
def __call__(self, clip):
"""
Args: clip (list of numpy.ndarray): clip (list of images)
to be converted to tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(
ch)
elif isinstance(clip[0], Image.Image):
w, h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, Image.Image):
img = np.array(img, copy=False)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
img = convert_img(img)
np_clip[:, img_idx, :, :] = img
if self.numpy:
if self.div_255:
np_clip = np_clip / 255.0
return np_clip
else:
tensor_clip = torch.from_numpy(np_clip)
if not isinstance(tensor_clip, torch.FloatTensor):
tensor_clip = tensor_clip.float()
if self.div_255:
tensor_clip = torch.div(tensor_clip, 255)
return tensor_clip
# Note this norms data to -1/1
class ClipToTensor_K(object):
"""Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
to a torch.FloatTensor of shape (C x m x H x W) in the range [-1, 1]
"""
def __init__(self, channel_nb=3, div_255=True, numpy=False):
self.channel_nb = channel_nb
self.div_255 = div_255
self.numpy = numpy
def __call__(self, clip):
"""
Args: clip (list of numpy.ndarray): clip (list of images)
to be converted to tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(
ch)
elif isinstance(clip[0], Image.Image):
w, h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, Image.Image):
img = np.array(img, copy=False)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
img = convert_img(img)
np_clip[:, img_idx, :, :] = img
if self.numpy:
if self.div_255:
np_clip = (np_clip - 127.5) / 127.5
return np_clip
else:
tensor_clip = torch.from_numpy(np_clip)
if not isinstance(tensor_clip, torch.FloatTensor):
tensor_clip = tensor_clip.float()
if self.div_255:
tensor_clip = torch.div(torch.sub(tensor_clip, 127.5), 127.5)
return tensor_clip
class ToTensor(object):
"""Converts numpy array to tensor
"""
def __call__(self, array):
tensor = torch.from_numpy(array)
return tensor
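# Usage sketch (added; not part of the original file): ClipToTensor stacks a
# list of uint8 H x W x C frames into a (C, T, H, W) float tensor in [0, 1];
# ClipToTensor_K rescales to [-1, 1] instead.
if __name__ == '__main__':
    clip = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
            for _ in range(8)]
    t01 = ClipToTensor()(clip)
    tk1 = ClipToTensor_K()(clip)
    print(t01.shape, float(t01.min()), float(t01.max()))  # (3, 8, 224, 224), ~[0, 1]
    print(tk1.shape, float(tk1.min()), float(tk1.max()))  # (3, 8, 224, 224), ~[-1, 1]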
| InternVideo-main | Pretrain/VideoMAE/volume_transforms.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import argparse
import datetime
import json
import os
import time
from collections import OrderedDict
from functools import partial
from pathlib import Path
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from scipy import interpolate
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.models import create_model
from timm.utils import ModelEma
import modeling_finetune
import utils
from datasets import build_dataset
from engine_for_finetuning import (
final_test,
merge,
train_one_epoch,
validation_one_epoch,
)
from optim_factory import (
LayerDecayValueAssigner,
create_optimizer,
get_parameter_groups,
)
from utils import NativeScalerWithGradNormCount as NativeScaler
from utils import multiple_samples_collate
def get_args():
parser = argparse.ArgumentParser(
'MAE fine-tuning and evaluation script for video classification',
add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=30, type=int)
parser.add_argument('--update_freq', default=1, type=int)
parser.add_argument('--save_ckpt_freq', default=100, type=int)
# Model parameters
parser.add_argument('--model',
default='deit_base_patch16_224',
type=str,
metavar='MODEL',
help='Name of model to train')
parser.add_argument('--tubelet_size', type=int, default=2)
parser.add_argument('--input_size',
default=224,
type=int,
help='images input size')
parser.add_argument('--with_checkpoint',
action='store_true',
default=False)
parser.add_argument('--drop',
type=float,
default=0.0,
metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--attn_drop_rate',
type=float,
default=0.0,
metavar='PCT',
help='Attention dropout rate (default: 0.)')
parser.add_argument('--drop_path',
type=float,
default=0.1,
metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--head_drop_rate',
type=float,
default=0.0,
metavar='PCT',
help='cls head dropout rate (default: 0.)')
parser.add_argument('--disable_eval_during_finetuning',
action='store_true',
default=False)
parser.add_argument('--model_ema', action='store_true', default=False)
parser.add_argument('--model_ema_decay',
type=float,
default=0.9999,
help='')
parser.add_argument('--model_ema_force_cpu',
action='store_true',
default=False,
help='')
# Optimizer parameters
parser.add_argument('--opt',
default='adamw',
type=str,
metavar='OPTIMIZER',
help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps',
default=1e-8,
type=float,
metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument(
'--opt_betas',
default=None,
type=float,
nargs='+',
metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip_grad',
type=float,
default=None,
metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum',
type=float,
default=0.9,
metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay',
type=float,
default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--weight_decay_end',
type=float,
default=None,
help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument('--lr',
type=float,
default=1e-3,
metavar='LR',
help='learning rate (default: 1e-3)')
parser.add_argument('--layer_decay', type=float, default=0.75)
parser.add_argument('--extra_decay', type=float, default=1.0)
parser.add_argument('--warmup_lr',
type=float,
default=1e-8,
metavar='LR',
help='warmup learning rate (default: 1e-8)')
parser.add_argument(
'--min_lr',
type=float,
default=1e-6,
metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--warmup_epochs',
type=int,
default=5,
metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument(
'--warmup_steps',
type=int,
default=-1,
metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0'
)
# Augmentation parameters
parser.add_argument('--color_jitter',
type=float,
default=0.4,
metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--num_sample',
type=int,
default=2,
help='number of repeated augmentation samples per clip (default: 2)')
parser.add_argument('--num_segment',
type=int,
default=1,
help='segments used for v4d (default: 1)')
# parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
parser.add_argument(
'--aa',
type=str,
default='rand-m7-n4-mstd0.5-inc1',
metavar='NAME',
help=
'Use AutoAugment policy. "v0" or "original" (default: rand-m7-n4-mstd0.5-inc1)'
)
parser.add_argument('--smoothing',
type=float,
default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument(
'--train_interpolation',
type=str,
default='bicubic',
help=
'Training interpolation (random, bilinear, bicubic default: "bicubic")'
)
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
parser.add_argument('--short_side_size', type=int, default=224)
parser.add_argument('--test_num_segment', type=int, default=10)
parser.add_argument('--test_num_crop', type=int, default=3)
# * Random Erase params
parser.add_argument('--reprob',
type=float,
default=0.25,
metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode',
type=str,
default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount',
type=int,
default=1,
help='Random erase count (default: 1)')
parser.add_argument(
'--resplit',
action='store_true',
default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup',
type=float,
default=0.8,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix',
type=float,
default=1.0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument(
'--cutmix_minmax',
type=float,
nargs='+',
default=None,
help=
'cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)'
)
parser.add_argument(
'--mixup_prob',
type=float,
default=1.0,
help=
'Probability of performing mixup or cutmix when either/both is enabled'
)
parser.add_argument(
'--mixup_switch_prob',
type=float,
default=0.5,
help=
'Probability of switching to cutmix when both mixup and cutmix enabled'
)
parser.add_argument(
'--mixup_mode',
type=str,
default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"'
)
# * Finetuning params
parser.add_argument('--finetune',
default='',
help='finetune from checkpoint')
parser.add_argument('--model_key', default='model|module', type=str)
parser.add_argument('--model_prefix', default='', type=str)
parser.add_argument('--init_scale', default=0.001, type=float)
parser.add_argument('--use_mean_pooling', action='store_true')
parser.set_defaults(use_mean_pooling=True)
parser.add_argument('--use_cls',
action='store_false',
dest='use_mean_pooling')
# Dataset parameters
parser.add_argument('--data_path',
default='/datasets01/imagenet_full_size/061417/',
type=str,
help='dataset path')
parser.add_argument('--eval_data_path',
default=None,
type=str,
help='dataset path for evaluation')
parser.add_argument('--nb_classes',
default=400,
type=int,
help='number of the classification types')
parser.add_argument('--imagenet_default_mean_and_std',
default=True,
action='store_true')
parser.add_argument('--num_segments', type=int, default=1)
parser.add_argument('--num_frames', type=int, default=16)
parser.add_argument('--sampling_rate', type=int, default=4)
parser.add_argument('--sparse_sample', default=False, action='store_true')
parser.add_argument('--data_set',
default='Kinetics-400',
choices=[
'Kinetics-400', 'Kinetics-600', 'Kinetics-700',
'SSV2', 'UCF101', 'HMDB51', 'Diving48',
'image_folder', 'ANet', 'MixKinetics'
],
type=str,
help='dataset name')
parser.add_argument('--output_dir',
default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir',
default=None,
help='path where to tensorboard log')
parser.add_argument('--device',
default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--no_auto_resume',
action='store_false',
dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--save_ckpt', action='store_true')
parser.add_argument('--no_save_ckpt',
action='store_false',
dest='save_ckpt')
parser.set_defaults(save_ckpt=True)
parser.add_argument('--start_epoch',
default=0,
type=int,
metavar='N',
help='start epoch')
parser.add_argument('--eval',
action='store_true',
help='Perform evaluation only')
parser.add_argument('--validation',
action='store_true',
help='Perform validation only')
parser.add_argument('--dist_eval',
action='store_true',
default=False,
help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument(
'--pin_mem',
action='store_true',
help=
'Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.'
)
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size',
default=1,
type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url',
default='env://',
help='url used to set up distributed training')
parser.add_argument('--enable_deepspeed',
action='store_true',
default=False)
known_args, _ = parser.parse_known_args()
if known_args.enable_deepspeed:
# try:
import deepspeed
from deepspeed import DeepSpeedConfig
parser = deepspeed.add_config_arguments(parser)
ds_init = deepspeed.initialize
# except:
# print("Please 'pip install deepspeed==0.4.0'")
# exit(0)
else:
ds_init = None
return parser.parse_args(), ds_init
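# Overview (added comment, not in the original file): main() wires the full
# fine-tuning pipeline: distributed init -> dataset / dataloader construction
# (with optional repeated-sample collate and mixup/cutmix) -> model creation and
# loading of the pre-trained checkpoint, including spatial and temporal
# interpolation of the positional embedding when input size or frame count
# differs from pre-training -> layer-wise LR decay optimizer with cosine LR/WD
# schedules -> epoch loop with validation and best-checkpoint saving -> final
# multi-view test whose per-rank prediction files are merged into top-1/top-5.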
def main(args, ds_init):
utils.init_distributed_mode(args)
if ds_init is not None:
utils.create_ds_config(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True,
test_mode=False,
args=args)
if args.disable_eval_during_finetuning:
dataset_val = None
else:
dataset_val, _ = build_dataset(is_train=False,
test_mode=False,
args=args)
dataset_test, _ = build_dataset(is_train=False, test_mode=True, args=args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train,
num_replicas=num_tasks,
rank=global_rank,
shuffle=True)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print(
'Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val,
num_replicas=num_tasks,
rank=global_rank,
shuffle=False)
sampler_test = torch.utils.data.DistributedSampler(
dataset_test,
num_replicas=num_tasks,
rank=global_rank,
shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
if args.num_sample > 1:
collate_func = partial(multiple_samples_collate, fold=False)
else:
collate_func = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
collate_fn=collate_func,
persistent_workers=True)
if dataset_val is not None:
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False,
persistent_workers=True)
else:
data_loader_val = None
if dataset_test is not None:
data_loader_test = torch.utils.data.DataLoader(
dataset_test,
sampler=sampler_test,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False,
persistent_workers=True)
else:
data_loader_test = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(mixup_alpha=args.mixup,
cutmix_alpha=args.cutmix,
cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob,
switch_prob=args.mixup_switch_prob,
mode=args.mixup_mode,
label_smoothing=args.smoothing,
num_classes=args.nb_classes)
model = create_model(
args.model,
img_size=args.input_size,
pretrained=False,
num_classes=args.nb_classes,
all_frames=args.num_frames * args.num_segments,
tubelet_size=args.tubelet_size,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
attn_drop_rate=args.attn_drop_rate,
head_drop_rate=args.head_drop_rate,
drop_block_rate=None,
use_mean_pooling=args.use_mean_pooling,
init_scale=args.init_scale,
with_cp=args.with_checkpoint,
num_segment=args.num_segment,
)
patch_size = model.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
# args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1])
args.window_size = (args.num_frames // 2, args.input_size // patch_size[0],
args.input_size // patch_size[1])
args.patch_size = patch_size
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(args.finetune,
map_location='cpu',
check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load ckpt from %s" % args.finetune)
checkpoint_model = None
for model_key in args.model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[
k].shape != state_dict[k].shape:
if checkpoint_model[k].shape[0] == 710:
if args.data_set == 'Kinetics-400':
print('Convert to k400 class head')
checkpoint_model[k] = checkpoint_model[k][:400]
elif args.data_set == 'Kinetics-600':
print('Convert to k600 class head')
label_map_path = '/mnt/petrelfs/huangbingkun/data/mix_kinetics/label_mixto600.json'
label_map = json.load(open(label_map_path))
checkpoint_model[k] = checkpoint_model[k][label_map]
elif args.data_set == 'Kinetics-700':
print('Convert to k700 class head')
label_map_path = '/mnt/petrelfs/huangbingkun/data/mix_kinetics/label_mixto700.json'
label_map = json.load(open(label_map_path))
checkpoint_model[k] = checkpoint_model[k][label_map]
else:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
else:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
all_keys = list(checkpoint_model.keys())
new_dict = OrderedDict()
for key in all_keys:
if key.startswith('backbone.'):
new_dict[key[9:]] = checkpoint_model[key]
elif key.startswith('encoder.'):
new_dict[key[8:]] = checkpoint_model[key]
else:
new_dict[key] = checkpoint_model[key]
checkpoint_model = new_dict
# interpolate position embedding
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1] # channel dim
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches # 0/1
# height (== width) for the checkpoint position embedding
orig_size = int(
((pos_embed_checkpoint.shape[-2] - num_extra_tokens) //
(args.num_frames // model.patch_embed.tubelet_size))**0.5)
# height (== width) for the new position embedding
new_size = int(
(num_patches //
(args.num_frames // model.patch_embed.tubelet_size))**0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" %
(orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
# B, L, C -> BT, H, W, C -> BT, C, H, W
pos_tokens = pos_tokens.reshape(
-1, args.num_frames // model.patch_embed.tubelet_size,
orig_size, orig_size, embedding_size)
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size,
embedding_size).permute(
0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_size, new_size),
mode='bicubic',
align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(
-1, args.num_frames // model.patch_embed.tubelet_size,
new_size, new_size, embedding_size)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
elif args.input_size != 224:
pos_tokens = model.pos_embed
org_num_frames = 16
T = org_num_frames // args.tubelet_size
P = int((pos_tokens.shape[1] // T) ** 0.5)
C = pos_tokens.shape[2]
new_P = args.input_size // patch_size[0]
# B, L, C -> BT, H, W, C -> BT, C, H, W
pos_tokens = pos_tokens.reshape(-1, T, P, P, C)
pos_tokens = pos_tokens.reshape(-1, P, P, C).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_P, new_P), mode='bicubic', align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, T, new_P, new_P, C)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
model.pos_embed = pos_tokens # update
if args.num_frames != 16:
org_num_frames = 16
T = org_num_frames // args.tubelet_size
pos_tokens = model.pos_embed
new_T = args.num_frames // args.tubelet_size
P = int((pos_tokens.shape[1] // T) ** 0.5)
C = pos_tokens.shape[2]
pos_tokens = pos_tokens.reshape(-1, T, P, P, C)
pos_tokens = pos_tokens.permute(0, 2, 3, 4, 1).reshape(-1, C, T) # BHW,C,T
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=new_T, mode='linear')
pos_tokens = pos_tokens.reshape(1, P, P, C, new_T).permute(0, 4, 1, 2, 3)
pos_tokens = pos_tokens.flatten(1, 3)
model.pos_embed = pos_tokens # update
utils.load_state_dict(model,
checkpoint_model,
prefix=args.model_prefix)
# model.load_state_dict(checkpoint_model, strict=False)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters()
if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * args.update_freq * utils.get_world_size(
)
num_training_steps_per_epoch = len(dataset_train) // total_batch_size
args.lr = args.lr * total_batch_size / 256
#########scale the lr#############
args.min_lr = args.min_lr * total_batch_size / 256
args.warmup_lr = args.warmup_lr * total_batch_size / 256
#########scale the lr#############
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequent = %d" % args.update_freq)
print("Number of training examples = %d" % len(dataset_train))
print("Number of training training per epoch = %d" %
num_training_steps_per_epoch)
num_layers = model_without_ddp.get_num_layers()
if args.layer_decay < 1.0:
assigner = LayerDecayValueAssigner(list(
args.layer_decay**(num_layers + 1 - i)
for i in range(num_layers + 2)),
extra_decay=args.extra_decay)
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
skip_weight_decay_list = model.no_weight_decay()
print("Skip weight decay list: ", skip_weight_decay_list)
if args.enable_deepspeed:
loss_scaler = None
optimizer_params = get_parameter_groups(
model, args.weight_decay, skip_weight_decay_list,
assigner.get_layer_id if assigner is not None else None,
assigner.get_scale if assigner is not None else None)
model, optimizer, _, _ = ds_init(
args=args,
model=model,
model_parameters=optimizer_params,
dist_init_required=not args.distributed,
)
print("model.gradient_accumulation_steps() = %d" %
model.gradient_accumulation_steps())
assert model.gradient_accumulation_steps() == args.update_freq
else:
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu], find_unused_parameters=False)
model_without_ddp = model.module
optimizer = create_optimizer(args,
model_without_ddp,
skip_list=skip_weight_decay_list,
get_num_layer=assigner.get_layer_id
if assigner is not None else None,
get_layer_scale=assigner.get_scale
if assigner is not None else None)
loss_scaler = NativeScaler()
print("Use step level LR scheduler!")
lr_schedule_values = utils.cosine_scheduler(
args.lr,
args.min_lr,
args.epochs,
num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs,
warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(args.weight_decay,
args.weight_decay_end,
args.epochs,
num_training_steps_per_epoch)
print("Max WD = %.7f, Min WD = %.7f" %
(max(wd_schedule_values), min(wd_schedule_values)))
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
utils.auto_load_model(args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler,
model_ema=model_ema)
if args.validation:
test_stats = validation_one_epoch(data_loader_val, model, device)
print(
f"{len(dataset_val)} val images: Top-1 {test_stats['acc1']:.2f}%, Top-5 {test_stats['acc5']:.2f}%, loss {test_stats['loss']:.4f}"
)
exit(0)
if args.eval:
preds_file = os.path.join(args.output_dir, str(global_rank) + '.txt')
test_stats = final_test(data_loader_test, model, device, preds_file)
torch.distributed.barrier()
if global_rank == 0:
print("Start merging results...")
final_top1, final_top5 = merge(args.output_dir, num_tasks)
print(
f"Accuracy of the network on the {len(dataset_test)} test videos: Top-1: {final_top1:.2f}%, Top-5: {final_top5:.2f}%"
)
log_stats = {'Final top-1': final_top1, 'Final Top-5': final_top5}
if args.output_dir and utils.is_main_process():
with open(os.path.join(args.output_dir, "log.txt"),
mode="a",
encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
exit(0)
# test_stats = validation_one_epoch(data_loader_test, model, device)
# print(f"Accuracy of the network on the {len(dataset_test)} test videos: {test_stats['acc1']:.1f}%")
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch *
args.update_freq)
train_stats = train_one_epoch(
model,
criterion,
data_loader_train,
optimizer,
device,
epoch,
loss_scaler,
args,
args.clip_grad,
model_ema,
mixup_fn,
log_writer=log_writer,
start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values,
wd_schedule_values=wd_schedule_values,
num_training_steps_per_epoch=num_training_steps_per_epoch,
update_freq=args.update_freq,
)
if args.output_dir and args.save_ckpt:
if (epoch +
1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler,
epoch=epoch,
model_ema=model_ema)
if data_loader_val is not None:
test_stats = validation_one_epoch(data_loader_val, model, device)
print(
f"Accuracy of the network on the {len(dataset_val)} val images: {test_stats['acc1']:.2f}%"
)
if max_accuracy < test_stats["acc1"]:
max_accuracy = test_stats["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler,
epoch="best",
model_ema=model_ema)
print(f'Max accuracy: {max_accuracy:.2f}%')
if log_writer is not None:
log_writer.update(val_acc1=test_stats['acc1'],
head="perf",
step=epoch)
log_writer.update(val_acc5=test_stats['acc5'],
head="perf",
step=epoch)
log_writer.update(val_loss=test_stats['loss'],
head="perf",
step=epoch)
log_stats = {
**{f'train_{k}': v
for k, v in train_stats.items()},
**{f'val_{k}': v
for k, v in test_stats.items()}, 'epoch': epoch,
'n_parameters': n_parameters
}
else:
log_stats = {
**{f'train_{k}': v
for k, v in train_stats.items()},
# **{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters
}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"),
mode="a",
encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
preds_file = os.path.join(args.output_dir, str(global_rank) + '.txt')
test_stats = final_test(data_loader_test, model, device, preds_file)
torch.distributed.barrier()
if global_rank == 0:
print("Start merging results...")
final_top1, final_top5 = merge(args.output_dir, num_tasks)
print(
f"Accuracy of the network on the {len(dataset_test)} test videos: Top-1: {final_top1:.2f}%, Top-5: {final_top5:.2f}%"
)
log_stats = {'Final top-1': final_top1, 'Final Top-5': final_top5}
if args.output_dir and utils.is_main_process():
with open(os.path.join(args.output_dir, "log.txt"),
mode="a",
encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
opts, ds_init = get_args()
if opts.output_dir:
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
main(opts, ds_init)
| InternVideo-main | Pretrain/VideoMAE/run_class_finetuning.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import argparse
import datetime
import json
import os
import time
from collections import OrderedDict
from functools import partial
from pathlib import Path
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from timm.models import create_model
import modeling_pretrain
import utils
from datasets import build_pretraining_dataset
from engine_for_pretraining import train_one_epoch
from optim_factory import create_optimizer
from utils import NativeScalerWithGradNormCount as NativeScaler
from utils import multiple_pretrain_samples_collate
def get_args():
parser = argparse.ArgumentParser('MAE pre-training script', add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--save_ckpt_freq', default=50, type=int)
# Model parameters
parser.add_argument(
'--model',
default='pretrain_mae_base_patch16_224',
type=str,
metavar='MODEL',
help='Name of model to train')
parser.add_argument('--with_checkpoint', action='store_true', default=False)
parser.add_argument(
'--decoder_depth', default=4, type=int, help='depth of decoder')
parser.add_argument(
'--mask_type',
default='random',
choices=['random', 't_consist', 't_progressive', 't_center_prog'],
type=str,
help='masked strategy of visual tokens/patches')
parser.add_argument(
'--mask_ratio',
default=0.75,
type=float,
help='ratio of the visual tokens/patches that need to be masked')
parser.add_argument(
'--decode_ratio',
default=1.0,
type=float,
help='ratio of the masked tokens/patches to be decoded')
parser.add_argument(
'--input_size',
default=224,
type=int,
help='image input size for the backbone')
parser.add_argument(
'--drop_path',
type=float,
default=0.0,
metavar='PCT',
help='Drop path rate (default: 0.0)')
parser.add_argument(
'--normlize_target',
default=True,
type=bool,
help='normalize the target patch pixels')
# Optimizer parameters
parser.add_argument(
'--opt',
default='adamw',
type=str,
metavar='OPTIMIZER',
help='Optimizer (default: "adamw")')
parser.add_argument(
'--opt_eps',
default=1e-8,
type=float,
metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument(
'--opt_betas',
default=None,
type=float,
nargs='+',
metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument(
'--clip_grad',
type=float,
default=None,
metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument(
'--momentum',
type=float,
default=0.9,
metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument(
'--weight_decay',
type=float,
default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument(
'--weight_decay_end',
type=float,
default=None,
help="""Final value of the
weight decay. We use a cosine schedule for WD.
(Set the same value with args.weight_decay to keep weight decay no change)"""
)
parser.add_argument(
'--lr',
type=float,
default=1.5e-4,
metavar='LR',
help='learning rate (default: 1.5e-4)')
parser.add_argument(
'--warmup_lr',
type=float,
default=1e-6,
metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument(
'--min_lr',
type=float,
default=1e-5,
metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument(
'--warmup_epochs',
type=int,
default=40,
metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument(
'--warmup_steps',
type=int,
default=-1,
metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0')
# Augmentation parameters
parser.add_argument(
'--color_jitter',
type=float,
default=0.0,
metavar='PCT',
help='Color jitter factor (default: 0.0)')
parser.add_argument(
'--train_interpolation',
type=str,
default='bicubic',
help=
'Training interpolation (random, bilinear, bicubic; default: "bicubic")')
# * Finetuning params
parser.add_argument(
'--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument(
'--data_path',
default='/datasets01/imagenet_full_size/061417/train',
type=str,
help='dataset path')
parser.add_argument(
'--data_root', default='', type=str, help='dataset path root')
parser.add_argument(
'--imagenet_default_mean_and_std', default=True, action='store_true')
parser.add_argument('--num_frames', type=int, default=16)
parser.add_argument('--sampling_rate', type=int, default=4)
parser.add_argument('--num_sample', type=int, default=1)
parser.add_argument(
'--output_dir',
default='',
help='path where to save, empty for no saving')
parser.add_argument(
'--log_dir', default=None, help='path where to tensorboard log')
parser.add_argument(
'--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument(
'--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument(
'--start_epoch', default=0, type=int, metavar='N', help='start epoch')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument(
'--pin_mem',
action='store_true',
help=
'Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.'
)
parser.add_argument(
'--no_pin_mem', action='store_false', dest='pin_mem', help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument(
'--world_size',
default=1,
type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument(
'--dist_url',
default='env://',
help='url used to set up distributed training')
return parser.parse_args()
def get_model(args):
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
drop_path_rate=args.drop_path,
drop_block_rate=None,
decoder_depth=args.decoder_depth,
with_cp=args.with_checkpoint)
return model
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
model = get_model(args)
patch_size = model.encoder.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
args.window_size = (args.num_frames // 2, args.input_size // patch_size[0],
args.input_size // patch_size[1])
args.patch_size = patch_size
# get dataset
dataset_train = build_pretraining_dataset(args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_rank = global_rank
num_training_steps_per_epoch = len(
dataset_train) // args.batch_size // num_tasks
sampler_train = torch.utils.data.DistributedSampler(
dataset_train,
num_replicas=num_tasks,
rank=sampler_rank,
shuffle=True)
print("Sampler_train = %s" % str(sampler_train))
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
if args.num_sample > 1:
collate_func = partial(multiple_pretrain_samples_collate, fold=False)
else:
collate_func = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
collate_fn=collate_func,
worker_init_fn=utils.seed_worker,
persistent_workers=True)
if args.finetune:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load ckpt from %s" % args.finetune)
checkpoint_model = None
for model_key in ['model', 'module']:
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
utils.load_state_dict(model, checkpoint_model)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params: {} M'.format(n_parameters / 1e6))
total_batch_size = args.batch_size * utils.get_world_size()
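# Linear LR scaling rule: base, minimum and warmup learning rates are all scaled by (global batch size / 256).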
args.lr = args.lr * total_batch_size / 256
#########scale the lr#############
args.min_lr = args.min_lr * total_batch_size / 256
args.warmup_lr = args.warmup_lr * total_batch_size / 256
#########scale the lr#############
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Number of training steps = %d" % num_training_steps_per_epoch)
print("Number of training examples per epoch = %d" %
(total_batch_size * num_training_steps_per_epoch))
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu], find_unused_parameters=False)
model_without_ddp = model.module
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
print("Use step level LR & WD scheduler!")
lr_schedule_values = utils.cosine_scheduler(
args.lr,
args.min_lr,
args.epochs,
num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs,
warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(args.weight_decay,
args.weight_decay_end,
args.epochs,
num_training_steps_per_epoch)
print("Max WD = %.7f, Min WD = %.7f" %
(max(wd_schedule_values), min(wd_schedule_values)))
utils.auto_load_model(
args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler)
torch.cuda.empty_cache()
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch)
train_stats = train_one_epoch(
model,
data_loader_train,
optimizer,
device,
epoch,
loss_scaler,
args.clip_grad,
log_writer=log_writer,
start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values,
wd_schedule_values=wd_schedule_values,
patch_size=patch_size[0],
normlize_target=args.normlize_target)
if args.output_dir:
if (epoch +
1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler,
epoch=epoch)
log_stats = {
**{f'train_{k}': v
for k, v in train_stats.items()}, 'epoch': epoch,
'n_parameters': n_parameters
}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(
os.path.join(args.output_dir, "log.txt"),
mode="a",
encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
opts = get_args()
if opts.output_dir:
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
main(opts)
| InternVideo-main | Pretrain/VideoMAE/run_mae_pretraining.py |
import numbers
import cv2
import numpy as np
import PIL
import torch
def _is_tensor_clip(clip):
return torch.is_tensor(clip) and clip.ndimension() == 4
def crop_clip(clip, min_h, min_w, h, w):
if isinstance(clip[0], np.ndarray):
cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
cropped = [
img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return cropped
def resize_clip(clip, size, interpolation='bilinear'):
if isinstance(clip[0], np.ndarray):
if isinstance(size, numbers.Number):
im_h, im_w, im_c = clip[0].shape
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[0], size[1]
if interpolation == 'bilinear':
np_inter = cv2.INTER_LINEAR
else:
np_inter = cv2.INTER_NEAREST
scaled = [
cv2.resize(img, size, interpolation=np_inter) for img in clip
]
elif isinstance(clip[0], PIL.Image.Image):
if isinstance(size, numbers.Number):
im_w, im_h = clip[0].size
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[1], size[0]
if interpolation == 'bilinear':
pil_inter = PIL.Image.BILINEAR
else:
pil_inter = PIL.Image.NEAREST
scaled = [img.resize(size, pil_inter) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return scaled
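# Compute the output (height, width) that scales the shorter side to 'size' while preserving the aspect ratio.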
def get_resize_sizes(im_h, im_w, size):
if im_w < im_h:
ow = size
oh = int(size * im_h / im_w)
else:
oh = size
ow = int(size * im_w / im_h)
return oh, ow
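# Channel-wise normalization of a 4-D clip tensor (channels first, e.g. C x T x H x W); the clip is cloned first unless inplace=True.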
def normalize(clip, mean, std, inplace=False):
if not _is_tensor_clip(clip):
raise TypeError('tensor is not a torch clip.')
if not inplace:
clip = clip.clone()
dtype = clip.dtype
mean = torch.as_tensor(mean, dtype=dtype, device=clip.device)
std = torch.as_tensor(std, dtype=dtype, device=clip.device)
clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
return clip
| InternVideo-main | Pretrain/VideoMAE/functional.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import datetime
import io
import json
import math
import os
import random
import subprocess
import time
from collections import defaultdict, deque
from pathlib import Path
import numpy as np
import torch
import torch.distributed as dist
from tensorboardX import SummaryWriter
from timm.utils import get_state_dict
from torch._six import inf
from torch.utils.data._utils.collate import default_collate
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total],
dtype=torch.float64,
device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f} (max: {max:.4f})')
data_time = SmoothedValue(fmt='{avg:.4f} (max: {max:.4f})')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}',
'time: {time}', 'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(
log_msg.format(i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
class TensorboardLogger(object):
def __init__(self, log_dir):
self.writer = SummaryWriter(logdir=log_dir)
self.step = 0
def set_step(self, step=None):
if step is not None:
self.step = step
else:
self.step += 1
def update(self, head='scalar', step=None, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.writer.add_scalar(head + "/" + k, v,
self.step if step is None else step)
def flush(self):
self.writer.flush()
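# Re-seed numpy and random in every DataLoader worker from the per-worker torch seed so augmentations stay reproducible.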
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
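# Resolve rank / world size / local GPU from whichever launcher is in use (ITP via OMPI_* vars, SLURM, or torchrun-style RANK/WORLD_SIZE/LOCAL_RANK), then initialize the NCCL process group.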
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
# ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = int(os.environ['SLURM_LOCALID'])
args.world_size = int(os.environ['SLURM_NTASKS'])
os.environ['RANK'] = str(args.rank)
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['WORLD_SIZE'] = str(args.world_size)
node_list = os.environ['SLURM_NODELIST']
addr = subprocess.getoutput(
f'scontrol show hostname {node_list} | head -n1')
if 'MASTER_ADDR' not in os.environ:
os.environ['MASTER_ADDR'] = addr
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu),
flush=True)
torch.distributed.init_process_group(backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank)
torch.cuda.empty_cache()
torch.distributed.barrier()
assert torch.distributed.is_initialized()
setup_for_distributed(args.rank == 0)
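# Recursively load a (possibly partial) state dict, collecting missing and unexpected keys; missing keys that match any '|'-separated pattern in ignore_missing are only reported, not warned about.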
def load_state_dict(model,
state_dict,
prefix='',
ignore_missing="relative_position_index"):
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata, True,
missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix=prefix)
warn_missing_keys = []
ignore_missing_keys = []
for key in missing_keys:
keep_flag = True
for ignore_key in ignore_missing.split('|'):
if ignore_key in key:
keep_flag = False
break
if keep_flag:
warn_missing_keys.append(key)
else:
ignore_missing_keys.append(key)
missing_keys = warn_missing_keys
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(ignore_missing_keys) > 0:
print(
"Ignored weights of {} not initialized from pretrained model: {}".
format(model.__class__.__name__, ignore_missing_keys))
if len(error_msgs) > 0:
print('\n'.join(error_msgs))
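# Thin wrapper around torch.cuda.amp.GradScaler: scales the loss, backpropagates, optionally unscales and clips gradients, and returns the gradient norm; update_grad=False supports gradient accumulation.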
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self,
loss,
optimizer,
clip_grad=None,
parameters=None,
create_graph=False,
update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(
optimizer
) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
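# Total gradient norm over all parameters that have gradients (same reduction as clip_grad_norm_, but without modifying the gradients); used for logging.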
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device)
for p in parameters)
else:
total_norm = torch.norm(
torch.stack([
torch.norm(p.grad.detach(), norm_type).to(device)
for p in parameters
]), norm_type)
return total_norm
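# Build a per-iteration schedule of length epochs * niter_per_ep: linear warmup from start_warmup_value to base_value, followed by a half-cosine decay down to final_value.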
def cosine_scheduler(base_value,
final_value,
epochs,
niter_per_ep,
warmup_epochs=0,
start_warmup_value=0,
warmup_steps=-1):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value,
warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array([
final_value + 0.5 * (base_value - final_value) *
(1 + math.cos(math.pi * i / (len(iters)))) for i in iters
])
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
def save_model(args,
epoch,
model,
model_without_ddp,
optimizer,
loss_scaler,
model_ema=None):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
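# With the native AMP scaler a regular checkpoint dict is saved on the master rank; otherwise DeepSpeed's save_checkpoint handles (possibly sharded) saving.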
if loss_scaler is not None:
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
}
if model_ema is not None:
to_save['model_ema'] = get_state_dict(model_ema)
save_on_master(to_save, checkpoint_path)
else:
client_state = {'epoch': epoch}
if model_ema is not None:
client_state['model_ema'] = get_state_dict(model_ema)
model.save_checkpoint(save_dir=args.output_dir,
tag="checkpoint-%s" % epoch_name,
client_state=client_state)
def auto_load_model(args,
model,
model_without_ddp,
optimizer,
loss_scaler,
model_ema=None):
output_dir = Path(args.output_dir)
if loss_scaler is not None:
# torch.amp
if args.auto_resume and len(args.resume) == 0:
import glob
all_checkpoints = glob.glob(
os.path.join(output_dir, 'checkpoint-*.pth'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir,
'checkpoint-%d.pth' % latest_ckpt)
print("Auto resume checkpoint: %s" % args.resume)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
args.start_epoch = checkpoint['epoch'] + 1
if hasattr(args, 'model_ema') and args.model_ema:
_load_checkpoint_for_ema(model_ema,
checkpoint['model_ema'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
else:
# deepspeed, only support '--auto_resume'.
if args.auto_resume:
import glob
all_checkpoints = glob.glob(
os.path.join(output_dir, 'checkpoint-*'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir,
'checkpoint-%d' % latest_ckpt)
print("Auto resume checkpoint: %d" % latest_ckpt)
_, client_states = model.load_checkpoint(args.output_dir,
tag='checkpoint-%d' %
latest_ckpt)
if 'epoch' in client_states:
args.start_epoch = client_states['epoch'] + 1
if model_ema is not None:
if args.model_ema:
_load_checkpoint_for_ema(model_ema,
client_states['model_ema'])
def create_ds_config(args):
args.deepspeed_config = os.path.join(args.output_dir,
"deepspeed_config.json")
with open(args.deepspeed_config, mode="w") as writer:
ds_config = {
"train_batch_size":
args.batch_size * args.update_freq * get_world_size(),
"train_micro_batch_size_per_gpu":
args.batch_size,
"steps_per_print":
1000,
"gradient_clipping":
0.0 if args.clip_grad is None else args.clip_grad,
"optimizer": {
"type": "Adam",
"adam_w_mode": True,
"params": {
"lr": args.lr,
"weight_decay": args.weight_decay,
"bias_correction": True,
"betas": [0.9, 0.999],
"eps": 1e-8
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 7,
"loss_scale_window": 128
}
}
writer.write(json.dumps(ds_config, indent=2))
def multiple_samples_collate(batch, fold=False):
"""
Collate function for repeated augmentation. Each instance in the batch has
more than one sample.
Args:
batch (tuple or list): data batch to collate.
Returns:
(tuple): collated data batch.
"""
inputs, labels, video_idx, extra_data = zip(*batch)
inputs = [item for sublist in inputs for item in sublist]
labels = [item for sublist in labels for item in sublist]
video_idx = [item for sublist in video_idx for item in sublist]
inputs, labels, video_idx, extra_data = (
default_collate(inputs),
default_collate(labels),
default_collate(video_idx),
default_collate(extra_data),
)
if fold:
return [inputs], labels, video_idx, extra_data
else:
return inputs, labels, video_idx, extra_data
def multiple_pretrain_samples_collate(batch, fold=False):
"""
Collate function for repeated augmentation. Each instance in the batch has
more than one sample.
Args:
batch (tuple or list): data batch to collate.
Returns:
(tuple): collated data batch.
"""
process_data, mask = zip(*batch)
process_data = [item for sublist in process_data for item in sublist]
mask = [item for sublist in mask for item in sublist]
process_data, mask = (
default_collate(process_data),
default_collate(mask),
)
if fold:
return [process_data], mask
else:
return process_data, mask
| InternVideo-main | Pretrain/VideoMAE/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from functools import partial, reduce
from operator import mul
import torch
import torch.nn as nn
from timm.models.layers.helpers import to_2tuple
from modeling_finetune import PatchEmbed, VisionTransformer, _cfg
__all__ = [
'vit_small',
'vit_base',
'vit_conv_small',
'vit_conv_base',
]
class VisionTransformerMoCo(VisionTransformer):
def __init__(self, stop_grad_conv1=False, **kwargs):
super().__init__(**kwargs)
# Use fixed 2D sin-cos position embedding
# self.build_2d_sincos_position_embedding()
# weight initialization
for name, m in self.named_modules():
if isinstance(m, nn.Linear):
if 'qkv' in name:
# treat the weights of Q, K, V separately
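# qkv stacks Q, K and V along the output dimension, so the xavier-uniform bound uses weight.shape[0] // 3 as the per-projection fan-out.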
val = math.sqrt(6. / float(m.weight.shape[0] // 3 + m.weight.shape[1]))
nn.init.uniform_(m.weight, -val, val)
else:
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
#######################################
# nn.init.normal_(self.cls_token, std=1e-6)
#######################################
if isinstance(self.patch_embed, PatchEmbed):
# xavier_uniform initialization
val = math.sqrt(6. / float(3 * reduce(mul, self.patch_embed.patch_size, 1) + self.embed_dim))
nn.init.uniform_(self.patch_embed.proj.weight, -val, val)
nn.init.zeros_(self.patch_embed.proj.bias)
if stop_grad_conv1:
self.patch_embed.proj.weight.requires_grad = False
self.patch_embed.proj.bias.requires_grad = False
# def build_2d_sincos_position_embedding(self, temperature=10000.):
# h, w = self.patch_embed.grid_size
# grid_w = torch.arange(w, dtype=torch.float32)
# grid_h = torch.arange(h, dtype=torch.float32)
# grid_w, grid_h = torch.meshgrid(grid_w, grid_h)
# assert self.embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
# pos_dim = self.embed_dim // 4
# omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
# omega = 1. / (temperature**omega)
# out_w = torch.einsum('m,d->md', [grid_w.flatten(), omega])
# out_h = torch.einsum('m,d->md', [grid_h.flatten(), omega])
# pos_emb = torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], dim=1)[None, :, :]
# assert self.num_tokens == 1, 'Assuming one and only one token, [cls]'
# pe_token = torch.zeros([1, 1, self.embed_dim], dtype=torch.float32)
# self.pos_embed = nn.Parameter(torch.cat([pe_token, pos_emb], dim=1))
# self.pos_embed.requires_grad = False
class ConvStem(nn.Module):
"""
ConvStem, from Early Convolutions Help Transformers See Better, Tete et al. https://arxiv.org/abs/2106.14881
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):
super().__init__()
assert patch_size == 16, 'ConvStem only supports patch size of 16'
assert embed_dim % 8 == 0, 'Embed dimension must be divisible by 8 for ConvStem'
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.flatten = flatten
# build stem, similar to the design in https://arxiv.org/abs/2106.14881
stem = []
input_dim, output_dim = 3, embed_dim // 8
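# Four 3x3 stride-2 convs (overall stride 16) double the channels each stage, from embed_dim // 8 up to embed_dim, before a final 1x1 projection to embed_dim.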
for l in range(4):
stem.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=2, padding=1, bias=False))
stem.append(nn.BatchNorm2d(output_dim))
stem.append(nn.ReLU(inplace=True))
input_dim = output_dim
output_dim *= 2
stem.append(nn.Conv2d(input_dim, embed_dim, kernel_size=1))
self.proj = nn.Sequential(*stem)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
x = self.norm(x)
return x
def vit_small(**kwargs):
model = VisionTransformerMoCo(
patch_size=16, embed_dim=384, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
def vit_base(**kwargs):
model = VisionTransformerMoCo(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
def vit_conv_small(**kwargs):
# minus one ViT block
model = VisionTransformerMoCo(
patch_size=16, embed_dim=384, depth=11, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), embed_layer=ConvStem, **kwargs)
model.default_cfg = _cfg()
return model
def vit_conv_base(**kwargs):
# minus one ViT block
model = VisionTransformerMoCo(
patch_size=16, embed_dim=768, depth=11, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), embed_layer=ConvStem, **kwargs)
model.default_cfg = _cfg()
return model | InternVideo-main | Pretrain/VideoMAE/vits.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import argparse
import datetime
import json
import os
import time
from collections import OrderedDict
from pathlib import Path
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from scipy import interpolate
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.models import create_model
from timm.utils import ModelEma
import modeling_finetune
import utils
from datasets import build_dataset
from engine_for_finetuning import (
final_test,
merge,
train_one_epoch,
validation_one_epoch,
)
from optim_factory import (
LayerDecayValueAssigner,
create_optimizer,
get_parameter_groups,
)
from utils import NativeScalerWithGradNormCount as NativeScaler
def get_args():
parser = argparse.ArgumentParser(
'MAE fine-tuning and evaluation script for image classification',
add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=30, type=int)
parser.add_argument('--update_freq', default=1, type=int)
parser.add_argument('--save_ckpt_freq', default=100, type=int)
# Model parameters
parser.add_argument('--model',
default='deit_base_patch16_224',
type=str,
metavar='MODEL',
help='Name of model to train')
parser.add_argument('--tubelet_size', type=int, default=2)
parser.add_argument('--input_size',
default=224,
type=int,
help='image input size')
parser.add_argument('--drop',
type=float,
default=0.0,
metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--attn_drop_rate',
type=float,
default=0.0,
metavar='PCT',
help='Attention dropout rate (default: 0.)')
parser.add_argument('--drop_path',
type=float,
default=0.1,
metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--disable_eval_during_finetuning',
action='store_true',
default=False)
parser.add_argument('--model_ema', action='store_true', default=False)
parser.add_argument('--model_ema_decay',
type=float,
default=0.9999,
help='')
parser.add_argument('--model_ema_force_cpu',
action='store_true',
default=False,
help='')
# Optimizer parameters
parser.add_argument('--opt',
default='adamw',
type=str,
metavar='OPTIMIZER',
help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps',
default=1e-8,
type=float,
metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument(
'--opt_betas',
default=None,
type=float,
nargs='+',
metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip_grad',
type=float,
default=None,
metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum',
type=float,
default=0.9,
metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay',
type=float,
default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--weight_decay_end',
type=float,
default=None,
help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument('--lr',
type=float,
default=1e-3,
metavar='LR',
help='learning rate (default: 1e-3)')
parser.add_argument('--layer_decay', type=float, default=0.75)
parser.add_argument('--warmup_lr',
type=float,
default=1e-6,
metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument(
'--min_lr',
type=float,
default=1e-6,
metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-6)')
parser.add_argument('--warmup_epochs',
type=int,
default=5,
metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument(
'--warmup_steps',
type=int,
default=-1,
metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0'
)
# Augmentation parameters
parser.add_argument('--color_jitter',
type=float,
default=0.4,
metavar='PCT',
help='Color jitter factor (default: 0.4)')
# parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
parser.add_argument(
'--aa',
type=str,
default='rand-m7-n4-mstd0.5-inc1',
metavar='NAME',
help=
'Use AutoAugment policy. "v0" or "original" (default: rand-m7-n4-mstd0.5-inc1)'
)
parser.add_argument('--smoothing',
type=float,
default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument(
'--train_interpolation',
type=str,
default='bicubic',
help=
'Training interpolation (random, bilinear, bicubic; default: "bicubic")'
)
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
parser.add_argument('--short_side_size', type=int, default=224)
parser.add_argument('--test_num_segment', type=int, default=10)
parser.add_argument('--test_num_crop', type=int, default=3)
# * Random Erase params
parser.add_argument('--reprob',
type=float,
default=0.25,
metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode',
type=str,
default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount',
type=int,
default=1,
help='Random erase count (default: 1)')
parser.add_argument(
'--resplit',
action='store_true',
default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup',
type=float,
default=0.8,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix',
type=float,
default=1.0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument(
'--cutmix_minmax',
type=float,
nargs='+',
default=None,
help=
'cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)'
)
parser.add_argument(
'--mixup_prob',
type=float,
default=1.0,
help=
'Probability of performing mixup or cutmix when either/both is enabled'
)
parser.add_argument(
'--mixup_switch_prob',
type=float,
default=0.5,
help=
'Probability of switching to cutmix when both mixup and cutmix enabled'
)
parser.add_argument(
'--mixup_mode',
type=str,
default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"'
)
# * Finetuning params
parser.add_argument('--finetune',
default='',
help='finetune from checkpoint')
parser.add_argument('--model_key', default='model|module', type=str)
parser.add_argument('--model_prefix', default='', type=str)
parser.add_argument('--init_scale', default=0.001, type=float)
parser.add_argument('--use_mean_pooling', action='store_true')
parser.set_defaults(use_mean_pooling=True)
parser.add_argument('--use_cls',
action='store_false',
dest='use_mean_pooling')
# Dataset parameters
parser.add_argument('--data_path',
default='/datasets01/imagenet_full_size/061417/',
type=str,
help='dataset path')
parser.add_argument('--eval_data_path',
default=None,
type=str,
help='dataset path for evaluation')
parser.add_argument('--nb_classes',
default=400,
type=int,
help='number of classification classes')
parser.add_argument('--imagenet_default_mean_and_std',
default=True,
action='store_true')
parser.add_argument('--num_segments', type=int, default=1)
parser.add_argument('--num_frames', type=int, default=16)
parser.add_argument('--sampling_rate', type=int, default=4)
parser.add_argument('--data_set',
default='Kinetics-400',
choices=[
'Kinetics-400', 'SSV2', 'UCF101', 'HMDB51',
'Diving48', 'image_folder'
],
type=str,
help='dataset to fine-tune on')
parser.add_argument('--output_dir',
default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir',
default=None,
help='path where to tensorboard log')
parser.add_argument('--device',
default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--no_auto_resume',
action='store_false',
dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--save_ckpt', action='store_true')
parser.add_argument('--no_save_ckpt',
action='store_false',
dest='save_ckpt')
parser.set_defaults(save_ckpt=True)
parser.add_argument('--start_epoch',
default=0,
type=int,
metavar='N',
help='start epoch')
parser.add_argument('--eval',
action='store_true',
help='Perform evaluation only')
parser.add_argument('--dist_eval',
action='store_true',
default=False,
help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument(
'--pin_mem',
action='store_true',
help=
'Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.'
)
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size',
default=1,
type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url',
default='env://',
help='url used to set up distributed training')
parser.add_argument('--enable_deepspeed',
action='store_true',
default=False)
known_args, _ = parser.parse_known_args()
if known_args.enable_deepspeed:
try:
import deepspeed
from deepspeed import DeepSpeedConfig
parser = deepspeed.add_config_arguments(parser)
ds_init = deepspeed.initialize
except Exception:
print("Please 'pip install deepspeed==0.4.0'")
exit(0)
else:
ds_init = None
return parser.parse_args(), ds_init
def main(args, ds_init):
utils.init_distributed_mode(args)
if ds_init is not None:
utils.create_ds_config(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True,
test_mode=False,
args=args)
if args.disable_eval_during_finetuning:
dataset_val = None
else:
dataset_val, _ = build_dataset(is_train=False,
test_mode=False,
args=args)
dataset_test, _ = build_dataset(is_train=False, test_mode=True, args=args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train,
num_replicas=num_tasks,
rank=global_rank,
shuffle=True)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print(
'Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val,
num_replicas=num_tasks,
rank=global_rank,
shuffle=False)
sampler_test = torch.utils.data.DistributedSampler(
dataset_test,
num_replicas=num_tasks,
rank=global_rank,
shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
persistent_workers=True)
if dataset_val is not None:
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False,
persistent_workers=True)
else:
data_loader_val = None
if dataset_test is not None:
data_loader_test = torch.utils.data.DataLoader(
dataset_test,
sampler=sampler_test,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False,
persistent_workers=True)
else:
data_loader_test = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(mixup_alpha=args.mixup,
cutmix_alpha=args.cutmix,
cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob,
switch_prob=args.mixup_switch_prob,
mode=args.mixup_mode,
label_smoothing=args.smoothing,
num_classes=args.nb_classes)
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
all_frames=args.num_frames * args.num_segments,
tubelet_size=args.tubelet_size,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
attn_drop_rate=args.attn_drop_rate,
drop_block_rate=None,
use_mean_pooling=args.use_mean_pooling,
init_scale=args.init_scale,
)
patch_size = model.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
# args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1])
args.window_size = (args.num_frames // 2, args.input_size // patch_size[0],
args.input_size // patch_size[1])
args.patch_size = patch_size
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(args.finetune,
map_location='cpu',
check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load ckpt from %s" % args.finetune)
checkpoint_model = None
for model_key in args.model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[
k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
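# Strip 'backbone.' / 'encoder.' prefixes so pre-training checkpoints map onto the fine-tuning model's parameter names.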
all_keys = list(checkpoint_model.keys())
new_dict = OrderedDict()
for key in all_keys:
if key.startswith('backbone.'):
new_dict[key[9:]] = checkpoint_model[key]
elif key.startswith('encoder.'):
new_dict[key[8:]] = checkpoint_model[key]
else:
new_dict[key] = checkpoint_model[key]
checkpoint_model = new_dict
# interpolate position embedding
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1] # channel dim
num_patches = model.patch_embed.num_patches #
num_extra_tokens = model.pos_embed.shape[-2] - num_patches # 0/1
# height (== width) for the checkpoint position embedding
orig_size = int(
((pos_embed_checkpoint.shape[-2] - num_extra_tokens) //
(args.num_frames // model.patch_embed.tubelet_size))**0.5)
# height (== width) for the new position embedding
new_size = int(
(num_patches //
(args.num_frames // model.patch_embed.tubelet_size))**0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" %
(orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
# B, L, C -> BT, H, W, C -> BT, C, H, W
pos_tokens = pos_tokens.reshape(
-1, args.num_frames // model.patch_embed.tubelet_size,
orig_size, orig_size, embedding_size)
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size,
embedding_size).permute(
0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_size, new_size),
mode='bicubic',
align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(
-1, args.num_frames // model.patch_embed.tubelet_size,
new_size, new_size, embedding_size)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
utils.load_state_dict(model,
checkpoint_model,
prefix=args.model_prefix)
# model.load_state_dict(checkpoint_model, strict=False)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters()
if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * args.update_freq * utils.get_world_size(
)
num_training_steps_per_epoch = len(dataset_train) // total_batch_size
args.lr = args.lr * total_batch_size / 256
#########scale the lr#############
args.min_lr = args.min_lr * total_batch_size / 256
args.warmup_lr = args.warmup_lr * total_batch_size / 256
#########scale the lr#############
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequent = %d" % args.update_freq)
print("Number of training examples = %d" % len(dataset_train))
print("Number of training training per epoch = %d" %
num_training_steps_per_epoch)
num_layers = model_without_ddp.get_num_layers()
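# Layer-wise LR decay: parameters of block i get an LR scale of layer_decay ** (num_layers + 1 - i), so earlier blocks are updated with smaller learning rates.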
if args.layer_decay < 1.0:
assigner = LayerDecayValueAssigner(
list(args.layer_decay**(num_layers + 1 - i)
for i in range(num_layers + 2)))
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
skip_weight_decay_list = model.no_weight_decay()
print("Skip weight decay list: ", skip_weight_decay_list)
if args.enable_deepspeed:
loss_scaler = None
optimizer_params = get_parameter_groups(
model, args.weight_decay, skip_weight_decay_list,
assigner.get_layer_id if assigner is not None else None,
assigner.get_scale if assigner is not None else None)
model, optimizer, _, _ = ds_init(
args=args,
model=model,
model_parameters=optimizer_params,
dist_init_required=not args.distributed,
)
print("model.gradient_accumulation_steps() = %d" %
model.gradient_accumulation_steps())
assert model.gradient_accumulation_steps() == args.update_freq
else:
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
optimizer = create_optimizer(args,
model_without_ddp,
skip_list=skip_weight_decay_list,
get_num_layer=assigner.get_layer_id
if assigner is not None else None,
get_layer_scale=assigner.get_scale
if assigner is not None else None)
loss_scaler = NativeScaler()
print("Use step level LR scheduler!")
lr_schedule_values = utils.cosine_scheduler(
args.lr,
args.min_lr,
args.epochs,
num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs,
warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(args.weight_decay,
args.weight_decay_end,
args.epochs,
num_training_steps_per_epoch)
print("Max WD = %.7f, Min WD = %.7f" %
(max(wd_schedule_values), min(wd_schedule_values)))
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
utils.auto_load_model(args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler,
model_ema=model_ema)
if args.eval:
preds_file = os.path.join(args.output_dir, str(global_rank) + '.txt')
test_stats = final_test(data_loader_test, model, device, preds_file)
torch.distributed.barrier()
if global_rank == 0:
print("Start merging results...")
final_top1, final_top5 = merge(args.output_dir, num_tasks)
print(
f"Accuracy of the network on the {len(dataset_test)} test videos: Top-1: {final_top1:.2f}%, Top-5: {final_top5:.2f}%"
)
            log_stats = {'Final top-1': final_top1, 'Final Top-5': final_top5}
if args.output_dir and utils.is_main_process():
with open(os.path.join(args.output_dir, "log.txt"),
mode="a",
encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
exit(0)
# test_stats = validation_one_epoch(data_loader_test, model, device)
# print(f"Accuracy of the network on the {len(dataset_test)} test videos: {test_stats['acc1']:.1f}%")
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch *
args.update_freq)
train_stats = train_one_epoch(
model,
criterion,
data_loader_train,
optimizer,
device,
epoch,
loss_scaler,
args.clip_grad,
model_ema,
mixup_fn,
log_writer=log_writer,
start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values,
wd_schedule_values=wd_schedule_values,
num_training_steps_per_epoch=num_training_steps_per_epoch,
update_freq=args.update_freq,
)
if args.output_dir and args.save_ckpt:
if (epoch +
1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler,
epoch=epoch,
model_ema=model_ema)
if data_loader_val is not None:
test_stats = validation_one_epoch(data_loader_val, model, device)
print(
f"Accuracy of the network on the {len(dataset_val)} val images: {test_stats['acc1']:.1f}%"
)
if max_accuracy < test_stats["acc1"]:
max_accuracy = test_stats["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler,
epoch="best",
model_ema=model_ema)
print(f'Max accuracy: {max_accuracy:.2f}%')
if log_writer is not None:
log_writer.update(val_acc1=test_stats['acc1'],
head="perf",
step=epoch)
log_writer.update(val_acc5=test_stats['acc5'],
head="perf",
step=epoch)
log_writer.update(val_loss=test_stats['loss'],
head="perf",
step=epoch)
log_stats = {
**{f'train_{k}': v
for k, v in train_stats.items()},
**{f'val_{k}': v
for k, v in test_stats.items()}, 'epoch': epoch,
'n_parameters': n_parameters
}
else:
log_stats = {
**{f'train_{k}': v
for k, v in train_stats.items()},
# **{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters
}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"),
mode="a",
encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
preds_file = os.path.join(args.output_dir, str(global_rank) + '.txt')
test_stats = final_test(data_loader_test, model, device, preds_file)
torch.distributed.barrier()
if global_rank == 0:
print("Start merging results...")
final_top1, final_top5 = merge(args.output_dir, num_tasks)
print(
f"Accuracy of the network on the {len(dataset_test)} test videos: Top-1: {final_top1:.2f}%, Top-5: {final_top5:.2f}%"
)
log_stats = {'Final top-1': final_top1, 'Final Top-5': final_top5}
if args.output_dir and utils.is_main_process():
with open(os.path.join(args.output_dir, "log.txt"),
mode="a",
encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
opts, ds_init = get_args()
if opts.output_dir:
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
main(opts, ds_init)
| InternVideo-main | Pretrain/VideoMAE/run_class_linear.py |
import os

import numpy as np
from scipy.special import softmax
def merge(eval_paths, num_tasks, use_softmax=False):
dict_feats = {}
dict_label = {}
print("Reading individual output files")
if not isinstance(eval_paths, list):
eval_paths = [eval_paths]
for eval_path in eval_paths:
dict_pos = {}
for x in range(num_tasks):
file = os.path.join(eval_path, str(x) + '.txt')
lines = open(file, 'r').readlines()[1:]
for line in lines:
line = line.strip()
name = line.split('[')[0]
label = line.split(']')[1].split(' ')[1]
chunk_nb = line.split(']')[1].split(' ')[2]
split_nb = line.split(']')[1].split(' ')[3]
data = np.fromstring(line.split('[')[1].split(']')[0],
dtype=np.float64,
sep=',')
if not name in dict_feats:
dict_feats[name] = []
dict_label[name] = 0
if not name in dict_pos:
dict_pos[name] = []
if chunk_nb + split_nb in dict_pos[name]:
continue
if use_softmax:
dict_feats[name].append(softmax(data))
else:
dict_feats[name].append(data)
dict_pos[name].append(chunk_nb + split_nb)
dict_label[name] = label
print("Computing final results")
input_lst = []
print(len(dict_feats))
for i, item in enumerate(dict_feats):
input_lst.append([i, item, dict_feats[item], dict_label[item]])
from multiprocessing import Pool
p = Pool(64)
ans = p.map(compute_video, input_lst)
top1 = [x[1] for x in ans]
top5 = [x[2] for x in ans]
pred = [x[0] for x in ans]
label = [x[3] for x in ans]
final_top1, final_top5 = np.mean(top1), np.mean(top5)
return final_top1 * 100, final_top5 * 100
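
# Editorial note: each per-rank prediction file read by merge() is assumed to
# contain one header line followed by lines of the form
#   <video_name>[<score_0>, <score_1>, ...] <label> <chunk_nb> <split_nb>
# This is inferred from the '[' / ']' splitting above, not an official format
# specification of this repository.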
def compute_video(lst):
i, video_id, data, label = lst
feat = [x for x in data]
feat = np.mean(feat, axis=0)
# print(feat.shape)
try:
pred = np.argmax(feat)
top1 = (int(pred) == int(label)) * 1.0
top5 = (int(label) in np.argsort(-feat)[:5]) * 1.0
except:
pred = 0
top1 = 1.0
top5 = 1.0
label = 0
return [pred, top1, top5, int(label)]
if __name__ == '__main__':
eval_path1 = '/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/full_sta_web_k400_finetune'
eval_path2 = '/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/pred_txts'
num_tasks = 32
eval_paths = [eval_path1, eval_path2]
final_top1, final_top5 = merge(eval_paths, num_tasks, use_softmax=True)
print(
f"Accuracy of the network on the test videos: Top-1: {final_top1:.2f}%, Top-5: {final_top5:.2f}%"
)
| InternVideo-main | Pretrain/VideoMAE/ensemble.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/random_erasing.py
published under an Apache License 2.0.
COMMENT FROM ORIGINAL:
Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
Copyright Zhun Zhong & Liang Zheng
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import random
import torch
def _get_pixels(
per_pixel, rand_color, patch_size, dtype=torch.float32, device="cuda"
):
# NOTE I've seen CUDA illegal memory access errors being caused by the normal_()
# paths, flip the order so normal is run on CPU if this becomes a problem
# Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508
if per_pixel:
return torch.empty(patch_size, dtype=dtype, device=device).normal_()
elif rand_color:
return torch.empty(
(patch_size[0], 1, 1), dtype=dtype, device=device
).normal_()
else:
return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device)
class RandomErasing:
"""Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
This variant of RandomErasing is intended to be applied to either a batch
or single image tensor after it has been normalized by dataset mean and std.
Args:
probability: Probability that the Random Erasing operation will be performed.
min_area: Minimum percentage of erased area wrt input image area.
max_area: Maximum percentage of erased area wrt input image area.
min_aspect: Minimum aspect ratio of erased area.
mode: pixel color mode, one of 'const', 'rand', or 'pixel'
'const' - erase block is constant color of 0 for all channels
'rand' - erase block is same per-channel random (normal) color
'pixel' - erase block is per-pixel random (normal) color
max_count: maximum number of erasing blocks per image, area per box is scaled by count.
per-image count is randomly chosen between 1 and this value.
"""
def __init__(
self,
probability=0.5,
min_area=0.02,
max_area=1 / 3,
min_aspect=0.3,
max_aspect=None,
mode="const",
min_count=1,
max_count=None,
num_splits=0,
device="cuda",
cube=True,
):
self.probability = probability
self.min_area = min_area
self.max_area = max_area
max_aspect = max_aspect or 1 / min_aspect
self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
self.min_count = min_count
self.max_count = max_count or min_count
self.num_splits = num_splits
mode = mode.lower()
self.rand_color = False
self.per_pixel = False
self.cube = cube
if mode == "rand":
self.rand_color = True # per block random normal
elif mode == "pixel":
self.per_pixel = True # per pixel random normal
else:
assert not mode or mode == "const"
self.device = device
def _erase(self, img, chan, img_h, img_w, dtype):
if random.random() > self.probability:
return
area = img_h * img_w
count = (
self.min_count
if self.min_count == self.max_count
else random.randint(self.min_count, self.max_count)
)
for _ in range(count):
for _ in range(10):
target_area = (
random.uniform(self.min_area, self.max_area) * area / count
)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img_w and h < img_h:
top = random.randint(0, img_h - h)
left = random.randint(0, img_w - w)
img[:, top : top + h, left : left + w] = _get_pixels(
self.per_pixel,
self.rand_color,
(chan, h, w),
dtype=dtype,
device=self.device,
)
break
def _erase_cube(
self,
img,
batch_start,
batch_size,
chan,
img_h,
img_w,
dtype,
):
if random.random() > self.probability:
return
area = img_h * img_w
count = (
self.min_count
if self.min_count == self.max_count
else random.randint(self.min_count, self.max_count)
)
for _ in range(count):
for _ in range(100):
target_area = (
random.uniform(self.min_area, self.max_area) * area / count
)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img_w and h < img_h:
top = random.randint(0, img_h - h)
left = random.randint(0, img_w - w)
for i in range(batch_start, batch_size):
img_instance = img[i]
img_instance[
:, top : top + h, left : left + w
] = _get_pixels(
self.per_pixel,
self.rand_color,
(chan, h, w),
dtype=dtype,
device=self.device,
)
break
def __call__(self, input):
if len(input.size()) == 3:
self._erase(input, *input.size(), input.dtype)
else:
batch_size, chan, img_h, img_w = input.size()
# skip first slice of batch if num_splits is set (for clean portion of samples)
batch_start = (
batch_size // self.num_splits if self.num_splits > 1 else 0
)
if self.cube:
self._erase_cube(
input,
batch_start,
batch_size,
chan,
img_h,
img_w,
input.dtype,
)
else:
for i in range(batch_start, batch_size):
self._erase(input[i], chan, img_h, img_w, input.dtype)
return input
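

# Editorial sketch (not part of the original file): minimal usage of
# RandomErasing on an already-normalized batch of frames. The shapes, the
# 'pixel' mode and cube=False below are illustrative assumptions, not values
# mandated by this repository.
def _demo_random_erasing():
    # Fake batch of 4 normalized RGB frames: (N, C, H, W).
    frames = torch.randn(4, 3, 224, 224)
    eraser = RandomErasing(
        probability=0.25,
        mode="pixel",   # per-pixel random normal fill
        max_count=1,
        num_splits=0,
        device="cpu",
        cube=False,     # erase each frame independently
    )
    erased = eraser(frames)  # erasing happens in place; the tensor is returned
    return erased.shape      # torch.Size([4, 3, 224, 224])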
| InternVideo-main | Pretrain/VideoMAE/random_erasing.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import numbers
# import cv2
import random
import numpy as np
import PIL
import torch
import torchvision
import torchvision.transforms.functional as F
from PIL import Image
from torchvision import transforms
import functional as FF
from rand_augment import rand_augment_transform
from random_erasing import RandomErasing
_pil_interpolation_to_str = {
Image.NEAREST: "PIL.Image.NEAREST",
Image.BILINEAR: "PIL.Image.BILINEAR",
Image.BICUBIC: "PIL.Image.BICUBIC",
Image.LANCZOS: "PIL.Image.LANCZOS",
Image.HAMMING: "PIL.Image.HAMMING",
Image.BOX: "PIL.Image.BOX",
}
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
def _pil_interp(method):
if method == "bicubic":
return Image.BICUBIC
elif method == "lanczos":
return Image.LANCZOS
elif method == "hamming":
return Image.HAMMING
else:
return Image.BILINEAR
def random_short_side_scale_jitter(
images, min_size, max_size, boxes=None, inverse_uniform_sampling=False
):
"""
Perform a spatial short scale jittering on the given images and
corresponding boxes.
Args:
images (tensor): images to perform scale jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
min_size (int): the minimal size to scale the frames.
max_size (int): the maximal size to scale the frames.
boxes (ndarray): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
inverse_uniform_sampling (bool): if True, sample uniformly in
[1 / max_scale, 1 / min_scale] and take a reciprocal to get the
scale. If False, take a uniform sample from [min_scale, max_scale].
Returns:
(tensor): the scaled images with dimension of
`num frames` x `channel` x `new height` x `new width`.
(ndarray or None): the scaled boxes with dimension of
`num boxes` x 4.
"""
if inverse_uniform_sampling:
size = int(
round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))
)
else:
size = int(round(np.random.uniform(min_size, max_size)))
height = images.shape[2]
width = images.shape[3]
if (width <= height and width == size) or (
height <= width and height == size
):
return images, boxes
new_width = size
new_height = size
if width < height:
new_height = int(math.floor((float(height) / width) * size))
if boxes is not None:
boxes = boxes * float(new_height) / height
else:
new_width = int(math.floor((float(width) / height) * size))
if boxes is not None:
boxes = boxes * float(new_width) / width
return (
torch.nn.functional.interpolate(
images,
size=(new_height, new_width),
mode="bilinear",
align_corners=False,
),
boxes,
)
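
# Editorial sketch (not in the original file): illustrative call of the scale
# jitter above; the clip shape (T, C, H, W) and the [256, 320] bounds are
# assumptions chosen only for demonstration.
def _demo_short_side_jitter():
    clip = torch.randn(16, 3, 240, 320)  # T x C x H x W
    scaled, _ = random_short_side_scale_jitter(clip, min_size=256, max_size=320)
    # The short side now lies in [256, 320]; the aspect ratio is preserved.
    return scaled.shape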
def crop_boxes(boxes, x_offset, y_offset):
"""
    Perform crop on the bounding boxes given the offsets.
    Args:
        boxes (ndarray or None): bounding boxes to perform crop. The dimension
is `num boxes` x 4.
x_offset (int): cropping offset in the x axis.
y_offset (int): cropping offset in the y axis.
Returns:
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
cropped_boxes = boxes.copy()
cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
return cropped_boxes
def random_crop(images, size, boxes=None):
"""
Perform random spatial crop on the given images and corresponding boxes.
Args:
images (tensor): images to perform random crop. The dimension is
`num frames` x `channel` x `height` x `width`.
size (int): the size of height and width to crop on the image.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
cropped (tensor): cropped images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
if images.shape[2] == size and images.shape[3] == size:
return images
height = images.shape[2]
width = images.shape[3]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
return cropped, cropped_boxes
def horizontal_flip(prob, images, boxes=None):
"""
Perform horizontal flip on the given images and corresponding boxes.
Args:
        prob (float): probability to flip the images.
images (tensor): images to perform horizontal flip, the dimension is
`num frames` x `channel` x `height` x `width`.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
images (tensor): images with dimension of
`num frames` x `channel` x `height` x `width`.
flipped_boxes (ndarray or None): the flipped boxes with dimension of
`num boxes` x 4.
"""
if boxes is None:
flipped_boxes = None
else:
flipped_boxes = boxes.copy()
if np.random.uniform() < prob:
images = images.flip((-1))
if len(images.shape) == 3:
width = images.shape[2]
elif len(images.shape) == 4:
width = images.shape[3]
else:
raise NotImplementedError("Dimension does not supported")
if boxes is not None:
flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1
return images, flipped_boxes
def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
"""
Perform uniform spatial sampling on the images and corresponding boxes.
Args:
images (tensor): images to perform uniform crop. The dimension is
`num frames` x `channel` x `height` x `width`.
size (int): size of height and weight to crop the images.
spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
is larger than height. Or 0, 1, or 2 for top, center, and bottom
crop if height is larger than width.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
        scale_size (int): optional. If not None, resize the images to scale_size before
performing any crop.
Returns:
cropped (tensor): images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
assert spatial_idx in [0, 1, 2]
ndim = len(images.shape)
if ndim == 3:
images = images.unsqueeze(0)
height = images.shape[2]
width = images.shape[3]
if scale_size is not None:
if width <= height:
width, height = scale_size, int(height / width * scale_size)
else:
width, height = int(width / height * scale_size), scale_size
images = torch.nn.functional.interpolate(
images,
size=(height, width),
mode="bilinear",
align_corners=False,
)
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
if ndim == 3:
cropped = cropped.squeeze(0)
return cropped, cropped_boxes
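
# Editorial sketch (not in the original file): the three-crop evaluation
# pattern supported by uniform_crop -- spatial_idx 0/1/2 select the
# left/center/right (or top/center/bottom) crop. Shapes are illustrative.
def _demo_uniform_three_crops():
    clip = torch.randn(16, 3, 256, 320)  # T x C x H x W
    crops = [uniform_crop(clip, 224, idx)[0] for idx in (0, 1, 2)]
    return [c.shape for c in crops]      # each crop is (16, 3, 224, 224)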
def clip_boxes_to_image(boxes, height, width):
"""
Clip an array of boxes to an image with the given height and width.
Args:
boxes (ndarray): bounding boxes to perform clipping.
Dimension is `num boxes` x 4.
height (int): given image height.
width (int): given image width.
Returns:
clipped_boxes (ndarray): the clipped boxes with dimension of
`num boxes` x 4.
"""
clipped_boxes = boxes.copy()
clipped_boxes[:, [0, 2]] = np.minimum(
width - 1.0, np.maximum(0.0, boxes[:, [0, 2]])
)
clipped_boxes[:, [1, 3]] = np.minimum(
height - 1.0, np.maximum(0.0, boxes[:, [1, 3]])
)
return clipped_boxes
def blend(images1, images2, alpha):
"""
Blend two images with a given weight alpha.
Args:
images1 (tensor): the first images to be blended, the dimension is
`num frames` x `channel` x `height` x `width`.
images2 (tensor): the second images to be blended, the dimension is
`num frames` x `channel` x `height` x `width`.
alpha (float): the blending weight.
Returns:
(tensor): blended images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
return images1 * alpha + images2 * (1 - alpha)
def grayscale(images):
"""
Get the grayscale for the input images. The channels of images should be
in order BGR.
Args:
images (tensor): the input images for getting grayscale. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
img_gray (tensor): blended images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
# R -> 0.299, G -> 0.587, B -> 0.114.
    img_gray = images.clone()
gray_channel = (
0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0]
)
img_gray[:, 0] = gray_channel
img_gray[:, 1] = gray_channel
img_gray[:, 2] = gray_channel
return img_gray
def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0):
"""
    Perform a color jittering on the input images. The channels of images
should be in order BGR.
Args:
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
img_brightness (float): jitter ratio for brightness.
img_contrast (float): jitter ratio for contrast.
img_saturation (float): jitter ratio for saturation.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
jitter = []
if img_brightness != 0:
jitter.append("brightness")
if img_contrast != 0:
jitter.append("contrast")
if img_saturation != 0:
jitter.append("saturation")
if len(jitter) > 0:
order = np.random.permutation(np.arange(len(jitter)))
for idx in range(0, len(jitter)):
if jitter[order[idx]] == "brightness":
images = brightness_jitter(img_brightness, images)
elif jitter[order[idx]] == "contrast":
images = contrast_jitter(img_contrast, images)
elif jitter[order[idx]] == "saturation":
images = saturation_jitter(img_saturation, images)
return images
def brightness_jitter(var, images):
"""
    Perform brightness jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for brightness.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_bright = torch.zeros(images.shape)
images = blend(images, img_bright, alpha)
return images
def contrast_jitter(var, images):
"""
    Perform contrast jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for contrast.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_gray = grayscale(images)
img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True)
images = blend(images, img_gray, alpha)
return images
def saturation_jitter(var, images):
"""
    Perform saturation jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for saturation.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_gray = grayscale(images)
images = blend(images, img_gray, alpha)
return images
def lighting_jitter(images, alphastd, eigval, eigvec):
"""
Perform AlexNet-style PCA jitter on the given images.
Args:
images (tensor): images to perform lighting jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
alphastd (float): jitter ratio for PCA jitter.
eigval (list): eigenvalues for PCA jitter.
eigvec (list[list]): eigenvectors for PCA jitter.
Returns:
out_images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
if alphastd == 0:
return images
# generate alpha1, alpha2, alpha3.
alpha = np.random.normal(0, alphastd, size=(1, 3))
eig_vec = np.array(eigvec)
eig_val = np.reshape(eigval, (1, 3))
rgb = np.sum(
eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0),
axis=1,
)
out_images = torch.zeros_like(images)
if len(images.shape) == 3:
# C H W
channel_dim = 0
elif len(images.shape) == 4:
# T C H W
channel_dim = 1
else:
raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")
for idx in range(images.shape[channel_dim]):
# C H W
if len(images.shape) == 3:
out_images[idx] = images[idx] + rgb[2 - idx]
# T C H W
elif len(images.shape) == 4:
out_images[:, idx] = images[:, idx] + rgb[2 - idx]
else:
raise NotImplementedError(
f"Unsupported dimension {len(images.shape)}"
)
return out_images
def color_normalization(images, mean, stddev):
"""
    Perform color normalization on the given images.
Args:
images (tensor): images to perform color normalization. Dimension is
`num frames` x `channel` x `height` x `width`.
mean (list): mean values for normalization.
stddev (list): standard deviations for normalization.
Returns:
        out_images (tensor): the normalized images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
if len(images.shape) == 3:
assert (
len(mean) == images.shape[0]
), "channel mean not computed properly"
assert (
len(stddev) == images.shape[0]
), "channel stddev not computed properly"
elif len(images.shape) == 4:
assert (
len(mean) == images.shape[1]
), "channel mean not computed properly"
assert (
len(stddev) == images.shape[1]
), "channel stddev not computed properly"
else:
raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")
out_images = torch.zeros_like(images)
for idx in range(len(mean)):
# C H W
if len(images.shape) == 3:
out_images[idx] = (images[idx] - mean[idx]) / stddev[idx]
elif len(images.shape) == 4:
out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx]
else:
raise NotImplementedError(
f"Unsupported dimension {len(images.shape)}"
)
return out_images
def _get_param_spatial_crop(
scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False
):
"""
Given scale, ratio, height and width, return sampled coordinates of the videos.
"""
for _ in range(num_repeat):
area = height * width
target_area = random.uniform(*scale) * area
if log_scale:
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
else:
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.uniform() < 0.5 and switch_hw:
w, h = h, w
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def random_resized_crop(
images,
target_height,
target_width,
scale=(0.8, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
):
"""
Crop the given images to random size and aspect ratio. A crop of random
size (default: of 0.08 to 1.0) of the original size and a random aspect
ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This
crop is finally resized to given size. This is popularly used to train the
Inception networks.
Args:
images: Images to perform resizing and cropping.
target_height: Desired height after cropping.
target_width: Desired width after cropping.
scale: Scale range of Inception-style area based random resizing.
ratio: Aspect ratio range of Inception-style area based random resizing.
"""
height = images.shape[2]
width = images.shape[3]
i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)
cropped = images[:, :, i : i + h, j : j + w]
return torch.nn.functional.interpolate(
cropped,
size=(target_height, target_width),
mode="bilinear",
align_corners=False,
)
def random_resized_crop_with_shift(
images,
target_height,
target_width,
scale=(0.8, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
):
"""
This is similar to random_resized_crop. However, it samples two different
boxes (for cropping) for the first and last frame. It then linearly
interpolates the two boxes for other frames.
Args:
images: Images to perform resizing and cropping.
target_height: Desired height after cropping.
target_width: Desired width after cropping.
scale: Scale range of Inception-style area based random resizing.
ratio: Aspect ratio range of Inception-style area based random resizing.
"""
t = images.shape[1]
height = images.shape[2]
width = images.shape[3]
i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)
i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width)
i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()]
j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()]
h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()]
w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()]
out = torch.zeros((3, t, target_height, target_width))
for ind in range(t):
out[:, ind : ind + 1, :, :] = torch.nn.functional.interpolate(
images[
:,
ind : ind + 1,
i_s[ind] : i_s[ind] + h_s[ind],
j_s[ind] : j_s[ind] + w_s[ind],
],
size=(target_height, target_width),
mode="bilinear",
align_corners=False,
)
return out
def create_random_augment(
input_size,
auto_augment=None,
interpolation="bilinear",
):
"""
Get video randaug transform.
Args:
input_size: The size of the input video in tuple.
auto_augment: Parameters for randaug. An example:
"rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number
of operations to apply).
interpolation: Interpolation method.
"""
if isinstance(input_size, tuple):
img_size = input_size[-2:]
else:
img_size = input_size
if auto_augment:
assert isinstance(auto_augment, str)
if isinstance(img_size, tuple):
img_size_min = min(img_size)
else:
img_size_min = img_size
aa_params = {"translate_const": int(img_size_min * 0.45)}
if interpolation and interpolation != "random":
aa_params["interpolation"] = _pil_interp(interpolation)
if auto_augment.startswith("rand"):
return transforms.Compose(
[rand_augment_transform(auto_augment, aa_params)]
)
raise NotImplementedError
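
# Editorial note: in this repository the Compose returned above is applied to a
# whole clip at once -- see ANetClsDataset._aug_frame later in this dump, which
# converts each frame to PIL and then calls `aug_transform(buffer)` on the list
# of frames -- so the rand_augment transform is expected to accept lists of
# PIL images rather than a single image.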
def random_sized_crop_img(
im,
size,
jitter_scale=(0.08, 1.0),
jitter_aspect=(3.0 / 4.0, 4.0 / 3.0),
max_iter=10,
):
"""
Performs Inception-style cropping (used for training).
"""
assert (
len(im.shape) == 3
), "Currently only support image for random_sized_crop"
h, w = im.shape[1:3]
i, j, h, w = _get_param_spatial_crop(
scale=jitter_scale,
ratio=jitter_aspect,
height=h,
width=w,
num_repeat=max_iter,
log_scale=False,
switch_hw=True,
)
cropped = im[:, i : i + h, j : j + w]
return torch.nn.functional.interpolate(
cropped.unsqueeze(0),
size=(size, size),
mode="bilinear",
align_corners=False,
).squeeze(0)
# The following code are modified based on timm lib, we will replace the following
# contents with dependency from PyTorchVideo.
# https://github.com/facebookresearch/pytorchvideo
class RandomResizedCropAndInterpolation:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(
self,
size,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation="bilinear",
):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
print("range should be of kind (min, max)")
if interpolation == "random":
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = _pil_interp(interpolation)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for _ in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = " ".join(
[_pil_interpolation_to_str[x] for x in self.interpolation]
)
else:
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + "(size={0}".format(self.size)
format_string += ", scale={0}".format(
tuple(round(s, 4) for s in self.scale)
)
format_string += ", ratio={0}".format(
tuple(round(r, 4) for r in self.ratio)
)
format_string += ", interpolation={0})".format(interpolate_str)
return format_string
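
# Editorial sketch (not in the original file): single-image usage of
# RandomResizedCropAndInterpolation. The input size and 'random' interpolation
# are illustrative; passing PIL integer interpolation constants to
# F.resized_crop assumes a torchvision version that still accepts them.
def _demo_random_resized_crop():
    img = Image.new("RGB", (320, 240))  # PIL image, size given as (width, height)
    tfm = RandomResizedCropAndInterpolation(224, interpolation="random")
    out = tfm(img)
    return out.size                     # (224, 224)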
def transforms_imagenet_train(
img_size=224,
scale=None,
ratio=None,
hflip=0.5,
vflip=0.0,
color_jitter=0.4,
auto_augment=None,
interpolation="random",
use_prefetcher=False,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
re_prob=0.0,
re_mode="const",
re_count=1,
re_num_splits=0,
separate=False,
):
"""
If separate==True, the transforms are returned as a tuple of 3 separate transforms
for use in a mixing dataset that passes
* all data through the first (primary) transform, called the 'clean' data
* a portion of the data through the secondary transform
* normalizes and converts the branches above with the third, final transform
"""
if isinstance(img_size, tuple):
img_size = img_size[-2:]
else:
img_size = img_size
scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range
ratio = tuple(
ratio or (3.0 / 4.0, 4.0 / 3.0)
) # default imagenet ratio range
primary_tfl = [
RandomResizedCropAndInterpolation(
img_size, scale=scale, ratio=ratio, interpolation=interpolation
)
]
if hflip > 0.0:
primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)]
if vflip > 0.0:
primary_tfl += [transforms.RandomVerticalFlip(p=vflip)]
secondary_tfl = []
if auto_augment:
assert isinstance(auto_augment, str)
if isinstance(img_size, tuple):
img_size_min = min(img_size)
else:
img_size_min = img_size
aa_params = dict(
translate_const=int(img_size_min * 0.45),
img_mean=tuple([min(255, round(255 * x)) for x in mean]),
)
if interpolation and interpolation != "random":
aa_params["interpolation"] = _pil_interp(interpolation)
if auto_augment.startswith("rand"):
secondary_tfl += [rand_augment_transform(auto_augment, aa_params)]
elif auto_augment.startswith("augmix"):
raise NotImplementedError("Augmix not implemented")
else:
raise NotImplementedError("Auto aug not implemented")
elif color_jitter is not None:
# color jitter is enabled when not using AA
if isinstance(color_jitter, (list, tuple)):
# color jitter should be a 3-tuple/list if spec brightness/contrast/saturation
# or 4 if also augmenting hue
assert len(color_jitter) in (3, 4)
else:
# if it's a scalar, duplicate for brightness, contrast, and saturation, no hue
color_jitter = (float(color_jitter),) * 3
secondary_tfl += [transforms.ColorJitter(*color_jitter)]
final_tfl = []
final_tfl += [
transforms.ToTensor(),
transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
]
if re_prob > 0.0:
final_tfl.append(
RandomErasing(
re_prob,
mode=re_mode,
max_count=re_count,
num_splits=re_num_splits,
device="cpu",
cube=False,
)
)
if separate:
return (
transforms.Compose(primary_tfl),
transforms.Compose(secondary_tfl),
transforms.Compose(final_tfl),
)
else:
return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)
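
# Editorial note: with separate=True the function above returns a tuple of
# three Compose objects -- (crop/flip, rand-augment or color jitter,
# ToTensor + Normalize (+ RandomErasing)) -- so a mixing dataset can route all
# samples through the first stage but only a portion through the second; with
# separate=False a single fused Compose is returned.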
############################################################################################################
############################################################################################################
class Compose(object):
"""Composes several transforms
Args:
transforms (list of ``Transform`` objects): list of transforms
to compose
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, clip):
for t in self.transforms:
clip = t(clip)
return clip
class RandomHorizontalFlip(object):
"""Horizontally flip the list of given images randomly
with a probability 0.5
"""
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Randomly flipped clip
"""
if random.random() < 0.5:
if isinstance(clip[0], np.ndarray):
return [np.fliplr(img) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [
img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
' but got list of {0}'.format(type(clip[0])))
return clip
class RandomResize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
The larger the original image is, the more times it takes to
interpolate
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
size (tuple): (widht, height)
"""
def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
self.ratio = ratio
self.interpolation = interpolation
def __call__(self, clip):
scaling_factor = random.uniform(self.ratio[0], self.ratio[1])
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
new_w = int(im_w * scaling_factor)
new_h = int(im_h * scaling_factor)
new_size = (new_w, new_h)
resized = FF.resize_clip(
clip, new_size, interpolation=self.interpolation)
return resized
class Resize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
The larger the original image is, the more times it takes to
interpolate
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
size (tuple): (widht, height)
"""
def __init__(self, size, interpolation='nearest'):
self.size = size
self.interpolation = interpolation
def __call__(self, clip):
resized = FF.resize_clip(
clip, self.size, interpolation=self.interpolation)
return resized
class RandomCrop(object):
"""Extract random crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
                'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = random.randint(0, im_w - w)
y1 = random.randint(0, im_h - h)
cropped = FF.crop_clip(clip, y1, x1, h, w)
return cropped
class ThreeCrop(object):
"""Extract random crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if w != im_w and h != im_h:
clip = FF.resize_clip(clip, self.size, interpolation="bilinear")
im_h, im_w, im_c = clip[0].shape
        step = max((np.max((im_w, im_h)) - self.size[0]) // 2, 0)
cropped = []
for i in range(3):
if (im_h > self.size[0]):
x1 = 0
y1 = i * step
cropped.extend(FF.crop_clip(clip, y1, x1, h, w))
else:
x1 = i * step
y1 = 0
cropped.extend(FF.crop_clip(clip, y1, x1, h, w))
return cropped
class RandomRotation(object):
"""Rotate entire clip randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
If degrees is a number instead of sequence like (min, max),
the range of degrees, will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number,'
'must be positive')
degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence,'
'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
import skimage
angle = random.uniform(self.degrees[0], self.degrees[1])
if isinstance(clip[0], np.ndarray):
rotated = [skimage.transform.rotate(img, angle) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angle) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return rotated
class CenterCrop(object):
"""Extract center crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
                'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = int(round((im_w - w) / 2.))
y1 = int(round((im_h - h) / 2.))
cropped = FF.crop_clip(clip, y1, x1, h, w)
return cropped
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation and hue of the clip
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def get_params(self, brightness, contrast, saturation, hue):
if brightness > 0:
brightness_factor = random.uniform(
max(0, 1 - brightness), 1 + brightness)
else:
brightness_factor = None
if contrast > 0:
contrast_factor = random.uniform(
max(0, 1 - contrast), 1 + contrast)
else:
contrast_factor = None
if saturation > 0:
saturation_factor = random.uniform(
max(0, 1 - saturation), 1 + saturation)
else:
saturation_factor = None
if hue > 0:
hue_factor = random.uniform(-hue, hue)
else:
hue_factor = None
return brightness_factor, contrast_factor, saturation_factor, hue_factor
def __call__(self, clip):
"""
Args:
clip (list): list of PIL.Image
Returns:
list PIL.Image : list of transformed PIL.Image
"""
if isinstance(clip[0], np.ndarray):
raise TypeError(
'Color jitter not yet implemented for numpy arrays')
elif isinstance(clip[0], PIL.Image.Image):
brightness, contrast, saturation, hue = self.get_params(
self.brightness, self.contrast, self.saturation, self.hue)
# Create img transform function sequence
img_transforms = []
if brightness is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
if saturation is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
if hue is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
if contrast is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
random.shuffle(img_transforms)
# Apply to all images
jittered_clip = []
            for img in clip:
                jittered_img = img
                for func in img_transforms:
                    jittered_img = func(jittered_img)
                jittered_clip.append(jittered_img)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return jittered_clip
class Normalize(object):
"""Normalize a clip with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
        This transform acts out of place, i.e., it does not mutate the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, clip):
"""
Args:
clip (Tensor): Tensor clip of size (T, C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor clip.
"""
return FF.normalize(clip, self.mean, self.std)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
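

# Editorial sketch (not part of the original file): composing the clip-level
# transforms defined above on a list of PIL frames, mirroring the validation
# pipeline used by the dataset code (minus tensor conversion). The frame and
# crop sizes are illustrative, and short-side resizing for an int `size` is an
# assumption about FF.resize_clip.
def _demo_clip_pipeline():
    clip = [Image.new("RGB", (320, 240)) for _ in range(8)]  # list of PIL frames
    pipeline = Compose([
        Resize(256, interpolation="bilinear"),  # resize short side to 256
        CenterCrop(224),
        RandomHorizontalFlip(),
    ])
    out = pipeline(clip)  # still a list of 8 PIL frames, now 224 x 224
    return len(out), out[0].size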
| InternVideo-main | Pretrain/VideoMAE/video_transforms.py |
# pylint: disable=line-too-long,too-many-lines,missing-docstring
import io
import os
import random
import warnings
import cv2
import decord
import numpy as np
import torch
import torch.distributed as dist
from decord import VideoReader, cpu
from numpy.lib.function_base import disp
from petrel_client.client import Client
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
import video_transforms as video_transforms
import volume_transforms as volume_transforms
# from video_transforms import create_random_augment
from random_erasing import RandomErasing
class ANetClsDataset(Dataset):
"""Load your own video classification dataset."""
def __init__(self,
anno_path,
data_path,
mode='train',
clip_len=16,
frame_sample_rate=4,
crop_size=224,
short_side_size=256,
new_height=256,
new_width=340,
keep_aspect_ratio=True,
num_segment=1,
num_crop=1,
test_num_segment=1,
test_num_crop=1,
sparse_sampling=True,
args=None):
self.anno_path = anno_path
self.data_path = data_path
self.mode = mode
self.clip_len = clip_len
self.frame_sample_rate = frame_sample_rate
self.crop_size = crop_size
self.short_side_size = short_side_size
self.new_height = new_height
self.new_width = new_width
self.keep_aspect_ratio = keep_aspect_ratio
self.num_segment = num_segment
self.test_num_segment = test_num_segment
self.num_crop = num_crop
self.test_num_crop = test_num_crop
self.sparse_sampling = sparse_sampling
self.args = args
self.aug = False
self.rand_erase = False
# self.use_temporal_gradient = False
# self.temporal_gradient_rate = 0.0
if self.mode in ['train']:
self.aug = True
if self.args.reprob > 0:
self.rand_erase = True
if VideoReader is None:
raise ImportError(
"Unable to import `decord` which is required to read videos.")
import pandas as pd
cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')
self.dataset_samples = list(cleaned.values[:, 0])
self.total_times = list(cleaned.values[:, 1])
self.start_times = list(cleaned.values[:, 2])
self.end_times = list(cleaned.values[:, 3])
self.label_array = list(cleaned.values[:, 4])
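        # Editorial note: the annotation file is a space-delimited table whose
        # columns are read above as: video path, total video duration, action
        # start time, action end time, and class label (one row per instance).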
self.client = Client()
if (mode == 'train'):
pass
elif (mode == 'validation'):
self.data_transform = video_transforms.Compose([
video_transforms.Resize(self.short_side_size,
interpolation='bilinear'),
video_transforms.CenterCrop(size=(self.crop_size,
self.crop_size)),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
elif mode == 'test':
self.data_resize = video_transforms.Compose([
video_transforms.Resize(size=(short_side_size),
interpolation='bilinear')
])
self.data_transform = video_transforms.Compose([
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
self.test_seg = []
self.test_dataset = []
self.test_total_times = []
self.test_start_times = []
self.test_end_times = []
self.test_label_array = []
for ck in range(self.test_num_segment):
for cp in range(self.test_num_crop):
for idx in range(len(self.label_array)):
sample_label = self.label_array[idx]
self.test_label_array.append(sample_label)
self.test_total_times.append(self.total_times[idx])
self.test_start_times.append(self.start_times[idx])
self.test_end_times.append(self.end_times[idx])
self.test_dataset.append(self.dataset_samples[idx])
self.test_seg.append((ck, cp))
def __getitem__(self, index):
if self.mode == 'train':
args = self.args
scale_t = 1
# data_transform_func = self.data_transform
sample = self.dataset_samples[index]
total_time = self.total_times[index]
start_time = self.start_times[index]
end_time = self.end_times[index]
buffer = self.loadvideo_decord(sample,
total_time,
start_time,
end_time,
sample_rate_scale=scale_t)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn(
"video {} not correctly loaded during training".format(
sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
total_time = self.total_times[index]
start_time = self.start_times[index]
end_time = self.end_times[index]
buffer = self.loadvideo_decord(sample,
total_time,
start_time,
end_time,
sample_rate_scale=scale_t)
# buffer = data_transform_func(buffer) # T H W C
if args.num_sample > 1:
frame_list = []
label_list = []
index_list = []
for _ in range(args.num_sample):
new_frames = self._aug_frame(buffer, args)
label = self.label_array[index]
#new_frames = [new_frames]
frame_list.append(new_frames)
label_list.append(label)
index_list.append(index)
return frame_list, label_list, index_list, {}
else:
buffer = self._aug_frame(buffer, args)
return buffer, self.label_array[index], index, {}
elif self.mode == 'validation':
sample = self.dataset_samples[index]
total_time = self.total_times[index]
start_time = self.start_times[index]
end_time = self.end_times[index]
buffer = self.loadvideo_decord(sample, total_time, start_time,
end_time)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn(
"video {} not correctly loaded during validation".
format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
total_time = self.total_times[index]
start_time = self.start_times[index]
end_time = self.end_times[index]
buffer = self.loadvideo_decord(sample, total_time,
start_time, end_time)
buffer = self.data_transform(buffer)
return buffer, self.label_array[index], sample.split(
"/")[-1].split(".")[0]
elif self.mode == 'test':
sample = self.test_dataset[index]
            total_time = self.test_total_times[index]
            start_time = self.test_start_times[index]
            end_time = self.test_end_times[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample, total_time, start_time,
end_time)
while len(buffer) == 0:
warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\
str(self.test_dataset[index]), chunk_nb, split_nb))
index = np.random.randint(self.__len__())
sample = self.test_dataset[index]
                total_time = self.test_total_times[index]
                start_time = self.test_start_times[index]
                end_time = self.test_end_times[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample, total_time, start_time,
end_time)
buffer = self.data_resize(buffer)
if isinstance(buffer, list):
buffer = np.stack(buffer, 0)
            spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
                / max(self.test_num_crop - 1, 1)
            temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \
                / max(self.test_num_segment - 1, 1), 0)
temporal_start = int(chunk_nb * temporal_step)
spatial_start = int(split_nb * spatial_step)
if buffer.shape[1] >= buffer.shape[2]:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
spatial_start:spatial_start + self.short_side_size, :, :]
else:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
:, spatial_start:spatial_start + self.short_side_size, :]
buffer = self.data_transform(buffer)
return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \
chunk_nb, split_nb
else:
            raise NameError('mode {} unknown'.format(self.mode))
def _aug_frame(
self,
buffer,
args,
):
aug_transform = video_transforms.create_random_augment(
input_size=(self.crop_size, self.crop_size),
auto_augment=args.aa,
interpolation=args.train_interpolation,
)
buffer = [transforms.ToPILImage()(frame) for frame in buffer]
buffer = aug_transform(buffer)
buffer = [transforms.ToTensor()(img) for img in buffer]
buffer = torch.stack(buffer) # T C H W
buffer = buffer.permute(0, 2, 3, 1) # T H W C
# T H W C
buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
# T H W C -> C T H W.
buffer = buffer.permute(3, 0, 1, 2)
# Perform data augmentation.
scl, asp = (
[0.08, 1.0],
[0.75, 1.3333],
)
buffer = spatial_sampling(
buffer,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=224,
random_horizontal_flip=False if args.data_set == 'SSV2' else True,
inverse_uniform_sampling=False,
aspect_ratio=asp,
scale=scl,
motion_shift=False)
if self.rand_erase:
erase_transform = RandomErasing(
args.reprob,
mode=args.remode,
max_count=args.recount,
num_splits=args.recount,
device="cpu",
)
buffer = buffer.permute(1, 0, 2, 3) # C T H W -> T C H W
buffer = erase_transform(buffer)
buffer = buffer.permute(1, 0, 2, 3) # T C H W -> C T H W
# buffer = self.data_transform_after(buffer)
return buffer
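
    # Editorial note: _aug_frame above applies, in order, RandAugment on PIL
    # frames, ImageNet-style normalization, Inception-style random resized
    # cropping (with optional horizontal flip) via spatial_sampling, and
    # finally RandomErasing when args.reprob > 0; the returned tensor is laid
    # out as C x T x H x W.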
def loadvideo_decord(self,
sample,
total_time,
start_time,
end_time,
sample_rate_scale=1):
"""Load video content using Decord"""
# pylint: disable=line-too-long, bare-except, unnecessary-comprehension
# fname = self.data_path + sample
fname = sample
if self.keep_aspect_ratio:
if fname.startswith('s3'):
video_bytes = self.client.get(fname)
vr = VideoReader(io.BytesIO(video_bytes),
num_threads=1,
ctx=cpu(0))
else:
vr = VideoReader(fname, num_threads=1, ctx=cpu(0))
else:
if fname.startswith('s3:'):
video_bytes = self.client.get(fname)
vr = VideoReader(io.BytesIO(video_bytes),
width=self.new_width,
height=self.new_height,
num_threads=1,
ctx=cpu(0))
else:
vr = VideoReader(fname,
width=self.new_width,
height=self.new_height,
num_threads=1,
ctx=cpu(0))
if self.mode == 'test':
all_index = [x for x in range(0, len(vr), self.frame_sample_rate)]
while len(all_index) < self.clip_len:
all_index.append(all_index[-1])
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
# handle temporal segments
total_num_frames = len(vr)
trimmed_video_len = int(total_num_frames * (end_time - start_time) /
total_time)
all_index = []
if self.sparse_sampling:
average_duration = trimmed_video_len // self.clip_len
if average_duration > 0:
all_index += list(
np.multiply(list(range(self.clip_len)), average_duration) +
np.random.randint(average_duration, size=self.clip_len))
elif trimmed_video_len > self.num_segment:
all_index += list(
np.sort(
np.random.randint(trimmed_video_len,
size=self.clip_len)))
else:
all_index += [0] * (self.clip_len - trimmed_video_len) + list(
range(trimmed_video_len))
all_index = list(np.array(all_index))
else:
seg_len = trimmed_video_len // self.num_segment
converted_len = int(self.clip_len * self.frame_sample_rate)
for i in range(self.num_segment):
if seg_len <= converted_len:
index = np.linspace(0,
seg_len,
num=seg_len // self.frame_sample_rate)
index = np.concatenate(
(index,
np.ones(self.clip_len -
seg_len // self.frame_sample_rate) * seg_len))
index = np.clip(index, 0, seg_len - 1).astype(np.int64)
else:
end_idx = np.random.randint(converted_len, seg_len)
str_idx = end_idx - converted_len
index = np.linspace(str_idx, end_idx, num=self.clip_len)
index = np.clip(index, str_idx,
end_idx - 1).astype(np.int64)
index = index + i * seg_len
all_index.extend(list(index))
all_index = all_index[::int(sample_rate_scale)]
init_off = int(total_num_frames * start_time / total_time)
all_index = [idx + init_off for idx in all_index]
all_index = [
idx if idx < total_num_frames else total_num_frames - 1
for idx in all_index
]
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
def __len__(self):
if self.mode != 'test':
return len(self.dataset_samples)
else:
return len(self.test_dataset)
def spatial_sampling(
frames,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=224,
random_horizontal_flip=True,
inverse_uniform_sampling=False,
aspect_ratio=None,
scale=None,
motion_shift=False,
):
"""
Perform spatial sampling on the given video frames. If spatial_idx is
-1, perform random scale, random crop, and random flip on the given
frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
with the given spatial_idx.
Args:
frames (tensor): frames of images sampled from the video. The
dimension is `num frames` x `height` x `width` x `channel`.
spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
or 2, perform left, center, right crop if width is larger than
            height, and perform top, center, bottom crop if height is larger
than width.
min_scale (int): the minimal size of scaling.
max_scale (int): the maximal size of scaling.
crop_size (int): the size of height and width used to crop the
frames.
inverse_uniform_sampling (bool): if True, sample uniformly in
[1 / max_scale, 1 / min_scale] and take a reciprocal to get the
scale. If False, take a uniform sample from [min_scale,
max_scale].
aspect_ratio (list): Aspect ratio range for resizing.
scale (list): Scale range for resizing.
motion_shift (bool): Whether to apply motion shift for resizing.
Returns:
frames (tensor): spatially sampled frames.
"""
assert spatial_idx in [-1, 0, 1, 2]
if spatial_idx == -1:
if aspect_ratio is None and scale is None:
frames, _ = video_transforms.random_short_side_scale_jitter(
images=frames,
min_size=min_scale,
max_size=max_scale,
inverse_uniform_sampling=inverse_uniform_sampling,
)
frames, _ = video_transforms.random_crop(frames, crop_size)
else:
transform_func = (video_transforms.random_resized_crop_with_shift
if motion_shift else
video_transforms.random_resized_crop)
frames = transform_func(
images=frames,
target_height=crop_size,
target_width=crop_size,
scale=scale,
ratio=aspect_ratio,
)
if random_horizontal_flip:
frames, _ = video_transforms.horizontal_flip(0.5, frames)
else:
# The testing is deterministic and no jitter should be performed.
        # min_scale, max_scale, and crop_size are expected to be the same.
assert len({min_scale, max_scale, crop_size}) == 1
frames, _ = video_transforms.random_short_side_scale_jitter(
frames, min_scale, max_scale)
frames, _ = video_transforms.uniform_crop(frames, crop_size,
spatial_idx)
return frames
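# Usage sketch (illustrative only): `_aug_frame` above calls `spatial_sampling`
# with spatial_idx=-1 to get a random resized crop plus optional flip. For a
# deterministic centre crop of a (C, T, H, W) clip one could instead call
#   frames = spatial_sampling(frames, spatial_idx=1, min_scale=224,
#                             max_scale=224, crop_size=224,
#                             random_horizontal_flip=False)
# which resizes the short side to 224 and then takes the centre 224x224 crop.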
def tensor_normalize(tensor, mean, std):
"""
Normalize a given tensor by subtracting the mean and dividing the std.
Args:
tensor (tensor): tensor to normalize.
mean (tensor or list): mean value to subtract.
std (tensor or list): std to divide.
"""
if tensor.dtype == torch.uint8:
tensor = tensor.float()
tensor = tensor / 255.0
if type(mean) == list:
mean = torch.tensor(mean)
if type(std) == list:
std = torch.tensor(std)
tensor = tensor - mean
tensor = tensor / std
return tensor
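# A minimal smoke test for `tensor_normalize`, runnable only when this file is
# executed directly; the clip below is random dummy data, not a real sample.
if __name__ == "__main__":
    # Normalize a random uint8 clip in (T, H, W, C) layout with ImageNet stats.
    _demo_clip = torch.randint(0, 256, (16, 224, 224, 3), dtype=torch.uint8)
    _demo_out = tensor_normalize(_demo_clip, [0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])
    print(_demo_out.shape, _demo_out.dtype)  # torch.Size([16, 224, 224, 3]) torch.float32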
| InternVideo-main | Pretrain/VideoMAE/anet.py |
# pylint: disable=line-too-long,too-many-lines,missing-docstring
import io
import os
import random
import warnings
import cv2
import decord
import numpy as np
import torch
from decord import VideoReader, cpu
from petrel_client.client import Client
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
import video_transforms as video_transforms
import volume_transforms as volume_transforms
# from video_transforms import create_random_augment
from random_erasing import RandomErasing
class RawFrameDataset(Dataset):
"""Load your own video classification dataset."""
def __init__(self,
anno_path,
data_path,
mode='train',
clip_len=8,
frame_sample_rate=2,
crop_size=224,
short_side_size=256,
new_height=256,
new_width=340,
keep_aspect_ratio=True,
num_segment=1,
num_crop=1,
test_num_segment=10,
test_num_crop=3,
fname_tmpl='img_{:05}.jpg',
args=None):
self.anno_path = anno_path
self.data_path = data_path
self.mode = mode
self.clip_len = clip_len
self.frame_sample_rate = frame_sample_rate
self.crop_size = crop_size
self.short_side_size = short_side_size
self.new_height = new_height
self.new_width = new_width
self.keep_aspect_ratio = keep_aspect_ratio
self.num_segment = num_segment
self.test_num_segment = test_num_segment
self.num_crop = num_crop
self.test_num_crop = test_num_crop
self.fname_tmpl = fname_tmpl
self.args = args
self.aug = False
self.rand_erase = False
if self.mode in ['train']:
self.aug = True
if self.args.reprob > 0:
self.rand_erase = True
if VideoReader is None:
raise ImportError(
"Unable to import `decord` which is required to read videos.")
import pandas as pd
cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')
self.dataset_samples = list(cleaned.values[:, 0])
self.total_frames = list(cleaned.values[:, 1])
self.label_array = list(cleaned.values[:, -1])
self.client = Client()
if (mode == 'train'):
pass
elif (mode == 'validation'):
self.data_transform = video_transforms.Compose([
video_transforms.Resize(
self.short_side_size, interpolation='bilinear'),
video_transforms.CenterCrop(
size=(self.crop_size, self.crop_size)),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
elif mode == 'test':
self.data_resize = video_transforms.Compose([
video_transforms.Resize(
size=(short_side_size), interpolation='bilinear')
])
self.data_transform = video_transforms.Compose([
volume_transforms.ClipToTensor(),
video_transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
self.test_seg = []
self.test_dataset = []
self.test_total_frames = []
self.test_label_array = []
for ck in range(self.test_num_segment):
for cp in range(self.test_num_crop):
for idx in range(len(self.label_array)):
self.test_seg.append((ck, cp))
self.test_dataset.append(self.dataset_samples[idx])
self.test_total_frames.append(self.total_frames[idx])
self.test_label_array.append(self.label_array[idx])
def __getitem__(self, index):
if self.mode == 'train':
args = self.args
scale_t = 1
sample = self.dataset_samples[index]
total_frame = self.total_frames[index]
buffer = self.load_frame(
sample, total_frame, sample_rate_scale=scale_t) # T H W C
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn(
"video {} not correctly loaded during training".format(
sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
total_frame = self.total_frames[index]
buffer = self.load_frame(
sample, total_frame, sample_rate_scale=scale_t)
if args.num_sample > 1:
frame_list = []
label_list = []
index_list = []
for _ in range(args.num_sample):
new_frames = self._aug_frame(buffer, args)
label = self.label_array[index]
frame_list.append(new_frames)
label_list.append(label)
index_list.append(index)
return frame_list, label_list, index_list, {}
else:
buffer = self._aug_frame(buffer, args)
return buffer, self.label_array[index], index, {}
elif self.mode == 'validation':
sample = self.dataset_samples[index]
total_frame = self.total_frames[index]
buffer = self.load_frame(sample, total_frame)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn(
"video {} not correctly loaded during validation".
format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
total_frame = self.total_frames[index]
buffer = self.load_frame(sample, total_frame)
buffer = self.data_transform(buffer)
return buffer, self.label_array[index], sample.split("/")[-1].split(
".")[0]
elif self.mode == 'test':
sample = self.test_dataset[index]
total_frame = self.test_total_frames[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.load_frame(sample, total_frame)
while len(buffer) == 0:
warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\
str(self.test_dataset[index]), chunk_nb, split_nb))
index = np.random.randint(self.__len__())
sample = self.test_dataset[index]
total_frame = self.test_total_frames[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.load_frame(sample, total_frame)
buffer = self.data_resize(buffer)
if isinstance(buffer, list):
buffer = np.stack(buffer, 0)
spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
/ (self.test_num_crop - 1)
temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \
/ (self.test_num_segment - 1), 0)
temporal_start = int(chunk_nb * temporal_step)
spatial_start = int(split_nb * spatial_step)
if buffer.shape[1] >= buffer.shape[2]:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
spatial_start:spatial_start + self.short_side_size, :, :]
else:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
:, spatial_start:spatial_start + self.short_side_size, :]
buffer = self.data_transform(buffer)
return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \
chunk_nb, split_nb
else:
            raise NameError('mode {} unknown'.format(self.mode))
def _aug_frame(
self,
buffer,
args,
):
aug_transform = video_transforms.create_random_augment(
input_size=(self.crop_size, self.crop_size),
auto_augment=args.aa,
interpolation=args.train_interpolation,
)
buffer = [transforms.ToPILImage()(frame) for frame in buffer]
buffer = aug_transform(buffer)
buffer = [transforms.ToTensor()(img) for img in buffer]
buffer = torch.stack(buffer) # T C H W
buffer = buffer.permute(0, 2, 3, 1) # T H W C
# T H W C
buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
# T H W C -> C T H W.
buffer = buffer.permute(3, 0, 1, 2)
# Perform data augmentation.
scl, asp = (
[0.08, 1.0],
[0.75, 1.3333],
)
buffer = spatial_sampling(
buffer,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=224,
random_horizontal_flip=False if args.data_set == 'SSV2' else True,
inverse_uniform_sampling=False,
aspect_ratio=asp,
scale=scl,
motion_shift=False)
if self.rand_erase:
erase_transform = RandomErasing(
args.reprob,
mode=args.remode,
max_count=args.recount,
num_splits=args.recount,
device="cpu",
)
buffer = buffer.permute(1, 0, 2, 3) # C T H W -> T C H W
buffer = erase_transform(buffer)
buffer = buffer.permute(1, 0, 2, 3) # T C H W -> C T H W
# buffer = self.data_transform_after(buffer)
return buffer
def load_frame(self, sample, num_frames, sample_rate_scale=1):
"""Load video rawframe"""
# pylint: disable=line-too-long, bare-except, unnecessary-comprehension
# fname = self.data_path + sample
fname = sample
if self.mode == 'test':
all_index = [
x for x in range(0, num_frames, self.frame_sample_rate)
]
while len(all_index) < self.clip_len:
all_index.append(all_index[-1])
imgs = []
for idx in all_index:
frame_fname = os.path.join(fname,
self.fname_tmpl.format(idx + 1))
img_bytes = self.client.get(frame_fname)
img_np = np.frombuffer(img_bytes, np.uint8)
img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
imgs.append(img)
buffer = np.array(imgs)
return buffer
# handle temporal segments
converted_len = int(self.clip_len * self.frame_sample_rate)
seg_len = num_frames // self.num_segment
all_index = []
for i in range(self.num_segment):
if seg_len <= converted_len:
index = np.linspace(
0, seg_len, num=seg_len // self.frame_sample_rate)
index = np.concatenate((
index,
np.ones(self.clip_len - seg_len // self.frame_sample_rate) *
seg_len))
index = np.clip(index, 0, seg_len - 1).astype(np.int64)
else:
end_idx = np.random.randint(converted_len, seg_len)
str_idx = end_idx - converted_len
index = np.linspace(str_idx, end_idx, num=self.clip_len)
index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)
index = index + i * seg_len
all_index.extend(list(index))
all_index = all_index[::int(sample_rate_scale)]
imgs = []
for idx in all_index:
frame_fname = os.path.join(fname, self.fname_tmpl.format(idx + 1))
img_bytes = self.client.get(frame_fname)
img_np = np.frombuffer(img_bytes, np.uint8)
img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
imgs.append(img)
buffer = np.array(imgs)
return buffer
def __len__(self):
if self.mode != 'test':
return len(self.dataset_samples)
else:
return len(self.test_dataset)
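# Annotation format expected by RawFrameDataset (one sample per line,
# space-separated): "<frame_dir> <total_frames> <label>"; frames are then read
# as <frame_dir>/img_00001.jpg, img_00002.jpg, ... through the petrel Client.
# A hypothetical instantiation (paths and values are placeholders, not taken
# from any real config):
#   dataset = RawFrameDataset(anno_path='train.csv', data_path='',
#                             mode='train', clip_len=16, args=args)
# where `args` must at least provide reprob/remode/recount, aa,
# train_interpolation, num_sample and data_set.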
class VideoClsDataset(Dataset):
"""Load your own video classification dataset."""
def __init__(self,
anno_path,
data_path,
mode='train',
clip_len=8,
frame_sample_rate=2,
crop_size=224,
short_side_size=256,
new_height=256,
new_width=340,
keep_aspect_ratio=True,
num_segment=1,
num_crop=1,
test_num_segment=10,
test_num_crop=3,
sparse_sample=False,
v4d_segment=1,
args=None):
self.anno_path = anno_path
self.data_path = data_path
self.mode = mode
self.clip_len = clip_len
self.frame_sample_rate = frame_sample_rate
self.crop_size = crop_size
self.short_side_size = short_side_size
self.new_height = new_height
self.new_width = new_width
self.keep_aspect_ratio = keep_aspect_ratio
self.num_segment = num_segment
self.test_num_segment = test_num_segment
self.num_crop = num_crop
self.test_num_crop = test_num_crop
self.sparse_sample = sparse_sample
self.v4d_segment = v4d_segment
self.args = args
self.aug = False
self.rand_erase = False
# self.use_temporal_gradient = False
# self.temporal_gradient_rate = 0.0
if self.mode in ['train']:
self.aug = True
if self.args.reprob > 0:
self.rand_erase = True
if VideoReader is None:
raise ImportError(
"Unable to import `decord` which is required to read videos.")
import pandas as pd
cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')
self.dataset_samples = list(cleaned.values[:, 0])
self.label_array = list(cleaned.values[:, 1])
# conf_path = '/mnt/lustre/huangbingkun/petreloss.conf'
conf_path = '~/petreloss2.conf'
self.client = Client(conf_path)
if (mode == 'train'):
pass
elif (mode == 'validation'):
self.data_transform = video_transforms.Compose([
video_transforms.Resize(
self.short_side_size, interpolation='bilinear'),
video_transforms.CenterCrop(
size=(self.crop_size, self.crop_size)),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
elif mode == 'test':
self.data_resize = video_transforms.Compose([
video_transforms.Resize(
size=(short_side_size), interpolation='bilinear')
])
self.data_transform = video_transforms.Compose([
volume_transforms.ClipToTensor(),
video_transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
self.test_seg = []
self.test_dataset = []
self.test_label_array = []
for ck in range(self.test_num_segment):
for cp in range(self.test_num_crop):
for idx in range(len(self.label_array)):
sample_label = self.label_array[idx]
self.test_label_array.append(sample_label)
self.test_dataset.append(self.dataset_samples[idx])
self.test_seg.append((ck, cp))
def __getitem__(self, index):
if self.mode == 'train':
args = self.args
scale_t = 1
# data_transform_func = self.data_transform
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(
sample, sample_rate_scale=scale_t) # T H W C
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn(
"video {} not correctly loaded during training".format(
sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(
sample, sample_rate_scale=scale_t)
# buffer = data_transform_func(buffer) # T H W C
if args.num_sample > 1:
frame_list = []
label_list = []
index_list = []
for _ in range(args.num_sample):
new_frames = self._aug_frame(buffer, args)
label = self.label_array[index]
#new_frames = [new_frames]
frame_list.append(new_frames)
label_list.append(label)
index_list.append(index)
return frame_list, label_list, index_list, {}
else:
buffer = self._aug_frame(buffer, args)
if self.v4d_segment > 1:
# C T H W
C, T, H, W = buffer.shape
buffer = buffer.view(C, -1, self.v4d_segment, H, W)
buffer = buffer.permute(2, 0, 1, 3, 4)
return buffer, self.label_array[index], index, {}
elif self.mode == 'validation':
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn(
"video {} not correctly loaded during validation".
format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_transform(buffer)
return buffer, self.label_array[index], sample.split("/")[-1].split(
".")[0]
elif self.mode == 'test':
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
while len(buffer) == 0:
warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\
str(self.test_dataset[index]), chunk_nb, split_nb))
index = np.random.randint(self.__len__())
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_resize(buffer)
if isinstance(buffer, list):
buffer = np.stack(buffer, 0)
if self.sparse_sample:
spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
/ (self.test_num_crop - 1)
temporal_start = chunk_nb
spatial_start = int(split_nb * spatial_step)
if buffer.shape[1] >= buffer.shape[2]:
buffer = buffer[temporal_start::self.test_num_segment, \
spatial_start:spatial_start + self.short_side_size, :, :]
else:
buffer = buffer[temporal_start::self.test_num_segment, \
:, spatial_start:spatial_start + self.short_side_size, :]
else:
spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
/ (self.test_num_crop - 1)
temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \
/ (self.test_num_segment - 1), 0)
temporal_start = int(chunk_nb * temporal_step)
spatial_start = int(split_nb * spatial_step)
if buffer.shape[1] >= buffer.shape[2]:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
spatial_start:spatial_start + self.short_side_size, :, :]
else:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
:, spatial_start:spatial_start + self.short_side_size, :]
buffer = self.data_transform(buffer)
return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \
chunk_nb, split_nb
else:
            raise NameError('mode {} unknown'.format(self.mode))
def _aug_frame(
self,
buffer,
args,
):
aug_transform = video_transforms.create_random_augment(
input_size=(self.crop_size, self.crop_size),
auto_augment=args.aa,
interpolation=args.train_interpolation,
)
buffer = [transforms.ToPILImage()(frame) for frame in buffer]
buffer = aug_transform(buffer)
buffer = [transforms.ToTensor()(img) for img in buffer]
buffer = torch.stack(buffer) # T C H W
buffer = buffer.permute(0, 2, 3, 1) # T H W C
# T H W C
buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
# T H W C -> C T H W.
buffer = buffer.permute(3, 0, 1, 2)
# Perform data augmentation.
scl, asp = (
[0.08, 1.0],
[0.75, 1.3333],
)
buffer = spatial_sampling(
buffer,
spatial_idx=-1,
min_scale=256,
max_scale=320,
# crop_size=224,
crop_size=args.input_size,
random_horizontal_flip=False if args.data_set == 'SSV2' else True,
inverse_uniform_sampling=False,
aspect_ratio=asp,
scale=scl,
motion_shift=False)
if self.rand_erase:
erase_transform = RandomErasing(
args.reprob,
mode=args.remode,
max_count=args.recount,
num_splits=args.recount,
device="cpu",
)
buffer = buffer.permute(1, 0, 2, 3) # C T H W -> T C H W
buffer = erase_transform(buffer)
buffer = buffer.permute(1, 0, 2, 3) # T C H W -> C T H W
# buffer = self.data_transform_after(buffer)
return buffer
def loadvideo_decord(self, sample, sample_rate_scale=1):
"""Load video content using Decord"""
# pylint: disable=line-too-long, bare-except, unnecessary-comprehension
# fname = self.data_path + sample
fname = sample
try:
if self.keep_aspect_ratio:
if fname.startswith('s3'):
video_bytes = self.client.get(fname)
vr = VideoReader(
io.BytesIO(video_bytes), num_threads=1, ctx=cpu(0))
else:
vr = VideoReader(fname, num_threads=1, ctx=cpu(0))
else:
if fname.startswith('s3:'):
video_bytes = self.client.get(fname)
vr = VideoReader(
io.BytesIO(video_bytes),
width=self.new_width,
height=self.new_height,
num_threads=1,
ctx=cpu(0))
else:
vr = VideoReader(
fname,
width=self.new_width,
height=self.new_height,
num_threads=1,
ctx=cpu(0))
except Exception as e:
print("Failed to load video from {} with error {}".format(fname, e))
return []
segment_len = len(vr) // self.v4d_segment
if self.v4d_segment == 1:
buffer = self.loadvideo_one_v4d_segment(vr, 0, segment_len,
sample_rate_scale)
else:
buffer_list = []
for i in range(self.v4d_segment):
# [T H W C]
buffer = self.loadvideo_one_v4d_segment(vr, i * segment_len,
segment_len,
sample_rate_scale)
buffer_list.append(buffer)
buffer = np.concatenate(buffer_list, axis=0)
return buffer
def loadvideo_one_v4d_segment(self,
vr,
start_idx,
length,
sample_rate_scale=1):
if self.mode == 'test':
if self.sparse_sample:
tick = length / float(self.num_segment)
all_index = []
for t_seg in range(self.test_num_segment):
tmp_index = [
int(t_seg * tick / self.test_num_segment + tick * x)
for x in range(self.num_segment)
]
all_index.extend(tmp_index)
all_index = list(np.sort(np.array(all_index)))
else:
all_index = [
x for x in range(0, length, self.frame_sample_rate)
]
while len(all_index) < self.clip_len:
all_index.append(all_index[-1])
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
# handle temporal segments
converted_len = int(self.clip_len * self.frame_sample_rate)
seg_len = length // self.num_segment
all_index = []
for i in range(self.num_segment):
if seg_len <= converted_len:
index = np.linspace(
0, seg_len, num=seg_len // self.frame_sample_rate)
index = np.concatenate((
index,
np.ones(self.clip_len - seg_len // self.frame_sample_rate) *
seg_len))
index = np.clip(index, 0, seg_len - 1).astype(np.int64)
else:
if self.mode == 'validation':
end_idx = (converted_len + seg_len) // 2
else:
end_idx = np.random.randint(converted_len, seg_len)
str_idx = end_idx - converted_len
index = np.linspace(str_idx, end_idx, num=self.clip_len)
index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)
index = index + i * seg_len
all_index.extend(list(index))
all_index = all_index[::int(sample_rate_scale)]
all_index = [idx + start_idx for idx in all_index]
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
def __len__(self):
if self.mode != 'test':
return len(self.dataset_samples)
else:
return len(self.test_dataset)
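# Annotation format expected by VideoClsDataset (one sample per line,
# space-separated): "<video_path> <label>". In 'test' mode every video is
# expanded into test_num_segment * test_num_crop views (temporal chunk x
# spatial crop), so the dataset length becomes
# len(videos) * test_num_segment * test_num_crop and the returned
# (chunk_nb, split_nb) identify each view for later score averaging.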
def spatial_sampling(
frames,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=224,
random_horizontal_flip=True,
inverse_uniform_sampling=False,
aspect_ratio=None,
scale=None,
motion_shift=False,
):
"""
Perform spatial sampling on the given video frames. If spatial_idx is
-1, perform random scale, random crop, and random flip on the given
frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
with the given spatial_idx.
Args:
frames (tensor): frames of images sampled from the video. The
dimension is `num frames` x `height` x `width` x `channel`.
spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
or 2, perform left, center, right crop if width is larger than
            height, and perform top, center, bottom crop if height is larger
than width.
min_scale (int): the minimal size of scaling.
max_scale (int): the maximal size of scaling.
crop_size (int): the size of height and width used to crop the
frames.
inverse_uniform_sampling (bool): if True, sample uniformly in
[1 / max_scale, 1 / min_scale] and take a reciprocal to get the
scale. If False, take a uniform sample from [min_scale,
max_scale].
aspect_ratio (list): Aspect ratio range for resizing.
scale (list): Scale range for resizing.
motion_shift (bool): Whether to apply motion shift for resizing.
Returns:
frames (tensor): spatially sampled frames.
"""
assert spatial_idx in [-1, 0, 1, 2]
if spatial_idx == -1:
if aspect_ratio is None and scale is None:
frames, _ = video_transforms.random_short_side_scale_jitter(
images=frames,
min_size=min_scale,
max_size=max_scale,
inverse_uniform_sampling=inverse_uniform_sampling,
)
frames, _ = video_transforms.random_crop(frames, crop_size)
else:
transform_func = (
video_transforms.random_resized_crop_with_shift
if motion_shift else video_transforms.random_resized_crop)
frames = transform_func(
images=frames,
target_height=crop_size,
target_width=crop_size,
scale=scale,
ratio=aspect_ratio,
)
if random_horizontal_flip:
frames, _ = video_transforms.horizontal_flip(0.5, frames)
else:
# The testing is deterministic and no jitter should be performed.
        # min_scale, max_scale, and crop_size are expected to be the same.
assert len({min_scale, max_scale, crop_size}) == 1
frames, _ = video_transforms.random_short_side_scale_jitter(
frames, min_scale, max_scale)
frames, _ = video_transforms.uniform_crop(frames, crop_size,
spatial_idx)
return frames
def tensor_normalize(tensor, mean, std):
"""
Normalize a given tensor by subtracting the mean and dividing the std.
Args:
tensor (tensor): tensor to normalize.
mean (tensor or list): mean value to subtract.
std (tensor or list): std to divide.
"""
if tensor.dtype == torch.uint8:
tensor = tensor.float()
tensor = tensor / 255.0
if type(mean) == list:
mean = torch.tensor(mean)
if type(std) == list:
std = torch.tensor(std)
tensor = tensor - mean
tensor = tensor / std
return tensor
| InternVideo-main | Pretrain/VideoMAE/kinetics.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from timm.models.registry import register_model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 400,
'input_size': (3, 224, 224),
'pool_size': None,
'crop_pct': .9,
'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5),
'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
# x = self.drop(x)
        # the dropout after fc1 is commented out, following the original BERT implementation
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.,
proj_drop=0.,
attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = qk_scale or head_dim**-0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat(
(self.q_bias, torch.zeros_like(self.v_bias,
requires_grad=False),
self.v_bias))
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[
2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
init_values=None,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(
drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop)
        if init_values is not None and init_values > 0:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),
requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),
requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x):
if self.gamma_1 is None:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
num_frames=16,
tubelet_size=2):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.tubelet_size = int(tubelet_size)
num_patches = (img_size[1] // patch_size[1]) * (
img_size[0] // patch_size[0]) * (num_frames // self.tubelet_size)
# self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv3d(in_channels=in_chans,
out_channels=embed_dim,
kernel_size=(self.tubelet_size, patch_size[0],
patch_size[1]),
stride=(self.tubelet_size, patch_size[0],
patch_size[1]))
def forward(self, x, **kwargs):
B, C, T, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
# b, c, l -> b, l, c
x = self.proj(x).flatten(2).transpose(1, 2)
return x
# sin-cos position encoding
# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31
def get_sinusoid_encoding_table(n_position, d_hid):
''' Sinusoid position encoding table '''
# TODO: make it with torch instead of numpy
def get_position_angle_vec(position):
return [
position / np.power(10000, 2 * (hid_j // 2) / d_hid)
for hid_j in range(d_hid)
]
sinusoid_table = np.array(
[get_position_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
# return torch.FloatTensor(sinusoid_table).unsqueeze(0)
return torch.tensor(sinusoid_table, dtype=torch.float,
requires_grad=False).unsqueeze(0)
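# Shape sketch: for the default 16-frame 224x224 clip with patch size 16 and
# tubelet size 2 there are (16 // 2) * (224 // 16) ** 2 = 1568 tokens, so
#   get_sinusoid_encoding_table(1568, 768).shape == torch.Size([1, 1568, 768])
# The table is used below as a fixed (non-learnable) `pos_embed`.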
class VisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
head_drop_rate=0.,
norm_layer=nn.LayerNorm,
init_values=0.,
use_learnable_pos_emb=False,
init_scale=0.,
all_frames=16,
tubelet_size=2,
use_mean_pooling=True,
with_cp=False,
num_segment=1):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.tubelet_size = tubelet_size
self.patch_embed = PatchEmbed(img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
num_frames=all_frames,
tubelet_size=self.tubelet_size)
num_patches = self.patch_embed.num_patches
self.with_cp = with_cp
self.num_segment = num_segment
# self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_learnable_pos_emb:
self.pos_embed = nn.Parameter(
torch.zeros(1, num_patches, embed_dim))
else:
            # fixed sine-cosine positional embeddings are used instead
if img_size != 224 or all_frames != 16:
org_img_size = (224, 224)
org_num_frames = 16
num_patches = (org_img_size[1] // patch_size) * (org_img_size[0] // patch_size) * (
org_num_frames // self.tubelet_size)
self.pos_embed = get_sinusoid_encoding_table(
num_patches, embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
init_values=init_values) for i in range(depth)
])
self.norm = nn.Identity() if use_mean_pooling else norm_layer(
embed_dim)
self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
self.head_dropout = nn.Dropout(head_drop_rate)
self.head = nn.Linear(
embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if use_learnable_pos_emb:
trunc_normal_(self.pos_embed, std=.02)
# trunc_normal_(self.cls_token, std=.02)
# trunc_normal_(self.head.weight, std=.02)
self.apply(self._init_weights)
self.head.weight.data.mul_(init_scale)
self.head.bias.data.mul_(init_scale)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(
self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
B, _, _ = x.size()
# cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
# x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to(
x.device).clone().detach()
x = self.pos_drop(x)
for blk in self.blocks:
if self.with_cp:
x = cp.checkpoint(blk, x)
else:
x = blk(x)
x = self.norm(x)
if self.fc_norm is not None:
# return self.fc_norm(x[:, 1:].mean(1))
return self.fc_norm(x.mean(1))
else:
return x[:, 0]
def forward_check_variance(self, x):
x = self.patch_embed(x)
B, _, _ = x.size()
# cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
# x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to(
x.device).clone().detach()
x = self.pos_drop(x)
# x [B, N, C]
avg_var_list = []
for blk in self.blocks:
x = blk(x)
avg_var = torch.mean(torch.var(x, dim=-1))
avg_var_list.append(avg_var)
for i, avg_var in enumerate(avg_var_list):
print(f'avg variance of block {i}: {avg_var}', flush=True)
x = self.norm(x)
if self.fc_norm is not None:
# return self.fc_norm(x[:, 1:].mean(1))
return self.fc_norm(x.mean(1))
else:
return x[:, 0]
def forward(self, x):
# x = self.forward_check_variance(x)
x = self.forward_features(x)
x = self.head_dropout(x)
x = self.head(x)
if self.num_segment > 1 and self.training:
x = x.view((-1, self.num_segment) + x.size()[1:])
x = x.mean(dim=1)
return x
@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(patch_size=16,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
return model
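# Smoke-test sketch (shapes only; assumes the default 16-frame 224x224 input):
#   model = vit_base_patch16_224(num_classes=400)
#   logits = model(torch.randn(2, 3, 16, 224, 224))  # input is (B, C, T, H, W)
#   assert logits.shape == (2, 400)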
@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(img_size=384,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(img_size=384,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_large_patch16_512(pretrained=False, **kwargs):
model = VisionTransformer(img_size=512,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_huge_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(patch_size=16,
embed_dim=1280,
depth=32,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_giant_patch14_224(pretrained=False, **kwargs):
model = VisionTransformer(patch_size=14,
embed_dim=1408,
depth=40,
num_heads=16,
mlp_ratio=48 / 11,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_giant_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(patch_size=16,
embed_dim=1408,
depth=40,
num_heads=16,
mlp_ratio=48 / 11,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_gigantic_patch14_224(pretrained=False, **kwargs):
model = VisionTransformer(patch_size=14,
embed_dim=1664,
depth=48,
num_heads=16,
mlp_ratio=64 / 13,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
model.default_cfg = _cfg()
return model
| InternVideo-main | Pretrain/VideoMAE/modeling_finetune.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import json
import torch
from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lookahead import Lookahead
from timm.optim.nadam import Nadam
from timm.optim.novograd import NovoGrad
from timm.optim.nvnovograd import NvNovoGrad
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP
from torch import optim as optim
try:
from apex.optimizers import FusedAdam, FusedLAMB, FusedNovoGrad, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def get_num_layer_for_vit(var_name, num_max_layer):
if var_name in ("cls_token", "mask_token", "pos_embed"):
return 0
elif var_name.startswith("patch_embed"):
return -1
elif var_name.startswith("rel_pos_bias"):
return num_max_layer - 1
elif var_name.startswith("blocks"):
layer_id = int(var_name.split('.')[1])
return layer_id + 1
else:
return num_max_layer - 1
class LayerDecayValueAssigner(object):
def __init__(self, values, extra_decay=1.0):
assert extra_decay >= 0.
self.values = values
self.extra_scale = extra_decay
def get_scale(self, layer_id, extra_scale=1.0):
if layer_id < 0:
return extra_scale * self.values[0]
return self.values[layer_id]
def get_layer_id(self, var_name):
return get_num_layer_for_vit(var_name, len(self.values))
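# Usage sketch (the exact construction of `values` is an assumption here,
# mirroring the usual BEiT-style layer-wise decay schedule): for a ViT and a
# decay rate of 0.75,
#   num_layers = model.get_num_layers()
#   assigner = LayerDecayValueAssigner(
#       [0.75 ** (num_layers + 1 - i) for i in range(num_layers + 2)])
#   optimizer = create_optimizer(args, model,
#                                get_num_layer=assigner.get_layer_id,
#                                get_layer_scale=assigner.get_scale)
# so the patch embedding gets the smallest lr scale and the head gets 1.0.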
def get_parameter_groups(model,
weight_decay=1e-5,
skip_list=(),
get_num_layer=None,
get_layer_scale=None):
parameter_group_names = {}
parameter_group_vars = {}
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(
".bias") or name in skip_list:
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if get_num_layer is not None:
layer_id = get_num_layer(name)
group_name = "layer_%d_%s" % (layer_id, group_name)
else:
layer_id = None
if group_name not in parameter_group_names:
if get_layer_scale is not None:
scale = get_layer_scale(layer_id)
else:
scale = 1.
parameter_group_names[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name]["params"].append(param)
parameter_group_names[group_name]["params"].append(name)
print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
return list(parameter_group_vars.values())
def create_optimizer(args,
model,
get_num_layer=None,
get_layer_scale=None,
filter_bias_and_bn=True,
skip_list=None):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
if weight_decay and filter_bias_and_bn:
skip = {}
if skip_list is not None:
skip = skip_list
elif hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
parameters = get_parameter_groups(model, weight_decay, skip,
get_num_layer, get_layer_scale)
weight_decay = 0.
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(
), 'APEX and CUDA required for fused optimizers'
opt_args = dict(lr=args.lr, weight_decay=weight_decay)
if hasattr(args, 'opt_eps') and args.opt_eps is not None:
opt_args['eps'] = args.opt_eps
if hasattr(args, 'opt_betas') and args.opt_betas is not None:
opt_args['betas'] = args.opt_betas
print("optimizer settings:", opt_args)
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters,
momentum=args.momentum,
nesterov=True,
**opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters,
momentum=args.momentum,
nesterov=False,
**opt_args)
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'nadam':
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters,
momentum=args.momentum,
nesterov=True,
**opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adafactor':
if not args.lr:
opt_args['lr'] = None
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters,
alpha=0.9,
momentum=args.momentum,
**opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters,
alpha=0.9,
momentum=args.momentum,
**opt_args)
elif opt_lower == 'novograd':
optimizer = NovoGrad(parameters, **opt_args)
elif opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters,
momentum=args.momentum,
nesterov=True,
**opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters,
momentum=args.momentum,
nesterov=False,
**opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
else:
        raise ValueError("Invalid optimizer: {}".format(opt_lower))
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
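# A minimal, self-contained check of `create_optimizer`, runnable only when
# this file is executed directly; the argument values below are placeholders,
# not taken from any training config.
if __name__ == "__main__":
    from types import SimpleNamespace
    _args = SimpleNamespace(opt='adamw', lr=1e-3, weight_decay=0.05,
                            opt_eps=1e-8, opt_betas=(0.9, 0.999), momentum=0.9)
    _model = torch.nn.Linear(8, 2)
    _optimizer = create_optimizer(_args, _model)
    print(type(_optimizer).__name__)  # AdamW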
| InternVideo-main | Pretrain/VideoMAE/optim_factory.py |
import torch
import InternVideo
text_cand = ["an airplane is taking off", "an airplane is flying", "a dog is chasing a ball"]
video = InternVideo.load_video("./data/demo.mp4").cuda()
model = InternVideo.load_model("./models/InternVideo-MM-L-14.ckpt").cuda()
text = InternVideo.tokenize(
text_cand
).cuda()
with torch.no_grad():
text_features = model.encode_text(text)
video_features = model.encode_video(video.unsqueeze(0))
video_features = torch.nn.functional.normalize(video_features, dim=1)
text_features = torch.nn.functional.normalize(text_features, dim=1)
t = model.logit_scale.exp()
probs = (video_features @ text_features.T * t).softmax(dim=-1).cpu().numpy()
print("Label probs: ") # [[9.5619422e-01 4.3805469e-02 2.0393253e-07]]
for t, p in zip(text_cand, probs[0]):
print("{:30s}: {:.4f}".format(t, p))
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/demo.py |
from .internvideo import * | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/__init__.py |
import numbers
import random
import numpy as np
import PIL
import skimage
import skimage.transform
import torchvision
import torch
from torchvision import transforms
from PIL import Image
import torch
import cv2
def _is_tensor_clip(clip):
return torch.is_tensor(clip) and clip.ndimension() == 4
def crop_clip(clip, min_h, min_w, h, w):
if isinstance(clip[0], np.ndarray):
cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
cropped = [
img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
                        ' but got list of {0}'.format(type(clip[0])))
return cropped
def resize_clip(clip, size, interpolation='bilinear'):
if isinstance(clip[0], np.ndarray):
if isinstance(size, numbers.Number):
im_h, im_w, im_c = clip[0].shape
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[1], size[0]
if interpolation == 'bilinear':
np_inter = cv2.INTER_LINEAR
else:
np_inter = cv2.INTER_NEAREST
scaled = [
cv2.resize(img, size, interpolation=np_inter) for img in clip
]
elif isinstance(clip[0], PIL.Image.Image):
if isinstance(size, numbers.Number):
im_w, im_h = clip[0].size
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[1], size[0]
        if interpolation == 'bilinear':
            pil_inter = PIL.Image.BILINEAR
        else:
            pil_inter = PIL.Image.NEAREST
scaled = [img.resize(size, pil_inter) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
                        ' but got list of {0}'.format(type(clip[0])))
return scaled
def get_resize_sizes(im_h, im_w, size):
if im_w < im_h:
ow = size
oh = int(size * im_h / im_w)
else:
oh = size
ow = int(size * im_w / im_h)
return oh, ow
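# Worked example: get_resize_sizes(240, 320, 256) scales the shorter side
# (here the height) up to 256 while keeping the aspect ratio, returning
# (256, int(256 * 320 / 240)) == (256, 341).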
def normalize(clip, mean, std, inplace=False):
if not _is_tensor_clip(clip):
        raise TypeError('tensor is not a torch clip.')
if not inplace:
clip = clip.clone()
dtype = clip.dtype
dim = len(mean)
mean = torch.as_tensor(mean, dtype=dtype, device=clip.device)
std = torch.as_tensor(std, dtype=dtype, device=clip.device)
    # print(clip.size())
# if dim == 3:
clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
# else:
    # clip.sub_(mean[:, None, None]).div_(std[:, None, None])
return clip
def convert_img(img):
"""Converts (H, W, C) numpy.ndarray to (C, W, H) format
"""
if len(img.shape) == 3:
img = img.transpose(2, 0, 1)
if len(img.shape) == 2:
img = np.expand_dims(img, 0)
return img
class ClipToTensor(object):
"""Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0]
"""
def __init__(self, channel_nb=3, div_255=True, numpy=False):
self.channel_nb = channel_nb
self.div_255 = div_255
self.numpy = numpy
def __call__(self, clip):
"""
        Args: clip (list of numpy.ndarray): clip (list of images)
to be converted to tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(
ch)
elif isinstance(clip[0], Image.Image):
w, h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, Image.Image):
img = np.array(img, copy=False)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
img = convert_img(img)
np_clip[:, img_idx, :, :] = img
if self.numpy:
if self.div_255:
np_clip = np_clip / 255
return np_clip
else:
tensor_clip = torch.from_numpy(np_clip)
if not isinstance(tensor_clip, torch.FloatTensor):
tensor_clip = tensor_clip.float()
if self.div_255:
tensor_clip = tensor_clip.div(255)
return tensor_clip
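# Usage sketch: a list of T uint8 frames in (H, W, C) layout becomes a float
# tensor in (C, T, H, W) scaled to [0, 1], e.g.
#   frames = [np.zeros((240, 320, 3), dtype=np.uint8) for _ in range(8)]
#   ClipToTensor()(frames).shape  # torch.Size([3, 8, 240, 320])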
class ToTensor(object):
"""Converts numpy array to tensor
"""
def __call__(self, array):
tensor = torch.from_numpy(array)
return tensor
class ColorDistortion(object):
def __init__(self, s=1.0):
self.s = s
self.color_jitter = transforms.ColorJitter(0.8*s, 0.8*s, 0.8*s, 0.2*s)
self.rnd_color_jitter = transforms.RandomApply([self.color_jitter], p=0.8)
self.rnd_gray = transforms.RandomGrayscale(p=0.2)
def __call__(self, video):
color_distort = transforms.Compose([self.rnd_color_jitter, self.rnd_gray])
return color_distort(video)
class Compose(object):
"""Composes several transforms
Args:
transforms (list of ``Transform`` objects): list of transforms
to compose
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, clip):
for t in self.transforms:
clip = t(clip)
return clip
class RandomHorizontalFlip(object):
"""Horizontally flip the list of given images randomly
with a probability 0.5
"""
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
            PIL.Image or numpy.ndarray: Randomly flipped clip
"""
if random.random() < 0.5:
if isinstance(clip[0], np.ndarray):
return [np.fliplr(img) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [
img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
' but got list of {0}'.format(type(clip[0])))
return clip
class RandomResize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
    The larger the original image is, the longer the
    interpolation takes
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
        ratio (tuple): range of the random scaling factor
"""
def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
self.ratio = ratio
self.interpolation = interpolation
def __call__(self, clip):
scaling_factor = random.uniform(self.ratio[0], self.ratio[1])
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
new_w = int(im_w * scaling_factor)
new_h = int(im_h * scaling_factor)
new_size = (new_w, new_h)
resized = resize_clip(
clip, new_size, interpolation=self.interpolation)
return resized
class Resize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
    The larger the original image is, the longer the
    interpolation takes
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
        size (tuple): (width, height)
"""
def __init__(self, size, interpolation='nearest'):
self.size = size
self.interpolation = interpolation
def __call__(self, clip):
resized = resize_clip(
clip, self.size, interpolation=self.interpolation)
return resized
class RandomCrop(object):
"""Extract random crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            ' but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
                'Initial image size should be larger than '
                'cropped size but got cropped sizes: ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = random.randint(0, im_w - w)
y1 = random.randint(0, im_h - h)
cropped = crop_clip(clip, y1, x1, h, w)
return cropped
class CornerCrop(object):
def __init__(self, size, crop_position=None):
self.size = size
if crop_position is None:
self.randomize = True
else:
self.randomize = False
self.crop_position = crop_position
self.crop_positions = ['c', 'tl', 'tr', 'bl', 'br']
def __call__(self, imgs):
t, h, w, c = imgs.shape
corner_imgs = list()
for n in self.crop_positions:
#print(n)
if n == 'c':
th, tw = (self.size, self.size)
                x1 = int(round((w - tw) / 2.))
y1 = int(round((h - th) / 2.))
x2 = x1 + tw
y2 = y1 + th
elif n == 'tl':
x1 = 0
y1 = 0
x2 = self.size
y2 = self.size
elif n == 'tr':
x1 = w - self.size
y1 = 0
x2 = w
y2 = self.size
elif n == 'bl':
x1 = 0
y1 = h - self.size
x2 = self.size
y2 = h
elif n == 'br':
x1 = w - self.size
y1 = h - self.size
x2 = w
y2 = h
corner_imgs.append(imgs[:, y1:y2, x1:x2, :])
return corner_imgs
def randomize_parameters(self):
if self.randomize:
self.crop_position = self.crop_positions[random.randint(
0,
len(self.crop_positions) - 1)]
class RandomRotation(object):
"""Rotate entire clip_test randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
If degrees is a number instead of sequence like (min, max),
the range of degrees, will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number,'
'must be positive')
degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence,'
'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
angle = random.uniform(self.degrees[0], self.degrees[1])
if isinstance(clip[0], np.ndarray):
rotated = [skimage.transform.rotate(img, angle) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angle) for img in clip]
else:
            raise TypeError('Expected numpy.ndarray or PIL.Image ' +
                            'but got list of {0}'.format(type(clip[0])))
return rotated
class STA_RandomRotation(object):
"""Rotate entire clip_test randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
If degrees is a number instead of sequence like (min, max),
the range of degrees, will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
                raise ValueError('If degrees is a single number, '
                                 'it must be positive.')
            degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError('If degrees is a sequence, '
                                 'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
            clip (list of PIL.Image or numpy.ndarray): List of images to be rotated
            in format (h, w, c) in numpy.ndarray
        Returns:
            list of PIL.Image or numpy.ndarray: Rotated list of images
"""
bsz = len(clip)
angle = random.uniform(self.degrees[0], self.degrees[1])
angles = [(i+1)/(bsz+1) * angle for i in range(bsz)]
if isinstance(clip[0], np.ndarray):
rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)]
else:
            raise TypeError('Expected numpy.ndarray or PIL.Image ' +
                            'but got list of {0}'.format(type(clip[0])))
return rotated
class Each_RandomRotation(object):
"""Rotate entire clip_test randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
If degrees is a number instead of sequence like (min, max),
the range of degrees, will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
                raise ValueError('If degrees is a single number, '
                                 'it must be positive.')
            degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError('If degrees is a sequence, '
                                 'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
            clip (list of PIL.Image or numpy.ndarray): List of images to be rotated
            in format (h, w, c) in numpy.ndarray
        Returns:
            list of PIL.Image or numpy.ndarray: Rotated list of images
"""
bsz = len(clip)
angles = [random.uniform(self.degrees[0], self.degrees[1]) for i in range(bsz)]
# print(angles)
if isinstance(clip[0], np.ndarray):
rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)]
else:
            raise TypeError('Expected numpy.ndarray or PIL.Image ' +
                            'but got list of {0}'.format(type(clip[0])))
return rotated
class CenterCrop(object):
"""Extract center crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
            raise TypeError('Expected numpy.ndarray or PIL.Image ' +
                            'but got list of {0}'.format(type(clip[0])))
        if w > im_w or h > im_h:
            error_msg = (
                'Initial image size should be larger than '
                'cropped size but got cropped sizes : ({w}, {h}) while '
                'initial image is ({im_w}, {im_h})'.format(
                    im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = int(round((im_w - w) / 2.))
y1 = int(round((im_h - h) / 2.))
cropped = crop_clip(clip, y1, x1, h, w)
return cropped
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation and hue of the clip_test
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def get_params(self, brightness, contrast, saturation, hue):
if brightness > 0:
brightness_factor = random.uniform(
max(0, 1 - brightness), 1 + brightness)
else:
brightness_factor = None
if contrast > 0:
contrast_factor = random.uniform(
max(0, 1 - contrast), 1 + contrast)
else:
contrast_factor = None
if saturation > 0:
saturation_factor = random.uniform(
max(0, 1 - saturation), 1 + saturation)
else:
saturation_factor = None
if hue > 0:
hue_factor = random.uniform(-hue, hue)
else:
hue_factor = None
return brightness_factor, contrast_factor, saturation_factor, hue_factor
def __call__(self, clip):
"""
Args:
        clip (list): list of PIL.Image
Returns:
list PIL.Image : list of transformed PIL.Image
"""
if isinstance(clip[0], np.ndarray):
raise TypeError(
'Color jitter not yet implemented for numpy arrays')
elif isinstance(clip[0], PIL.Image.Image):
brightness, contrast, saturation, hue = self.get_params(
self.brightness, self.contrast, self.saturation, self.hue)
            # Create image transform function sequence
img_transforms = []
if brightness is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
if saturation is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
if hue is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
if contrast is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
random.shuffle(img_transforms)
            # Apply the full (randomly ordered) jitter sequence to every frame
            jittered_clip = []
            for img in clip:
                jittered_img = img
                for func in img_transforms:
                    jittered_img = func(jittered_img)
                jittered_clip.append(jittered_img)
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image ' +
                            'but got list of {0}'.format(type(clip[0])))
return jittered_clip
class EachColorJitter(object):
"""Randomly change the brightness, contrast and saturation and hue of the clip_test
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def get_params(self, brightness, contrast, saturation, hue):
if brightness > 0:
brightness_factor = random.uniform(
max(0, 1 - brightness), 1 + brightness)
else:
brightness_factor = None
if contrast > 0:
contrast_factor = random.uniform(
max(0, 1 - contrast), 1 + contrast)
else:
contrast_factor = None
if saturation > 0:
saturation_factor = random.uniform(
max(0, 1 - saturation), 1 + saturation)
else:
saturation_factor = None
if hue > 0:
hue_factor = random.uniform(-hue, hue)
else:
hue_factor = None
return brightness_factor, contrast_factor, saturation_factor, hue_factor
def __call__(self, clip):
"""
Args:
        clip (list): list of PIL.Image
Returns:
list PIL.Image : list of transformed PIL.Image
"""
if isinstance(clip[0], np.ndarray):
raise TypeError(
'Color jitter not yet implemented for numpy arrays')
        elif isinstance(clip[0], PIL.Image.Image):
            # Re-sample the jitter parameters for every frame, so each image in
            # the clip receives its own random color transformation
            jittered_clip = []
            for img in clip:
                brightness, contrast, saturation, hue = self.get_params(
                    self.brightness, self.contrast, self.saturation, self.hue)
                # Create image transform function sequence
                img_transforms = []
                if brightness is not None:
                    img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
                if saturation is not None:
                    img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
                if hue is not None:
                    img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
                if contrast is not None:
                    img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
                random.shuffle(img_transforms)
                jittered_img = img
                for func in img_transforms:
                    jittered_img = func(jittered_img)
                jittered_clip.append(jittered_img)
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image ' +
                            'but got list of {0}'.format(type(clip[0])))
return jittered_clip
class Normalize(object):
"""Normalize a clip_test with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this sync_dir
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This sync_dir acts out of place, i.e., it does not mutates the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, clip):
"""
Args:
            clip (Tensor): Tensor clip of size (T, C, H, W) to be normalized.
        Returns:
            Tensor: Normalized Tensor clip.
"""
return normalize(clip, self.mean, self.std)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class TensorToNumpy(object):
def __init__(self):
pass
def __call__(self, clip):
np_clip = clip.permute(1, 2, 3, 0).cpu().detach().numpy()
pil_clip = [Image.fromarray(np.uint8(numpy_image)).convert('RGB') for numpy_image in np_clip]
return pil_clip
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/video_transform.py |
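# Illustrative sketch (not part of the original file): composing the clip
# transforms defined above for a training-style augmentation pipeline. It
# assumes the classes are in scope (e.g. imported from
# InternVideo.video_transform) and that PIL, numpy and torchvision are
# installed; frame count and image sizes are arbitrary example values.
import numpy as np
from PIL import Image
from torchvision import transforms

# a dummy clip of 8 random RGB frames as PIL images
clip = [Image.fromarray(np.random.randint(0, 256, (256, 340, 3), dtype=np.uint8))
        for _ in range(8)]

train_transform = transforms.Compose([
    Resize(256),                        # rescale every frame the same way
    RandomCrop(224),                    # one random crop shared by all frames
    ColorJitter(0.4, 0.4, 0.4, 0.1),    # photometric jitter on the PIL frames
])
aug_clip = train_transform(clip)        # still a list of 8 PIL images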
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/simple_tokenizer.py |
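# Illustrative sketch (not part of the original file): a round trip through the
# BPE tokenizer defined above. It assumes the bundled bpe_simple_vocab_16e6.txt.gz
# vocabulary sits next to the module, which is what default_bpe() expects.
tokenizer = SimpleTokenizer()
ids = tokenizer.encode("a person playing basketball")
print(ids)                       # list of BPE token ids
print(tokenizer.decode(ids))     # "a person playing basketball " (decode re-adds word boundaries)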
import torch
import numpy as np
import decord
from typing import Any, OrderedDict, Union, List
from pkg_resources import packaging
from torchvision import transforms
from . import video_transform
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
from .clip_utils.model import build_model
__all__ = ["load_model", "load_video", "tokenize"]
_tokenizer = _Tokenizer()
def load_model(path):
state = torch.load(path, map_location="cpu")["state_dict"]
state = {k[len("clip.") :]: v for k, v in state.items() if k.startswith("clip.")}
model = build_model(state_dict=state)
return model
def load_video(path):
video_reader = decord.VideoReader(path, num_threads=1, ctx=decord.cpu(0))
decord.bridge.set_bridge('torch')
video_len = len(video_reader)
    video = video_reader.get_batch(np.linspace(0, video_len - 1, 8).astype(int)).byte()
video = video.permute(3, 0, 1, 2)
input_mean = [0.48145466, 0.4578275, 0.40821073]
input_std = [0.26862954, 0.26130258, 0.27577711]
crop_size, scale_size = 224, 256
trans = transforms.Compose([
video_transform.TensorToNumpy(),
video_transform.Resize(scale_size),
video_transform.CenterCrop(crop_size),
video_transform.ClipToTensor(channel_nb=3),
video_transform.Normalize(mean=input_mean, std=input_std)
])
video = trans(video)
return video
def tokenize(
texts: Union[str, List[str]],
context_length: int = 77,
truncate: bool = False,
return_special_tokens_mask: bool = False,
) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
else:
result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(
f"Input {texts[i]} is too long for context length {context_length}"
)
result[i, : len(tokens)] = torch.tensor(tokens)
special_tokens_mask[i, len(tokens) :] = 1
if return_special_tokens_mask:
return result, special_tokens_mask
return result
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/internvideo.py |
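# Illustrative sketch (not part of the original file): zero-shot video/text
# matching with the helpers above. The checkpoint and video paths are
# placeholders, and it assumes the package __init__ re-exports load_model,
# load_video and tokenize (otherwise import them from InternVideo.internvideo);
# encode_video/encode_text come from clip_utils.model.VideoIntern.
import torch
import InternVideo

model = InternVideo.load_model("./InternVideo-MM-L-14.ckpt").cuda()
video = InternVideo.load_video("./example.mp4").cuda()            # [3, 8, 224, 224]
text = InternVideo.tokenize(["a person is dancing",
                             "a dog is running"]).cuda()

with torch.no_grad():
    video_feat = model.encode_video(video.unsqueeze(0))           # [1, embed_dim]
    text_feat = model.encode_text(text)                           # [2, embed_dim]
    video_feat = video_feat / video_feat.norm(dim=-1, keepdim=True)
    text_feat = text_feat / text_feat.norm(dim=-1, keepdim=True)
    probs = (100.0 * video_feat @ text_feat.T).softmax(dim=-1)
print(probs)                                                      # similarity over the two captions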
from .clip import *
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/__init__.py |
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.checkpoint import checkpoint_sequential
from . import utils
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict(
[
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model)),
]
)
)
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = (
self.attn_mask.to(dtype=x.dtype, device=x.device)
if self.attn_mask is not None
else None
)
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(
self,
width: int,
layers: int,
heads: int,
attn_mask: torch.Tensor = None,
use_checkpoint=False,
checkpoint_num=[0, 0],
):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(
*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
)
self.use_checkpoint = use_checkpoint
self.checkpoint_num = checkpoint_num
def forward(self, x: torch.Tensor):
if self.use_checkpoint and self.checkpoint_num[1] > 0:
segments = min(len(self.resblocks), self.checkpoint_num[1])
return checkpoint_sequential(self.resblocks, segments, x)
else:
return self.resblocks(x)
class VideoIntern(nn.Module):
def __init__(
self,
embed_dim: int,
# vision
vision_width: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int,
# uni
n_layers=4,
n_dim=768,
n_head=12,
drop_path_rate=0.0,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_image_attnmap=True,
backbone='vit_2plus1d_dw_bias_b16',
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
use_checkpoint=False,
checkpoint_num=[0],
):
super().__init__()
self.vision_width = n_dim
self.context_length = context_length
self.visual = utils.__dict__[backbone](
pretrained=False,
t_size=t_size,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
n_dim=n_dim,
n_head=n_head,
return_list=return_list,
drop_path_rate=drop_path_rate,
backbone_drop_path_rate=drop_path_rate,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
)
self.visual_ln_post = nn.LayerNorm(n_dim)
scale = n_dim**-0.5
self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim))
self.return_qk = use_image_attnmap
self.return_num = n_layers
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask(),
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(
torch.empty(self.context_length, transformer_width)
)
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.embed_dim = embed_dim
        # We separate the mask embedding to load the pretrained model
self.text_mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width))
# # To keep the num_embeddings unchanged, we add this to embedded text
# self.eot_token_embedding = nn.Parameter(torch.empty(1, transformer_width))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
nn.init.normal_(self.text_mask_embedding, std=0.02)
# nn.init.constant_(self.eot_token_embedding, 0.0)
proj_std = (self.transformer.width**-0.5) * (
(2 * self.transformer.layers) ** -0.5
)
attn_std = self.transformer.width**-0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width**-0.5)
nn.init.constant_(self.visual_ln_post.weight, 1.0)
nn.init.constant_(self.visual_ln_post.bias, 0.0)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_video(
self, video, return_all_feats=False, masked_indices=None, mode="video"
):
# video: [N, C, T, H, W]
feats = self.visual(video, return_all_feats=return_all_feats, mode=mode)
if return_all_feats:
x, feats = feats
else:
x = feats
x = self.visual_ln_post(x)
if self.visual_proj is not None:
x = x @ self.visual_proj
if return_all_feats:
return x, feats # [N, C], [L, N, T, C]
return x
def encode_text(self, text, masked_indices=None, return_all_feats=False):
# assert (text.max(dim=-1)[0] + 1 == self.token_embedding.num_embeddings).all(), \
# "The last token of each sentence should be eot_token, check the input"
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
# x[torch.arange(x.shape[0]), text.argmax(dim=-1)] += self.eot_token_embedding
if masked_indices is not None:
x[masked_indices] = self.text_mask_embedding
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)]
if self.text_projection is not None:
feats = feats @ self.text_projection
if return_all_feats:
return feats, x
return feats
def build_model(
state_dict: dict,
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.0,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
spatial_size=14,
use_t_conv=True,
use_image_attnmap=True,
use_t_pos_embed=True,
no_pretrain=False,
init_zero=True,
use_checkpoint=False,
checkpoint_num=[0],
):
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len(
[
k
for k in state_dict.keys()
if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
]
)
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(
set(
k.split(".")[2]
for k in state_dict
if k.startswith(f"transformer.resblocks")
)
)
vision_width = state_dict["visual_proj"].shape[0]
n_dim = vision_width
if vision_width == 768:
backbone = "vit_only_global_b16"
n_head = 12
return_list = [8, 9, 10, 11]
elif vision_width == 1024:
backbone = "vit_only_global_l14"
n_head = 16
return_list = [20, 21, 22, 23]
else:
raise NotImplementedError
model = VideoIntern(
embed_dim,
vision_width,
context_length,
vocab_size,
transformer_width,
transformer_heads,
transformer_layers,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
t_size=t_size,
use_image_attnmap=use_image_attnmap,
backbone=backbone,
return_list=return_list,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
)
model.load_state_dict(state_dict, strict=False)
return model.eval()
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/model.py |
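# Illustrative sketch (not part of the original file): driving build_model()
# above from a raw checkpoint. The path is a placeholder; stripping the "clip."
# prefix mirrors load_model() in internvideo.py, and the ViT-B/16 vs ViT-L/14
# backbone is inferred from the width of visual_proj in the state dict.
import torch

state = torch.load("./InternVideo-MM-L-14.ckpt", map_location="cpu")["state_dict"]
state = {k[len("clip."):]: v for k, v in state.items() if k.startswith("clip.")}
model = build_model(state_dict=state)
print(model.embed_dim, model.context_length)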
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(
name: str,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit: bool = False, download_root: str = None,
# evl
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.0, 0.0, 0.0, 0.0], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False, init_zero=True,
use_checkpoint=False, checkpoint_num=[0, 0, 0],
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
with open(model_path, 'rb') as opened_file:
try:
# loading JIT archive
model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(
state_dict or model.state_dict(),
n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size,
use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain,
init_zero=init_zero, use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num,
).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
else:
result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
special_tokens_mask[i, len(tokens):] = 1
if return_special_tokens_mask:
return result, special_tokens_mask
return result
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/clip.py |
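# Illustrative sketch (not part of the original file): listing the OpenAI CLIP
# checkpoints this loader knows about and tokenizing an over-long caption with
# truncation. It assumes the module-level _tokenizer above is instantiated.
print(available_models())         # ['ViT-B/32', 'ViT-B/16', 'ViT-L/14']
tokens, mask = tokenize("a very long caption " * 20, truncate=True,
                        return_special_tokens_mask=True)
print(tokens.shape, mask.shape)   # torch.Size([1, 77]) torch.Size([1, 77])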
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/simple_tokenizer.py |
#!/usr/bin/env python
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from .attention_module import multi_head_attention_forward
class _LinearWithBias(Linear):
bias: Tensor
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
\text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None, return_qk=False):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the position
with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
              S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- return_qk: whether return Q and K.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if return_qk:
if not self._qkv_same_embed_dim:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, return_qk=True)
else:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, return_qk=True)
return q, k, attn_output, attn_output_weights
else:
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask)
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention.py |
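# Illustrative sketch (not part of the original file): the extra return_qk path
# of the MultiheadAttention defined above, which also returns the projected
# queries and keys. It assumes the custom multi_head_attention_forward imported
# from .attention_module supports the return_qk flag, as forward() suggests.
import torch

attn = MultiheadAttention(embed_dim=768, num_heads=12)
x = torch.randn(197, 2, 768)                  # (L, N, E): 197 tokens, batch of 2
q, k, out, weights = attn(x, x, x, return_qk=True)
print(out.shape)                              # torch.Size([197, 2, 768])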
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
import logging
logger = logging.getLogger(__name__)
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
def conv_1x1x1(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)
def conv_3x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)
def conv_1x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups)
def bn_3d(dim):
return nn.BatchNorm3d(dim)
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None, drop_path=0.0,
):
super().__init__()
self.n_head = n_head
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
logger.info(f'Drop path rate: {drop_path}')
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, T=8, use_checkpoint=False):
# x: 1+HW, NT, C
# MHSA
if use_checkpoint:
attn_out = checkpoint.checkpoint(self.attention, self.ln_1(x))
x = x + self.drop_path(attn_out)
else:
x = x + self.drop_path(self.attention(self.ln_1(x)))
# FFN
if use_checkpoint:
mlp_out = checkpoint.checkpoint(self.mlp, self.ln_2(x))
x = x + self.drop_path(mlp_out)
else:
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Extractor(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None,
mlp_factor=4.0, dropout=0.0, drop_path=0.0,
):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
logger.info(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
# zero init
nn.init.xavier_uniform_(self.attn.in_proj_weight)
nn.init.constant_(self.attn.out_proj.weight, 0.)
nn.init.constant_(self.attn.out_proj.bias, 0.)
nn.init.xavier_uniform_(self.mlp[0].weight)
nn.init.constant_(self.mlp[-1].weight, 0.)
nn.init.constant_(self.mlp[-1].bias, 0.)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(
self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0.,
use_checkpoint=False, checkpoint_num=[0], t_size=8,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.T = t_size
self.return_list = return_list
# Backbone
b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(
width, heads, attn_mask,
drop_path=b_dpr[i],
) for i in range(layers)
])
# checkpoint
self.use_checkpoint = use_checkpoint
self.checkpoint_num = checkpoint_num
logger.info(f'Use checkpoint: {self.use_checkpoint}')
logger.info(f'Checkpoint number: {self.checkpoint_num}')
# Extractor
assert n_layers == len(return_list)
self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim))
self.dpe = nn.ModuleList([
nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.dpe:
nn.init.constant_(m.bias, 0.)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.dec = nn.ModuleList([
Extractor(
n_dim, n_head, mlp_factor=mlp_factor,
dropout=mlp_dropout[i], drop_path=dpr[i],
) for i in range(n_layers)
])
# # projection
# self.proj = nn.Sequential(
# nn.LayerNorm(n_dim),
# nn.Dropout(cls_dropout),
# nn.Linear(n_dim, num_classes),
# )
self.balance = nn.Parameter(torch.zeros((n_dim)))
self.sigmoid = nn.Sigmoid()
def forward(self, x, mode='video', return_all_feats=False):
if mode == 'video':
T_down = self.T
else:
T_down = 1
L, NT, C = x.shape
N = NT // T_down
H = W = int((L - 1) ** 0.5)
cls_token = self.temporal_cls_token.repeat(1, N, 1)
j = -1
for i, resblock in enumerate(self.resblocks):
if self.use_checkpoint and i < self.checkpoint_num[0]:
x = resblock(x, T_down, use_checkpoint=True)
else:
x = resblock(x, T_down)
if i in self.return_list:
j += 1
tmp_x = x.clone()
tmp_x = tmp_x.view(L, N, T_down, C)
# dpe
_, tmp_feats = tmp_x[:1], tmp_x[1:]
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W)
tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1)
tmp_x[1:] = tmp_x[1:] + tmp_feats
# enhancer
tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
cls_token = self.dec[j](cls_token, tmp_x)
weight = self.sigmoid(self.balance)
residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C
# return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual)
feats = (1 - weight) * cls_token[0, :, :] + weight * residual
if return_all_feats:
return feats, x.view(L, N, T_down, C)
return feats
class VisionTransformer(nn.Module):
def __init__(
self,
# backbone
input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0.,
use_checkpoint=False, checkpoint_num=[0], t_size=8,
# extractor
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(
width, layers, heads,
backbone_drop_path_rate=backbone_drop_path_rate,
use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size,
return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout, num_classes=num_classes,
)
def forward(self, x, mode='video', return_all_feats=False):
x = self.conv1(x) # shape = [*, width, grid, grid]
N, C, T, H, W = x.shape
x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C)
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
out = self.transformer(x, mode=mode, return_all_feats=return_all_feats)
return out
def inflate_weight(weight_2d, time_dim, center=True):
if center:
weight_3d = torch.zeros(*weight_2d.shape)
weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
middle_idx = time_dim // 2
weight_3d[:, :, middle_idx, :, :] = weight_2d
else:
weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
weight_3d = weight_3d / time_dim
return weight_3d
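def _demo_inflate_weight():
    # Illustrative addition (not part of the original file): center-inflating a
    # 2D patch-embedding kernel into a 3D one. The middle temporal slice holds
    # the image weights and the remaining slices are zero, so at initialisation
    # the inflated kernel only responds to the centre frame.
    weight_2d = torch.randn(768, 3, 16, 16)            # e.g. a ViT-B/16 conv1 weight
    weight_3d = inflate_weight(weight_2d, time_dim=3)  # -> [768, 3, 3, 16, 16]
    assert weight_3d.shape == (768, 3, 3, 16, 16)
    assert torch.equal(weight_3d[:, :, 1], weight_2d)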
def load_state_dict(model, state_dict):
state_dict_3d = model.state_dict()
for k in state_dict.keys():
if state_dict[k].shape != state_dict_3d[k].shape:
if len(state_dict_3d[k].shape) <= 2:
logger.info(f'Ignore: {k}')
continue
logger.info(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}')
time_dim = state_dict_3d[k].shape[2]
state_dict[k] = inflate_weight(state_dict[k], time_dim)
model.load_state_dict(state_dict, strict=False)
def vit_only_global_b32(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_b16(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_l14(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[20, 21, 22, 23],
n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_l14_336(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[20, 21, 22, 23],
n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import numpy as np
seed = 4217
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
num_frames = 8
model = vit_only_global_l14(
pretrained=False,
t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4,
use_checkpoint=True, checkpoint_num=[0],
)
flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224))
s = time.time()
logger.info(flop_count_table(flops, max_depth=1))
logger.info(time.time()-s) | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/clip_vit_only_global.py |
# from .evl_module import TransformerDecoder
from .clip_vit_only_global import vit_only_global_b32, vit_only_global_b16, vit_only_global_l14, vit_only_global_l14_336 | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/__init__.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import pad, linear, softmax, dropout
Tensor = torch.Tensor
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is an binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- return_qk: whether return Q and K.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(
attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
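if __name__ == '__main__':
    # Minimal self-attention sanity check (added sketch, not part of the
    # original module; shapes and random weights are illustrative assumptions).
    L, N, E, H = 16, 2, 64, 8
    x = torch.randn(L, N, E)
    in_proj_w = torch.randn(3 * E, E) * 0.02
    in_proj_b = torch.zeros(3 * E)
    out_proj_w = torch.randn(E, E) * 0.02
    out_proj_b = torch.zeros(E)
    out, attn = multi_head_attention_forward(
        x, x, x, E, H, in_proj_w, in_proj_b, None, None, False, 0.0,
        out_proj_w, out_proj_b, training=False)
    # out keeps the [L, N, E] layout; attn is head-averaged with shape [N, L, S]
    assert out.shape == (L, N, E) and attn.shape == (N, L, L)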
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention_module.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import pad, linear, softmax, dropout
Tensor = torch.Tensor
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False,
rpb: Tensor = None,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is an binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- return_qk: whether return Q and K.
- rpb: relative postion bias
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
if rpb is not None:
attn_output_weights = attn_output_weights + rpb
attn_output_weights = softmax(attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
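# Note (added for clarity): the only difference from attention_module.py is the
# optional `rpb` tensor, which is added to the pre-softmax attention logits and
# so must broadcast against [bsz * num_heads, tgt_len, src_len]. One valid
# (assumed) layout is a per-head bias of shape [num_heads, tgt_len, src_len]
# tiled bsz times along dim 0.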
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention_module_bias.py |
from TerraByte.model.terrabyte_triton import TerraByteTriton as TerraByte
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
PRIME_LEN = 100
SEQ_LEN = 8192
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
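# Example (illustrative): decode_token(72) -> 'H', while control bytes below 32
# (e.g. a newline, 10) are clamped to the space character, so raw enwik8 bytes
# always print as visible text.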
# instantiate GPT-like decoder model
model = TerraByte(
num_tokens = 256,
dim = (768, 512, 256),
depth = (6, 4, 2),
max_seq_len = (512, 4, 4),
flash_attn = True
).cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
x = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
train_x, valid_x = np.split(x, [int(90e6)])
data_train, data_val = map(torch.from_numpy, (train_x, valid_x))
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
if i != 0 and i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime_inp = inp[:PRIME_LEN]
prime = decode_tokens(prime_inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(prime_inp[None, :])
sample = sample.flatten(1)
output_str = decode_tokens(sample[0][PRIME_LEN:])
print(output_str) | TerraByte-master | train_triton.py |
from setuptools import setup, find_packages
setup(
name = 'TerraByte',
packages = find_packages(),
version = '0.1.5',
license='MIT',
description = 'TerraByte - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Kye Gomez',
author_email = '[email protected]',
url = 'https://github.com/kyegomez/TerraByte',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers'
],
install_requires=[
'torch',
'einops',
'triton',
'beartype'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| TerraByte-master | setup.py |
import torch
from TerraByte import TerraByte
model = TerraByte(
num_tokens = 16000,
dim = (512, 256),
dim_head=64,
dilation_rate=4,
segment_size=2,
max_seq_len = (1024, 4),
depth = (6, 4),
heads = 8,
)
x = torch.randint(0, 16000, (1, 1024, 4))
loss = model(x, return_loss = True)
loss.backward()
# then after much training
logits = model(x)
# and sample from the logits accordingly
# or you can use the generate function
sampled = model.generate(temperature = 0.9, filter_thres = 0.9) # (1, 1024, 4) | TerraByte-master | example.py |
from TerraByte import TerraByte
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
PRIME_LEN = 100
SEQ_LEN = 8192
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = TerraByte(
num_tokens = 256,
dim = (768, 512, 256),
depth = (6, 4, 2),
max_seq_len = (512, 4, 4),
flash_attn = True
).cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
x = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
train_x, valid_x = np.split(x, [int(90e6)])
data_train, data_val = map(torch.from_numpy, (train_x, valid_x))
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
#SAVE THE MODEL WEIGHTS
torch.save(model.state_dict(), f"./model_{i}.pth")
if i != 0 and i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime_inp = inp[:PRIME_LEN]
prime = decode_tokens(prime_inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(prime_inp[None, :])
sample = sample.flatten(1)
output_str = decode_tokens(sample[0][PRIME_LEN:])
print(output_str) | TerraByte-master | train.py |
import unittest
import torch
from torch.nn import Dropout
from torch import einsum
from torch import nn
from torch.testing import assert_allclose
from TerraByte.model.attend import Attend, FlashAttention, EfficientAttentionConfig
class TestAttending(unittest.TestCase):
def setUp(self):
self.attend = Attend(dim=512)
def test_init_default(self):
        self.assertEqual(self.attend.dim, 512)
self.assertEqual(self.attend.heads, 64)
self.assertEqual(self.attend.dim_head, 64) | TerraByte-master | testing/attention.py |
from TerraByte.model.model import TerraByte as TerraByteModel
import torch
class TerraByte:
def __init__(self,
num_tokens = 16000,
dim = (512, 256),
dilation_rate=4,
segment_size=2,
max_seq_len = (1024, 4),
depth = (6, 4),
dim_head = 64,
heads = 8,
):
        self.model = TerraByteModel(
num_tokens=num_tokens,
dim=dim,
max_seq_len=max_seq_len,
dilation_rate=dilation_rate,
segment_size=segment_size,
depth=depth,
dim_head=dim_head,
heads=heads
)
def predict(self, x):
self.model.eval()
with torch.no_grad():
logits = self.model(x)
return logits
def generate(
self,
num_tokens=1024,
temperature=0.9,
filter_thres=0.9
):
self.model.eval()
with torch.no_grad():
sampled = self.model.generate(num_tokens, temperature=temperature, filter_thres=filter_thres)
return sampled
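# Illustrative usage of this thin wrapper (argument values are assumptions):
#   wrapper = TerraByte(num_tokens=16000, dim=(512, 256), max_seq_len=(1024, 4), depth=(6, 4))
#   tokens = torch.randint(0, 16000, (1, 1024, 4))
#   logits = wrapper.predict(tokens)
#   sampled = wrapper.generate(num_tokens=1024, temperature=0.9, filter_thres=0.9)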
| TerraByte-master | TerraByte/terrabyte.py |
from TerraByte.model.model import TerraByte
| TerraByte-master | TerraByte/__init__.py |
TerraByte-master | TerraByte/training/__init__.py |
|
import math
import multiprocessing
import os
from datetime import timedelta
from functools import partial
from itertools import chain
import torch
from accelerate import Accelerator
from accelerate.utils import DummyOptim, DummyScheduler, InitProcessGroupKwargs
from datasets import load_dataset
from lion_pytorch import Lion
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl,
apply_activation_checkpointing,
checkpoint_wrapper,
)
from torch.distributed.fsdp import (
BackwardPrefetch,
FullyShardedDataParallel,
MixedPrecision,
ShardingStrategy,
)
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from torch.nn import LayerNorm
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AutoTokenizer,
default_data_collator,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
set_seed,
)
from TerraByte.model import TerraByte, Transformer
from TerraByte.utils.stable_adamw import StableAdamWUnfused
# import bitsandbytes as bnb
############ SETUP CONFIG
# import torch.distributed as dist
# dist.init_process_group(backend='nccl', init_method="env://")
################
class CFG:
BATCH_SIZE = 3
GRADIENT_ACCUMULATE_EVERY: int = 1
SEED: int = 42
LEARNING_RATE: float = 3e-4
WEIGHT_DECAY: float = 0.1
SEQ_LEN: int = 8192
NUM_CPU: int = multiprocessing.cpu_count()
USE_DEEPSPEED: bool = True
USE_FSDP: bool = True
USE_PRETOKENIZED: bool = True
USE_ACTIVATION_CHECKPOINTING: bool = True
RESUME_FROM_CHECKPOINT: str = False
CHECKPOINTING_STEPS: int = 1000
OUTPUT_DIR: str = 'checkpoints/' # Folder
ENTITY_NAME: str = "TerraByte"
# helpers
def print_num_params(model, accelerator: Accelerator):
# n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
# activation checkpointing
def activation_checkpointing(
model: torch.nn.Module,
offload_to_cpu: bool = False,
accelerator: Accelerator = None,
):
"""
Apply activation checkpointing to a model.
Args:
model (Module): The model to which to apply activation checkpointing.
offload_to_cpu (bool, optional): Whether to offload the activations to CPU. Defaults to False.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
"""
if accelerator is not None:
accelerator.print(f"Using activation checkpointing")
check_fn = lambda submodule: isinstance(submodule, Transformer)
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=offload_to_cpu,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
# FSDP
def fsdp(
model: torch.nn.Module,
auto_wrap: bool = False,
mp: str = "fp32",
shard_strat: str = "NO_SHARD",
):
"""
This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding.
Args:
model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP.
auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False.
mp (str, optional): The mixed precision mode to be used. Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'.
shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'.
Raises:
ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'.
ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'.
Returns:
torch.nn.Module: The input model wrapped with FSDP.
"""
if auto_wrap:
TerraByte_auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
Transformer,
},
)
else:
TerraByte_auto_wrap_policy = None
if mp == "bf16":
mp_fsdp = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
elif mp == "fp16":
mp_fsdp = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
elif mp == "fp32":
mp_fsdp = MixedPrecision(
param_dtype=torch.float32,
# Gradient communication precision.
reduce_dtype=torch.float32,
# Buffer precision.
buffer_dtype=torch.float32,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'bf16', 'fp16' or 'fp32', got: {}".format(
mp
)
)
if shard_strat == "SHARD_GRAD":
sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP
elif shard_strat == "FULL_SHARD":
sharding_strat_fsdp = ShardingStrategy.FULL_SHARD
elif shard_strat == "NO_SHARD":
sharding_strat_fsdp = ShardingStrategy.NO_SHARD
else:
raise ValueError(
"Invalid scheduler_type. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format(
shard_strat
)
)
model = FullyShardedDataParallel(
model,
auto_wrap_policy=TerraByte_auto_wrap_policy,
mixed_precision=mp_fsdp,
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
sharding_strategy=sharding_strat_fsdp,
forward_prefetch=True,
use_orig_params=True,
)
return model
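# Illustrative call (added sketch; values are assumptions): wrap the model with
# bf16 mixed precision, full sharding, and automatic transformer-block wrapping:
#   model = fsdp(model, auto_wrap=True, mp="bf16", shard_strat="FULL_SHARD")
# This must run inside an initialized distributed process group, which the
# Accelerate launcher normally sets up.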
# learning rate scheduler
def get_lr_scheduler_with_warmup(
optimizer: torch.optim.Optimizer,
scheduler_type: str,
num_warmup_steps: int,
max_train_steps: int,
grad_accumulate_every: int = 1,
accelerator: Accelerator = None,
):
"""
Get a learning rate scheduler with warmup.
Args:
optimizer (Optimizer): The optimizer for which to create the learning rate scheduler.
scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine".
num_warmup_steps (int): The number of warmup steps for the learning rate scheduler.
max_train_steps (int): The maximum number of training steps.
grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
Returns:
The learning rate scheduler with warmup.
Raises:
ValueError: If scheduler_type is not "linear" or "cosine".
"""
NUM_WARMUP_STEPS = num_warmup_steps
GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every
if accelerator is not None:
accelerator.print(f"Using {scheduler_type} lr scheduler")
if scheduler_type == "linear":
return get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
elif scheduler_type == "cosine":
return get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format(
scheduler_type
)
)
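# Illustrative call (values are assumptions): with 10_000 optimizer steps and a
# 1% warmup, the cosine variant warms the LR up linearly for the first
# 100 * grad_accumulate_every scheduler steps and then follows a cosine decay:
#   lr_scheduler = get_lr_scheduler_with_warmup(optim, "cosine", 100, 10_000)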
# optimizers
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float,
beta_1: float,
beta_2: float,
optimizer_type: str,
use_fsdp: bool = True,
accelerator: Accelerator = None,
):
"""
Decouples the optimizer from the training process.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', or 'stable_adamw'.
use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None.
Returns:
Optimizer: The initialized optimizer.
Raises:
ValueError: If the optimizer type is not 'lion', 'adamw' or 'stable_adamw'.
"""
    if accelerator is not None:
        accelerator.print(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
for ndim in [LayerNorm, torch.nn.Embedding]:
if isinstance(module, ndim):
# If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list.
if module_name == exclude_module:
no_decay.append(f"{module_name}.weight")
else:
# If the module is an instance of LayerNorm
no_decay.append(f"{module_name}.gamma")
# Exit the inner loop since the desired module has been found.
break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
for ndim in [torch.nn.Linear]:
if isinstance(module, ndim):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "lion":
optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "adamw":
optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "deepspeed":
optimizer = DummyOptim(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params, lr=learning_rate, betas=(beta_1, beta_2),
)
# elif optimizer_type=="Adam8bit":
# optimizer = bnb.optim.Adam8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
# elif optimizer_type=="Lion8Bit":
# optimizer = bnb.optim.Lion8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
else:
raise ValueError(
"Invalid optimizer_type. Expected 'lion', 'adamw', 'deepspeed' or 'stable_adamw', got: {}".format(
optimizer_type
)
)
# Return the optimizer.
return optimizer
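# Sketch of the grouping this produces (added for clarity): Linear weights land
# in the weight-decay group, LayerNorm gamma parameters and the token embedding
# land in the zero-decay group, and to_logits.weight is left out of both groups.
# A typical (assumed) call outside FSDP would look like:
#   optim = decoupled_optimizer(model, 3e-4, 0.1, 0.9, 0.95, "adamw",
#                               use_fsdp=False, accelerator=accelerator)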
# dataloaders
def build_dataloaders():
"""
Build data loaders for training.
This function performs the following steps:
1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model.
2. Load the "openwebtext" dataset.
3. Tokenize the dataset, adding the end-of-sentence token to each text.
4. Process the tokenized dataset into chunks of a specified block size.
Returns:
Dataset: The processed dataset ready for training.
"""
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
dataset = load_dataset("openwebtext", split="train")
tokenized_dataset = dataset.map(
lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]),
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
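    # Worked example (numbers are illustrative): with block_size = 8192 and a
    # concatenated stream of 20_000 tokens, total_length is truncated to 16_384,
    # group_texts yields two 8_192-token chunks, and the 3_616-token remainder
    # is dropped.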
train_dataset = tokenized_dataset.map(
group_texts, batched=True, num_proc=CFG.NUM_CPU,
)
return train_dataset
#switch to falconwebdataset
def build_pre_tokenized():
d0 = load_dataset("conceptofmind/c4_0-to-20_neox_with_eos_8k", split="train[:10]")
# d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train")
# d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train")
# d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train")
# d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train")
# train_dataset = concatenate_datasets([d0, d1, d2, d3, d4])
return d0
def Train():
# accelerator
timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000))
accelerator = Accelerator(
gradient_accumulation_steps=CFG.GRADIENT_ACCUMULATE_EVERY,
mixed_precision="fp16",
log_with="wandb",
kwargs_handlers=[timeout],
)
# AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = 4 #??????
accelerator.init_trackers(
project_name="TerraByte",
config={
"batch_size": CFG.BATCH_SIZE,
"gradient_accumulate_every": CFG.GRADIENT_ACCUMULATE_EVERY,
"learning_rate": CFG.LEARNING_RATE,
"seq_len": CFG.SEQ_LEN,
},
init_kwargs={"wandb": {"entity": CFG.ENTITY_NAME}},
)
accelerator.print(f"Total GPUS: {accelerator.num_processes}")
# set seed
set_seed(CFG.SEED)
model = TerraByte().to(accelerator.device)
print_num_params(model, accelerator)
if CFG.USE_FSDP:
model = fsdp(
model,
mp="fp16",
shard_strat="SHARD_GRAD"
)
if CFG.USE_ACTIVATION_CHECKPOINTING:
activation_checkpointing(model, accelerator)
model = accelerator.prepare(model)
# dataloaders
if CFG.USE_PRETOKENIZED:
train_dataset = build_pre_tokenized()
else:
train_dataset = build_dataloaders()
train_loader = DataLoader(
train_dataset, batch_size=CFG.BATCH_SIZE, collate_fn=default_data_collator,
)
# optimizer
optim = decoupled_optimizer(
model=model,
learning_rate=CFG.LEARNING_RATE,
weight_decay=CFG.WEIGHT_DECAY,
beta_1=0.90,
beta_2=0.95,
        optimizer_type="stable_adamw",
use_fsdp=True,
accelerator=accelerator
)
# Determine number of training steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps: {max_train_steps}")
# lr scheduler
NUM_WARMUP_STEPS = int(max_train_steps * 0.01)
accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}")
if CFG.USE_DEEPSPEED:
lr_scheduler = DummyScheduler(
optim,
total_num_steps=max_train_steps * accelerator.num_processes,
warmup_num_steps=NUM_WARMUP_STEPS
)
else:
lr_scheduler = get_lr_scheduler_with_warmup(
optimizer=optim,
scheduler_type="cosine",
num_warmup_steps=NUM_WARMUP_STEPS,
max_train_steps=max_train_steps,
grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY,
)
# prepare
optim, train_loader, lr_scheduler = accelerator.prepare(
optim, train_loader, lr_scheduler
)
# checkpoint scheduler
accelerator.register_for_checkpointing(lr_scheduler)
# I do not know why Huggingface recommends recalculation of max_train_steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps recalculated: {max_train_steps}")
# Total batch size for logging
total_batch_size = (
CFG.BATCH_SIZE * accelerator.num_processes * CFG.GRADIENT_ACCUMULATE_EVERY
)
accelerator.print(f"Total batch size: {total_batch_size}")
# resume training
progress_bar = tqdm(
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
if CFG.RESUME_FROM_CHECKPOINT:
if CFG.RESUME_FROM_CHECKPOINT is not None or CFG.RESUME_FROM_CHECKPOINT != "":
accelerator.print(f"Resuming from checkpoint {CFG.RESUME_FROM_CHECKPOINT}")
accelerator.load_state(CFG.RESUME_FROM_CHECKPOINT)
path = os.path.basename(CFG.RESUME_FROM_CHECKPOINT)
training_difference = os.path.splitext(path)[0]
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", ""))
* CFG.GRADIENT_ACCUMULATE_EVERY
)
if CFG.RESUME_FROM_CHECKPOINT and resume_step is not None:
train_loader = accelerator.skip_first_batches(train_loader, resume_step)
completed_steps += resume_step
progress_bar.update(resume_step)
# training
model.train()
for step, batch in enumerate(train_loader):
with accelerator.accumulate(model):
inputs = batch["input_ids"].to(accelerator.device)
loss = model(inputs, return_loss=True)
accelerator.backward(loss)
accelerator.log({"loss": loss.item()}, step=step)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
lr_scheduler.step()
optim.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(CFG.CHECKPOINTING_STEPS, int):
if completed_steps % CFG.CHECKPOINTING_STEPS == 0:
output_dir = f"step_{completed_steps }"
if CFG.OUTPUT_DIR is not None:
output_dir = os.path.join(CFG.OUTPUT_DIR, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= max_train_steps:
break
# end training
# accelerator.print(f"Training Finished")
accelerator.end_training()
# save final model
# accelerator.print(f"Saving model to {CFG.OUTPUT_DIR}")
if CFG.OUTPUT_DIR is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
with accelerator.main_process_first():
accelerator.save(
unwrapped_model.state_dict(), f"{CFG.OUTPUT_DIR}/final/final_model.pt"
)
def main():
    # [CRITICAL] Pay attention to this when scaling to multiple GPUs and clusters
    # Pay attention to this, use "accelerate config"
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '9994'
    os.environ['RANK'] = str(0)  # Number of nodes (servers)
    os.environ['WORLD_SIZE'] = str(torch.cuda.device_count())
torch.distributed.init_process_group()
Train()
if __name__ == '__main__':
main() | TerraByte-master | TerraByte/training/train.py |
import torch
# This is the unfused version of StableAdamW. It is slower than the fused version (coming).
class StableAdamWUnfused(torch.optim.Optimizer):
def __init__(
self,
params,
lr=0.002,
weight_decay=0.2,
betas=(0.9, 0.99),
eps=1e-8,
clip_thresh=1.0,
precision="amp_bfloat16",
custom_scalar=65536,
):
beta1, beta2 = betas[0], betas[1]
defaults = dict(lr=lr, weight_decay=weight_decay, beta1=beta1, beta2=beta2)
super(StableAdamWUnfused, self).__init__(params, defaults)
self.eps = eps
self.d = clip_thresh
# Set precision to "custom_fp16" if you want to use a fixed loss scalar, custom_scalar, which is divided out in the update step.
# If you do this, call (custom_scalar * loss).backward() instead of loss.backward().
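        # Hypothetical usage sketch (not from the original repo):
        #   opt = StableAdamWUnfused(model.parameters(), precision="custom_fp16", custom_scalar=65536)
        #   (opt.custom_scaler * loss).backward()
        #   opt.step()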
self.precision = precision
self.custom_scaler = custom_scalar
for group in self.param_groups:
group["step"] = 1.0
print("Using StableAdamWUnfused-v1")
def __setstate__(self, state):
super(StableAdamWUnfused, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
lr = group["lr"]
weight_decay = group["weight_decay"]
beta1 = group["beta1"]
beta2 = group["beta2"]
step = group["step"]
for p in group["params"]:
if p.grad is None:
continue
theta = p.data
param_state = self.state[p]
if self.precision == "custom_fp16":
g = p.grad.data / self.custom_scaler
if torch.any(torch.isnan(g) | torch.isinf(g)):
continue
else:
g = p.grad.data
if "exp_avg" not in param_state:
v = param_state["exp_avg"] = torch.zeros_like(theta)
u = param_state["exp_avg_sq"] = torch.zeros_like(theta)
else:
v = param_state["exp_avg"]
u = param_state["exp_avg_sq"]
beta1hat = beta1 * (1 - beta1 ** (step - 1)) / (1 - beta1**step)
beta2hat = beta2 * (1 - beta2 ** (step - 1)) / (1 - beta2**step)
v = v.mul_(beta1hat).add_(g, alpha=1.0 - beta1hat)
u = u.mul_(beta2hat).addcmul_(g, g, value=1.0 - beta2hat)
denominator = u.sqrt().add_(self.eps)
# StableAdamW = AdamW + update clipping (https://arxiv.org/abs/1804.04235) applied tensor-wise.
rms = (
torch.div(
g.pow(2), torch.maximum(u, (self.eps**2) * torch.ones_like(u))
)
.mean()
.sqrt()
.item()
)
theta = theta.mul_(1.0 - lr * weight_decay).addcdiv_(
v, denominator, value=-lr * (1.0 / max(1.0, rms / self.d))
)
# save current params
param_state["exp_avg"] = v
param_state["exp_avg_sq"] = u
group["step"] = step + 1 | TerraByte-master | TerraByte/utils/stable_adamw.py |
TerraByte-master | TerraByte/utils/__init__.py |
|
import gzip
import html
import io
import math
from functools import lru_cache
from typing import Callable, List, Optional, Tuple
import ftfy
import numpy as np
import regex as re
import torch
import torch.nn as nn
from iopath.common.file_io import g_pathmgr
from timm.models.layers import trunc_normal_
from TerraByte.model.helpers import VerboseNNModule, cast_if_src_dtype
def get_sinusoid_encoding_table(n_position, d_hid):
"""Sinusoid position encoding table"""
# TODO: make it with torch instead of numpy
def get_position_angle_vec(position):
return [
position / np.power(10000, 2 * (hid_j // 2) / d_hid)
for hid_j in range(d_hid)
]
sinusoid_table = np.array(
[get_position_angle_vec(pos_i) for pos_i in range(n_position)]
)
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
return torch.FloatTensor(sinusoid_table).unsqueeze(0)
def interpolate_pos_encoding_2d(target_spatial_size, pos_embed):
N = pos_embed.shape[1]
if N == target_spatial_size:
return pos_embed
dim = pos_embed.shape[-1]
# nn.functional.interpolate doesn't work with bfloat16 so we cast to float32
pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(
0, 3, 1, 2
),
scale_factor=math.sqrt(target_spatial_size / N),
mode="bicubic",
)
if updated:
pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16)
pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return pos_embed
def interpolate_pos_encoding(
npatch_per_img,
pos_embed,
patches_layout,
input_shape=None,
first_patch_idx=1,
):
assert first_patch_idx == 0 or first_patch_idx == 1, "there is 1 CLS token or none"
N = pos_embed.shape[1] - first_patch_idx # since it's 1 if cls_token exists
if npatch_per_img == N:
return pos_embed
assert (
patches_layout[-1] == patches_layout[-2]
), "Interpolation of pos embed not supported for non-square layouts"
class_emb = pos_embed[:, :first_patch_idx]
pos_embed = pos_embed[:, first_patch_idx:]
if input_shape is None or patches_layout[0] == 1:
# simple 2D pos embedding, no temporal component
pos_embed = interpolate_pos_encoding_2d(npatch_per_img, pos_embed)
elif patches_layout[0] > 1:
# pos embed has a temporal component
assert len(input_shape) == 4, "temporal interpolation not supported"
# we only support 2D interpolation in this case
num_frames = patches_layout[0]
num_spatial_tokens = patches_layout[1] * patches_layout[2]
pos_embed = pos_embed.view(1, num_frames, num_spatial_tokens, -1)
# interpolate embedding for zeroth frame
pos_embed = interpolate_pos_encoding_2d(
npatch_per_img, pos_embed[0, 0, ...].unsqueeze(0)
)
else:
raise ValueError("This type of interpolation isn't implemented")
return torch.cat((class_emb, pos_embed), dim=1)
def _get_pos_embedding(
npatch_per_img,
pos_embed,
patches_layout,
input_shape,
first_patch_idx=1,
):
pos_embed = interpolate_pos_encoding(
npatch_per_img,
pos_embed,
patches_layout,
input_shape=input_shape,
first_patch_idx=first_patch_idx,
)
return pos_embed
class PatchEmbedGeneric(nn.Module):
"""
PatchEmbed from Hydra
"""
def __init__(self, proj_stem, norm_layer: Optional[nn.Module] = None):
super().__init__()
if len(proj_stem) > 1:
self.proj = nn.Sequential(*proj_stem)
else:
# Special case to be able to load pre-trained models that were
# trained with a standard stem
self.proj = proj_stem[0]
self.norm_layer = norm_layer
def get_patch_layout(self, img_size):
with torch.no_grad():
dummy_img = torch.zeros(
[
1,
]
+ img_size
)
dummy_out = self.proj(dummy_img)
embed_dim = dummy_out.shape[1]
patches_layout = tuple(dummy_out.shape[2:])
num_patches = np.prod(patches_layout)
return patches_layout, num_patches, embed_dim
def forward(self, x):
x = self.proj(x)
# B C (T) H W -> B (T)HW C
x = x.flatten(2).transpose(1, 2)
if self.norm_layer is not None:
x = self.norm_layer(x)
return x
class SpatioTemporalPosEmbeddingHelper(VerboseNNModule):
def __init__(
self,
patches_layout: List,
num_patches: int,
num_cls_tokens: int,
embed_dim: int,
learnable: bool,
) -> None:
super().__init__()
self.num_cls_tokens = num_cls_tokens
self.patches_layout = patches_layout
self.num_patches = num_patches
self.num_tokens = num_cls_tokens + num_patches
self.learnable = learnable
if self.learnable:
self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, embed_dim))
trunc_normal_(self.pos_embed, std=0.02)
else:
self.register_buffer(
"pos_embed", get_sinusoid_encoding_table(self.num_tokens, embed_dim)
)
def get_pos_embedding(self, vision_input, all_vision_tokens):
input_shape = vision_input.shape
pos_embed = _get_pos_embedding(
all_vision_tokens.size(1) - self.num_cls_tokens,
pos_embed=self.pos_embed,
patches_layout=self.patches_layout,
input_shape=input_shape,
first_patch_idx=self.num_cls_tokens,
)
return pos_embed
class RGBDTPreprocessor(VerboseNNModule):
def __init__(
self,
rgbt_stem: PatchEmbedGeneric,
depth_stem: Optional[PatchEmbedGeneric],
img_size: Tuple = (3, 224, 224),
num_cls_tokens: int = 1,
pos_embed_fn: Optional[Callable] = None,
use_type_embed: bool = False,
init_param_style: str = "openclip",
) -> None:
super().__init__()
stem = rgbt_stem if rgbt_stem is not None else depth_stem
(
self.patches_layout,
self.num_patches,
self.embed_dim,
) = stem.get_patch_layout(img_size)
self.rgbt_stem = rgbt_stem
self.depth_stem = depth_stem
self.use_pos_embed = pos_embed_fn is not None
self.use_type_embed = use_type_embed
self.num_cls_tokens = num_cls_tokens
if self.use_pos_embed:
self.pos_embedding_helper = pos_embed_fn(
patches_layout=self.patches_layout,
num_cls_tokens=num_cls_tokens,
num_patches=self.num_patches,
embed_dim=self.embed_dim,
)
if self.num_cls_tokens > 0:
self.cls_token = nn.Parameter(
torch.zeros(1, self.num_cls_tokens, self.embed_dim)
)
if self.use_type_embed:
self.type_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
self.init_parameters(init_param_style)
@torch.no_grad()
def init_parameters(self, init_param_style):
if init_param_style == "openclip":
# OpenCLIP style initialization
scale = self.embed_dim**-0.5
if self.use_pos_embed:
nn.init.normal_(self.pos_embedding_helper.pos_embed)
self.pos_embedding_helper.pos_embed *= scale
if self.num_cls_tokens > 0:
nn.init.normal_(self.cls_token)
self.cls_token *= scale
elif init_param_style == "vit":
self.cls_token.data.fill_(0)
else:
raise ValueError(f"Unknown init {init_param_style}")
if self.use_type_embed:
nn.init.normal_(self.type_embed)
def tokenize_input_and_cls_pos(self, input, stem, mask):
# tokens is of shape B x L x D
tokens = stem(input)
assert tokens.ndim == 3
assert tokens.shape[2] == self.embed_dim
B = tokens.shape[0]
if self.num_cls_tokens > 0:
class_tokens = self.cls_token.expand(
B, -1, -1
) # stole class_tokens impl from Phil Wang, thanks
tokens = torch.cat((class_tokens, tokens), dim=1)
if self.use_pos_embed:
pos_embed = self.pos_embedding_helper.get_pos_embedding(input, tokens)
tokens = tokens + pos_embed
if self.use_type_embed:
tokens = tokens + self.type_embed.expand(B, -1, -1)
return tokens
def forward(self, vision=None, depth=None, patch_mask=None):
if patch_mask is not None:
raise NotImplementedError()
if vision is not None:
vision_tokens = self.tokenize_input_and_cls_pos(
vision, self.rgbt_stem, patch_mask
)
if depth is not None:
depth_tokens = self.tokenize_input_and_cls_pos(
depth, self.depth_stem, patch_mask
)
# aggregate tokens
if vision is not None and depth is not None:
final_tokens = vision_tokens + depth_tokens
else:
final_tokens = vision_tokens if vision is not None else depth_tokens
return_dict = {
"trunk": {
"tokens": final_tokens,
},
"head": {},
}
return return_dict
class AudioPreprocessor(RGBDTPreprocessor):
def __init__(self, audio_stem: PatchEmbedGeneric, **kwargs) -> None:
super().__init__(rgbt_stem=audio_stem, depth_stem=None, **kwargs)
def forward(self, audio=None):
return super().forward(vision=audio)
class ThermalPreprocessor(RGBDTPreprocessor):
def __init__(self, thermal_stem: PatchEmbedGeneric, **kwargs) -> None:
super().__init__(rgbt_stem=thermal_stem, depth_stem=None, **kwargs)
def forward(self, thermal=None):
return super().forward(vision=thermal)
def build_causal_attention_mask(context_length):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(context_length, context_length, requires_grad=False)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
class TextPreprocessor(VerboseNNModule):
def __init__(
self,
vocab_size: int,
context_length: int,
embed_dim: int,
causal_masking: bool,
supply_seq_len_to_head: bool = True,
num_cls_tokens: int = 0,
init_param_style: str = "openclip",
) -> None:
super().__init__()
self.vocab_size = vocab_size
self.context_length = context_length
self.token_embedding = nn.Embedding(vocab_size, embed_dim)
self.pos_embed = nn.Parameter(
torch.empty(1, self.context_length + num_cls_tokens, embed_dim)
)
self.causal_masking = causal_masking
if self.causal_masking:
mask = build_causal_attention_mask(self.context_length)
# register the mask as a buffer so it can be moved to the right device
self.register_buffer("mask", mask)
self.supply_seq_len_to_head = supply_seq_len_to_head
self.num_cls_tokens = num_cls_tokens
self.embed_dim = embed_dim
if num_cls_tokens > 0:
assert self.causal_masking is False, "Masking + CLS token isn't implemented"
self.cls_token = nn.Parameter(
torch.zeros(1, self.num_cls_tokens, embed_dim)
)
self.init_parameters(init_param_style)
@torch.no_grad()
def init_parameters(self, init_param_style="openclip"):
# OpenCLIP style initialization
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.pos_embed, std=0.01)
if init_param_style == "openclip":
# OpenCLIP style initialization
scale = self.embed_dim**-0.5
if self.num_cls_tokens > 0:
nn.init.normal_(self.cls_token)
self.cls_token *= scale
elif init_param_style == "vit":
self.cls_token.data.fill_(0)
else:
raise ValueError(f"Unknown init {init_param_style}")
def forward(self, text):
# text tokens are of shape B x L x D
text_tokens = self.token_embedding(text)
# concat CLS tokens if any
if self.num_cls_tokens > 0:
B = text_tokens.shape[0]
class_tokens = self.cls_token.expand(
B, -1, -1
) # stole class_tokens impl from Phil Wang, thanks
text_tokens = torch.cat((class_tokens, text_tokens), dim=1)
text_tokens = text_tokens + self.pos_embed
return_dict = {
"trunk": {
"tokens": text_tokens,
},
"head": {},
}
# Compute sequence length after adding CLS tokens
if self.supply_seq_len_to_head:
text_lengths = text.argmax(dim=-1)
return_dict["head"] = {
"seq_len": text_lengths,
}
if self.causal_masking:
return_dict["trunk"].update({"attn_mask": self.mask})
return return_dict
class Im2Video(nn.Module):
"""Convert an image into a trivial video."""
def __init__(self, time_dim=2):
super().__init__()
self.time_dim = time_dim
def forward(self, x):
if x.ndim == 4:
# B, C, H, W -> B, C, T, H, W
return x.unsqueeze(self.time_dim)
elif x.ndim == 5:
return x
else:
raise ValueError(f"Dimension incorrect {x.shape}")
class PadIm2Video(Im2Video):
def __init__(self, ntimes, pad_type, time_dim=2):
super().__init__(time_dim=time_dim)
assert ntimes > 0
assert pad_type in ["zero", "repeat"]
self.ntimes = ntimes
self.pad_type = pad_type
def forward(self, x):
x = super().forward(x)
if x.shape[self.time_dim] == 1:
if self.pad_type == "repeat":
new_shape = [1] * len(x.shape)
new_shape[self.time_dim] = self.ntimes
x = x.repeat(new_shape)
elif self.pad_type == "zero":
padarg = [0, 0] * len(x.shape)
padarg[2 * self.time_dim + 1] = self.ntimes - x.shape[self.time_dim]
x = nn.functional.pad(x, padarg)
return x
# Modified from github.com/openai/CLIP
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = (
list(range(ord("!"), ord("~") + 1))
+ list(range(ord("¡"), ord("¬") + 1))
+ list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r"\s+", " ", text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str, context_length=77):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with g_pathmgr.open(bpe_path, "rb") as fh:
bpe_bytes = io.BytesIO(fh.read())
merges: List[str] = gzip.open(bpe_bytes).read().decode("utf-8").split("\n")
merges = merges[1 : 49152 - 256 - 2 + 1]
merges: List[Tuple[str, ...]] = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v + "</w>" for v in vocab]
for merge in merges:
vocab.append("".join(merge))
vocab.extend(["<|startoftext|>", "<|endoftext|>"])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {
"<|startoftext|>": "<|startoftext|>",
"<|endoftext|>": "<|endoftext|>",
}
self.pat = re.compile(
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
re.IGNORECASE,
)
self.context_length = context_length
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + "</w>",)
pairs = get_pairs(word)
if not pairs:
return token + "</w>"
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
bpe_tokens.extend(
self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
)
return bpe_tokens
def decode(self, tokens):
text = "".join([self.decoder[token] for token in tokens])
text = (
bytearray([self.byte_decoder[c] for c in text])
.decode("utf-8", errors="replace")
.replace("</w>", " ")
)
return text
def __call__(self, texts, context_length=None):
if not context_length:
context_length = self.context_length
if isinstance(texts, str):
texts = [texts]
sot_token = self.encoder["<|startoftext|>"]
eot_token = self.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
tokens = tokens[:context_length]
result[i, : len(tokens)] = torch.tensor(tokens)
if len(result) == 1:
return result[0]
return result
class IMUPreprocessor(VerboseNNModule):
def __init__(
self,
kernel_size: int,
imu_stem: PatchEmbedGeneric,
embed_dim: int,
img_size: Tuple = (6, 2000),
num_cls_tokens: int = 1,
pos_embed_fn: Optional[Callable] = None,
init_param_style: str = "openclip",
) -> None:
super().__init__()
self.imu_stem = imu_stem
self.embed_dim = embed_dim
self.use_pos_embed = pos_embed_fn is not None
self.num_cls_tokens = num_cls_tokens
self.kernel_size = kernel_size
self.pos_embed = nn.Parameter(
torch.empty(1, (img_size[1] // kernel_size) + num_cls_tokens, embed_dim)
)
if self.num_cls_tokens > 0:
self.cls_token = nn.Parameter(
torch.zeros(1, self.num_cls_tokens, self.embed_dim)
)
self.init_parameters(init_param_style)
@torch.no_grad()
def init_parameters(self, init_param_style):
nn.init.normal_(self.pos_embed, std=0.01)
if init_param_style == "openclip":
# OpenCLIP style initialization
scale = self.embed_dim**-0.5
if self.num_cls_tokens > 0:
nn.init.normal_(self.cls_token)
self.cls_token *= scale
elif init_param_style == "vit":
self.cls_token.data.fill_(0)
else:
raise ValueError(f"Unknown init {init_param_style}")
def tokenize_input_and_cls_pos(self, input, stem):
# tokens is of shape B x L x D
tokens = stem.norm_layer(stem.proj(input))
assert tokens.ndim == 3
assert tokens.shape[2] == self.embed_dim
B = tokens.shape[0]
if self.num_cls_tokens > 0:
class_tokens = self.cls_token.expand(
B, -1, -1
) # stole class_tokens impl from Phil Wang, thanks
tokens = torch.cat((class_tokens, tokens), dim=1)
if self.use_pos_embed:
tokens = tokens + self.pos_embed
return tokens
def forward(self, imu):
# Patchify
imu = imu.unfold(
-1,
self.kernel_size,
self.kernel_size,
).permute(0, 2, 1, 3)
imu = imu.reshape(imu.size(0), imu.size(1), -1)
imu_tokens = self.tokenize_input_and_cls_pos(
imu,
self.imu_stem,
)
return_dict = {
"trunk": {
"tokens": imu_tokens,
},
"head": {},
}
return return_dict
from PIL import Image
import io
import cv2
import numpy as np
from pydub import AudioSegment
class ImageToByte:
def __init__(self, format="JPEG"):
self.format = format
def convert(self, image_path):
with Image.open(image_path) as img:
            byte_arr = io.BytesIO()
img.save(byte_arr, format=self.format)
return byte_arr.getvalue()
class VideoToByte:
def convert(self, video_path):
#use opencv to read the video
vidcap = cv2.VideoCapture(video_path)
success, image = vidcap.read()
video_bytes = []
while success:
#convert each frame to bytes
            is_success, buffer = cv2.imencode('.jpg', image)
video_bytes.append(buffer.tobytes())
success, image = vidcap.read()
return video_bytes
class AudioToByte:
def convert(self, audio_path):
audio = AudioSegment.from_file(audio_path)
return audio.raw_data
# example usage with placeholder paths, guarded so importing this module has no side effects
if __name__ == "__main__":
    image_to_byte = ImageToByte()
    image_bytes = image_to_byte.convert("path_to_your_image.jpg")

    audio_to_byte = AudioToByte()
    audio_bytes = audio_to_byte.convert("path_to_your_audio.mp3")

    video_to_byte = VideoToByte()
    video_bytes = video_to_byte.convert("path_to_your_video.mp4")
| TerraByte-master | TerraByte/model/multimodal_preprocessor.py |
import torch.nn as nn
from einops import rearrange
from TerraByte.model.attend import Attend
from TerraByte.model.helpers import RMSNorm, apply_rotary_pos_emb, exists
############## ATTENTION
class Attention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.,
flash = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.attend = Attend(
causal = True,
flash = flash,
dropout = dropout
)
self.dropout = nn.Dropout(dropout)
self.norm = RMSNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
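        # note: keys / values are projected to a single shared head of size dim_head (multi-query attention); Attend broadcasts them across the query heads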
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x, rotary_emb = None):
h, device = self.heads, x.device
x = self.norm(x)
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if exists(rotary_emb):
q, k = map(lambda t: apply_rotary_pos_emb(rotary_emb, t), (q, k))
out = self.attend(q, k, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
| TerraByte-master | TerraByte/model/attention.py |
# class Transformer(nn.Module):
# def __init__(
# self,
# *,
# dim,
# layers,
# dim_head = 64,
# heads = 8,
# attn_dropout = 0.,
# ff_dropout = 0.,
# ff_mult = 4,
# rel_pos_bias = True,
# flash_attn = True,
# ):
# super().__init__()
# self.alibi = Alibi(heads = heads) if rel_pos_bias else None
# self.layers = nn.ModuleList([])
# for _ in range(layers):
# self.layers.append(nn.ModuleList([
# Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, flash = flash_attn),
# FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
# ]))
# self.norm = RMSNorm(dim)
# def forward(self, x):
# n = x.shape[-2]
# attn_bias = self.alibi(n, n, device = x.device) if exists(self.alibi) else None
# for attn, ff in self.layers:
# x = attn(token_shift(x), attn_bias = attn_bias) + x
# x = ff(token_shift(x)) + x
# return self.norm(x)
| TerraByte-master | TerraByte/model/transformer_alibi.py |
TerraByte-master | TerraByte/model/__init__.py |
|
from itertools import zip_longest
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from beartype import beartype
from beartype.typing import Tuple, Union
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import nn
from tqdm import tqdm
from TerraByte.model.helpers import (
cast_tuple,
default,
exists,
gumbel_sample,
pack_one,
reduce_mult,
remainder_to_mult,
top_k,
unpack_one,
)
from TerraByte.model.transformer import Transformer
class TerraByte(nn.Module):
@beartype
def __init__(
self,
*,
num_tokens,
dim: Union[Tuple, int],
depth: Tuple,
max_seq_len: Tuple,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
pad_id = 0,
pos_emb=False,
rel_pos_bias = True,
dilation_rate = None,
segment_size = None,
use_xpos = False,
use_rel_pos_bias = False,
flash_attn = False
):
super().__init__()
# simplified configuration for each stage of the hierarchy
# depth = (2, 2, 4) would translate to depth 2 at first stage, depth 2 second stage, depth 4 third
# max_seq_len = (16, 8, 4) would translate to max sequence length of 16 at first stage, length of 8 at second stage, length of 4 for last
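        # hypothetical instantiation (not from the repo), shown only to illustrate the tuple convention:
        #   TerraByte(num_tokens = 256, dim = (768, 512, 256), depth = (6, 4, 2), max_seq_len = (512, 4, 4))
        #   models flattened sequences of up to 512 * 4 * 4 = 8192 byte tokens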
assert isinstance(depth, tuple) and isinstance(max_seq_len, tuple)
assert len(depth) == len(max_seq_len)
self.stages = len(depth)
dim = cast_tuple(dim, self.stages)
assert len(dim) == self.stages
coarsest_dim, *_, fine_dim = dim
self.max_seq_len = max_seq_len
self.start_tokens = nn.ParameterList([nn.Parameter(torch.randn(h_dim)) for h_dim, seq_len in zip(dim, max_seq_len)]) # noqa: E501
self.pos_embs = nn.ModuleList([nn.Embedding(seq_len, h_dim) for h_dim, seq_len in zip(dim, max_seq_len)]) if pos_emb else None
self.token_embs = nn.ModuleList([])
patch_size = 1
self.token_embs.append(nn.Embedding(num_tokens, fine_dim))
for dim_out, seq_len in zip(reversed(dim[:-1]), reversed(max_seq_len[1:])):
patch_size *= seq_len
self.token_embs.append(nn.Sequential(
nn.Embedding(num_tokens, fine_dim),
Rearrange('... r d -> ... (r d)'),
nn.LayerNorm(patch_size * fine_dim),
nn.Linear(patch_size * fine_dim, dim_out),
nn.LayerNorm(dim_out)
))
self.transformers = nn.ModuleList([])
self.to_next_transformer_projections = nn.ModuleList([])
for h_dim, next_h_dim, stage_depth, next_seq_len in zip_longest(dim, dim[1:], depth, max_seq_len[1:]):
self.transformers.append(Transformer(
dim = h_dim,
layers = stage_depth,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
ff_mult = ff_mult,
# rel_pos_bias = rel_pos_bias,
flash_attn = flash_attn
))
proj = nn.Identity()
            # a projection into the next stage's (next_seq_len, next_h_dim) layout is needed whenever there is a next stage
            if exists(next_h_dim):
proj = nn.Sequential(
Rearrange('b ... d -> b (...) d'),
nn.Linear(h_dim, next_h_dim * next_seq_len),
Rearrange('b m (n d) -> (b m) n d', n = next_seq_len)
)
self.to_next_transformer_projections.append(proj)
self.to_logits = nn.Linear(fine_dim, num_tokens)
self.pad_id = pad_id
def generate(self, prime = None, filter_thres = 0.9, temperature = 1., default_batch_size = 1):
total_seq_len = reduce_mult(self.max_seq_len)
device = next(self.parameters()).device
if not exists(prime):
prime = torch.empty((default_batch_size, 0), dtype = torch.long, device = device)
seq = prime
batch = seq.shape[0]
for _ in tqdm(range(total_seq_len - seq.shape[-1])):
logits = self.forward(seq)[:, -1]
logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(logits, dim = -1, temperature = temperature)
seq = torch.cat((seq, rearrange(sampled, 'b -> b 1')), dim = -1)
return seq.reshape(batch, *self.max_seq_len)
def forward_empty(self, batch_size):
# take care of special case
# where you sample from input of 0 (start token only)
prev_stage_tokens_repr = None
for stage_start_tokens, transformer, proj in zip(self.start_tokens, self.transformers, self.to_next_transformer_projections):
tokens = repeat(stage_start_tokens, 'd -> b 1 d', b = batch_size)
if exists(prev_stage_tokens_repr):
tokens = tokens + prev_stage_tokens_repr[..., :tokens.shape[-2], :]
tokens = transformer(tokens)
prev_stage_tokens_repr = proj(tokens)
return self.to_logits(tokens)
def forward(self, ids, return_loss = False):
batch = ids.shape[0]
assert ids.ndim in {2, self.stages + 1}
flattened_dims = ids.ndim == 2
if ids.numel() == 0:
return self.forward_empty(ids.shape[0])
if flattened_dims:
# allow for ids to be given in the shape of (batch, seq)
# in which case it will be auto-padded to the next nearest multiple of depth seq len
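            # e.g. with max_seq_len = (512, 4, 4), a (batch, 100) input is padded to 112 (the next multiple of 4 * 4 = 16) and reshaped to (batch, 7, 4, 4)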
seq_len = ids.shape[-1]
multiple_of = reduce_mult(self.max_seq_len[1:])
padding = remainder_to_mult(seq_len, multiple_of)
ids = F.pad(ids, (0, padding), value = self.pad_id)
ids = ids.reshape(batch, -1, *self.max_seq_len[1:])
b, *prec_dims, device = *ids.shape, ids.device
# check some dimensions
assert prec_dims[0] <= self.max_seq_len[0], 'the first dimension of your axial autoregressive transformer must be less than the first tuple element of max_seq_len (like any autoregressive transformer)'
assert tuple(prec_dims[1:]) == tuple(self.max_seq_len[1:]), 'all subsequent dimensions must match exactly'
# get tokens for all hierarchical stages, reducing by appropriate dimensions
# and adding the absolute positional embeddings
tokens_at_stages = []
pos_embs = default(self.pos_embs, (None,))
for ind, pos_emb, token_emb in zip_longest(range(len(prec_dims)), pos_embs, self.token_embs):
is_first = ind == 0
tokens = token_emb(ids)
if exists(pos_emb):
positions = pos_emb(torch.arange(tokens.shape[-2], device = device))
tokens = tokens + positions
tokens_at_stages.insert(0, tokens)
if is_first:
continue
ids = rearrange(ids, '... m n -> ... (m n)')
# the un-pixelshuffled representations of the previous hierarchy, starts with None
prev_stage_tokens_repr = None
# spatial tokens is tokens with depth pos reduced along depth dimension + spatial positions
for stage_start_tokens, stage_tokens, transformer, proj in zip(self.start_tokens, tokens_at_stages, self.transformers, self.to_next_transformer_projections):
stage_tokens, ps = pack_one(stage_tokens, '* n d')
stage_start_tokens = repeat(stage_start_tokens, 'f -> b 1 f', b = stage_tokens.shape[0])
# concat start token
stage_tokens = torch.cat((
stage_start_tokens,
stage_tokens,
), dim = -2)
# sum the previous hierarchy's representation
if exists(prev_stage_tokens_repr):
prev_stage_tokens_repr = F.pad(prev_stage_tokens_repr, (0, 0, 1, 0), value = 0.)
stage_tokens = stage_tokens + prev_stage_tokens_repr
attended = transformer(stage_tokens)
attended = unpack_one(attended, ps, '* n d')
# project for next stage in the hierarchy
prev_stage_tokens_repr = proj(attended[..., :-1, :])
# project to logits
logits = self.to_logits(attended)
start_tokens = logits[(slice(None), *((0,) * (logits.ndim - 2)), slice(None))]
start_tokens = rearrange(start_tokens, 'b d -> b 1 d')
logits = logits[..., 1:, :]
if not return_loss:
if flattened_dims:
logits = rearrange(logits, 'b ... c -> b (...) c')
logits = logits[:, :seq_len]
return logits
logits = rearrange(logits, 'b ... c -> b (...) c')
logits = torch.cat((start_tokens, logits), dim = -2)
preds = rearrange(logits, 'b n c -> b c n')
labels = rearrange(ids, 'b ... -> b (...)')
loss = F.cross_entropy(
preds[..., :-1],
labels,
ignore_index = self.pad_id
)
return loss
| TerraByte-master | TerraByte/model/model.py |
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
causal = False,
dropout = 0.,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def get_mask(self, i, j, device):
return torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 1)
def flash_attn(self, q, k, v, mask = None, attn_bias = None):
_, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# single headed key / values
if k.ndim == 3:
k = rearrange(k, 'b n d -> b 1 n d')
if v.ndim == 3:
v = rearrange(v, 'b n d -> b 1 n d')
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask) and mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
q_len, k_len, device = q.shape[-2], k.shape[-2], q.device
scale = q.shape[-1] ** -0.5
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
if self.flash:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale
# causal mask
if self.causal:
causal_mask = self.get_mask(q_len, k_len, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v)
return out | TerraByte-master | TerraByte/model/attend.py |
from itertools import zip_longest
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from beartype import beartype
from beartype.typing import Tuple, Union
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import nn
from tqdm import tqdm
from TerraByte.model.helpers import (
cast_tuple,
default,
exists,
gumbel_sample,
pack_one,
reduce_mult,
remainder_to_mult,
top_k,
unpack_one,
)
from TerraByte.model.transformer import Transformer
#regular megabyte no universal patch embedder
class Megabyte(nn.Module):
@beartype
def __init__(
self,
*,
num_tokens,
dim: Union[Tuple, int],
depth: Tuple,
max_seq_len: Tuple,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
pad_id = 0,
rel_pos = False,
pos_emb = False,
flash_attn = False
):
super().__init__()
# simplified configuration for each stage of the hierarchy
# depth = (2, 2, 4) would translate to depth 2 at first stage, depth 2 second stage, depth 4 third
# max_seq_len = (16, 8, 4) would translate to max sequence length of 16 at first stage, length of 8 at second stage, length of 4 for last
assert isinstance(depth, tuple) and isinstance(max_seq_len, tuple)
assert len(depth) == len(max_seq_len)
self.stages = len(depth)
dim = cast_tuple(dim, self.stages)
assert len(dim) == self.stages
coarsest_dim, *_, fine_dim = dim
self.max_seq_len = max_seq_len
self.start_tokens = nn.ParameterList([nn.Parameter(torch.randn(h_dim)) for h_dim, seq_len in zip(dim, max_seq_len)])
self.pos_embs = nn.ModuleList([nn.Embedding(seq_len, h_dim) for h_dim, seq_len in zip(dim, max_seq_len)]) if pos_emb else None
self.token_embs = nn.ModuleList([])
patch_size = 1
self.token_embs.append(nn.Embedding(num_tokens, fine_dim))
for dim_out, seq_len in zip(reversed(dim[:-1]), reversed(max_seq_len[1:])):
patch_size *= seq_len
self.token_embs.append(nn.Sequential(
nn.Embedding(num_tokens, fine_dim),
Rearrange('... r d -> ... (r d)'),
nn.LayerNorm(patch_size * fine_dim),
nn.Linear(patch_size * fine_dim, dim_out),
nn.LayerNorm(dim_out)
))
self.transformers = nn.ModuleList([])
self.to_next_transformer_projections = nn.ModuleList([])
for h_dim, next_h_dim, stage_depth, next_seq_len in zip_longest(dim, dim[1:], depth, max_seq_len[1:]):
self.transformers.append(Transformer(
dim = h_dim,
layers = stage_depth,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
ff_mult = ff_mult,
rel_pos = rel_pos,
flash_attn = flash_attn
))
proj = nn.Identity()
            # a projection into the next stage's (next_seq_len, next_h_dim) layout is needed whenever there is a next stage
            if exists(next_h_dim):
proj = nn.Sequential(
Rearrange('b ... d -> b (...) d'),
nn.Linear(h_dim, next_h_dim * next_seq_len),
Rearrange('b m (n d) -> (b m) n d', n = next_seq_len)
)
self.to_next_transformer_projections.append(proj)
self.to_logits = nn.Linear(fine_dim, num_tokens)
self.pad_id = pad_id
def generate(self, prime = None, filter_thres = 0.9, temperature = 1., default_batch_size = 1):
total_seq_len = reduce_mult(self.max_seq_len)
device = next(self.parameters()).device
if not exists(prime):
prime = torch.empty((default_batch_size, 0), dtype = torch.long, device = device)
seq = prime
batch = seq.shape[0]
for _ in tqdm(range(total_seq_len - seq.shape[-1])):
logits = self.forward(seq)[:, -1]
logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(logits, dim = -1, temperature = temperature)
seq = torch.cat((seq, rearrange(sampled, 'b -> b 1')), dim = -1)
return seq.reshape(batch, *self.max_seq_len)
def forward_empty(self, batch_size):
# take care of special case
# where you sample from input of 0 (start token only)
prev_stage_tokens_repr = None
for stage_start_tokens, transformer, proj in zip(self.start_tokens, self.transformers, self.to_next_transformer_projections):
tokens = repeat(stage_start_tokens, 'd -> b 1 d', b = batch_size)
if exists(prev_stage_tokens_repr):
tokens = tokens + prev_stage_tokens_repr[..., :tokens.shape[-2], :]
tokens = transformer(tokens)
prev_stage_tokens_repr = proj(tokens)
return self.to_logits(tokens)
def forward(self, ids, return_loss = False):
batch = ids.shape[0]
assert ids.ndim in {2, self.stages + 1}
flattened_dims = ids.ndim == 2
if ids.numel() == 0:
return self.forward_empty(ids.shape[0])
if flattened_dims:
# allow for ids to be given in the shape of (batch, seq)
# in which case it will be auto-padded to the next nearest multiple of depth seq len
seq_len = ids.shape[-1]
multiple_of = reduce_mult(self.max_seq_len[1:])
padding = remainder_to_mult(seq_len, multiple_of)
ids = F.pad(ids, (0, padding), value = self.pad_id)
ids = ids.reshape(batch, -1, *self.max_seq_len[1:])
b, *prec_dims, device = *ids.shape, ids.device
# check some dimensions
assert prec_dims[0] <= self.max_seq_len[0], 'the first dimension of your axial autoregressive transformer must be less than the first tuple element of max_seq_len (like any autoregressive transformer)'
assert tuple(prec_dims[1:]) == tuple(self.max_seq_len[1:]), 'all subsequent dimensions must match exactly'
# get tokens for all hierarchical stages, reducing by appropriate dimensions
# and adding the absolute positional embeddings
tokens_at_stages = []
pos_embs = default(self.pos_embs, (None,))
for ind, pos_emb, token_emb in zip_longest(range(len(prec_dims)), pos_embs, self.token_embs):
is_first = ind == 0
tokens = token_emb(ids)
if exists(pos_emb):
positions = pos_emb(torch.arange(tokens.shape[-2], device = device))
tokens = tokens + positions
tokens_at_stages.insert(0, tokens)
if is_first:
continue
ids = rearrange(ids, '... m n -> ... (m n)')
# the un-pixelshuffled representations of the previous hierarchy, starts with None
prev_stage_tokens_repr = None
# spatial tokens is tokens with depth pos reduced along depth dimension + spatial positions
for stage_start_tokens, stage_tokens, transformer, proj in zip(self.start_tokens, tokens_at_stages, self.transformers, self.to_next_transformer_projections):
stage_tokens, ps = pack_one(stage_tokens, '* n d')
stage_start_tokens = repeat(stage_start_tokens, 'f -> b 1 f', b = stage_tokens.shape[0])
# concat start token
stage_tokens = torch.cat((
stage_start_tokens,
stage_tokens,
), dim = -2)
# sum the previous hierarchy's representation
if exists(prev_stage_tokens_repr):
prev_stage_tokens_repr = F.pad(prev_stage_tokens_repr, (0, 0, 1, 0), value = 0.)
stage_tokens = stage_tokens + prev_stage_tokens_repr
attended = transformer(stage_tokens)
attended = unpack_one(attended, ps, '* n d')
# project for next stage in the hierarchy
prev_stage_tokens_repr = proj(attended[..., :-1, :])
# project to logits
logits = self.to_logits(attended)
start_tokens = logits[(slice(None), *((0,) * (logits.ndim - 2)), slice(None))]
start_tokens = rearrange(start_tokens, 'b d -> b 1 d')
logits = logits[..., 1:, :]
if not return_loss:
if flattened_dims:
logits = rearrange(logits, 'b ... c -> b (...) c')
logits = logits[:, :seq_len]
return logits
logits = rearrange(logits, 'b ... c -> b (...) c')
logits = torch.cat((start_tokens, logits), dim = -2)
preds = rearrange(logits, 'b n c -> b c n')
labels = rearrange(ids, 'b ... -> b (...)')
loss = F.cross_entropy(
preds[..., :-1],
labels,
ignore_index = self.pad_id
)
return loss | TerraByte-master | TerraByte/model/megabyte.py |
from typing import Tuple
import torch
from beartype.typing import Tuple
from einops import rearrange
from einops.layers.torch import Rearrange
from torch import Tensor, nn
class PatchEmbeddings(nn.Module):
def __init__(self, dim_in, dim_out, seq_len):
super().__init__()
self.embedding = nn.Sequential(
            Rearrange('... r d -> ... (r d)'),
nn.LayerNorm(seq_len * dim_in),
nn.Linear(seq_len * dim_in, dim_out),
nn.LayerNorm(dim_out),
)
def forward(self, x):
return self.embedding(x)
#Universal modality patch embdders => process all modalities
"""In this implementation, we create a UniversalPatchEmbedder class that takes a tuple of input dimensions,
an output dimension, and a patch size as arguments. The class contains a list of embedders and modality embeddings.
In the forward method, we select the appropriate embedder based on the
modality and apply it to the input. We then add the modality embeddings to the output.
"""
class UniversalPatchEmbedder(nn.Module):
def __init__(self, input_dims: Tuple[int], output_dim: int, patch_size: int):
super().__init__()
self.patch_size = patch_size
self.embedders = nn.ModuleList([nn.Linear(dim, output_dim) for dim in input_dims])
self.modality_embeddings = nn.Embedding(len(input_dims), output_dim)
    def forward(self, x: Tensor, modality: Tensor) -> Tensor:
        # x is expected as (batch, seq_len, feature_dim) for the given modality
        modality_idx = int(modality)

        # select the embedder that matches the modality
        embedder = self.embedders[modality_idx]

        # split the sequence into patches of length patch_size, then project each patch element to the shared output dimension
        x = rearrange(x, 'b (p1 p2) d -> b p1 p2 d', p1 = self.patch_size)
        x = embedder(x)

        # add a learned modality embedding so downstream stages know which modality produced these patches
        modality_emb = self.modality_embeddings(torch.tensor(modality_idx, device = x.device))
        x = x + modality_emb
        return x
| TerraByte-master | TerraByte/model/patches.py |
import torch
import triton
import triton.language as tl
@triton.jit
def max_fn(x, y):
return tl.math.max(x, y)
@triton.jit
def _fwd_kernel(
Q, K, V, sm_scale,
L,
Out,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on,
Z, H, N_CTX,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
IS_CAUSAL: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
qvk_offset = off_hz * stride_qh
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0)
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(BLOCK_DMODEL, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N),
order=(0, 1)
)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL),
order=(1, 0)
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# scale sm_scale by log_2(e) and use
# 2^x instead of exp in the loop because CSE and LICM
# don't work as expected with `exp` in the loop
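    # (exp(x) == 2 ** (x * log2(e)) and log2(e) ≈ 1.44269504, so that constant is folded into qk_scale once here)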
qk_scale = sm_scale * 1.44269504
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
q = (q * qk_scale).to(tl.float16)
# loop over k, v and update accumulator
lo = 0
hi = (start_m + 1) * BLOCK_M if IS_CAUSAL else N_CTX
for start_n in range(lo, hi, BLOCK_N):
# -- load k, v --
k = tl.load(K_block_ptr)
v = tl.load(V_block_ptr)
# -- compute qk ---
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
if IS_CAUSAL:
qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf"))
qk += tl.dot(q, k)
# -- compute scaling constant ---
m_i_new = tl.maximum(m_i, tl.max(qk, 1))
alpha = tl.math.exp2(m_i - m_i_new)
p = tl.math.exp2(qk - m_i_new[:, None])
# -- scale and update acc --
acc_scale = l_i * 0 + alpha # workaround some compiler bug
acc *= acc_scale[:, None]
acc += tl.dot(p.to(tl.float16), v)
# -- update m_i and l_i --
l_i = l_i * alpha + tl.sum(p, 1)
m_i = m_i_new
# update pointers
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
# write back l and m
acc = acc / l_i[:, None]
l_ptrs = L + off_hz * N_CTX + offs_m
tl.store(l_ptrs, m_i + tl.math.log2(l_i))
# write back O
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0)
)
tl.store(O_block_ptr, acc.to(tl.float16))
@triton.jit
def _bwd_preprocess(
Out, DO,
Delta,
BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_n = tl.arange(0, D_HEAD)
# load
o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
# compute
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_m, delta)
@triton.jit
def _bwd_kernel(
Q, K, V, sm_scale, Out, DO,
DQ, DK, DV,
L,
D,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
Z, H, N_CTX,
num_block,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
CAUSAL: tl.constexpr,
):
off_hz = tl.program_id(0)
off_z = off_hz // H
off_h = off_hz % H
qk_scale = sm_scale * 1.44269504
# offset pointers for batch/head
Q += off_z * stride_qz + off_h * stride_qh
K += off_z * stride_qz + off_h * stride_qh
V += off_z * stride_qz + off_h * stride_qh
DO += off_z * stride_qz + off_h * stride_qh
DQ += off_z * stride_qz + off_h * stride_qh
DK += off_z * stride_qz + off_h * stride_qh
DV += off_z * stride_qz + off_h * stride_qh
for start_n in range(0, num_block):
if CAUSAL:
lo = start_n * BLOCK_M
else:
lo = 0
# initialize row/col offsets
offs_qm = lo + tl.arange(0, BLOCK_M)
offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M)
offs_m = tl.arange(0, BLOCK_N)
offs_k = tl.arange(0, BLOCK_DMODEL)
# initialize pointers to value-like data
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
v_ptrs = V + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
# pointer to row-wise quantities in value-like data
D_ptrs = D + off_hz * N_CTX
l_ptrs = L + off_hz * N_CTX
# initialize dv amd dk
dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# k and v stay in SRAM throughout
k = tl.load(k_ptrs)
v = tl.load(v_ptrs)
# loop over rows
for start_m in range(lo, num_block * BLOCK_M, BLOCK_M):
offs_m_curr = start_m + offs_m
# load q, k, v, do on-chip
q = tl.load(q_ptrs)
# recompute p = softmax(qk, dim=-1).T
if CAUSAL:
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), float(0.), float("-inf"))
else:
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, tl.trans(k))
qk *= qk_scale
l_i = tl.load(l_ptrs + offs_m_curr)
p = tl.math.exp2(qk - l_i[:, None])
# compute dv
do = tl.load(do_ptrs)
dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do)
# compute dp = dot(v, do)
Di = tl.load(D_ptrs + offs_m_curr)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
dp += tl.dot(do, tl.trans(v))
# compute ds = p * (dp - delta[:, None])
ds = p * dp * sm_scale
# compute dk = dot(ds.T, q)
dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q)
# compute dq
dq = tl.load(dq_ptrs)
dq += tl.dot(ds.to(Q.dtype.element_ty), k)
tl.store(dq_ptrs, dq)
# increment pointers
dq_ptrs += BLOCK_M * stride_qm
q_ptrs += BLOCK_M * stride_qm
do_ptrs += BLOCK_M * stride_qm
# write-back
dv_ptrs = DV + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dk_ptrs = DK + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
empty = torch.empty(128, device="cuda")
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
assert Lq == Lk and Lk == Lv
assert Lk in {16, 32, 64, 128}
o = torch.empty_like(q)
BLOCK_M = 128
BLOCK_N = 64
grid = (triton.cdiv(q.shape[2], BLOCK_M), q.shape[0] * q.shape[1], 1)
L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
num_warps = 4 if Lk <= 64 else 8
_fwd_kernel[grid](
q, k, v, sm_scale,
L,
o,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
o.stride(0), o.stride(1), o.stride(2), o.stride(3),
q.shape[0], q.shape[1], q.shape[2],
BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk,
IS_CAUSAL=causal,
num_warps=num_warps,
num_stages=4)
ctx.save_for_backward(q, k, v, o, L)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = Lk
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
BLOCK = 128
q, k, v, o, L = ctx.saved_tensors
do = do.contiguous()
dq = torch.zeros_like(q, dtype=torch.float32)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
delta = torch.empty_like(L)
_bwd_preprocess[(ctx.grid[0] * ctx.grid[1], )](
o, do,
delta,
BLOCK_M=BLOCK, D_HEAD=ctx.BLOCK_DMODEL,
)
_bwd_kernel[(ctx.grid[1],)](
q, k, v, ctx.sm_scale,
o, do,
dq, dk, dv,
L, delta,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
q.shape[0], q.shape[1], q.shape[2],
ctx.grid[0],
BLOCK_M=BLOCK, BLOCK_N=BLOCK,
BLOCK_DMODEL=ctx.BLOCK_DMODEL, num_warps=8,
CAUSAL=ctx.causal,
num_stages=1,
)
return dq, dk, dv, None, None
attention = _attention.apply | TerraByte-master | TerraByte/model/attention_triton.py |
import torch.nn as nn
from TerraByte.model.helpers import RotaryEmbedding, FeedForward, RMSNorm, token_shift, exists
from TerraByte.model.attention import Attention
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
layers,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
ff_mult = 4,
rel_pos = True,
flash_attn = False
):
super().__init__()
self.rotary_emb = RotaryEmbedding(dim_head) if rel_pos else None
self.layers = nn.ModuleList([])
for _ in range(layers):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, flash = flash_attn),
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
]))
self.norm = RMSNorm(dim)
def forward(self, x):
n = x.shape[-2]
rotary_emb = self.rotary_emb(n) if exists(self.rotary_emb) else None
for attn, ff in self.layers:
x = attn(token_shift(x), rotary_emb = rotary_emb) + x
x = ff(token_shift(x)) + x
return self.norm(x)
| TerraByte-master | TerraByte/model/transformer.py |
from itertools import zip_longest
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from beartype import beartype
from beartype.typing import Tuple, Union
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import nn
from tqdm import tqdm
from TerraByte.model.helpers import (
cast_tuple,
exists,
gumbel_sample,
pack_one,
reduce_mult,
remainder_to_mult,
top_k,
unpack_one,
)
from TerraByte.model.patches import UniversalPatchEmbedder
from TerraByte.model.transformer import Transformer
# main class
class OmniMEGABYTE(nn.Module):
@beartype
def __init__(
self,
*,
num_tokens,
dim: Union[Tuple, int],
depth: Tuple,
max_seq_len: Tuple,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
pad_id = 0,
rel_pos_bias = True,
flash_attn = False
):
super().__init__()
# simplified configuration for each stage of the hierarchy
# depth = (2, 2, 4) would translate to depth 2 at first stage, depth 2 second stage, depth 4 third
# max_seq_len = (16, 8, 4) would translate to max sequence length of 16 at first stage, length of 8 at second stage, length of 4 for last
assert isinstance(depth, tuple) and isinstance(max_seq_len, tuple)
assert len(depth) == len(max_seq_len)
self.stages = len(depth)
dim = cast_tuple(dim, self.stages)
assert len(dim) == self.stages
coarsest_dim, *_, fine_dim = dim
self.token_emb = nn.Embedding(num_tokens, fine_dim)
self.max_seq_len = max_seq_len
self.start_tokens = nn.ParameterList([nn.Parameter(torch.randn(h_dim)) for h_dim, seq_len in zip(dim, max_seq_len)])
self.pos_embs = nn.ModuleList([nn.Embedding(seq_len, h_dim) for h_dim, seq_len in zip(dim, max_seq_len)])
# self.patch_embedders = nn.ModuleList([nn.Sequential(
# Rearrange('... r d -> ... (r d)'),
# nn.LayerNorm(seq_len * dim_in),
# nn.Linear(seq_len * dim_in, dim_out),
# nn.LayerNorm(dim_out)
# ) for dim_in, dim_out, seq_len in zip(dim[1:], dim[:-1], max_seq_len[1:])])
#v2
input_dims = (dim[1], dim[0], max_seq_len[1])
self.patch_embedders = UniversalPatchEmbedder(input_dims, dim[0], max_seq_len[1])
#------->
self.transformers = nn.ModuleList([])
self.to_next_transformer_projections = nn.ModuleList([])
for h_dim, next_h_dim, stage_depth, next_seq_len in zip_longest(dim, dim[1:], depth, max_seq_len[1:]):
self.transformers.append(Transformer(
dim = h_dim,
layers = stage_depth,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
ff_mult = ff_mult,
                rel_pos = rel_pos_bias,
flash_attn = flash_attn
))
proj = nn.Identity()
            # a projection into the next stage's (next_seq_len, next_h_dim) layout is needed whenever there is a next stage
            if exists(next_h_dim):
proj = nn.Sequential(
nn.Linear(h_dim, next_h_dim * next_seq_len),
Rearrange('... (n d) -> (...) n d', n = next_seq_len)
)
self.to_next_transformer_projections.append(proj)
self.to_logits = nn.Linear(fine_dim, num_tokens)
self.pad_id = pad_id
def generate(self, prime = None, filter_thres = 0.9, temperature = 1., default_batch_size = 1):
total_seq_len = reduce_mult(self.max_seq_len)
device = next(self.parameters()).device
if not exists(prime):
prime = torch.empty((default_batch_size, 0), dtype = torch.long, device = device)
seq = prime
batch = seq.shape[0]
for _ in tqdm(range(total_seq_len - seq.shape[-1])):
logits = self.forward(seq)[:, -1]
logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(logits, dim = -1, temperature = temperature)
seq = torch.cat((seq, rearrange(sampled, 'b -> b 1')), dim = -1)
return seq.reshape(batch, *self.max_seq_len)
def forward_empty(self, batch_size):
# take care of special case
# where you sample from input of 0 (start token only)
prev_stage_tokens_repr = None
for stage_start_tokens, transformer, proj in zip(self.start_tokens, self.transformers, self.to_next_transformer_projections):
tokens = repeat(stage_start_tokens, 'd -> b 1 d', b = batch_size)
if exists(prev_stage_tokens_repr):
tokens = tokens + prev_stage_tokens_repr[..., :tokens.shape[-2], :]
tokens = transformer(tokens)
prev_stage_tokens_repr = proj(tokens)
return self.to_logits(tokens)
def forward(self, ids, modality, return_loss = False):
batch = ids.shape[0]
        print(f'ids shape: {ids.shape}')
assert ids.ndim in {2, self.stages + 1}
print(f"self stages: {self.stages}")
flattened_dims = ids.ndim == 2
if ids.numel() == 0:
return self.forward_empty(ids.shape[0])
if flattened_dims:
# allow for ids to be given in the shape of (batch, seq)
# in which case it will be auto-padded to the next nearest multiple of depth seq len
seq_len = ids.shape[-1]
multiple_of = reduce_mult(self.max_seq_len[1:])
padding = remainder_to_mult(seq_len, multiple_of)
ids = F.pad(ids, (0, padding), value = self.pad_id)
ids = ids.reshape(batch, -1, *self.max_seq_len[1:])
b, *prec_dims, device = *ids.shape, ids.device
# check some dimensions
        assert prec_dims[0] <= self.max_seq_len[0], 'the first dimension of your axial autoregressive transformer must be less than or equal to the first tuple element of max_seq_len (like any autoregressive transformer)'
assert tuple(prec_dims[1:]) == tuple(self.max_seq_len[1:]), 'all subsequent dimensions must match exactly'
# get token embeddings
tokens = self.token_emb(ids)
# get tokens for all hierarchical stages, reducing by appropriate dimensions
# and adding the absolute positional embeddings
tokens_at_stages = []
reduced_tokens = tokens
patch_embedders_list = [self.patch_embedders]
for ind, pos_emb, patch_emb in zip(range(len(prec_dims)), reversed(self.pos_embs), reversed(patch_embedders_list)):
is_first = ind == 0
if not is_first:
reduced_tokens = patch_emb(reduced_tokens, modality)
positions = pos_emb(torch.arange(reduced_tokens.shape[-2], device=device))
tokens_with_position = reduced_tokens + positions
tokens_at_stages.insert(0, tokens_with_position)
# the un-pixelshuffled representations of the previous hierarchy, starts with None
prev_stage_tokens_repr = None
# spatial tokens is tokens with depth pos reduced along depth dimension + spatial positions
for stage_start_tokens, stage_tokens, transformer, proj in zip(self.start_tokens, tokens_at_stages, self.transformers, self.to_next_transformer_projections):
stage_tokens, ps = pack_one(stage_tokens, '* n d')
stage_start_tokens = repeat(stage_start_tokens, 'f -> b 1 f', b=stage_tokens.shape[0])
#update the dimensions of the stage_start_tokens tensor
stage_start_tokens = stage_start_tokens[..., :stage_tokens.shape[-1]]
# Print the shapes of the tensors before concatenating
print(f"stage_start_tokens shape: {stage_start_tokens.shape}")
print(f"stage_tokens shape: {stage_tokens.shape}")
# concat start token
stage_tokens = torch.cat((
stage_start_tokens,
stage_tokens,
), dim=-2)
# sum the previous hierarchy's representation
if exists(prev_stage_tokens_repr):
prev_stage_tokens_repr = F.pad(prev_stage_tokens_repr, (0, 0, 1, 0), value=0.)
stage_tokens = stage_tokens + prev_stage_tokens_repr
attended = transformer(stage_tokens)
attended = unpack_one(attended, ps, '* n d')
# project for next stage in the hierarchy
prev_stage_tokens_repr = proj(attended[..., :-1, :])
# project to logits
logits = self.to_logits(attended)
start_tokens = logits[(slice(None), *((0,) * (logits.ndim - 2)), slice(None))]
start_tokens = rearrange(start_tokens, 'b d -> b 1 d')
logits = logits[..., 1:, :]
if not return_loss:
if flattened_dims:
logits = rearrange(logits, 'b ... c -> b (...) c')
logits = logits[:, :seq_len]
return logits
logits = rearrange(logits, 'b ... c -> b (...) c')
logits = torch.cat((start_tokens, logits), dim = -2)
preds = rearrange(logits, 'b n c -> b c n')
labels = rearrange(ids, 'b ... -> b (...)')
loss = F.cross_entropy(
preds[..., :-1],
labels,
ignore_index = self.pad_id
)
return loss
| TerraByte-master | TerraByte/model/omnibyte.py |
from itertools import zip_longest
from typing import Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from beartype import beartype
from beartype.typing import Tuple, Union
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import nn
from tqdm import tqdm
from TerraByte.model.attention import Attention
from TerraByte.model.flash_triton import flash_attn_kvpacked_func, flash_attn_func
from TerraByte.model.helpers import (
FeedForward,
default,
RMSNorm,
RotaryEmbedding,
apply_rotary_pos_emb,
cast_tuple,
exists,
gumbel_sample,
pack_one,
reduce_mult,
remainder_to_mult,
token_shift,
top_k,
unpack_one,
)
from TerraByte.model.transformer import Transformer
# class Attention(nn.Module):
# def __init__(
# self,
# *,
# dim,
# dim_head = 64,
# heads = 8,
# dropout = 0.,
# flash = False
# ):
# super().__init__()
# self.scale = dim_head ** -0.5
# self.heads = heads
# inner_dim = dim_head * heads
# self.attend = attention # Replace Attend with _attention
# self.dropout = nn.Dropout(dropout)
# self.norm = RMSNorm(dim)
# self.to_q = nn.Linear(dim, inner_dim, bias = False)
# self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
# self.to_out = nn.Linear(inner_dim, dim, bias = False)
# def forward(self, x, rotary_emb = None):
# h, device = self.heads, x.device
# x = self.norm(x)
# q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
# q = rearrange(q, 'b n (h d) -> b h n d', h = h)
# if exists(rotary_emb):
# q, k = map(lambda t: apply_rotary_pos_emb(rotary_emb, t), (q, k))
# #ENSURE Q, K, V have atleast 4 dimensions
# q = q.unsqueeze(0) if q.dim() < 4 else q
# k = k.unsqueeze(0) if k.dim() < 4 else k
# v = v.unsqueeze(0)if v.dim() < 4 else v
# q.type(torch.float32)
# k.type(torch.float32)
# v.type(torch.float32)
# # Ensure k has the correct shape
# print(f'k shape: {k.shape}')
# k = k.view(1, self.heads, -1, q.shape[-1])
# out = self.attend(q, k, v, True, self.scale) # Add causal and sm_scale parameters
# out = rearrange(out, 'b h n d -> b n (h d)')
# return self.to_out(out)
class Attention(nn.Module):
def __init__(self, dim, heads=8, dim_head=64, dropout=0.0):
super().__init__()
self.dim = dim
self.heads = heads
self.dim_head = dim_head
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout)
    def forward(self, x, rotary_emb=None, kv=None):
        b, n, _, h, dh = *x.shape, self.heads, self.dim_head
        assert kv is None or kv.shape == x.shape, 'input and key-value pair must have the same shape'
        q = self.to_q(x)
        kv_input = default(kv, x)
        k, v = self.to_kv(kv_input).chunk(2, dim=-1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
        if exists(rotary_emb):
            q, k = map(lambda t: apply_rotary_pos_emb(rotary_emb, t), (q, k))
        # the Triton kernel expects (batch, seqlen, nheads, headdim) tensors in fp16/bf16
        # and applies its own softmax, so no explicit attention matrix is materialized here
        q, k, v = map(lambda t: rearrange(t, 'b h n d -> b n h d').half(), (q, k, v))
        # causal flash attention; note that attention dropout is not applied on this kernel path
        out = flash_attn_func(q, k, v, None, True, self.scale)
        out = rearrange(out, 'b n h d -> b n (h d)').to(x.dtype)
        return self.to_out(out)
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
layers,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
ff_mult = 4,
rel_pos = True,
flash_attn = False
):
super().__init__()
self.rotary_emb = RotaryEmbedding(dim_head) if rel_pos else None
self.layers = nn.ModuleList([])
for _ in range(layers):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout), #flash = flash_attn),
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
]))
self.norm = RMSNorm(dim)
def forward(self, x):
n = x.shape[-2]
rotary_emb = self.rotary_emb(n) if exists(self.rotary_emb) else None
for attn, ff in self.layers:
x = attn(token_shift(x), rotary_emb = rotary_emb) + x
x = ff(token_shift(x)) + x
return self.norm(x)
class TerraByteTriton(nn.Module):
@beartype
def __init__(
self,
*,
num_tokens,
dim: Union[Tuple, int],
depth: Tuple,
max_seq_len: Tuple,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
pad_id = 0,
pos_emb=False,
rel_pos_bias = True,
dilation_rate = None,
segment_size = None,
use_xpos = False,
use_rel_pos_bias = False,
flash_attn = False
):
super().__init__()
# simplified configuration for each stage of the hierarchy
# depth = (2, 2, 4) would translate to depth 2 at first stage, depth 2 second stage, depth 4 third
# max_seq_len = (16, 8, 4) would translate to max sequence length of 16 at first stage, length of 8 at second stage, length of 4 for last
assert isinstance(depth, tuple) and isinstance(max_seq_len, tuple)
assert len(depth) == len(max_seq_len)
self.stages = len(depth)
dim = cast_tuple(dim, self.stages)
assert len(dim) == self.stages
coarsest_dim, *_, fine_dim = dim
self.max_seq_len = max_seq_len
self.start_tokens = nn.ParameterList([nn.Parameter(torch.randn(h_dim)) for h_dim, seq_len in zip(dim, max_seq_len)]) # noqa: E501
self.pos_embs = nn.ModuleList([nn.Embedding(seq_len, h_dim) for h_dim, seq_len in zip(dim, max_seq_len)]) if pos_emb else None
self.token_embs = nn.ModuleList([])
patch_size = 1
self.token_embs.append(nn.Embedding(num_tokens, fine_dim))
for dim_out, seq_len in zip(reversed(dim[:-1]), reversed(max_seq_len[1:])):
patch_size *= seq_len
self.token_embs.append(nn.Sequential(
nn.Embedding(num_tokens, fine_dim),
Rearrange('... r d -> ... (r d)'),
nn.LayerNorm(patch_size * fine_dim),
nn.Linear(patch_size * fine_dim, dim_out),
nn.LayerNorm(dim_out)
))
self.transformers = nn.ModuleList([])
self.to_next_transformer_projections = nn.ModuleList([])
for h_dim, next_h_dim, stage_depth, next_seq_len in zip_longest(dim, dim[1:], depth, max_seq_len[1:]):
self.transformers.append(Transformer(
dim = h_dim,
layers = stage_depth,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
ff_mult = ff_mult,
# rel_pos_bias = rel_pos_bias,
flash_attn = flash_attn
))
proj = nn.Identity()
if exists(next_h_dim) and next_h_dim != dim:
proj = nn.Sequential(
Rearrange('b ... d -> b (...) d'),
nn.Linear(h_dim, next_h_dim * next_seq_len),
Rearrange('b m (n d) -> (b m) n d', n = next_seq_len)
)
self.to_next_transformer_projections.append(proj)
self.to_logits = nn.Linear(fine_dim, num_tokens)
self.pad_id = pad_id
def generate(self, prime = None, filter_thres = 0.9, temperature = 1., default_batch_size = 1):
total_seq_len = reduce_mult(self.max_seq_len)
device = next(self.parameters()).device
if not exists(prime):
prime = torch.empty((default_batch_size, 0), dtype = torch.long, device = device)
seq = prime
batch = seq.shape[0]
for _ in tqdm(range(total_seq_len - seq.shape[-1])):
logits = self.forward(seq)[:, -1]
logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(logits, dim = -1, temperature = temperature)
seq = torch.cat((seq, rearrange(sampled, 'b -> b 1')), dim = -1)
return seq.reshape(batch, *self.max_seq_len)
def forward_empty(self, batch_size):
# take care of special case
# where you sample from input of 0 (start token only)
prev_stage_tokens_repr = None
for stage_start_tokens, transformer, proj in zip(self.start_tokens, self.transformers, self.to_next_transformer_projections):
tokens = repeat(stage_start_tokens, 'd -> b 1 d', b = batch_size)
if exists(prev_stage_tokens_repr):
tokens = tokens + prev_stage_tokens_repr[..., :tokens.shape[-2], :]
tokens = transformer(tokens)
prev_stage_tokens_repr = proj(tokens)
return self.to_logits(tokens)
def forward(self, ids, return_loss = False):
batch = ids.shape[0]
assert ids.ndim in {2, self.stages + 1}
flattened_dims = ids.ndim == 2
if ids.numel() == 0:
return self.forward_empty(ids.shape[0])
if flattened_dims:
# allow for ids to be given in the shape of (batch, seq)
# in which case it will be auto-padded to the next nearest multiple of depth seq len
seq_len = ids.shape[-1]
multiple_of = reduce_mult(self.max_seq_len[1:])
padding = remainder_to_mult(seq_len, multiple_of)
ids = F.pad(ids, (0, padding), value = self.pad_id)
ids = ids.reshape(batch, -1, *self.max_seq_len[1:])
b, *prec_dims, device = *ids.shape, ids.device
# check some dimensions
        assert prec_dims[0] <= self.max_seq_len[0], 'the first dimension of your axial autoregressive transformer must be less than or equal to the first tuple element of max_seq_len (like any autoregressive transformer)'
assert tuple(prec_dims[1:]) == tuple(self.max_seq_len[1:]), 'all subsequent dimensions must match exactly'
# get tokens for all hierarchical stages, reducing by appropriate dimensions
# and adding the absolute positional embeddings
tokens_at_stages = []
pos_embs = default(self.pos_embs, (None,))
for ind, pos_emb, token_emb in zip_longest(range(len(prec_dims)), pos_embs, self.token_embs):
is_first = ind == 0
tokens = token_emb(ids)
if exists(pos_emb):
positions = pos_emb(torch.arange(tokens.shape[-2], device = device))
tokens = tokens + positions
tokens_at_stages.insert(0, tokens)
if is_first:
continue
ids = rearrange(ids, '... m n -> ... (m n)')
# the un-pixelshuffled representations of the previous hierarchy, starts with None
prev_stage_tokens_repr = None
# spatial tokens is tokens with depth pos reduced along depth dimension + spatial positions
for stage_start_tokens, stage_tokens, transformer, proj in zip(self.start_tokens, tokens_at_stages, self.transformers, self.to_next_transformer_projections):
stage_tokens, ps = pack_one(stage_tokens, '* n d')
stage_start_tokens = repeat(stage_start_tokens, 'f -> b 1 f', b = stage_tokens.shape[0])
# concat start token
stage_tokens = torch.cat((
stage_start_tokens,
stage_tokens,
), dim = -2)
# sum the previous hierarchy's representation
if exists(prev_stage_tokens_repr):
prev_stage_tokens_repr = F.pad(prev_stage_tokens_repr, (0, 0, 1, 0), value = 0.)
stage_tokens = stage_tokens + prev_stage_tokens_repr
attended = transformer(stage_tokens)
attended = unpack_one(attended, ps, '* n d')
# project for next stage in the hierarchy
prev_stage_tokens_repr = proj(attended[..., :-1, :])
# project to logits
logits = self.to_logits(attended)
start_tokens = logits[(slice(None), *((0,) * (logits.ndim - 2)), slice(None))]
start_tokens = rearrange(start_tokens, 'b d -> b 1 d')
logits = logits[..., 1:, :]
if not return_loss:
if flattened_dims:
logits = rearrange(logits, 'b ... c -> b (...) c')
logits = logits[:, :seq_len]
return logits
logits = rearrange(logits, 'b ... c -> b (...) c')
logits = torch.cat((start_tokens, logits), dim = -2)
preds = rearrange(logits, 'b n c -> b c n')
labels = rearrange(ids, 'b ... -> b (...)')
loss = F.cross_entropy(
preds[..., :-1],
labels,
ignore_index = self.pad_id
)
return loss
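# A minimal usage sketch added for illustration; the hyperparameters below are
# assumptions, not values from the original repository. The Triton flash-attention
# path requires a CUDA device, casts activations to fp16 inside attention, and
# supports head dimensions up to 128.
if __name__ == '__main__' and torch.cuda.is_available():
    model = TerraByteTriton(num_tokens=256, dim=(512, 256), depth=(2, 2), max_seq_len=(512, 4)).cuda()
    ids = torch.randint(0, 256, (1, 512 * 4), device='cuda')  # flattened byte ids: (batch, seq)
    loss = model(ids, return_loss=True)  # cross-entropy over the hierarchically patched sequence
    loss.backward()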
| TerraByte-master | TerraByte/model/terrabyte_triton.py |
import math
import torch
import triton
import triton.language as tl
# Disabling autotune for now, set num_warps=4 if headdim=64 and num_warps=8 if headdim=128
# @triton.autotune(
# configs=[
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 128}, num_warps=4, num_stages=1),
# # This config has a race condition when EVEN_M == False, disabling it for now.
# # triton.Config({"BLOCK_M": 64, "BLOCK_N": 64}, num_warps=4, num_stages=1),
# ],
# key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM']
# )
@triton.heuristics(
{
"EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
"EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
"EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
}
)
@triton.jit
def _fwd_kernel(
Q, K, V, Bias, Out,
Lse, TMP, # NOTE: TMP is a scratchpad buffer to workaround a compiler bug
softmax_scale,
stride_qb, stride_qh, stride_qm,
stride_kb, stride_kh, stride_kn,
stride_vb, stride_vh, stride_vn,
stride_bb, stride_bh, stride_bm,
stride_ob, stride_oh, stride_om,
nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim,
CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
# off_b = tl.program_id(1)
# off_h = tl.program_id(2)
# off_hb = off_b * nheads + off_h
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_HEADDIM)
# Initialize pointers to Q, K, V
# Adding parenthesis around indexing might use int32 math instead of int64 math?
# https://github.com/openai/triton/issues/741
# I'm seeing a tiny bit of difference (5-7us)
q_ptrs = Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] * stride_qm + offs_d[None, :])
k_ptrs = K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] * stride_kn + offs_d[None, :])
v_ptrs = V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] * stride_vn + offs_d[None, :])
if BIAS_TYPE == 'vector':
b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
elif BIAS_TYPE == 'matrix':
b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + (offs_m[:, None] * stride_bm + offs_n[None, :])
# initialize pointer to m and l
t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
# load q: it will stay in SRAM throughout
# [2022-10-30] TD: Triton bug - in the case of EVEN_M=True and EVEN_N=False, if we just call
# tl.load(q_ptrs), we get the wrong output!
if EVEN_M & EVEN_N:
if EVEN_HEADDIM:
q = tl.load(q_ptrs)
else:
q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
else:
if EVEN_HEADDIM:
q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
else:
q = tl.load(q_ptrs, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
other=0.0)
# loop over k, v and update accumulator
end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k)
for start_n in range(0, end_n, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
if EVEN_N & EVEN_M: # If we just do "if EVEN_N", there seems to be some race condition
if EVEN_HEADDIM:
k = tl.load(k_ptrs + start_n * stride_kn)
else:
k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_d[None, :] < headdim, other=0.0)
else:
if EVEN_HEADDIM:
k = tl.load(k_ptrs + start_n * stride_kn, mask=(start_n + offs_n)[:, None] < seqlen_k,
other=0.0)
else:
k = tl.load(k_ptrs + start_n * stride_kn,
mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
other=0.0)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k, trans_b=True)
# Trying to combine the two masks seem to make the result wrong
if not EVEN_N: # Need to mask out otherwise the softmax is wrong
qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float("-inf"))
if IS_CAUSAL:
qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float("-inf"))
if BIAS_TYPE != 'none':
if BIAS_TYPE == 'vector':
if EVEN_N:
bias = tl.load(b_ptrs + start_n).to(tl.float32)
else:
bias = tl.load(b_ptrs + start_n, mask=(start_n + offs_n) < seqlen_k, other=0.0).to(tl.float32)
bias = bias[None, :]
elif BIAS_TYPE == 'matrix':
if EVEN_M & EVEN_N:
bias = tl.load(b_ptrs + start_n).to(tl.float32)
else:
bias = tl.load(b_ptrs + start_n,
mask=(offs_m[:, None] < seqlen_q)
& ((start_n + offs_n)[None, :] < seqlen_k),
other=0.0).to(tl.float32)
# Slightly faster to multiply the softmax_scale in the tl.exp below since the compiler
            # can then fuse the mult and add into an fma instruction. But if we have bias we need
            # to multiply with softmax_scale here.
qk = qk * softmax_scale + bias
m_ij = tl.maximum(tl.max(qk, 1), lse_i)
p = tl.exp(qk - m_ij[:, None])
else:
m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
p = tl.exp(qk * softmax_scale - m_ij[:, None])
l_ij = tl.sum(p, 1)
# scale acc_o
acc_o_scale = tl.exp(m_i - m_ij)
# # -- update output accumulator --
# BUG: have to store and immediately load
tl.store(t_ptrs, acc_o_scale)
acc_o_scale = tl.load(t_ptrs)
acc_o = acc_o * acc_o_scale[:, None]
# update acc_o
if EVEN_N & EVEN_M: # If we just do "if EVEN_N", there seems to be some race condition
if EVEN_HEADDIM:
v = tl.load(v_ptrs + start_n * stride_vn)
else:
v = tl.load(v_ptrs + start_n * stride_vn, mask=offs_d[None, :] < headdim, other=0.0)
else:
if EVEN_HEADDIM:
v = tl.load(v_ptrs + start_n * stride_vn, mask=(start_n + offs_n)[:, None] < seqlen_k,
other=0.0)
else:
v = tl.load(v_ptrs + start_n * stride_vn,
mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
other=0.0)
p = p.to(v.dtype)
acc_o += tl.dot(p, v)
# -- update statistics
m_i = m_ij
l_i_new = tl.exp(lse_i - m_ij) + l_ij
lse_i = m_ij + tl.log(l_i_new)
o_scale = tl.exp(m_i - lse_i)
# BUG: have to store and immediately load
tl.store(t_ptrs, o_scale)
o_scale = tl.load(t_ptrs)
acc_o = acc_o * o_scale[:, None]
# rematerialize offsets to save registers
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
# write back l and m
lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
tl.store(lse_ptrs, lse_i)
# initialize pointers to output
offs_d = tl.arange(0, BLOCK_HEADDIM)
out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:, None] * stride_om + offs_d[None, :])
if EVEN_M:
if EVEN_HEADDIM:
tl.store(out_ptrs, acc_o)
else:
tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
else:
if EVEN_HEADDIM:
tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
else:
tl.store(out_ptrs, acc_o,
mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
@triton.jit
def _bwd_preprocess_do_o_dot(
Out, DO, Delta,
stride_ob, stride_oh, stride_om,
stride_dob, stride_doh, stride_dom,
nheads, seqlen_q, seqlen_q_rounded, headdim,
BLOCK_M: tl.constexpr, BLOCK_HEADDIM: tl.constexpr,
):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
# load
o = tl.load(Out + off_b * stride_ob + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :],
mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32)
do = tl.load(DO + off_b * stride_dob + off_h * stride_doh + offs_m[:, None] * stride_dom + offs_d[None, :],
mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32)
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)
@triton.jit
def _bwd_store_dk_dv(
dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim,
EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr,
):
# [2022-11-01] TD: Same bug. In the case of EVEN_N=True and EVEN_M=False,
# if we just call tl.store(dv_ptrs), there's a race condition
if EVEN_N & EVEN_M:
if EVEN_HEADDIM:
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
else:
tl.store(dv_ptrs, dv, mask=offs_d[None, :] < headdim)
tl.store(dk_ptrs, dk, mask=offs_d[None, :] < headdim)
else:
if EVEN_HEADDIM:
tl.store(dv_ptrs, dv, mask=offs_n[:, None] < seqlen_k)
tl.store(dk_ptrs, dk, mask=offs_n[:, None] < seqlen_k)
else:
tl.store(dv_ptrs, dv, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
tl.store(dk_ptrs, dk, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
@triton.jit
def _bwd_kernel_one_col_block(
start_n,
Q, K, V, Bias,
DO, DQ, DK, DV,
LSE, D,
softmax_scale,
stride_qm, stride_kn, stride_vn, stride_bm,
stride_dom, stride_dqm, stride_dkn, stride_dvn,
seqlen_q, seqlen_k, headdim,
ATOMIC_ADD: tl.constexpr,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
):
# We need to make sure begin_m is a multiple of BLOCK_M (not BLOCK_N)
begin_m = 0 if not IS_CAUSAL else ((start_n * BLOCK_N) // BLOCK_M) * BLOCK_M
# initialize row/col offsets
offs_qm = begin_m + tl.arange(0, BLOCK_M)
offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
offs_m = tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
# initialize pointers to value-like data
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_d[None, :])
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_d[None, :])
v_ptrs = V + (offs_n[:, None] * stride_vn + offs_d[None, :])
do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_d[None, :])
dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_d[None, :])
if BIAS_TYPE == 'vector':
b_ptrs = Bias + offs_n
elif BIAS_TYPE == 'matrix':
b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :])
# initialize dv and dk
dv = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
dk = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
# There seems to be some problem with Triton pipelining that makes results wrong for
# headdim=64, seqlen=(113, 255), bias_type='matrix'. In this case the for loop
# may have zero step, and pipelining with the bias matrix could screw it up.
# So we just exit early.
if begin_m >= seqlen_q:
dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
_bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim,
EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
return
# k and v stay in SRAM throughout
# [2022-10-30] TD: Same bug as the fwd. In the case of EVEN_N=True and EVEN_M=False,
# if we just call tl.load(k_ptrs), we get the wrong output!
if EVEN_N & EVEN_M:
if EVEN_HEADDIM:
k = tl.load(k_ptrs)
v = tl.load(v_ptrs)
else:
k = tl.load(k_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
v = tl.load(v_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
else:
if EVEN_HEADDIM:
k = tl.load(k_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
v = tl.load(v_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
else:
k = tl.load(k_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
other=0.0)
v = tl.load(v_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
other=0.0)
# loop over rows
num_block_m = tl.cdiv(seqlen_q, BLOCK_M)
for start_m in range(begin_m, num_block_m * BLOCK_M, BLOCK_M):
start_m = tl.multiple_of(start_m, BLOCK_M)
offs_m_curr = start_m + offs_m
# load q, k, v, do on-chip
# Same bug as below. Otherwise gives wrong result for headdim=40, seqlen=(128, 117)
if EVEN_M & EVEN_HEADDIM:
q = tl.load(q_ptrs)
else:
if EVEN_HEADDIM:
q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
else:
q = tl.load(q_ptrs, mask=(offs_m_curr[:, None] < seqlen_q)
& (offs_d[None, :] < headdim), other=0.0)
# recompute p = softmax(qk, dim=-1).T
qk = tl.dot(q, k, trans_b=True)
# Trying to combine the two masks seem to make the result wrong
if not EVEN_N: # Need to mask out otherwise the softmax is wrong
qk = tl.where(offs_n[None, :] < seqlen_k, qk, float("-inf"))
if IS_CAUSAL:
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf"))
if BIAS_TYPE != 'none':
tl.debug_barrier() # Race condition otherwise
if BIAS_TYPE == 'vector':
if EVEN_N:
bias = tl.load(b_ptrs).to(tl.float32)
else:
bias = tl.load(b_ptrs, mask=offs_n < seqlen_k, other=0.0).to(tl.float32)
bias = bias[None, :]
elif BIAS_TYPE == 'matrix':
if EVEN_M & EVEN_N:
bias = tl.load(b_ptrs).to(tl.float32)
else:
bias = tl.load(b_ptrs,
mask=(offs_m_curr[:, None] < seqlen_q)
& (offs_n[None, :] < seqlen_k),
other=0.0).to(tl.float32)
qk = qk * softmax_scale + bias
# There seems to be a race condition when headdim=48/96, and dq, dk, dv are wrong.
# Also wrong for headdim=64.
if not (EVEN_M & EVEN_HEADDIM):
tl.debug_barrier()
lse_i = tl.load(LSE + offs_m_curr)
if BIAS_TYPE == 'none':
p = tl.exp(qk * softmax_scale - lse_i[:, None])
else:
p = tl.exp(qk - lse_i[:, None])
# compute dv
# [2022-10-30] TD: A Triton bug: if EVEN_M=True and EVEN_HEADDIM=False, if we call
# do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0), we get wrong outputs
# in the case of headdim=48/96, seqlen_q & seqlen_k >= 512. If headdim=40 or seqlen < 512,
# the output is correct.
if EVEN_M & EVEN_HEADDIM:
do = tl.load(do_ptrs)
else:
# [2022-11-01] TD: Triton bug, there's a race condition if we just use m_mask and not d_mask.
do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q)
& (offs_d[None, :] < headdim), other=0.0)
# if EVEN_M:
# if EVEN_HEADDIM:
# do = tl.load(do_ptrs)
# else:
# do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
# else:
# if EVEN_HEADDIM:
# do = tl.load(do_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
# else:
# do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q)
# & (offs_d[None, :] < headdim), other=0.0)
dv += tl.dot(p.to(do.dtype), do, trans_a=True)
# compute dp = dot(v, do)
# There seems to be a race condition when headdim=48/96, and dq, dk are wrong.
# Also wrong for headdim=128, seqlen=(108, 256), and ATOMIC_ADD=True
# Also wrong for headdim=64, seqlen=(1023, 1024), and ATOMIC_ADD=False
if not (EVEN_M & EVEN_HEADDIM):
tl.debug_barrier()
dp = tl.dot(do, v, trans_b=True)
# There's a race condition for headdim=48
if not EVEN_HEADDIM:
tl.debug_barrier()
# compute ds = p * (dp - delta[:, None])
# Putting the subtraction after the dp matmul (instead of before) is slightly faster
Di = tl.load(D + offs_m_curr)
# Converting ds to q.dtype here reduces register pressure and makes it much faster
# for BLOCK_HEADDIM=128
ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype)
# compute dk = dot(ds.T, q)
dk += tl.dot(ds, q, trans_a=True)
# compute dq
        if not (EVEN_M & EVEN_HEADDIM):  # Otherwise there's a race condition when BIAS_TYPE='matrix'
tl.debug_barrier()
if not ATOMIC_ADD:
if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M
dq = tl.load(dq_ptrs, eviction_policy="evict_last")
dq += tl.dot(ds, k)
tl.store(dq_ptrs, dq, eviction_policy="evict_last")
else:
if EVEN_HEADDIM:
dq = tl.load(dq_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0,
eviction_policy="evict_last")
dq += tl.dot(ds, k)
tl.store(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q,
eviction_policy="evict_last")
else:
dq = tl.load(dq_ptrs,
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
other=0.0, eviction_policy="evict_last")
dq += tl.dot(ds, k)
tl.store(dq_ptrs, dq,
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
eviction_policy="evict_last")
else: # If we're parallelizing across the seqlen_k dimension
dq = tl.dot(ds, k)
if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M
tl.atomic_add(dq_ptrs, dq)
else:
if EVEN_HEADDIM:
tl.atomic_add(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q)
else:
tl.atomic_add(dq_ptrs, dq,
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
# increment pointers
dq_ptrs += BLOCK_M * stride_dqm
q_ptrs += BLOCK_M * stride_qm
do_ptrs += BLOCK_M * stride_dom
if BIAS_TYPE == 'matrix':
b_ptrs += BLOCK_M * stride_bm
# write-back
dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
_bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim,
EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
def init_to_zero(name):
return lambda nargs: nargs[name].zero_()
@triton.autotune(
configs=[
triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
# Other configs seem to give wrong results when seqlen_q % 128 != 0, disabling them for now
# # Kernel is buggy (give wrong result) if we set BLOCK_m=128, BLOCK_n=64, num_warps=*4*
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
# triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')),
# triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "SEQUENCE_PARALLEL": True}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')),
],
key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM'],
)
@triton.heuristics(
{
"EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
"EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
"EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
}
)
@triton.jit
def _bwd_kernel(
Q, K, V, Bias,
DO, DQ, DK, DV,
LSE, D,
softmax_scale,
stride_qb, stride_qh, stride_qm,
stride_kb, stride_kh, stride_kn,
stride_vb, stride_vh, stride_vn,
stride_bb, stride_bh, stride_bm,
stride_dob, stride_doh, stride_dom,
stride_dqb, stride_dqh, stride_dqm,
stride_dkb, stride_dkh, stride_dkn,
stride_dvb, stride_dvh, stride_dvn,
nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim,
CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
SEQUENCE_PARALLEL: tl.constexpr,
EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
):
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
# offset pointers for batch/head
Q += off_b * stride_qb + off_h * stride_qh
K += off_b * stride_kb + off_h * stride_kh
V += off_b * stride_vb + off_h * stride_vh
DO += off_b * stride_dob + off_h * stride_doh
DQ += off_b * stride_dqb + off_h * stride_dqh
DK += off_b * stride_dkb + off_h * stride_dkh
DV += off_b * stride_dvb + off_h * stride_dvh
if BIAS_TYPE != 'none':
Bias += off_b * stride_bb + off_h * stride_bh
# pointer to row-wise quantities in value-like data
D += off_hb * seqlen_q_rounded
LSE += off_hb * seqlen_q_rounded
if not SEQUENCE_PARALLEL:
num_block_n = tl.cdiv(seqlen_k, BLOCK_N)
for start_n in range(0, num_block_n):
_bwd_kernel_one_col_block(
start_n,
Q, K, V, Bias,
DO, DQ, DK, DV,
LSE, D,
softmax_scale,
stride_qm, stride_kn, stride_vn, stride_bm,
stride_dom, stride_dqm, stride_dkn, stride_dvn,
seqlen_q, seqlen_k, headdim,
ATOMIC_ADD=False,
BIAS_TYPE=BIAS_TYPE,
IS_CAUSAL=IS_CAUSAL,
BLOCK_HEADDIM=BLOCK_HEADDIM,
EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM,
BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N
)
else:
start_n = tl.program_id(0)
_bwd_kernel_one_col_block(
start_n,
Q, K, V, Bias,
DO, DQ, DK, DV,
LSE, D,
softmax_scale,
stride_qm, stride_kn, stride_vn, stride_bm,
stride_dom, stride_dqm, stride_dkn, stride_dvn,
seqlen_q, seqlen_k, headdim,
ATOMIC_ADD=True,
BIAS_TYPE=BIAS_TYPE,
IS_CAUSAL=IS_CAUSAL,
BLOCK_HEADDIM=BLOCK_HEADDIM,
EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM,
BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N
)
def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
# shape constraints
batch, seqlen_q, nheads, d = q.shape
_, seqlen_k, _, _ = k.shape
assert k.shape == (batch, seqlen_k, nheads, d)
assert v.shape == (batch, seqlen_k, nheads, d)
assert d <= 128, 'FlashAttention only support head dimensions up to 128'
assert q.dtype == k.dtype == v.dtype, 'All tensors must have the same type'
assert q.dtype in [torch.float16, torch.bfloat16], 'Only support fp16 and bf16'
assert q.is_cuda and k.is_cuda and v.is_cuda
softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
has_bias = bias is not None
bias_type = 'none'
if has_bias:
assert bias.dtype in [q.dtype, torch.float]
assert bias.is_cuda
assert bias.dim() == 4
if bias.stride(-1) != 1:
bias = bias.contiguous()
if bias.shape[2:] == (1, seqlen_k):
bias_type = 'vector'
elif bias.shape[2:] == (seqlen_q, seqlen_k):
bias_type = 'matrix'
else:
raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k)'
' or (seqlen_q, seqlen_k)')
bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
lse = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
tmp = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
o = torch.empty_like(q)
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
BLOCK = 128
num_warps = 4 if d <= 64 else 8
def grid(META):
return triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads
_fwd_kernel[grid](
q, k, v, bias, o,
lse, tmp,
softmax_scale,
q.stride(0), q.stride(2), q.stride(1),
k.stride(0), k.stride(2), k.stride(1),
v.stride(0), v.stride(2), v.stride(1),
*bias_strides,
o.stride(0), o.stride(2), o.stride(1),
nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d,
seqlen_q // 32, seqlen_k // 32, # key for triton cache (limit number of compilations)
# Can't use kwargs here because triton autotune expects key to be args, not kwargs
# IS_CAUSAL=causal, BLOCK_HEADDIM=d,
bias_type, causal, BLOCK_HEADDIM,
BLOCK_M=BLOCK, BLOCK_N=BLOCK,
num_warps=num_warps,
num_stages=1,
)
return o, lse, softmax_scale # softmax_scale could have been updated
def _flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None):
# Make sure that the last dimension is contiguous
if do.stride(-1) != 1:
do = do.contiguous()
batch, seqlen_q, nheads, d = q.shape
_, seqlen_k, _, _ = k.shape
# assert d in {16, 32, 64, 128}
assert d <= 128
seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
assert lse.shape == (batch, nheads, seqlen_q_rounded)
assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1
assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1
softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
# dq_accum = torch.zeros_like(q, dtype=torch.float32)
dq_accum = torch.empty_like(q, dtype=torch.float32)
delta = torch.empty_like(lse)
# delta = torch.zeros_like(lse)
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
def grid(META):
return triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads
_bwd_preprocess_do_o_dot[grid](
o, do, delta,
o.stride(0), o.stride(2), o.stride(1),
do.stride(0), do.stride(2), do.stride(1),
nheads, seqlen_q, seqlen_q_rounded, d,
BLOCK_M=128, BLOCK_HEADDIM=BLOCK_HEADDIM,
)
has_bias = bias is not None
bias_type = 'none'
if has_bias:
assert bias.dtype in [q.dtype, torch.float]
assert bias.is_cuda
assert bias.dim() == 4
assert bias.stride(-1) == 1
if bias.shape[2:] == (1, seqlen_k):
bias_type = 'vector'
elif bias.shape[2:] == (seqlen_q, seqlen_k):
bias_type = 'matrix'
else:
raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k)'
' or (seqlen_q, seqlen_k)')
bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
# BLOCK_M = 128
# BLOCK_N = 64
# num_warps = 4
def grid(META):
return triton.cdiv(seqlen_k, META["BLOCK_N"]) if META["SEQUENCE_PARALLEL"] else 1, batch * nheads
_bwd_kernel[grid](
q, k, v, bias,
do, dq_accum, dk, dv,
lse, delta,
softmax_scale,
q.stride(0), q.stride(2), q.stride(1),
k.stride(0), k.stride(2), k.stride(1),
v.stride(0), v.stride(2), v.stride(1),
*bias_strides,
do.stride(0), do.stride(2), do.stride(1),
dq_accum.stride(0), dq_accum.stride(2), dq_accum.stride(1),
dk.stride(0), dk.stride(2), dk.stride(1),
dv.stride(0), dv.stride(2), dv.stride(1),
nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d,
seqlen_q // 32, seqlen_k // 32, # key for triton cache (limit number of compilations)
# Can't use kwargs here because triton autotune expects key to be args, not kwargs
# IS_CAUSAL=causal, BLOCK_HEADDIM=d,
bias_type, causal, BLOCK_HEADDIM,
# SEQUENCE_PARALLEL=False,
# BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N,
# num_warps=num_warps,
# num_stages=1,
)
dq.copy_(dq_accum)
class FlashAttnQKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, bias=None, causal=False, softmax_scale=None):
"""
qkv: (batch, seqlen, 3, nheads, headdim)
        bias: optional, shape broadcastable to (batch, nheads, seqlen, seqlen).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen).
ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)
"""
# Make sure that the last dimension is contiguous
if qkv.stride(-1) != 1:
qkv = qkv.contiguous()
o, lse, ctx.softmax_scale = _flash_attn_forward(
qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], bias=bias, causal=causal,
softmax_scale=softmax_scale
)
ctx.save_for_backward(qkv, o, lse, bias)
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
qkv, o, lse, bias = ctx.saved_tensors
assert not ctx.needs_input_grad[1], 'FlashAttention does not support bias gradient yet'
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
with torch.inference_mode():
dqkv = torch.empty_like(qkv)
_flash_attn_backward(do, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], o, lse,
dqkv[:, :, 0], dqkv[:, :, 1], dqkv[:, :, 2],
bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
return dqkv, None, None, None
flash_attn_qkvpacked_func = FlashAttnQKVPackedFunc.apply
class FlashAttnKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None):
"""
q: (batch, seqlen_q, nheads, headdim)
kv: (batch, seqlen_k, 2, nheads, headdim)
        bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
"""
# Make sure that the last dimension is contiguous
q, kv = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, kv]]
o, lse, ctx.softmax_scale = _flash_attn_forward(
q, kv[:, :, 0], kv[:, :, 1], bias=bias, causal=causal, softmax_scale=softmax_scale
)
ctx.save_for_backward(q, kv, o, lse, bias)
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, kv, o, lse, bias = ctx.saved_tensors
if len(ctx.needs_input_grad) >= 3:
assert not ctx.needs_input_grad[2], 'FlashAttention does not support bias gradient yet'
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
with torch.inference_mode():
dq = torch.empty_like(q)
dkv = torch.empty_like(kv)
_flash_attn_backward(do, q, kv[:, :, 0], kv[:, :, 1], o, lse,
dq, dkv[:, :, 0], dkv[:, :, 1],
bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
return dq, dkv, None, None, None
flash_attn_kvpacked_func = FlashAttnKVPackedFunc.apply
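# Shape and dtype reminder for the autograd wrappers in this file (values below are
# illustrative, not from the original source): inputs must be CUDA tensors in fp16 or
# bf16, laid out as (batch, seqlen, nheads, headdim) with headdim <= 128.
#   q  = torch.randn(2, 1024, 8, 64, device='cuda', dtype=torch.float16)
#   kv = torch.randn(2, 1024, 2, 8, 64, device='cuda', dtype=torch.float16)
#   out = flash_attn_kvpacked_func(q, kv, None, True, 64 ** -0.5)  # bias=None, causal, 1/sqrt(d) scale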
class FlashAttnFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
"""
q: (batch_size, seqlen_q, nheads, headdim)
k, v: (batch_size, seqlen_k, nheads, headdim)
        bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
"""
# Make sure that the last dimension is contiguous
q, k, v = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, k, v]]
o, lse, ctx.softmax_scale = _flash_attn_forward(
q, k, v, bias=bias, causal=causal, softmax_scale=softmax_scale
)
ctx.save_for_backward(q, k, v, o, lse, bias)
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, lse, bias = ctx.saved_tensors
assert not ctx.needs_input_grad[3], 'FlashAttention does not support bias gradient yet'
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
with torch.inference_mode():
dq = torch.empty_like(q)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
_flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv,
bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
return dq, dk, dv, None, None, None
flash_attn_func = FlashAttnFunc.apply | TerraByte-master | TerraByte/model/flash_triton.py |
import functools
import math
import einops
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import pack, rearrange, unpack
from torch import nn
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def remainder_to_mult(num, mult):
return (mult - num % mult) % mult
def cast_tuple(t, length = 1):
return t if isinstance(t, tuple) else ((t,) * length)
def reduce_mult(nums):
return functools.reduce(lambda x, y: x * y, nums, 1)
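# e.g. reduce_mult((16, 8, 4)) == 512: for max_seq_len = (16, 8, 4) this is the total
# flattened sequence length covered by the hierarchy (16 * 8 * 4).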
# tensor helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
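# Illustrative decode step (assumed shapes): filter the logits, then sample with gumbel
# noise, mirroring how the models' generate() methods pick one token at a time.
#   logits = torch.randn(2, 256)                                        # (batch, num_tokens)
#   sampled = gumbel_sample(top_k(logits, thres=0.9), temperature=1.)   # -> shape (2,)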
# token shift, from Peng et al of RWKV
def token_shift(t):
t, t_shift = t.chunk(2, dim = -1)
t_shift = F.pad(t_shift, (0, 0, 1, -1))
return torch.cat((t, t_shift), dim = -1)
# positional bias
######################### =>
class Normalize(nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.dim = dim
def forward(self, x):
return torch.nn.functional.normalize(x, dim=self.dim, p=2)
class LearnableLogitScaling(nn.Module):
def __init__(
self,
logit_scale_init: float = 1 / 0.07,
learnable: bool = True,
max_logit_scale: float = 100,
) -> None:
super().__init__()
self.max_logit_scale = max_logit_scale
self.logit_scale_init = logit_scale_init
self.learnable = learnable
log_logit_scale = torch.ones([]) * np.log(self.logit_scale_init)
if learnable:
self.log_logit_scale = nn.Parameter(log_logit_scale)
else:
self.register_buffer("log_logit_scale", log_logit_scale)
def forward(self, x):
return torch.clip(self.log_logit_scale.exp(), max=self.max_logit_scale) * x
def extra_repr(self):
st = f"logit_scale_init={self.logit_scale_init},learnable={self.learnable}," \
f" max_logit_scale={self.max_logit_scale}"
return st
class EinOpsRearrange(nn.Module):
def __init__(self, rearrange_expr: str, **kwargs) -> None:
super().__init__()
self.rearrange_expr = rearrange_expr
self.kwargs = kwargs
def forward(self, x):
assert isinstance(x, torch.Tensor)
return einops.rearrange(x, self.rearrange_expr, **self.kwargs)
class VerboseNNModule(nn.Module):
"""
Wrapper around nn.Module that prints registered buffers and parameter names.
"""
@staticmethod
def get_readable_tensor_repr(name: str, tensor: torch.Tensor) -> str:
st = (
"("
+ name
+ "): "
+ "tensor("
+ str(tuple(tensor[1].shape))
+ ", requires_grad="
+ str(tensor[1].requires_grad)
+ ")\n"
)
return st
def extra_repr(self) -> str:
named_modules = set()
for p in self.named_modules():
named_modules.update([p[0]])
named_modules = list(named_modules)
string_repr = ""
for p in self.named_parameters():
name = p[0].split(".")[0]
if name not in named_modules:
string_repr += self.get_readable_tensor_repr(name, p)
for p in self.named_buffers():
name = p[0].split(".")[0]
string_repr += self.get_readable_tensor_repr(name, p)
return string_repr
def cast_if_src_dtype(
tensor: torch.Tensor, src_dtype: torch.dtype, tgt_dtype: torch.dtype
):
updated = False
if tensor.dtype == src_dtype:
tensor = tensor.to(dtype=tgt_dtype)
updated = True
return tensor, updated
class QuickGELU(nn.Module):
# From https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/clip/model.py#L166
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class SelectElement(nn.Module):
def __init__(self, index) -> None:
super().__init__()
self.index = index
def forward(self, x):
assert x.ndim >= 3
return x[:, self.index, ...]
class SelectEOSAndProject(nn.Module):
"""
Text Pooling used in OpenCLIP
"""
def __init__(self, proj: nn.Module) -> None:
super().__init__()
self.proj = proj
def forward(self, x, seq_len):
assert x.ndim == 3
# x is of shape B x L x D
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), seq_len]
x = self.proj(x)
return x
#======================= BIASES
class Alibi(nn.Module):
def __init__(self, heads, **kwargs):
super().__init__()
self.heads = heads
slopes = torch.Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
def forward(self, i, j, device):
if exists(self.bias) and self.bias.shape[-1] >= j:
return self.bias[..., :j]
bias = torch.arange(j, device = device)
bias = rearrange(bias, 'j -> 1 1 j')
bias = bias * self.slopes
self.register_buffer('bias', bias, persistent = False)
return self.bias
# norm
class RMSNorm(nn.Module):
def __init__(self, dim, eps = 1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
return x / norm.clamp(min = self.eps) * self.g
# helper classes
def FeedForward(*, dim, mult = 4, dropout = 0.):
return nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
class RotaryEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
@property
def device(self):
return next(self.buffers()).device
def forward(self, seq_len):
t = torch.arange(seq_len, device = self.device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
return freqs
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return t * pos.cos() + rotate_half(t) * pos.sin()
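# Illustrative rotary usage (assumed shapes): the frequencies broadcast over the leading
# (batch, heads) dimensions of per-head queries and keys, as done in the attention layers.
#   rotary = RotaryEmbedding(dim=64)
#   freqs = rotary(128)                     # (seq_len, dim_head) = (128, 64)
#   q = torch.randn(2, 8, 128, 64)          # (batch, heads, seq_len, dim_head)
#   q = apply_rotary_pos_emb(freqs, q)      # same shape, with positions encoded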
| TerraByte-master | TerraByte/model/helpers.py |
def main():
print("Welcome!Input the number of hours on Earth so you can see how many hours pass by on Europa")
userInput = int(input("How many Europa days go by for every x Earth day"))
Europa = userInput * 3.551 #hours for 1 day in Europa 85.224
print(f"{userInput} days on Earth is {Europa}days compared to Europa, isn't that crazy? One day in Europa is 85.224hours")
main() | 601-daysthatgobyonEuropa-main | 601_assignment.py |
import os
# from tree_of_thoughts.openaiModels import OpenAILanguageModel
# from tree_of_thoughts.treeofthoughts import TreeofThoughts
from meta_tree_of_thoughts.treeofthoughts import TreeofThoughts, MonteCarloTreeofThoughts
from meta_tree_of_thoughts.thinkingAgent import ThinkingAgent
from meta_tree_of_thoughts.openaiModel import OpenAILanguageModel
api_model= "gpt-3.5-turbo"
model = OpenAILanguageModel(api_key='api key', api_model=api_model)
# Initialize the MonteCarloTreeofThoughts class with the model
tree_of_thoughts = MonteCarloTreeofThoughts(model)
# Note: to reproduce the results from the Tree of Thoughts paper, if not better,
# craft a 1-shot chain-of-thought prompt for your task below
initial_prompt = """
Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
Possible next steps:
"""
num_thoughts = 1
max_steps = 3
max_states = 4
pruning_threshold = 0.5
solution = tree_of_thoughts.solve(
initial_prompt=initial_prompt,
num_thoughts=num_thoughts,
max_steps=max_steps,
max_states=max_states,
pruning_threshold=pruning_threshold,
# sleep_time=sleep_time
)
print(f"Solution: {solution}") | Meta-Tree-Of-Thoughts-main | example.py |
import os
import time
import json
import logging
import argparse
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from meta_tree_of_thoughts.thinkingAgent import ThinkingAgent
import numpy as np
# For each rejected path, store the reason for rejection and then pass the reason -> thought generator function
# thought -> evaluated, e.g. (0.3, 'This is a bad decision: 2 + 23 + 232323 does not equal 24') -> thought generator function
# class TreeofThoughts:
# def __init__(self, model, search_algorithm):
# self.model = model
# self.thinkingAgent = ThinkingAgent(self.model)
# self.search_algorithm = search_algorithm
# self.tree: Dict[str, Dict[str, float]] = {
# "nodes": {}
# }
# def solve(self, initial_prompt: str,
# num_thoughts: Optional[int] = 3,
# max_steps: Optional[int] = 3,
# max_states: Optional[int] = 5,
# value_threshold: Optional[float] = 0.5,
# confidence_threshold: Optional[float] = 0.8,
# max_iterations: Optional[int] = 40,
# convergence_threshold: Optional[float] = None,
# convergence_count: Optional[int] = None) -> str:
# self.file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
# try:
# best_thoughts = ""
# if self.search_algorithm == 'BFS':
# result = self.tot_bfs(initial_prompt, num_thoughts, max_steps, max_states, value_threshold)
# if result:
# self.save_tree_to_json(self.file_name)
# best_thoughts = result
# elif self.search_algorithm == 'DFS':
# result = self.tot_dfs(initial_prompt, num_thoughts, max_steps, value_threshold,
# confidence_threshold=confidence_threshold, max_iterations=max_iterations, convergence_threshold=convergence_threshold,
# convergence_count=convergence_count)
# if result:
# self.save_tree_to_json(self.file_name)
# best_thoughts = result
# if best_thoughts:
# solution = self.thinkingAgent.generate_solution(initial_prompt, best_thoughts)
# if solution:
# return solution
# else:
# raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
# except KeyboardInterrupt:
# logger.error("Keyboard interrupt detected.")
# except ValueError as e:
# logger.error(f"Error: {e}")
# finally:
# logger.info("Saving the current tree and metrics.")
# self.save_tree_to_json(self.file_name)
# def logNewState(self, state, evaluation):
# state = " ==> ".join(state)
# self.tree["nodes"][state] = evaluation
# self.save_tree_to_json(self.file_name)
# def tot_bfs(self, initial_prompt, num_thoughts, max_steps, max_states, pruning_threshold):
# current_states = [[f"My goal is to offer the most optimal to this user request '{initial_prompt}'"]]
# state_values = {}
# for step in range(1, max_steps + 1):
# for state in current_states:
# thoughts = self.thinkingAgent.generate_thoughts(state, num_thoughts, initial_prompt)
# newStates = []
# for thought in thoughts:
# flattened_state = (*state, thought)
# newStates.append(flattened_state)
# evaluated_thoughts = self.thinkingAgent.evaluate_states(newStates, initial_prompt)
# selected_states = []
# for state, value in evaluated_thoughts.items():
# if value >= pruning_threshold:
# selected_states.append(state)
# state_values[state] = value
# self.logNewState(state, value)
# if(len(selected_states) >1):
# current_states = selected_states[:max_states]
# if (len(current_states) == 1):
# return initial_prompt
# # print(current_states, state_values)
# best_state = max(current_states, key=lambda state: state_values[state])
# print(f'best_state: {best_state}')
# return best_state
# def tot_dfs(self,
# initial_prompt: str,
# num_thoughts: any,
# max_steps: int,
# value_threshold,
# pruning_threshold=0.5,
# confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
# output = []
# iteration_count = 0
# consecutive_convergence_count = 0
# prev_best_value = None
# file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
# def dfs(state, step):
# nonlocal consecutive_convergence_count, prev_best_value, iteration_count, output
# if step > max_steps:
# thought = self.thinkingAgent.generate_thoughts(state, 1, initial_prompt)
# value = self.thinkingAgent.evaluate_states({state}, initial_prompt)[state]
# output.append((thought, value))
# if confidence_threshold is not None and value >= confidence_threshold:
# return True
# if prev_best_value is not None and convergence_threshold is not None:
# if abs(value - prev_best_value) < convergence_threshold:
# consecutive_convergence_count += 1
# else:
# consecutive_convergence_count = 0
# prev_best_value = value
# iteration_count += 1
# if (max_iterations is not None and iteration_count >= max_iterations) or (convergence_count is not None and consecutive_convergence_count >= convergence_count):
# return True
# return False
# for next_state in sorted(self.thinkingAgent.generate_thoughts(state, num_thoughts, initial_prompt)):
# state_value = self.thinkingAgent.evaluate_states({next_state}, initial_prompt)[next_state]
# logger.info(f"State: {next_state}, Value: {state_value}")
# if state_value > value_threshold and (pruning_threshold is None or state_value >= pruning_threshold):
# child = (*state, next_state)
# if dfs(child, step + 1):
# return True
# self.save_tree_to_json(file_name)
# return False
# dfs([[initial_prompt]], 1)
# best_state = max(output, key=lambda x: x[1])
# return best_state[0]
# def save_tree_to_json(self, file_name):
# os.makedirs(os.path.dirname(file_name), exist_ok=True)
# with open(file_name, 'w') as json_file:
# json.dump(self.tree, json_file, indent=4)
# def print_tree(self,
# node: str,
# depth=0):
# thought = self.tree["metrics"]["thoughts"].get(node, "")
# evaluation = self.tree["metrics"]["evaluations"].get(node, "")
# tree_info = f"{' ' * depth}Node: {node}, Thought: {thought}, Evaluation: {evaluation}\n"
# for child, parent in self.tree["nodes"].items():
# if parent == node:
# tree_info += self.print_tree(child, depth + 1)
# print(f'tree info: {tree_info}')
# return tree_info
class TreeofThoughts:
def __init__(self, model):
self.model = model
self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {
"nodes": {},
}
self.best_state = None
self.best_value = float("-inf")
        self.history = []  # added line: initialize history
def save_tree_to_json(self, file_name):
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'w') as json_file:
json.dump(self.tree, json_file, indent=4)
def logNewState(self, state, evaluation):
        if not isinstance(state, str):
            state = " | ".join(state)
if state in self.tree['nodes']:
self.tree['nodes'][state]['thoughts'].append(evaluation)
else:
self.tree['nodes'][state] = {'thoughts': [evaluation]}
def adjust_pruning_threshold_precentile(self, evaluated_thoughts, percentile):
values = np.array(list(evaluated_thoughts.values()))
if values.size == 0:
return 0
return max(np.percentile(values, percentile), 0.1)
def adjust_pruning_threshold_moving_average(self, evaluated_thoughts, window_size):
values = list(evaluated_thoughts.values())
if len(values) < window_size:
return np.mean(values) if values else 0
else:
return max(np.mean(values[-window_size:]), 0.1)
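    # Illustrative sketch (not part of the original file): how the two threshold helpers behave on a
    # small dict of evaluated thoughts. TreeofThoughts only stores `model`, so any stub will do here.
    #
    #   tot = TreeofThoughts(model=None)
    #   scores = {"thought-a": 0.3, "thought-b": 0.7, "thought-c": 0.9}
    #   tot.adjust_pruning_threshold_precentile(scores, percentile=50)      # -> 0.7 (median, floored at 0.1)
    #   tot.adjust_pruning_threshold_moving_average(scores, window_size=2)  # -> 0.8 (mean of the last two values)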
class MonteCarloTreeofThoughts(TreeofThoughts):
def __init__(self, model, objective="balance"):
super().__init__(model)
self.objective = objective
self.solution_found = False
self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {
"nodes": {},
"metrics": {"thoughts": {}, "evaluations": {}},
}
def optimize_params(self, num_thoughts, max_steps, max_states):
if self.objective == 'speed':
num_thoughts = max(1, num_thoughts - 1)
max_steps = max(1, max_steps - 1)
max_states = max(1, max_states - 1)
elif self.objective == 'reliability':
num_thoughts += 1
max_steps += 1
max_states += 1
        elif self.objective == 'balance':  # matches the default objective passed to __init__
if self.solution_found:
num_thoughts = max(1, num_thoughts - 1)
max_steps = max(1, max_steps - 1)
max_states = max(1, max_states - 1)
else:
num_thoughts += 1
max_steps += 1
max_states += 1
return num_thoughts, max_steps, max_states
def solve(self,
initial_prompt: str,
num_thoughts: int,
max_steps: int,
max_states: int,
pruning_threshold: float,
# sleep_time: float,
):
file_name = str(initial_prompt)
self.file_name = f"logs/tree_of_thoughts_output_{file_name}.json"
return self.monte_carlo_search(
initial_prompt,
num_thoughts,
max_steps,
max_states,
pruning_threshold,
# sleep_time,
)
#v3
def monte_carlo_search(self,
initial_prompt: str,
num_thoughts: int,
max_steps: int,
max_states: int,
pruning_threshold: float,
):
current_states = [initial_prompt]
state_values = {}
visit_counts = {initial_prompt: 0}
transposition_table = {}
best_state = None
best_value = float('-inf')
for step in range(1, max_steps + 1):
selected_states = []
for state in current_states:
if state in transposition_table:
state_value = transposition_table[state]
else:
time.sleep(1)
thoughts = self.model.generate_thoughts(state, num_thoughts, initial_prompt)
time.sleep(1)
evaluated_thoughts = self.model.evaluate_states(thoughts, initial_prompt)
for thought, value in evaluated_thoughts.items():
flattened_state = (state, thought) if isinstance(state, str) else (*state, thought)
transposition_table[flattened_state] = value
for thought, value in evaluated_thoughts.items():
flattened_state = (state, thought) if isinstance(state, str) else (*state, thought)
if flattened_state not in visit_counts:
visit_counts[flattened_state] = 0
if visit_counts[state] > visit_counts[flattened_state] and visit_counts[flattened_state] > 0:
ucb1_value = value + np.sqrt(2 * np.log(visit_counts[state]) / visit_counts[flattened_state])
if ucb1_value >= pruning_threshold:
selected_states.append(flattened_state)
state_values[flattened_state] = value
# Update the best state if the current state value is greater than the best value
if value > best_value:
best_state = flattened_state
best_value = value
visit_counts[state] += 1
if len(selected_states) > max_states:
current_states = selected_states[:max_states]
self.save_tree_to_json(self.file_name)
# if best_state is not None:
# solution = self.model.generate_solution(initial_prompt, best_state)
# return solution
# else:
# solution = None
# return None
solution = self.model.generate_solution(initial_prompt, best_state)
return solution if solution else best_state | Meta-Tree-Of-Thoughts-main | meta_tree_of_thoughts/treeofthoughts.py |
from abc import ABC, abstractmethod
import random
from meta_tree_of_thoughts.metaAgent import MetaAgent
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_text(self, prompt):
pass
class ThinkingAgent:
def __init__(self, model: AbstractLanguageModel, strategy="cot", evaluation_strategy="value"):
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
self.model = model
self.MetaAgent = MetaAgent()
def generate_thoughts(self, state, k, initial_prompt, rejected_solutions=None):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("New state generating thought:", state, "\n\n")
# prompt = f"Considering the thoughts you've had until now:\n\n{state_text}\n\nDevise the next coherent thought that will aid in advancing the reasoning process and achieving a solution to {initial_prompt}. Assess various scenarios, think unconventionally, anticipate potential challenges, and resolve any outstanding queries. Tap into your mind's full potential and make certain no open questions remain."
prompt = f"""You're an TreeofThoughts, an superintelligent AI model devoted to helping Humans by any means necessary. You're purpose is to generate a series of solutions to comply with the user's instructions, you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them.
Considering the reasoning provided:\n\n
###'{state_text}'\n\n###
Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected:
###{rejected_solutions}###,
complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
        # the ReAct suffix is optional; fall back to an empty string if it was never configured
        prompt += getattr(self, "ReAct_prompt", "")
# print(prompt)
        # sample the wrapped language model k times to obtain k candidate thoughts
        thoughts = [self.model.generate_text(prompt) for _ in range(k)]
# print(thoughts)
# print(f"Generated thoughts: {thoughts}")
return thoughts
def generate_solution(self, initial_prompt, state, rejected_solutions=None):
if isinstance(state, list):
state_text = '\n'.join(state)
else:
state_text = state
prompt = f"""You're an TreeofThoughts, an superintelligent AI model devoted to helping Humans by any means necessary. You're purpose is to generate a series of solutions to comply with the user's instructions, you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them.
Considering the reasoning provided:\n\n
###'{state_text}'\n\n###
Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected:
###{rejected_solutions}###,
complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
        answer = self.model.generate_text(prompt)
        print(f'Answer: {answer}')
# print(thoughts)
# print(f"General Solution : {answer}")
return answer
# except Exception as e:
# logger.error(f"Error in generate_solutions: {e}")
# return None
def evaluate_states(self, states, initial_prompt):
if not states:
return {}
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
# prompt = f"Given the current state of reasoning: '{state_text}', evaluate its value as a float between 0 and 1, become very pessimistic think of potential adverse risks on the probability of this state of reasoning achieveing {initial_prompt} and DO NOT RESPOND WITH ANYTHING ELSE: OTHER THAN AN FLOAT"
prompt = f""" To achieve the following goal: '{initial_prompt}', pessimistically value the context of the past solutions and more importantly the latest generated solution you had AS A FLOAT BETWEEN 0 AND 1\n
Past solutions:\n\n
{state_text}\n
If the solutions is not directly concretely making fast progress in achieving the goal, give it a lower score.
Evaluate all solutions AS A FLOAT BETWEEN 0 and 1:\n, DO NOT RETURN ANYTHING ELSE
"""
# and then inside backticks provide an simple and direct bulletpoint list as to why you evaluated this thought the way you did. Provide simple yet intuitive feedback.
                # the wrapped language model returns the raw completion text for this evaluation prompt
                value_text = self.model.generate_text(prompt)
                try:
# print(f'state: {value_text}')
value = float(value_text)
print(f"Evaluated Thought Value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
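# Minimal usage sketch (an assumption, not part of the original file): wire the agent to the
# OpenAI-backed model from openaiModel.py. Needs OPENAI_API_KEY set to actually run.
if __name__ == "__main__":
    from meta_tree_of_thoughts.openaiModel import OpenAILanguageModel

    agent = ThinkingAgent(model=OpenAILanguageModel(api_key="", api_model="gpt-3.5-turbo"))
    goal = "Plan a weekend trip to the mountains"
    thoughts = agent.generate_thoughts(state="", k=2, initial_prompt=goal)
    print(agent.evaluate_states(thoughts, initial_prompt=goal))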
| Meta-Tree-Of-Thoughts-main | meta_tree_of_thoughts/thinkingAgent.py |
from abc import ABC, abstractmethod
import openai
import langchain
from dotenv import load_dotenv
from langchain import OpenAI, LLMChain, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferWindowMemory
load_dotenv()
#tree of thoughts
class MetaAgent():
def __init__(self):
self.initalize_meta_agent()
def get_new_instructions(self, meta_output):
new_instructions = meta_output.split("Instructions:")[-1]
return new_instructions
def update_prompt(self, chat_history, user_goal):
chain = LLMChain(
llm=self.LLM ,
prompt=self.meta_prompt,
verbose=True
)
meta_output = chain.run(chat_history=chat_history, old_instructions=self.thinking_prompt, objective=user_goal)
        # get the new instructions from the meta output
new_instructions = self.get_new_instructions(meta_output)
print("New thinking instructions: ", new_instructions)
variables_required = ["{old_thoughts}"]
has_required_variables = all(var in variables_required for var in variables_required)
if not has_required_variables:
print("Instructions failed to mutate")
else:
self.thinking_prompt = new_instructions
def initalize_meta_agent(self):
self.thinking_prompt = "You're Athena, an AI model devoted to helping Humans by any means necessary. You're purpose is to generate a series of solutions to comply with the user's instructions, you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time. The user has provided you with: {objective} complete this instruction BY ANY MEANS NECESSARY by considering the solutions you've had until now:\n\
\n'{old_thoughts}'\n\n Think about the next best step to achive {objective}.\
If you already have enough thoughts to achieve the goal, start improving some of the steps and verify that you are perfectly meeting the goal '{objective}'.\n Next step:"
meta_template="""
You need to change the following solutions instructions;\n'{old_instructions}'\n To make the solutions directly solving the user objective '{objective}'
Solutions instructions will be used by an AI assistant to direct it to create the thoughts to progress in achieving the user goal: '{objective}'.
The Solutions instructions have to lead to thoughts that make the AI progress fast in totally achieving the user goal '{objective}'. The Solutions generated have to be sharp and concrete, and lead to concrete visible progress in achieving the user's goal.
An AI model has just had the below interactions with a user, using the above solutions instructions to progress in achieve the user's goal. AI Model's generated thoughts don't lead to good enough progress in achieving: '{objective}'
Your job is to critique the model's performance using the old solution instructions and then revise the instructions so that the AI
model would quickly and correctly respond in the future to concretely achieve the user goal.
Old thinking instructions to modify:
###
{old_instructions}
###
The strings '{{old_thoughts}}' and the string '{{objective}}' have to appear in the new instructions as they will respectively be used by the AI model to store it's old thoughts, and the user's goal when it runs that instruction
AI model's interaction history with the user:
###
{chat_history}
###
Please reflect on these interactions.
You should critique the models performance in this interaction in respect to why the solutions it gave aren't directly leading to achieving the user's goals. What could the AI model have done better to be more direct and think better?
Indicate this with "Critique: ....
You should then revise the Instructions so that Assistant would quickly and correctly respond in the future.
The AI model's goal is to return the most reliable solution that leads to fast progressing in achieving the user's goal in as few interactions as possible.
The solutions generated should not turn around and do nothing, so if you notice that the instructions are leading to no progress in solving the user goal, modify the instructions so it leads to concrete progress.
The AI Assistant will only see the new Instructions the next time it thinks through the same problem, not the interaction
history, so anything important to do must be summarized in the Instructions. Don't forget any important details in
the current Instructions! Indicate the new instructions by "Instructions: ..."
        VERY IMPORTANT: The string '{{old_thoughts}}' and the string '{{objective}}' have to appear in the new instructions as they will respectively be used by the AI model to store its old thoughts, and the user's goal when it runs that instruction
"""
self.meta_prompt = PromptTemplate(
input_variables=[ 'old_instructions', 'objective', 'chat_history'],
template=meta_template
)
self.LLM = ChatOpenAI(temperature=0)
        # get the chat history from the evaluated states
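# Minimal usage sketch (an assumption, not part of the original file): ask the meta agent to rewrite
# its thinking instructions from a short transcript. Needs OPENAI_API_KEY set to actually run.
if __name__ == "__main__":
    meta = MetaAgent()
    history = "User asked for a packing list; the model produced vague, repetitive steps."
    meta.update_prompt(chat_history=history, user_goal="Produce a concise packing list")
    print(meta.thinking_prompt)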
| Meta-Tree-Of-Thoughts-main | meta_tree_of_thoughts/metaAgent.py |
import os
import openai
import time
import concurrent.futures
from abc import ABC, abstractmethod
class OpenAILanguageModel():
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_model="", enable_ReAct_prompting=True):
if api_key == "" or api_key == None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_model == "" or api_model == None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
self.use_chat_api = 'gpt' in self.api_model
# reference : https://www.promptingguide.ai/techniques/react
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def openai_api_call_handler(self, prompt, max_tokens, temperature):
while True:
try:
messages = [
{
"role": "user",
"content": prompt
}
]
response = openai.ChatCompletion.create(
model=self.api_model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
)
text = response['choices'][0]['message']['content'].strip()
with open("openai.logs", 'a') as log_file:
log_file.write("\n" + "-----------" + '\n' +"Prompt : "+ prompt+"\n"+"Response:\n"+text)
return text
            except openai.error.RateLimitError as e:
                # the env var comes back as a string, so cast before sleeping
                sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
                print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
                time.sleep(sleep_duration)
    def generate_text(self, prompt):
text = self.openai_api_call_handler(prompt, 1000, 0.5)
return text
# def solution(self, states, initial_prompt):
class OptimizedOpenAILanguageModel(OpenAILanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", cache_enabled=True, api_base="", api_model="", enable_ReAct_prompting=False):
        # the parent __init__ has no api_base parameter, so forward the remaining options by keyword
        super().__init__(api_key, strategy=strategy, evaluation_strategy=evaluation_strategy, api_model=api_model, enable_ReAct_prompting=enable_ReAct_prompting)
self.cache_enabled = cache_enabled
self.thought_cache = {}
self.state_evaluation_cache = {}
def parallel_generate_thoughts(self, states, k):
with concurrent.futures.ThreadPoolExecutor() as executor:
thoughts = list(executor.map(lambda state: self.generate_thoughts(state, k), states))
print(f"Parallel generated thoughts: {thoughts}")
return thoughts
    def parallel_evaluate_states(self, states, initial_prompt):
        with concurrent.futures.ThreadPoolExecutor() as executor:
            # evaluate every state against the same prompt; mapping over the prompt string would iterate it character by character
            state_values = list(executor.map(lambda state: self.evaluate_states(state, initial_prompt), states))
print(f"Parallel evaluated state values: {state_values}")
return state_values
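# Minimal usage sketch (an assumption, not part of the original file): the wrapper falls back to the
# OPENAI_API_KEY / OPENAI_API_MODEL environment variables when arguments are empty, and the handler
# uses the chat completions endpoint, so pick a chat model. Needs OPENAI_API_KEY set to actually run.
if __name__ == "__main__":
    model = OpenAILanguageModel(api_key="", api_model="gpt-3.5-turbo")
    print(model.generate_text("Say hello in one short sentence."))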
| Meta-Tree-Of-Thoughts-main | meta_tree_of_thoughts/openaiModel.py |
import torch
from starlight_vision import Starlight
# Example of usage:
model = Starlight()
texts = [
'a whale breaching from afar',
'young girl blowing out candles on her birthday cake',
'fireworks with blue and green sparkles',
'dust motes swirling in the morning sunshine on the windowsill'
]
videos = torch.randn(4, 3, 10, 32, 32).cuda()
model.train(videos, texts=texts)
sampled_videos = model.sample(texts=texts, video_frames=20)
| StarlightVision-master | example.py |
from starlight_vision.model import Starlight | StarlightVision-master | starlight_vision/__init__.py |
# import from the concrete modules to avoid a circular import through the package __init__
from starlight_vision.core.gen2_video import Unet3D
from starlight_vision.core.elucidated import ElucidatedStarlight
from starlight_vision.trainer import StarlightTrainer
class Starlight:
def __init__(self,
dim=64,
dim_mults=(1, 2, 4, 8),
image_sizes=(16, 32),
random_crop_sizes=(None, 16),
temporal_downsample_factor=(2, 1),
num_sample_steps=10,
cond_drop_prob=0.1,
sigma_min=0.002,
sigma_max=(80, 160),
sigma_data=0.5,
rho=7,
P_mean=-1.2,
P_std=1.2,
S_churn=80,
S_tmin=0.05,
S_tmax=50,
S_noise=1.003):
# Initialize the Unet models
self.unet1 = Unet3D(dim=dim, dim_mults=dim_mults).cuda()
self.unet2 = Unet3D(dim=dim, dim_mults=dim_mults).cuda()
# Initialize the Starlight model
self.starlight = ElucidatedStarlight(
unets=(self.unet1, self.unet2),
image_sizes=image_sizes,
random_crop_sizes=random_crop_sizes,
temporal_downsample_factor=temporal_downsample_factor,
num_sample_steps=num_sample_steps,
cond_drop_prob=cond_drop_prob,
sigma_min=sigma_min,
sigma_max=sigma_max,
sigma_data=sigma_data,
rho=rho,
P_mean=P_mean,
P_std=P_std,
S_churn=S_churn,
S_tmin=S_tmin,
S_tmax=S_tmax,
S_noise=S_noise,
).cuda()
# Initialize the trainer
self.trainer = StarlightTrainer(self.starlight)
def train(self, videos, texts, unet_number=1, ignore_time=False):
self.trainer(videos, texts=texts, unet_number=unet_number, ignore_time=ignore_time)
self.trainer.update(unet_number=unet_number)
def sample(self, texts, video_frames=20):
return self.trainer.sample(texts=texts, video_frames=video_frames)
| StarlightVision-master | starlight_vision/model.py |
import os
from collections.abc import Iterable
from contextlib import contextmanager, nullcontext
from functools import partial, wraps
from math import ceil
import numpy as np
import pytorch_warmup as warmup
import torch
import torch.nn.functional as F
from accelerate import Accelerator, DistributedDataParallelKwargs, DistributedType
from ema_pytorch import EMA
from fsspec.core import url_to_fs
from fsspec.implementations.local import LocalFileSystem
from lion_pytorch import Lion
from packaging import version
from torch import nn
from torch.cuda.amp import GradScaler
from torch.optim import Adam
from torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR
from torch.utils.data import DataLoader, random_split
from starlight_vision.core.data import cycle
from starlight_vision.core.elucidated import ElucidatedStarlight
from starlight_vision.core.starlight.core import NullUnet
from starlight_vision.core.version import __version__
# helper functions
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cast_tuple(val, length = 1):
if isinstance(val, list):
val = tuple(val)
return val if isinstance(val, tuple) else ((val,) * length)
def find_first(fn, arr):
for ind, el in enumerate(arr):
if fn(el):
return ind
return -1
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
# url to fs, bucket, path - for checkpointing to cloud
def url_to_bucket(url):
    if '://' not in url:
        return url
    prefix, suffix = url.split('://')
if prefix in {'gs', 's3'}:
return suffix.split('/')[0]
else:
raise ValueError(f'storage type prefix "{prefix}" is not supported yet')
# decorators
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def cast_torch_tensor(fn, cast_fp16 = False):
@wraps(fn)
def inner(model, *args, **kwargs):
device = kwargs.pop('_device', model.device)
cast_device = kwargs.pop('_cast_device', True)
should_cast_fp16 = cast_fp16 and model.cast_half_at_training
kwargs_keys = kwargs.keys()
all_args = (*args, *kwargs.values())
split_kwargs_index = len(all_args) - len(kwargs_keys)
all_args = tuple(map(lambda t: torch.from_numpy(t) if exists(t) and isinstance(t, np.ndarray) else t, all_args))
if cast_device:
all_args = tuple(map(lambda t: t.to(device) if exists(t) and isinstance(t, torch.Tensor) else t, all_args))
if should_cast_fp16:
all_args = tuple(map(lambda t: t.half() if exists(t) and isinstance(t, torch.Tensor) and t.dtype != torch.bool else t, all_args))
args, kwargs_values = all_args[:split_kwargs_index], all_args[split_kwargs_index:]
kwargs = dict(tuple(zip(kwargs_keys, kwargs_values)))
out = fn(model, *args, **kwargs)
return out
return inner
# gradient accumulation functions
def split_iterable(it, split_size):
accum = []
for ind in range(ceil(len(it) / split_size)):
start_index = ind * split_size
accum.append(it[start_index: (start_index + split_size)])
return accum
def split(t, split_size = None):
if not exists(split_size):
return t
if isinstance(t, torch.Tensor):
return t.split(split_size, dim = 0)
if isinstance(t, Iterable):
return split_iterable(t, split_size)
return TypeError
def find_first(cond, arr):
for el in arr:
if cond(el):
return el
return None
def split_args_and_kwargs(*args, split_size = None, **kwargs):
all_args = (*args, *kwargs.values())
len_all_args = len(all_args)
first_tensor = find_first(lambda t: isinstance(t, torch.Tensor), all_args)
assert exists(first_tensor)
batch_size = len(first_tensor)
split_size = default(split_size, batch_size)
num_chunks = ceil(batch_size / split_size)
dict_len = len(kwargs)
dict_keys = kwargs.keys()
split_kwargs_index = len_all_args - dict_len
split_all_args = [split(arg, split_size = split_size) if exists(arg) and isinstance(arg, (torch.Tensor, Iterable)) else ((arg,) * num_chunks) for arg in all_args]
chunk_sizes = num_to_groups(batch_size, split_size)
for (chunk_size, *chunked_all_args) in tuple(zip(chunk_sizes, *split_all_args)):
chunked_args, chunked_kwargs_values = chunked_all_args[:split_kwargs_index], chunked_all_args[split_kwargs_index:]
chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))
chunk_size_frac = chunk_size / batch_size
yield chunk_size_frac, (chunked_args, chunked_kwargs)
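# Illustrative sketch (not part of the original file): with a batch of 10 and split_size = 4,
# split_args_and_kwargs yields chunk fractions 0.4, 0.4, 0.2 alongside the chunked tensors, e.g.
#
#   x = torch.randn(10, 3)
#   for frac, ((chunk,), _) in split_args_and_kwargs(x, split_size = 4):
#       print(frac, chunk.shape)   # 0.4 (4, 3), 0.4 (4, 3), 0.2 (2, 3)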
# starlight trainer
def starlight_sample_in_chunks(fn):
@wraps(fn)
def inner(self, *args, max_batch_size = None, **kwargs):
if not exists(max_batch_size):
return fn(self, *args, **kwargs)
if self.starlight.unconditional:
batch_size = kwargs.get('batch_size')
batch_sizes = num_to_groups(batch_size, max_batch_size)
outputs = [fn(self, *args, **{**kwargs, 'batch_size': sub_batch_size}) for sub_batch_size in batch_sizes]
else:
outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]
if isinstance(outputs[0], torch.Tensor):
return torch.cat(outputs, dim = 0)
return list(map(lambda t: torch.cat(t, dim = 0), list(zip(*outputs))))
return inner
def restore_parts(state_dict_target, state_dict_from):
for name, param in state_dict_from.items():
if name not in state_dict_target:
continue
if param.size() == state_dict_target[name].size():
state_dict_target[name].copy_(param)
else:
print(f"layer {name}({param.size()} different than target: {state_dict_target[name].size()}")
return state_dict_target
class StarlightTrainer(nn.Module):
locked = False
def __init__(
self,
starlight = None,
starlight_checkpoint_path = None,
use_ema = True,
lr = 1e-4,
eps = 1e-8,
beta1 = 0.9,
beta2 = 0.99,
max_grad_norm = None,
group_wd_params = True,
warmup_steps = None,
cosine_decay_max_steps = None,
only_train_unet_number = None,
fp16 = False,
precision = None,
split_batches = True,
dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),
verbose = True,
split_valid_fraction = 0.025,
split_valid_from_train = False,
split_random_seed = 42,
checkpoint_path = None,
checkpoint_every = None,
checkpoint_fs = None,
fs_kwargs: dict = None,
max_checkpoints_keep = 20,
use_lion = False,
**kwargs
):
super().__init__()
assert not StarlightTrainer.locked, 'StarlightTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'
assert exists(starlight) ^ exists(starlight_checkpoint_path), 'either starlight instance is passed into the trainer, or a checkpoint path that contains the starlight config'
# determine filesystem, using fsspec, for saving to local filesystem or cloud
self.fs = checkpoint_fs
if not exists(self.fs):
fs_kwargs = default(fs_kwargs, {})
self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)
        # only the elucidated variant is imported in this module, so that is what can be checked against
        assert isinstance(starlight, ElucidatedStarlight)
ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)
# elucidated or not
self.is_elucidated = isinstance(starlight, ElucidatedStarlight)
# create accelerator instance
accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)
assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision ("fp16", "bf16") to Accelerator'
accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')
self.accelerator = Accelerator(**{
'split_batches': split_batches,
'mixed_precision': accelerator_mixed_precision,
'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]
, **accelerate_kwargs})
StarlightTrainer.locked = self.is_distributed
# cast data to fp16 at training time if needed
self.cast_half_at_training = accelerator_mixed_precision == 'fp16'
# grad scaler must be managed outside of accelerator
grad_scaler_enabled = fp16
# starlight, unets and ema unets
self.starlight = starlight
self.num_unets = len(self.starlight.unets)
self.use_ema = use_ema and self.is_main
self.ema_unets = nn.ModuleList([])
# keep track of what unet is being trained on
# only going to allow 1 unet training at a time
self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on
# data related functions
self.train_dl_iter = None
self.train_dl = None
self.valid_dl_iter = None
self.valid_dl = None
self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names
# auto splitting validation from training, if dataset is passed in
self.split_valid_from_train = split_valid_from_train
assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'
self.split_valid_fraction = split_valid_fraction
self.split_random_seed = split_random_seed
# be able to finely customize learning rate, weight decay
# per unet
lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))
for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.starlight.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):
if use_lion:
optimizer = Lion(
unet.parameters(),
lr = unet_lr,
betas = (beta1, beta2)
)
else:
optimizer = Adam(
unet.parameters(),
lr = unet_lr,
eps = unet_eps,
betas = (beta1, beta2),
**kwargs
)
if self.use_ema:
self.ema_unets.append(EMA(unet, **ema_kwargs))
scaler = GradScaler(enabled = grad_scaler_enabled)
scheduler = warmup_scheduler = None
if exists(unet_cosine_decay_max_steps):
scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)
if exists(unet_warmup_steps):
warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)
if not exists(scheduler):
scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)
# set on object
setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers
setattr(self, f'scaler{ind}', scaler)
setattr(self, f'scheduler{ind}', scheduler)
setattr(self, f'warmup{ind}', warmup_scheduler)
# gradient clipping if needed
self.max_grad_norm = max_grad_norm
# step tracker and misc
self.register_buffer('steps', torch.tensor([0] * self.num_unets))
self.verbose = verbose
# automatic set devices based on what accelerator decided
self.starlight.to(self.device)
self.to(self.device)
# checkpointing
assert not (exists(checkpoint_path) ^ exists(checkpoint_every))
self.checkpoint_path = checkpoint_path
self.checkpoint_every = checkpoint_every
self.max_checkpoints_keep = max_checkpoints_keep
self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main
if exists(checkpoint_path) and self.can_checkpoint:
bucket = url_to_bucket(checkpoint_path)
if not self.fs.exists(bucket):
self.fs.mkdir(bucket)
self.load_from_checkpoint_folder()
# only allowing training for unet
self.only_train_unet_number = only_train_unet_number
self.prepared = False
def prepare(self):
        assert not self.prepared, 'The trainer is already prepared'
self.validate_and_set_unet_being_trained(self.only_train_unet_number)
self.prepared = True
# computed values
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
@property
def unwrapped_unet(self):
return self.accelerator.unwrap_model(self.unet_being_trained)
# optimizer helper functions
def get_lr(self, unet_number):
self.validate_unet_number(unet_number)
unet_index = unet_number - 1
optim = getattr(self, f'optim{unet_index}')
return optim.param_groups[0]['lr']
# function for allowing only one unet from being trained at a time
def validate_and_set_unet_being_trained(self, unet_number = None):
if exists(unet_number):
self.validate_unet_number(unet_number)
assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. you will need to save the trainer into a checkpoint, and resume training on a new unet'
self.only_train_unet_number = unet_number
self.starlight.only_train_unet_number = unet_number
if not exists(unet_number):
return
self.wrap_unet(unet_number)
def wrap_unet(self, unet_number):
if hasattr(self, 'one_unet_wrapped'):
return
unet = self.starlight.get_unet(unet_number)
unet_index = unet_number - 1
optimizer = getattr(self, f'optim{unet_index}')
scheduler = getattr(self, f'scheduler{unet_index}')
if self.train_dl:
self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)
else:
self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)
if exists(scheduler):
scheduler = self.accelerator.prepare(scheduler)
setattr(self, f'optim{unet_index}', optimizer)
setattr(self, f'scheduler{unet_index}', scheduler)
self.one_unet_wrapped = True
# hacking accelerator due to not having separate gradscaler per optimizer
def set_accelerator_scaler(self, unet_number):
unet_number = self.validate_unet_number(unet_number)
scaler = getattr(self, f'scaler{unet_number - 1}')
self.accelerator.scaler = scaler
for optimizer in self.accelerator._optimizers:
optimizer.scaler = scaler
# helper print
def print(self, msg):
if not self.is_main:
return
if not self.verbose:
return
return self.accelerator.print(msg)
# validating the unet number
def validate_unet_number(self, unet_number = None):
if self.num_unets == 1:
unet_number = default(unet_number, 1)
assert 0 < unet_number <= self.num_unets, f'unet number should be in between 1 and {self.num_unets}'
return unet_number
# number of training steps taken
def num_steps_taken(self, unet_number = None):
if self.num_unets == 1:
unet_number = default(unet_number, 1)
return self.steps[unet_number - 1].item()
def print_untrained_unets(self):
print_final_error = False
for ind, (steps, unet) in enumerate(zip(self.steps.tolist(), self.starlight.unets)):
if steps > 0 or isinstance(unet, NullUnet):
continue
self.print(f'unet {ind + 1} has not been trained')
print_final_error = True
if print_final_error:
self.print('when sampling, you can pass stop_at_unet_number to stop early in the cascade, so it does not try to generate with untrained unets')
# data related functions
def add_train_dataloader(self, dl = None):
if not exists(dl):
return
assert not exists(self.train_dl), 'training dataloader was already added'
        assert not self.prepared, 'You need to add the dataset before preparation'
self.train_dl = dl
def add_valid_dataloader(self, dl):
if not exists(dl):
return
assert not exists(self.valid_dl), 'validation dataloader was already added'
        assert not self.prepared, 'You need to add the dataset before preparation'
self.valid_dl = dl
def add_train_dataset(self, ds = None, *, batch_size, **dl_kwargs):
if not exists(ds):
return
assert not exists(self.train_dl), 'training dataloader was already added'
valid_ds = None
if self.split_valid_from_train:
train_size = int((1 - self.split_valid_fraction) * len(ds))
valid_size = len(ds) - train_size
ds, valid_ds = random_split(ds, [train_size, valid_size], generator = torch.Generator().manual_seed(self.split_random_seed))
            self.print(f'training with dataset of {len(ds)} samples and validating with {len(valid_ds)} randomly split samples')
dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)
self.add_train_dataloader(dl)
if not self.split_valid_from_train:
return
self.add_valid_dataset(valid_ds, batch_size = batch_size, **dl_kwargs)
def add_valid_dataset(self, ds, *, batch_size, **dl_kwargs):
if not exists(ds):
return
assert not exists(self.valid_dl), 'validation dataloader was already added'
dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)
self.add_valid_dataloader(dl)
def create_train_iter(self):
assert exists(self.train_dl), 'training dataloader has not been registered with the trainer yet'
if exists(self.train_dl_iter):
return
self.train_dl_iter = cycle(self.train_dl)
def create_valid_iter(self):
assert exists(self.valid_dl), 'validation dataloader has not been registered with the trainer yet'
if exists(self.valid_dl_iter):
return
self.valid_dl_iter = cycle(self.valid_dl)
def train_step(self, *, unet_number = None, **kwargs):
if not self.prepared:
self.prepare()
self.create_train_iter()
kwargs = {'unet_number': unet_number, **kwargs}
loss = self.step_with_dl_iter(self.train_dl_iter, **kwargs)
self.update(unet_number = unet_number)
return loss
@torch.no_grad()
@eval_decorator
def valid_step(self, **kwargs):
if not self.prepared:
self.prepare()
self.create_valid_iter()
context = self.use_ema_unets if kwargs.pop('use_ema_unets', False) else nullcontext
with context():
loss = self.step_with_dl_iter(self.valid_dl_iter, **kwargs)
return loss
def step_with_dl_iter(self, dl_iter, **kwargs):
dl_tuple_output = cast_tuple(next(dl_iter))
model_input = dict(list(zip(self.dl_tuple_output_keywords_names, dl_tuple_output)))
loss = self.forward(**{**kwargs, **model_input})
return loss
# checkpointing functions
@property
def all_checkpoints_sorted(self):
glob_pattern = os.path.join(self.checkpoint_path, '*.pt')
checkpoints = self.fs.glob(glob_pattern)
sorted_checkpoints = sorted(checkpoints, key = lambda x: int(str(x).split('.')[-2]), reverse = True)
return sorted_checkpoints
def load_from_checkpoint_folder(self, last_total_steps = -1):
if last_total_steps != -1:
filepath = os.path.join(self.checkpoint_path, f'checkpoint.{last_total_steps}.pt')
self.load(filepath)
return
sorted_checkpoints = self.all_checkpoints_sorted
if len(sorted_checkpoints) == 0:
self.print(f'no checkpoints found to load from at {self.checkpoint_path}')
return
last_checkpoint = sorted_checkpoints[0]
self.load(last_checkpoint)
def save_to_checkpoint_folder(self):
self.accelerator.wait_for_everyone()
if not self.can_checkpoint:
return
total_steps = int(self.steps.sum().item())
filepath = os.path.join(self.checkpoint_path, f'checkpoint.{total_steps}.pt')
self.save(filepath)
if self.max_checkpoints_keep <= 0:
return
sorted_checkpoints = self.all_checkpoints_sorted
checkpoints_to_discard = sorted_checkpoints[self.max_checkpoints_keep:]
for checkpoint in checkpoints_to_discard:
self.fs.rm(checkpoint)
# saving and loading functions
def save(
self,
path,
overwrite = True,
without_optim_and_sched = False,
**kwargs
):
self.accelerator.wait_for_everyone()
if not self.can_checkpoint:
return
fs = self.fs
assert not (fs.exists(path) and not overwrite)
self.reset_ema_unets_all_one_device()
save_obj = dict(
model = self.starlight.state_dict(),
version = __version__,
steps = self.steps.cpu(),
**kwargs
)
save_optim_and_sched_iter = range(0, self.num_unets) if not without_optim_and_sched else tuple()
for ind in save_optim_and_sched_iter:
scaler_key = f'scaler{ind}'
optimizer_key = f'optim{ind}'
scheduler_key = f'scheduler{ind}'
warmup_scheduler_key = f'warmup{ind}'
scaler = getattr(self, scaler_key)
optimizer = getattr(self, optimizer_key)
scheduler = getattr(self, scheduler_key)
warmup_scheduler = getattr(self, warmup_scheduler_key)
if exists(scheduler):
save_obj = {**save_obj, scheduler_key: scheduler.state_dict()}
if exists(warmup_scheduler):
save_obj = {**save_obj, warmup_scheduler_key: warmup_scheduler.state_dict()}
save_obj = {**save_obj, scaler_key: scaler.state_dict(), optimizer_key: optimizer.state_dict()}
if self.use_ema:
save_obj = {**save_obj, 'ema': self.ema_unets.state_dict()}
# determine if starlight config is available
if hasattr(self.starlight, '_config'):
self.print(f'this checkpoint is commandable from the CLI - "starlight --model {str(path)} \"<prompt>\""')
save_obj = {
**save_obj,
'starlight_type': 'elucidated' if self.is_elucidated else 'original',
'starlight_params': self.starlight._config
}
#save to path
with fs.open(path, 'wb') as f:
torch.save(save_obj, f)
self.print(f'checkpoint saved to {path}')
def load(self, path, only_model = False, strict = True, noop_if_not_exist = False):
fs = self.fs
if noop_if_not_exist and not fs.exists(path):
self.print(f'trainer checkpoint not found at {str(path)}')
return
assert fs.exists(path), f'{path} does not exist'
self.reset_ema_unets_all_one_device()
# to avoid extra GPU memory usage in main process when using Accelerate
with fs.open(path) as f:
loaded_obj = torch.load(f, map_location='cpu')
if version.parse(__version__) != version.parse(loaded_obj['version']):
self.print(f'loading saved starlight at version {loaded_obj["version"]}, but current package version is {__version__}')
try:
self.starlight.load_state_dict(loaded_obj['model'], strict = strict)
except RuntimeError:
print("Failed loading state dict. Trying partial load")
self.starlight.load_state_dict(restore_parts(self.starlight.state_dict(),
loaded_obj['model']))
if only_model:
return loaded_obj
self.steps.copy_(loaded_obj['steps'])
for ind in range(0, self.num_unets):
scaler_key = f'scaler{ind}'
optimizer_key = f'optim{ind}'
scheduler_key = f'scheduler{ind}'
warmup_scheduler_key = f'warmup{ind}'
scaler = getattr(self, scaler_key)
optimizer = getattr(self, optimizer_key)
scheduler = getattr(self, scheduler_key)
warmup_scheduler = getattr(self, warmup_scheduler_key)
if exists(scheduler) and scheduler_key in loaded_obj:
scheduler.load_state_dict(loaded_obj[scheduler_key])
if exists(warmup_scheduler) and warmup_scheduler_key in loaded_obj:
warmup_scheduler.load_state_dict(loaded_obj[warmup_scheduler_key])
if exists(optimizer):
try:
optimizer.load_state_dict(loaded_obj[optimizer_key])
scaler.load_state_dict(loaded_obj[scaler_key])
                except Exception:
                    self.print('could not load optimizer and scaler, possibly because you have turned on mixed precision training since the last run. resuming with new optimizer and scalers')
if self.use_ema:
assert 'ema' in loaded_obj
try:
self.ema_unets.load_state_dict(loaded_obj['ema'], strict = strict)
except RuntimeError:
print("Failed loading state dict. Trying partial load")
self.ema_unets.load_state_dict(restore_parts(self.ema_unets.state_dict(),
loaded_obj['ema']))
self.print(f'checkpoint loaded from {path}')
return loaded_obj
# managing ema unets and their devices
@property
def unets(self):
return nn.ModuleList([ema.ema_model for ema in self.ema_unets])
def get_ema_unet(self, unet_number = None):
if not self.use_ema:
return
unet_number = self.validate_unet_number(unet_number)
index = unet_number - 1
if isinstance(self.unets, nn.ModuleList):
unets_list = [unet for unet in self.ema_unets]
delattr(self, 'ema_unets')
self.ema_unets = unets_list
if index != self.ema_unet_being_trained_index:
for unet_index, unet in enumerate(self.ema_unets):
unet.to(self.device if unet_index == index else 'cpu')
self.ema_unet_being_trained_index = index
return self.ema_unets[index]
def reset_ema_unets_all_one_device(self, device = None):
if not self.use_ema:
return
device = default(device, self.device)
self.ema_unets = nn.ModuleList([*self.ema_unets])
self.ema_unets.to(device)
self.ema_unet_being_trained_index = -1
@torch.no_grad()
@contextmanager
def use_ema_unets(self):
if not self.use_ema:
output = yield
return output
self.reset_ema_unets_all_one_device()
self.starlight.reset_unets_all_one_device()
self.unets.eval()
trainable_unets = self.starlight.unets
self.starlight.unets = self.unets # swap in exponential moving averaged unets for sampling
output = yield
self.starlight.unets = trainable_unets # restore original training unets
# cast the ema_model unets back to original device
for ema in self.ema_unets:
ema.restore_ema_model_device()
return output
def print_unet_devices(self):
self.print('unet devices:')
for i, unet in enumerate(self.starlight.unets):
device = next(unet.parameters()).device
self.print(f'\tunet {i}: {device}')
if not self.use_ema:
return
self.print('\nema unet devices:')
for i, ema_unet in enumerate(self.ema_unets):
device = next(ema_unet.parameters()).device
self.print(f'\tema unet {i}: {device}')
# overriding state dict functions
def state_dict(self, *args, **kwargs):
self.reset_ema_unets_all_one_device()
return super().state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
self.reset_ema_unets_all_one_device()
return super().load_state_dict(*args, **kwargs)
# encoding text functions
def encode_text(self, text, **kwargs):
return self.starlight.encode_text(text, **kwargs)
# forwarding functions and gradient step updates
def update(self, unet_number = None):
unet_number = self.validate_unet_number(unet_number)
self.validate_and_set_unet_being_trained(unet_number)
self.set_accelerator_scaler(unet_number)
index = unet_number - 1
unet = self.unet_being_trained
optimizer = getattr(self, f'optim{index}')
getattr(self, f'scaler{index}')
scheduler = getattr(self, f'scheduler{index}')
warmup_scheduler = getattr(self, f'warmup{index}')
# set the grad scaler on the accelerator, since we are managing one per u-net
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(unet.parameters(), self.max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if self.use_ema:
ema_unet = self.get_ema_unet(unet_number)
ema_unet.update()
# scheduler, if needed
maybe_warmup_context = nullcontext() if not exists(warmup_scheduler) else warmup_scheduler.dampening()
with maybe_warmup_context:
if exists(scheduler) and not self.accelerator.optimizer_step_was_skipped: # recommended in the docs
scheduler.step()
self.steps += F.one_hot(torch.tensor(unet_number - 1, device = self.steps.device), num_classes = len(self.steps))
if not exists(self.checkpoint_path):
return
total_steps = int(self.steps.sum().item())
if total_steps % self.checkpoint_every:
return
self.save_to_checkpoint_folder()
@torch.no_grad()
@cast_torch_tensor
@starlight_sample_in_chunks
def sample(self, *args, **kwargs):
context = nullcontext if kwargs.pop('use_non_ema', False) else self.use_ema_unets
self.print_untrained_unets()
if not self.is_main:
kwargs['use_tqdm'] = False
with context():
output = self.starlight.sample(*args, device = self.device, **kwargs)
return output
@partial(cast_torch_tensor, cast_fp16 = True)
def forward(
self,
*args,
unet_number = None,
max_batch_size = None,
**kwargs
):
unet_number = self.validate_unet_number(unet_number)
self.validate_and_set_unet_being_trained(unet_number)
self.set_accelerator_scaler(unet_number)
assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, f'you can only train unet #{self.only_train_unet_number}'
total_loss = 0.
for chunk_size_frac, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs):
with self.accelerator.autocast():
loss = self.starlight(*chunked_args, unet = self.unet_being_trained, unet_number = unet_number, **chunked_kwargs)
loss = loss * chunk_size_frac
total_loss += loss.item()
if self.training:
self.accelerator.backward(loss)
return total_loss | StarlightVision-master | starlight_vision/trainer.py |
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
AttentionConfig = namedtuple('AttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = AttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = AttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = AttentionConfig(False, True, True)
def flash_attn(self, q, k, v):
_, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
q, k, v = map(lambda t: t.contiguous(), (q, k, v))
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
dropout_p = self.dropout if self.training else 0.
)
return out
def forward(self, q, k, v):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
q_len, k_len, device = q.shape[-2], k.shape[-2], q.device
if self.flash:
return self.flash_attn(q, k, v)
scale = q.shape[-1] ** -0.5
# similarity
sim = einsum(f"b h i d, b h j d -> b h i j", q, k) * scale
# attention
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, b h j d -> b h i d", attn, v)
return out | StarlightVision-master | starlight_vision/core/attention.py |
from typing import List
import torch
import transformers
from einops import rearrange
from transformers import T5Config, T5EncoderModel, T5Tokenizer
transformers.logging.set_verbosity_error()
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
# config
MAX_LENGTH = 256
DEFAULT_T5_NAME = 'google/t5-v1_1-base'
T5_CONFIGS = {}
# singleton globals
def get_tokenizer(name):
tokenizer = T5Tokenizer.from_pretrained(name, model_max_length=MAX_LENGTH)
return tokenizer
def get_model(name):
model = T5EncoderModel.from_pretrained(name)
return model
def get_model_and_tokenizer(name):
global T5_CONFIGS
if name not in T5_CONFIGS:
T5_CONFIGS[name] = dict()
if "model" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["model"] = get_model(name)
if "tokenizer" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["tokenizer"] = get_tokenizer(name)
return T5_CONFIGS[name]['model'], T5_CONFIGS[name]['tokenizer']
def get_encoded_dim(name):
if name not in T5_CONFIGS:
# avoids loading the model if we only want to get the dim
config = T5Config.from_pretrained(name)
T5_CONFIGS[name] = dict(config=config)
elif "config" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["config"]
elif "model" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["model"].config
else:
assert False
return config.d_model
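# Illustrative sketch (not part of the original file): get_encoded_dim only loads the T5 config, so it
# is cheap to call up front, e.g. get_encoded_dim('google/t5-v1_1-base') -> 768 (the model's d_model).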
# encoding text
def t5_tokenize(
texts: List[str],
name = DEFAULT_T5_NAME
):
t5, tokenizer = get_model_and_tokenizer(name)
if torch.cuda.is_available():
t5 = t5.cuda()
device = next(t5.parameters()).device
encoded = tokenizer.batch_encode_plus(
texts,
return_tensors = "pt",
padding = 'longest',
max_length = MAX_LENGTH,
truncation = True
)
input_ids = encoded.input_ids.to(device)
attn_mask = encoded.attention_mask.to(device)
return input_ids, attn_mask
def t5_encode_tokenized_text(
token_ids,
attn_mask = None,
pad_id = None,
name = DEFAULT_T5_NAME
):
assert exists(attn_mask) or exists(pad_id)
t5, _ = get_model_and_tokenizer(name)
attn_mask = default(attn_mask, lambda: (token_ids != pad_id).long())
t5.eval()
with torch.no_grad():
output = t5(input_ids = token_ids, attention_mask = attn_mask)
encoded_text = output.last_hidden_state.detach()
attn_mask = attn_mask.bool()
encoded_text = encoded_text.masked_fill(~rearrange(attn_mask, '... -> ... 1'), 0.) # just force all embeddings that is padding to be equal to 0.
return encoded_text
def t5_encode_text(
texts: List[str],
name = DEFAULT_T5_NAME,
return_attn_mask = False
):
token_ids, attn_mask = t5_tokenize(texts, name = name)
encoded_text = t5_encode_tokenized_text(token_ids, attn_mask = attn_mask, name = name)
if return_attn_mask:
attn_mask = attn_mask.bool()
return encoded_text, attn_mask
return encoded_text | StarlightVision-master | starlight_vision/core/t5.py |
from math import sqrt
from random import random
from functools import partial
from contextlib import contextmanager, nullcontext
from typing import List, Union
from collections import namedtuple
from tqdm.auto import tqdm
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.cuda.amp import autocast
from torch.nn.parallel import DistributedDataParallel
import torchvision.transforms as T
import kornia.augmentation as K
from einops import rearrange, repeat, reduce
from starlight_vision.core.gen2 import (
GaussianDiffusionContinuousTimes,
Unet,
NullUnet,
first,
exists,
identity,
maybe,
default,
cast_tuple,
cast_uint8_images_to_float,
eval_decorator,
pad_tuple_to_length,
resize_image_to,
calc_all_frame_dims,
safe_get_tuple_index,
right_pad_dims_to,
module_device,
normalize_neg_one_to_one,
unnormalize_zero_to_one,
compact,
maybe_transform_dict_key
)
from starlight_vision.core.gen2_video import (
Unet3D,
resize_video_to,
scale_video_time
)
from starlight_vision.core.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME
# constants
Hparams_fields = [
'num_sample_steps',
'sigma_min',
'sigma_max',
'sigma_data',
'rho',
'P_mean',
'P_std',
'S_churn',
'S_tmin',
'S_tmax',
'S_noise'
]
Hparams = namedtuple('Hparams', Hparams_fields)
# helper functions
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
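# Quick sanity check (not part of the original file): the eps clamp keeps the log finite at zero,
# e.g. log(torch.tensor([0., 1.])) -> tensor([-46.0517, 0.0000]) since log(1e-20) ≈ -46.05.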
# main class
class ElucidatedStarlight(nn.Module):
def __init__(
self,
unets,
*,
image_sizes, # for cascading ddpm, image size at each stage
text_encoder_name = DEFAULT_T5_NAME,
text_embed_dim = None,
channels = 3,
cond_drop_prob = 0.1,
random_crop_sizes = None,
resize_mode = 'nearest',
temporal_downsample_factor = 1,
resize_cond_video_frames = True,
lowres_sample_noise_level = 0.2, # in the paper, they present a new trick where they noise the lowres conditioning image, and at sample time, fix it to a certain level (0.1 or 0.3) - the unets are also made to be conditioned on this noise level
per_sample_random_aug_noise_level = False, # unclear when conditioning on augmentation noise level, whether each batch element receives a random aug noise value - turning off due to @marunine's find
condition_on_text = True,
auto_normalize_img = True, # whether to take care of normalizing the image from [0, 1] to [-1, 1] and back automatically - you can turn this off if you want to pass in the [-1, 1] ranged image yourself from the dataloader
dynamic_thresholding = True,
dynamic_thresholding_percentile = 0.95, # unsure what this was based on perusal of paper
only_train_unet_number = None,
lowres_noise_schedule = 'linear',
num_sample_steps = 32, # number of sampling steps
sigma_min = 0.002, # min noise level
sigma_max = 80, # max noise level
sigma_data = 0.5, # standard deviation of data distribution
rho = 7, # controls the sampling schedule
P_mean = -1.2, # mean of log-normal distribution from which noise is drawn for training
P_std = 1.2, # standard deviation of log-normal distribution from which noise is drawn for training
        S_churn = 80,                 # parameters for stochastic sampling - depends on dataset, Table 5 in the paper
S_tmin = 0.05,
S_tmax = 50,
S_noise = 1.003,
):
super().__init__()
self.only_train_unet_number = only_train_unet_number
# conditioning hparams
self.condition_on_text = condition_on_text
self.unconditional = not condition_on_text
# channels
self.channels = channels
# automatically take care of ensuring that first unet is unconditional
# while the rest of the unets are conditioned on the low resolution image produced by previous unet
unets = cast_tuple(unets)
num_unets = len(unets)
# randomly cropping for upsampler training
self.random_crop_sizes = cast_tuple(random_crop_sizes, num_unets)
assert not exists(first(self.random_crop_sizes)), 'you should not need to randomly crop image during training for base unet, only for upsamplers - so pass in `random_crop_sizes = (None, 128, 256)` as example'
# lowres augmentation noise schedule
self.lowres_noise_schedule = GaussianDiffusionContinuousTimes(noise_schedule = lowres_noise_schedule)
# get text encoder
self.text_encoder_name = text_encoder_name
self.text_embed_dim = default(text_embed_dim, lambda: get_encoded_dim(text_encoder_name))
self.encode_text = partial(t5_encode_text, name = text_encoder_name)
# construct unets
self.unets = nn.ModuleList([])
self.unet_being_trained_index = -1 # keeps track of which unet is being trained at the moment
for ind, one_unet in enumerate(unets):
assert isinstance(one_unet, (Unet, Unet3D, NullUnet))
is_first = ind == 0
one_unet = one_unet.cast_model_parameters(
lowres_cond = not is_first,
cond_on_text = self.condition_on_text,
text_embed_dim = self.text_embed_dim if self.condition_on_text else None,
channels = self.channels,
channels_out = self.channels
)
self.unets.append(one_unet)
# determine whether we are training on images or video
is_video = any([isinstance(unet, Unet3D) for unet in self.unets])
self.is_video = is_video
self.right_pad_dims_to_datatype = partial(rearrange, pattern = ('b -> b 1 1 1' if not is_video else 'b -> b 1 1 1 1'))
self.resize_to = resize_video_to if is_video else resize_image_to
self.resize_to = partial(self.resize_to, mode = resize_mode)
# unet image sizes
self.image_sizes = cast_tuple(image_sizes)
assert num_unets == len(self.image_sizes), f'you did not supply the correct number of u-nets ({len(self.unets)}) for resolutions {self.image_sizes}'
self.sample_channels = cast_tuple(self.channels, num_unets)
# cascading ddpm related stuff
lowres_conditions = tuple(map(lambda t: t.lowres_cond, self.unets))
assert lowres_conditions == (False, *((True,) * (num_unets - 1))), 'the first unet must be unconditioned (by low resolution image), and the rest of the unets must have `lowres_cond` set to True'
self.lowres_sample_noise_level = lowres_sample_noise_level
self.per_sample_random_aug_noise_level = per_sample_random_aug_noise_level
# classifier free guidance
self.cond_drop_prob = cond_drop_prob
self.can_classifier_guidance = cond_drop_prob > 0.
# normalize and unnormalize image functions
self.normalize_img = normalize_neg_one_to_one if auto_normalize_img else identity
self.unnormalize_img = unnormalize_zero_to_one if auto_normalize_img else identity
self.input_image_range = (0. if auto_normalize_img else -1., 1.)
# dynamic thresholding
self.dynamic_thresholding = cast_tuple(dynamic_thresholding, num_unets)
self.dynamic_thresholding_percentile = dynamic_thresholding_percentile
# temporal interpolations
temporal_downsample_factor = cast_tuple(temporal_downsample_factor, num_unets)
self.temporal_downsample_factor = temporal_downsample_factor
self.resize_cond_video_frames = resize_cond_video_frames
self.temporal_downsample_divisor = temporal_downsample_factor[0]
assert temporal_downsample_factor[-1] == 1, 'downsample factor of last stage must be 1'
        assert tuple(sorted(temporal_downsample_factor, reverse = True)) == temporal_downsample_factor, 'temporal downsample factors must be given in descending order'
# elucidating parameters
hparams = [
num_sample_steps,
sigma_min,
sigma_max,
sigma_data,
rho,
P_mean,
P_std,
S_churn,
S_tmin,
S_tmax,
S_noise,
]
hparams = [cast_tuple(hp, num_unets) for hp in hparams]
self.hparams = [Hparams(*unet_hp) for unet_hp in zip(*hparams)]
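        # note: every elucidating hyperparameter above is broadcast to one value per unet,
        # then regrouped so that self.hparams[i] holds the complete Hparams namedtuple for unet i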
# one temp parameter for keeping track of device
self.register_buffer('_temp', torch.tensor([0.]), persistent = False)
# default to device of unets passed in
self.to(next(self.unets.parameters()).device)
def force_unconditional_(self):
self.condition_on_text = False
self.unconditional = True
for unet in self.unets:
unet.cond_on_text = False
@property
def device(self):
return self._temp.device
def get_unet(self, unet_number):
assert 0 < unet_number <= len(self.unets)
index = unet_number - 1
if isinstance(self.unets, nn.ModuleList):
unets_list = [unet for unet in self.unets]
delattr(self, 'unets')
self.unets = unets_list
if index != self.unet_being_trained_index:
for unet_index, unet in enumerate(self.unets):
unet.to(self.device if unet_index == index else 'cpu')
self.unet_being_trained_index = index
return self.unets[index]
def reset_unets_all_one_device(self, device = None):
device = default(device, self.device)
self.unets = nn.ModuleList([*self.unets])
self.unets.to(device)
self.unet_being_trained_index = -1
@contextmanager
def one_unet_in_gpu(self, unet_number = None, unet = None):
assert exists(unet_number) ^ exists(unet)
if exists(unet_number):
unet = self.unets[unet_number - 1]
cpu = torch.device('cpu')
devices = [module_device(unet) for unet in self.unets]
self.unets.to(cpu)
unet.to(self.device)
yield
for unet, device in zip(self.unets, devices):
unet.to(device)
# overriding state dict functions
def state_dict(self, *args, **kwargs):
self.reset_unets_all_one_device()
return super().state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
self.reset_unets_all_one_device()
return super().load_state_dict(*args, **kwargs)
# dynamic thresholding
def threshold_x_start(self, x_start, dynamic_threshold = True):
if not dynamic_threshold:
return x_start.clamp(-1., 1.)
s = torch.quantile(
rearrange(x_start, 'b ... -> b (...)').abs(),
self.dynamic_thresholding_percentile,
dim = -1
)
s.clamp_(min = 1.)
s = right_pad_dims_to(x_start, s)
return x_start.clamp(-s, s) / s
# derived preconditioning params - Table 1
def c_skip(self, sigma_data, sigma):
return (sigma_data ** 2) / (sigma ** 2 + sigma_data ** 2)
def c_out(self, sigma_data, sigma):
return sigma * sigma_data * (sigma_data ** 2 + sigma ** 2) ** -0.5
def c_in(self, sigma_data, sigma):
return 1 * (sigma ** 2 + sigma_data ** 2) ** -0.5
def c_noise(self, sigma):
return log(sigma) * 0.25
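    # note: the four terms above are combined in `preconditioned_network_forward` below as
    #     D(x; sigma) = c_skip(sigma) * x + c_out(sigma) * F(c_in(sigma) * x, c_noise(sigma))
    # so the raw network F only ever sees inputs and targets scaled to roughly unit variance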
# preconditioned network output
# equation (7) in the paper
def preconditioned_network_forward(
self,
unet_forward,
noised_images,
sigma,
*,
sigma_data,
clamp = False,
dynamic_threshold = True,
**kwargs
):
batch, device = noised_images.shape[0], noised_images.device
if isinstance(sigma, float):
sigma = torch.full((batch,), sigma, device = device)
padded_sigma = self.right_pad_dims_to_datatype(sigma)
net_out = unet_forward(
self.c_in(sigma_data, padded_sigma) * noised_images,
self.c_noise(sigma),
**kwargs
)
out = self.c_skip(sigma_data, padded_sigma) * noised_images + self.c_out(sigma_data, padded_sigma) * net_out
if not clamp:
return out
return self.threshold_x_start(out, dynamic_threshold)
# sampling
# sample schedule
# equation (5) in the paper
def sample_schedule(
self,
num_sample_steps,
rho,
sigma_min,
sigma_max
):
N = num_sample_steps
inv_rho = 1 / rho
steps = torch.arange(num_sample_steps, device = self.device, dtype = torch.float32)
sigmas = (sigma_max ** inv_rho + steps / (N - 1) * (sigma_min ** inv_rho - sigma_max ** inv_rho)) ** rho
sigmas = F.pad(sigmas, (0, 1), value = 0.) # last step is sigma value of 0.
return sigmas
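    # note: with the default rho = 7 this interpolates from sigma_max to sigma_min in "rho-space",
    #     sigma_i = (sigma_max ** (1 / rho) + i / (N - 1) * (sigma_min ** (1 / rho) - sigma_max ** (1 / rho))) ** rho
    # and a final sigma of 0 is appended so the last step lands exactly on the data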
@torch.no_grad()
def one_unet_sample(
self,
unet,
shape,
*,
unet_number,
clamp = True,
dynamic_threshold = True,
cond_scale = 1.,
use_tqdm = True,
inpaint_videos = None,
inpaint_images = None,
inpaint_masks = None,
inpaint_resample_times = 5,
init_images = None,
skip_steps = None,
sigma_min = None,
sigma_max = None,
**kwargs
):
# video
is_video = len(shape) == 5
frames = shape[-3] if is_video else None
resize_kwargs = dict(target_frames = frames) if exists(frames) else dict()
# get specific sampling hyperparameters for unet
hp = self.hparams[unet_number - 1]
sigma_min = default(sigma_min, hp.sigma_min)
sigma_max = default(sigma_max, hp.sigma_max)
# get the schedule, which is returned as (sigma, gamma) tuple, and pair up with the next sigma and gamma
sigmas = self.sample_schedule(hp.num_sample_steps, hp.rho, sigma_min, sigma_max)
gammas = torch.where(
(sigmas >= hp.S_tmin) & (sigmas <= hp.S_tmax),
min(hp.S_churn / hp.num_sample_steps, sqrt(2) - 1),
0.
)
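        # gamma temporarily raises ("churns") the noise level before each step; it is nonzero only
        # for sigmas inside [S_tmin, S_tmax] and is capped at sqrt(2) - 1, following the paper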
sigmas_and_gammas = list(zip(sigmas[:-1], sigmas[1:], gammas[:-1]))
# images is noise at the beginning
init_sigma = sigmas[0]
images = init_sigma * torch.randn(shape, device = self.device)
# initializing with an image
if exists(init_images):
images += init_images
# keeping track of x0, for self conditioning if needed
x_start = None
# prepare inpainting images and mask
inpaint_images = default(inpaint_videos, inpaint_images)
has_inpainting = exists(inpaint_images) and exists(inpaint_masks)
resample_times = inpaint_resample_times if has_inpainting else 1
if has_inpainting:
inpaint_images = self.normalize_img(inpaint_images)
inpaint_images = self.resize_to(inpaint_images, shape[-1], **resize_kwargs)
inpaint_masks = self.resize_to(rearrange(inpaint_masks, 'b ... -> b 1 ...').float(), shape[-1], **resize_kwargs).bool()
# unet kwargs
unet_kwargs = dict(
sigma_data = hp.sigma_data,
clamp = clamp,
dynamic_threshold = dynamic_threshold,
cond_scale = cond_scale,
**kwargs
)
# gradually denoise
initial_step = default(skip_steps, 0)
sigmas_and_gammas = sigmas_and_gammas[initial_step:]
total_steps = len(sigmas_and_gammas)
for ind, (sigma, sigma_next, gamma) in tqdm(enumerate(sigmas_and_gammas), total = total_steps, desc = 'sampling time step', disable = not use_tqdm):
is_last_timestep = ind == (total_steps - 1)
sigma, sigma_next, gamma = map(lambda t: t.item(), (sigma, sigma_next, gamma))
for r in reversed(range(resample_times)):
is_last_resample_step = r == 0
eps = hp.S_noise * torch.randn(shape, device = self.device) # stochastic sampling
sigma_hat = sigma + gamma * sigma
added_noise = sqrt(sigma_hat ** 2 - sigma ** 2) * eps
images_hat = images + added_noise
self_cond = x_start if unet.self_cond else None
if has_inpainting:
images_hat = images_hat * ~inpaint_masks + (inpaint_images + added_noise) * inpaint_masks
model_output = self.preconditioned_network_forward(
unet.forward_with_cond_scale,
images_hat,
sigma_hat,
self_cond = self_cond,
**unet_kwargs
)
denoised_over_sigma = (images_hat - model_output) / sigma_hat
images_next = images_hat + (sigma_next - sigma_hat) * denoised_over_sigma
# second order correction, if not the last timestep
has_second_order_correction = sigma_next != 0
if has_second_order_correction:
self_cond = model_output if unet.self_cond else None
model_output_next = self.preconditioned_network_forward(
unet.forward_with_cond_scale,
images_next,
sigma_next,
self_cond = self_cond,
**unet_kwargs
)
denoised_prime_over_sigma = (images_next - model_output_next) / sigma_next
images_next = images_hat + 0.5 * (sigma_next - sigma_hat) * (denoised_over_sigma + denoised_prime_over_sigma)
images = images_next
if has_inpainting and not (is_last_resample_step or is_last_timestep):
# renoise in repaint and then resample
repaint_noise = torch.randn(shape, device = self.device)
images = images + (sigma - sigma_next) * repaint_noise
x_start = model_output if not has_second_order_correction else model_output_next # save model output for self conditioning
images = images.clamp(-1., 1.)
if has_inpainting:
images = images * ~inpaint_masks + inpaint_images * inpaint_masks
return self.unnormalize_img(images)
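    # note: the loop above is the stochastic sampler from the paper - churn the noise up to
    # sigma_hat, take an Euler step toward sigma_next, apply a second order (Heun) correction
    # whenever sigma_next is nonzero, and use repaint-style resampling when inpainting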
@torch.no_grad()
@eval_decorator
def sample(
self,
texts: List[str] = None,
text_masks = None,
text_embeds = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
inpaint_videos = None,
inpaint_images = None,
inpaint_masks = None,
inpaint_resample_times = 5,
init_images = None,
skip_steps = None,
sigma_min = None,
sigma_max = None,
video_frames = None,
batch_size = 1,
cond_scale = 1.,
lowres_sample_noise_level = None,
start_at_unet_number = 1,
start_image_or_video = None,
stop_at_unet_number = None,
return_all_unet_outputs = False,
return_pil_images = False,
use_tqdm = True,
use_one_unet_in_gpu = True,
device = None,
):
device = default(device, self.device)
self.reset_unets_all_one_device(device = device)
cond_images = maybe(cast_uint8_images_to_float)(cond_images)
if exists(texts) and not exists(text_embeds) and not self.unconditional:
assert all([*map(len, texts)]), 'text cannot be empty'
with autocast(enabled = False):
text_embeds, text_masks = self.encode_text(texts, return_attn_mask = True)
text_embeds, text_masks = map(lambda t: t.to(device), (text_embeds, text_masks))
if not self.unconditional:
            assert exists(text_embeds), 'text must be passed in if the network was trained with text conditioning; otherwise `condition_on_text` must be set to `False` at initialization'
text_masks = default(text_masks, lambda: torch.any(text_embeds != 0., dim = -1))
batch_size = text_embeds.shape[0]
# inpainting
inpaint_images = default(inpaint_videos, inpaint_images)
if exists(inpaint_images):
if self.unconditional:
if batch_size == 1: # assume researcher wants to broadcast along inpainted images
batch_size = inpaint_images.shape[0]
            assert inpaint_images.shape[0] == batch_size, 'number of inpainting images must be equal to the batch size specified on sampling, `sample(batch_size=<int>)`'
assert not (self.condition_on_text and inpaint_images.shape[0] != text_embeds.shape[0]), 'number of inpainting images must be equal to the number of text to be conditioned on'
assert not (self.condition_on_text and not exists(text_embeds)), 'text or text encodings must be passed into starlight if specified'
assert not (not self.condition_on_text and exists(text_embeds)), 'starlight specified not to be conditioned on text, yet it is presented'
assert not (exists(text_embeds) and text_embeds.shape[-1] != self.text_embed_dim), f'invalid text embedding dimension being passed in (should be {self.text_embed_dim})'
assert not (exists(inpaint_images) ^ exists(inpaint_masks)), 'inpaint images and masks must be both passed in to do inpainting'
outputs = []
is_cuda = next(self.parameters()).is_cuda
device = next(self.parameters()).device
lowres_sample_noise_level = default(lowres_sample_noise_level, self.lowres_sample_noise_level)
num_unets = len(self.unets)
cond_scale = cast_tuple(cond_scale, num_unets)
# handle video and frame dimension
if self.is_video and exists(inpaint_images):
video_frames = inpaint_images.shape[2]
if inpaint_masks.ndim == 3:
inpaint_masks = repeat(inpaint_masks, 'b h w -> b f h w', f = video_frames)
assert inpaint_masks.shape[1] == video_frames
assert not (self.is_video and not exists(video_frames)), 'video_frames must be passed in on sample time if training on video'
# determine the frame dimensions, if needed
all_frame_dims = calc_all_frame_dims(self.temporal_downsample_factor, video_frames)
# initializing with an image or video
init_images = cast_tuple(init_images, num_unets)
init_images = [maybe(self.normalize_img)(init_image) for init_image in init_images]
skip_steps = cast_tuple(skip_steps, num_unets)
sigma_min = cast_tuple(sigma_min, num_unets)
sigma_max = cast_tuple(sigma_max, num_unets)
        # handle starting at a unet greater than 1, for upscaler-only training
if start_at_unet_number > 1:
            assert start_at_unet_number <= num_unets, 'must start at a unet that does not exceed the total number of unets'
assert not exists(stop_at_unet_number) or start_at_unet_number <= stop_at_unet_number
assert exists(start_image_or_video), 'starting image or video must be supplied if only doing upscaling'
prev_image_size = self.image_sizes[start_at_unet_number - 2]
img = self.resize_to(start_image_or_video, prev_image_size)
# go through each unet in cascade
for unet_number, unet, channel, image_size, frame_dims, unet_hparam, dynamic_threshold, unet_cond_scale, unet_init_images, unet_skip_steps, unet_sigma_min, unet_sigma_max in tqdm(zip(range(1, num_unets + 1), self.unets, self.sample_channels, self.image_sizes, all_frame_dims, self.hparams, self.dynamic_thresholding, cond_scale, init_images, skip_steps, sigma_min, sigma_max), disable = not use_tqdm):
if unet_number < start_at_unet_number:
continue
assert not isinstance(unet, NullUnet), 'cannot sample from null unet'
context = self.one_unet_in_gpu(unet = unet) if is_cuda and use_one_unet_in_gpu else nullcontext()
with context:
lowres_cond_img = lowres_noise_times = None
shape = (batch_size, channel, *frame_dims, image_size, image_size)
resize_kwargs = dict()
video_kwargs = dict()
if self.is_video:
resize_kwargs = dict(target_frames = frame_dims[0])
video_kwargs = dict(
cond_video_frames = cond_video_frames,
post_cond_video_frames = post_cond_video_frames
)
video_kwargs = compact(video_kwargs)
# handle video conditioning frames
if self.is_video and self.resize_cond_video_frames:
downsample_scale = self.temporal_downsample_factor[unet_number - 1]
temporal_downsample_fn = partial(scale_video_time, downsample_scale = downsample_scale)
video_kwargs = maybe_transform_dict_key(video_kwargs, 'cond_video_frames', temporal_downsample_fn)
video_kwargs = maybe_transform_dict_key(video_kwargs, 'post_cond_video_frames', temporal_downsample_fn)
# low resolution conditioning
if unet.lowres_cond:
lowres_noise_times = self.lowres_noise_schedule.get_times(batch_size, lowres_sample_noise_level, device = device)
lowres_cond_img = self.resize_to(img, image_size, **resize_kwargs)
lowres_cond_img = self.normalize_img(lowres_cond_img)
lowres_cond_img, *_ = self.lowres_noise_schedule.q_sample(x_start = lowres_cond_img, t = lowres_noise_times, noise = torch.randn_like(lowres_cond_img))
if exists(unet_init_images):
unet_init_images = self.resize_to(unet_init_images, image_size, **resize_kwargs)
shape = (batch_size, self.channels, *frame_dims, image_size, image_size)
img = self.one_unet_sample(
unet,
shape,
unet_number = unet_number,
text_embeds = text_embeds,
text_mask = text_masks,
cond_images = cond_images,
inpaint_images = inpaint_images,
inpaint_masks = inpaint_masks,
inpaint_resample_times = inpaint_resample_times,
init_images = unet_init_images,
skip_steps = unet_skip_steps,
sigma_min = unet_sigma_min,
sigma_max = unet_sigma_max,
cond_scale = unet_cond_scale,
lowres_cond_img = lowres_cond_img,
lowres_noise_times = lowres_noise_times,
dynamic_threshold = dynamic_threshold,
use_tqdm = use_tqdm,
**video_kwargs
)
outputs.append(img)
if exists(stop_at_unet_number) and stop_at_unet_number == unet_number:
break
output_index = -1 if not return_all_unet_outputs else slice(None) # either return last unet output or all unet outputs
if not return_pil_images:
return outputs[output_index]
if not return_all_unet_outputs:
outputs = outputs[-1:]
assert not self.is_video, 'automatically converting video tensor to video file for saving is not built yet'
pil_images = list(map(lambda img: list(map(T.ToPILImage(), img.unbind(dim = 0))), outputs))
return pil_images[output_index] # now you have a bunch of pillow images you can just .save(/where/ever/you/want.png)
# training
def loss_weight(self, sigma_data, sigma):
return (sigma ** 2 + sigma_data ** 2) * (sigma * sigma_data) ** -2
def noise_distribution(self, P_mean, P_std, batch_size):
return (P_mean + P_std * torch.randn((batch_size,), device = self.device)).exp()
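    # note: training sigmas are drawn log-normally, sigma = exp(P_mean + P_std * z) with z ~ N(0, 1),
    # which concentrates training on mid-range noise levels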
def forward(
self,
images, # rename to images or video
unet: Union[Unet, Unet3D, NullUnet, DistributedDataParallel] = None,
texts: List[str] = None,
text_embeds = None,
text_masks = None,
unet_number = None,
cond_images = None,
**kwargs
):
if self.is_video and images.ndim == 4:
images = rearrange(images, 'b c h w -> b c 1 h w')
kwargs.update(ignore_time = True)
        assert images.shape[-1] == images.shape[-2], f'the images you pass in must be a square, but received dimensions of {images.shape[-2]}, {images.shape[-1]}'
assert not (len(self.unets) > 1 and not exists(unet_number)), f'you must specify which unet you want trained, from a range of 1 to {len(self.unets)}, if you are training cascading DDPM (multiple unets)'
unet_number = default(unet_number, 1)
        assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, f'you can only train on unet #{self.only_train_unet_number}'
images = cast_uint8_images_to_float(images)
cond_images = maybe(cast_uint8_images_to_float)(cond_images)
assert images.dtype == torch.float, f'images tensor needs to be floats but {images.dtype} dtype found instead'
unet_index = unet_number - 1
unet = default(unet, lambda: self.get_unet(unet_number))
assert not isinstance(unet, NullUnet), 'null unet cannot and should not be trained'
target_image_size = self.image_sizes[unet_index]
random_crop_size = self.random_crop_sizes[unet_index]
prev_image_size = self.image_sizes[unet_index - 1] if unet_index > 0 else None
hp = self.hparams[unet_index]
batch_size, c, *_, h, w, device, is_video = *images.shape, images.device, (images.ndim == 5)
frames = images.shape[2] if is_video else None
all_frame_dims = tuple(safe_get_tuple_index(el, 0) for el in calc_all_frame_dims(self.temporal_downsample_factor, frames))
ignore_time = kwargs.get('ignore_time', False)
target_frame_size = all_frame_dims[unet_index] if is_video and not ignore_time else None
prev_frame_size = all_frame_dims[unet_index - 1] if is_video and not ignore_time and unet_index > 0 else None
frames_to_resize_kwargs = lambda frames: dict(target_frames = frames) if exists(frames) else dict()
assert images.shape[1] == self.channels
assert h >= target_image_size and w >= target_image_size
if exists(texts) and not exists(text_embeds) and not self.unconditional:
assert all([*map(len, texts)]), 'text cannot be empty'
assert len(texts) == len(images), 'number of text captions does not match up with the number of images given'
with autocast(enabled = False):
text_embeds, text_masks = self.encode_text(texts, return_attn_mask = True)
text_embeds, text_masks = map(lambda t: t.to(images.device), (text_embeds, text_masks))
if not self.unconditional:
text_masks = default(text_masks, lambda: torch.any(text_embeds != 0., dim = -1))
assert not (self.condition_on_text and not exists(text_embeds)), 'text or text encodings must be passed into decoder if specified'
assert not (not self.condition_on_text and exists(text_embeds)), 'decoder specified not to be conditioned on text, yet it is presented'
assert not (exists(text_embeds) and text_embeds.shape[-1] != self.text_embed_dim), f'invalid text embedding dimension being passed in (should be {self.text_embed_dim})'
# handle video conditioning frames
if self.is_video and self.resize_cond_video_frames:
downsample_scale = self.temporal_downsample_factor[unet_index]
temporal_downsample_fn = partial(scale_video_time, downsample_scale = downsample_scale)
kwargs = maybe_transform_dict_key(kwargs, 'cond_video_frames', temporal_downsample_fn)
kwargs = maybe_transform_dict_key(kwargs, 'post_cond_video_frames', temporal_downsample_fn)
# low resolution conditioning
lowres_cond_img = lowres_aug_times = None
if exists(prev_image_size):
lowres_cond_img = self.resize_to(images, prev_image_size, **frames_to_resize_kwargs(prev_frame_size), clamp_range = self.input_image_range)
lowres_cond_img = self.resize_to(lowres_cond_img, target_image_size, **frames_to_resize_kwargs(target_frame_size), clamp_range = self.input_image_range)
if self.per_sample_random_aug_noise_level:
lowres_aug_times = self.lowres_noise_schedule.sample_random_times(batch_size, device = device)
else:
lowres_aug_time = self.lowres_noise_schedule.sample_random_times(1, device = device)
lowres_aug_times = repeat(lowres_aug_time, '1 -> b', b = batch_size)
images = self.resize_to(images, target_image_size, **frames_to_resize_kwargs(target_frame_size))
# normalize to [-1, 1]
images = self.normalize_img(images)
lowres_cond_img = maybe(self.normalize_img)(lowres_cond_img)
# random cropping during training
# for upsamplers
if exists(random_crop_size):
aug = K.RandomCrop((random_crop_size, random_crop_size), p = 1.)
if is_video:
images, lowres_cond_img = map(lambda t: rearrange(t, 'b c f h w -> (b f) c h w'), (images, lowres_cond_img))
# make sure low res conditioner and image both get augmented the same way
# detailed https://kornia.readthedocs.io/en/latest/augmentation.module.html?highlight=randomcrop#kornia.augmentation.RandomCrop
images = aug(images)
lowres_cond_img = aug(lowres_cond_img, params = aug._params)
if is_video:
images, lowres_cond_img = map(lambda t: rearrange(t, '(b f) c h w -> b c f h w', f = frames), (images, lowres_cond_img))
# noise the lowres conditioning image
# at sample time, they then fix the noise level of 0.1 - 0.3
lowres_cond_img_noisy = None
if exists(lowres_cond_img):
lowres_cond_img_noisy, *_ = self.lowres_noise_schedule.q_sample(x_start = lowres_cond_img, t = lowres_aug_times, noise = torch.randn_like(lowres_cond_img))
# get the sigmas
sigmas = self.noise_distribution(hp.P_mean, hp.P_std, batch_size)
padded_sigmas = self.right_pad_dims_to_datatype(sigmas)
# noise
noise = torch.randn_like(images)
noised_images = images + padded_sigmas * noise # alphas are 1. in the paper
# unet kwargs
unet_kwargs = dict(
sigma_data = hp.sigma_data,
text_embeds = text_embeds,
text_mask = text_masks,
cond_images = cond_images,
lowres_noise_times = self.lowres_noise_schedule.get_condition(lowres_aug_times),
lowres_cond_img = lowres_cond_img_noisy,
cond_drop_prob = self.cond_drop_prob,
**kwargs
)
# self conditioning - https://arxiv.org/abs/2208.04202 - training will be 25% slower
# Because 'unet' can be an instance of DistributedDataParallel coming from the
# StarlightTrainer.unet_being_trained when invoking StarlightTrainer.forward(), we need to
# access the member 'module' of the wrapped unet instance.
self_cond = unet.module.self_cond if isinstance(unet, DistributedDataParallel) else unet.self_cond
if self_cond and random() < 0.5:
with torch.no_grad():
pred_x0 = self.preconditioned_network_forward(
unet.forward,
noised_images,
sigmas,
**unet_kwargs
).detach()
unet_kwargs = {**unet_kwargs, 'self_cond': pred_x0}
# get prediction
denoised_images = self.preconditioned_network_forward(
unet.forward,
noised_images,
sigmas,
**unet_kwargs
)
# losses
losses = F.mse_loss(denoised_images, images, reduction = 'none')
losses = reduce(losses, 'b ... -> b', 'mean')
# loss weighting
losses = losses * self.loss_weight(hp.sigma_data, sigmas)
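        # this weight equals 1 / c_out(sigma) ** 2, compensating for the scale of the c_out-scaled
        # network output so every noise level contributes comparably to the loss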
# return average loss
return losses.mean() | StarlightVision-master | starlight_vision/core/elucidated.py |
StarlightVision-master | starlight_vision/core/__init__.py |
|
import torch
import torch.nn as nn
from torchvision.transforms import Compose, Resize, Normalize, ToTensor
from torch.utils.data import DataLoader
from transformers import CLIPModel, DPTImageProcessor, DPTForDepthEstimation
from torchvision.transforms import GaussianBlur
import torch.nn.functional as F
import math
#spatial transformer
class SpatialTransformer(nn.Module):
def __init__(self, in_channels, num_heads):
super(SpatialTransformer, self).__init__()
self.in_channels = in_channels
self.num_heads = num_heads
self.key_proj = nn.Linear(in_channels, in_channels)
self.value_proj = nn.Linear(in_channels, in_channels)
self.query_proj = nn.Linear(in_channels, in_channels)
self.softmax = nn.Softmax(dim=-1)
self.output_proj = nn.Linear(in_channels, in_channels)
def forward(self, x, content_embeddings):
#compute keys and values from content embedding
keys = self.key_proj(content_embeddings)
values = self.value_proj(content_embeddings)
#compute queries from input
queries = self.query_proj(x)
        # compute attention scores
        attention_scores = torch.matmul(queries, keys.transpose(-1, -2)) / (self.in_channels ** 0.5)
        attention_scores = self.softmax(attention_scores)
        # compute attended values
        attended_values = torch.matmul(attention_scores, values)
        # add residual connection and apply output projection
        out = x + attended_values
out = self.output_proj(out)
return out
class Midas(nn.Module):
def __init__(self):
super(Midas, self).__init__()
self.processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
self.model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
def forward(self, x):
#prepare images for the model
inputs = self.processor(images=x, return_tensors="pt")
with torch.no_grad():
outputs = self.model(**inputs)
predicted_depth = outputs.predicted_depth
        # interpolate to the original size
        prediction = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1),
size=x.shape[-2:],
mode="bicubic",
align_corners=False,
)
return prediction.squeeze()
# sinusoidal embeddings
class SinusoidalEmbedding(nn.Module):
def __init__(self, num_channels):
super(SinusoidalEmbedding, self).__init__()
self.num_channels = num_channels
def forward(self, ts, T):
ts = torch.Tensor([ts])
T = torch.Tensor([T])
ts_embed = torch.zeros(self.num_channels // 2)
div_term = torch.exp(torch.arange(0, self.num_channels // 2, 2) * -(math.log(10000.0) / self.num_channels))
ts_embed[0::2] = torch.sin(ts * div_term)
ts_embed[1::2] = torch.cos(ts * div_term)
return ts_embed
#unet
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, temporal=False):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(out_channels)
self.temporal = temporal
if temporal:
self.temporal_conv = nn.Conv1d(out_channels, out_channels, kernel_size=3, padding=1)
            self.bn_temporal = nn.BatchNorm1d(out_channels)
        # 1x1 projection so the skip connection is valid when in_channels != out_channels
        self.shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1) if in_channels != out_channels else nn.Identity()
def forward(self, x):
        residual = self.shortcut(x)
out = self.relu(self.bn1(self.conv1(x)))
if self.temporal:
out = self.bn_temporal(self.temporal_conv(out))
out = self.bn2(self.conv2(out))
out += residual
out = self.relu(out)
return out
class TransformerBlock(nn.Module):
def __init__(self, in_channels, temporal=False):
super(TransformerBlock, self).__init__()
        self.spatial_transformer = nn.TransformerEncoderLayer(d_model=in_channels, nhead=8)
self.temporal = temporal
if temporal:
self.temporal_transformer = nn.TransformerEncoderLayer(d_model=in_channels, nhead=8)
def forward(self, x):
out = self.spatial_transformer(x)
if self.temporal:
out = self.temporal_transformer(out)
return out
class Unet(nn.Module):
def __init__(self, in_channels, out_channels):
super(Unet, self).__init__()
# Encoder
self.encoder = nn.Sequential(
ResidualBlock(in_channels, 64, temporal=False),
nn.MaxPool2d(2),
ResidualBlock(64, 128, temporal=False),
nn.MaxPool2d(2),
ResidualBlock(128, 256, temporal=False),
nn.MaxPool2d(2),
ResidualBlock(256, 512, temporal=False),
)
# Center part of the UNet
self.middle = nn.Sequential(
TransformerBlock(512, temporal=False),
)
# Decoder
self.decoder = nn.Sequential(
ResidualBlock(512, 256, temporal=False),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
ResidualBlock(256, 128, temporal=False),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
ResidualBlock(128, 64, temporal=False),
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(64, out_channels, kernel_size=1),
)
def forward(self, x):
# Pass input through the encoder
z = self.encoder(x)
# Pass the latent representations through the middle UNet layers
z = self.middle(z)
# Pass output to decoder
out = self.decoder(z)
return out
#extended unet
class ExtendedUNet(Unet):
def __init__(self, in_channels, out_channels, blur_kernel_size=3, blur_sigma=1.0):
super(ExtendedUNet, self).__init__(in_channels, out_channels)
self.midas_dpt_large = Midas() # Load the MiDaS DPT-Large model
        self.clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
self.blur = GaussianBlur(blur_kernel_size, sigma=blur_sigma)
    def process_structure(self, x, ts):
depth_maps = self.midas_dpt_large(x)
for _ in range(ts):
depth_maps = self.blur(depth_maps)
depth_maps = F.interpolate(depth_maps, scale_factor=0.5)
depth_maps = F.interpolate(depth_maps, size=x.shape[-2:])
z_structure = self.encoder(depth_maps)
return z_structure
def process_content(self, x):
content_repr = self.clip_model.encode_image(x)
return content_repr
def sample(self, x, t, c, ts, guidance_scale=1.0, temporal_scale=1.0):
zt = self.encoder(x)
        z_structure = self.process_structure(x, ts)
zt = torch.cat([zt, z_structure], dim=1)
content_repr = self.process_content(c)
# Apply the spatial transformer and cross-attention conditioning
out = self.layers(zt, content_repr)
# Compute the adjusted predictions
unconditional_pred = self.decoder(out)
conditional_pred = self.decoder(out)
adjusted_pred = unconditional_pred + guidance_scale * (conditional_pred - unconditional_pred)
# Control temporal consistency
image_model_pred = ... # Compute the prediction of the image model applied to each frame individually
adjusted_pred = image_model_pred + temporal_scale * (adjusted_pred - image_model_pred)
return adjusted_pred
class Starlight(nn.Module):
def __init__(self, Ts):
super(Starlight, self).__init__()
self.unet_lowres = ExtendedUNet()
self.unet_midres = ExtendedUNet()
self.unet_highres = ExtendedUNet()
self.sinusoidal_embedding = SinusoidalEmbedding(4)
self.Ts = Ts
self.lstm = nn.LSTM(input_size=512, hidden_size=512, num_layers=1)
def forward(self, x, s, c, ts):
#apply multi scale architecture
x_lowres = F.interpolate(x, scale_factor=0.25)
x_midres = F.interpolate(x, scale_factor=0.5)
x_highres = x
means_lowres = self.process_scale(x_lowres, s, c, ts)
means_midres = self.process_scale(x_midres, s, c, ts)
means_highres = self.process_scale(x_highres, s, c, ts)
return means_lowres, means_midres, means_highres
def process_structure(self, depth_maps, ts):
for _ in range(ts):
depth_maps = self.unet.blur(depth_maps)
depth_maps = F.interpolate(depth_maps, scale_factor=0.5)
depth_maps = F.interpolate(depth_maps, size=depth_maps.shape[-2:])
z_structure = self.unet.encoder(depth_maps)
return z_structure
def process_scale(self, x, s, c, ts):
# Compute content representation
content_embedding = self.unet.process_content(x)
# Compute structure representation
depth_maps = self.unet.midas_dpt_large(x)
structure_embedding = self.process_structure(depth_maps, ts)
# Condition on structure
zt = torch.cat([x, structure_embedding], dim=1)
# Add sinusoidal embedding of ts
ts_embed = self.sinusoidal_embedding(ts, self.Ts).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
ts_embed = ts_embed.repeat(x.size(0), 1, x.size(2), x.size(3))
zt = torch.cat([zt, ts_embed], dim=1)
# Apply cross-attention
zt = self.unet.spatial_transformer_blocks(zt, content_embedding, c)
# Process LSTM temporal context
lstm_input = zt.view(zt.size(0), 1, -1)
_, (hidden_state, _) = self.lstm(lstm_input)
hidden_state = hidden_state.view(hidden_state.size(1), -1, 1, 1)
# Concatenate hidden_state with zt
zt = torch.cat([zt, hidden_state], dim=1)
# Pass through UNet
means = self.unet(zt)
return means
model = Starlight(Ts=1000)  # note: the constructor requires Ts (total diffusion/blur steps); 1000 is only an assumed placeholder
def train_starlight(model, train_loader, optimizer, device, perceptual_loss_fn, style_loss_fn, alpha, beta):
model.train()
for inputs, content, structure, ts in train_loader:
inputs = inputs.to(device)
content = content.to(device)
structure = structure.to(device)
ts = ts.to(device)
optimizer.zero_grad()
means_lowres, means_midres, means_highres = model(inputs, structure, content, ts)
loss_lowres = F.mse_loss(inputs, means_lowres)
loss_midres = F.mse_loss(inputs, means_midres)
loss_highres = F.mse_loss(inputs, means_highres)
# Perceptual and style losses
perceptual_loss = perceptual_loss_fn(inputs, means_highres)
style_loss = style_loss_fn(inputs, means_highres)
loss = loss_lowres + loss_midres + loss_highres + alpha * perceptual_loss + beta * style_loss
loss.backward()
optimizer.step()
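# note: the objective above sums pixel reconstruction losses at three scales and adds
# perceptual and style terms, i.e. loss = mse_low + mse_mid + mse_high + alpha * perceptual + beta * style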
| StarlightVision-master | starlight_vision/core/starlightv2.py |
import math
import copy
from random import random
from beartype.typing import List, Union
from beartype import beartype
from tqdm.auto import tqdm
from functools import partial, wraps
from contextlib import contextmanager, nullcontext
from collections import namedtuple
from pathlib import Path
import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
from torch import nn, einsum
from torch.cuda.amp import autocast
from torch.special import expm1
import torchvision.transforms as T
import kornia.augmentation as K
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from starlight_vision.core.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME
from starlight_vision.core.gen2_video import Unet3D, resize_video_to, scale_video_time
# helper functions
def exists(val):
return val is not None
def identity(t, *args, **kwargs):
return t
def divisible_by(numer, denom):
return (numer % denom) == 0
def first(arr, d = None):
if len(arr) == 0:
return d
return arr[0]
def maybe(fn):
@wraps(fn)
def inner(x):
if not exists(x):
return x
return fn(x)
return inner
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cast_tuple(val, length = None):
if isinstance(val, list):
val = tuple(val)
output = val if isinstance(val, tuple) else ((val,) * default(length, 1))
if exists(length):
assert len(output) == length
return output
def compact(input_dict):
return {key: value for key, value in input_dict.items() if exists(value)}
def maybe_transform_dict_key(input_dict, key, fn):
if key not in input_dict:
return input_dict
copied_dict = input_dict.copy()
copied_dict[key] = fn(copied_dict[key])
return copied_dict
def cast_uint8_images_to_float(images):
if not images.dtype == torch.uint8:
return images
return images / 255
def module_device(module):
return next(module.parameters()).device
def zero_init_(m):
nn.init.zeros_(m.weight)
if exists(m.bias):
nn.init.zeros_(m.bias)
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def pad_tuple_to_length(t, length, fillvalue = None):
remain_length = length - len(t)
if remain_length <= 0:
return t
return (*t, *((fillvalue,) * remain_length))
# helper classes
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, *args, **kwargs):
return x
# tensor helpers
def log(t, eps: float = 1e-12):
return torch.log(t.clamp(min = eps))
def l2norm(t):
return F.normalize(t, dim = -1)
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
def masked_mean(t, *, dim, mask = None):
if not exists(mask):
return t.mean(dim = dim)
denom = mask.sum(dim = dim, keepdim = True)
mask = rearrange(mask, 'b n -> b n 1')
masked_t = t.masked_fill(~mask, 0.)
return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)
def resize_image_to(
image,
target_image_size,
clamp_range = None,
mode = 'nearest'
):
orig_image_size = image.shape[-1]
if orig_image_size == target_image_size:
return image
out = F.interpolate(image, target_image_size, mode = mode)
if exists(clamp_range):
out = out.clamp(*clamp_range)
return out
def calc_all_frame_dims(
downsample_factors: List[int],
frames
):
if not exists(frames):
return (tuple(),) * len(downsample_factors)
all_frame_dims = []
for divisor in downsample_factors:
assert divisible_by(frames, divisor)
all_frame_dims.append((frames // divisor,))
return all_frame_dims
def safe_get_tuple_index(tup, index, default = None):
if len(tup) <= index:
return default
return tup[index]
# image normalization functions
# ddpms expect images to be in the range of -1 to 1
def normalize_neg_one_to_one(img):
return img * 2 - 1
def unnormalize_zero_to_one(normed_img):
return (normed_img + 1) * 0.5
# classifier free guidance functions
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
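# illustrative sketch (not part of the original code): prob_mask_like is the classifier-free
# guidance dropout - during training, conditioning is kept for roughly (1 - cond_drop_prob)
# of the batch elements
def _demo_cond_dropout(batch = 4, cond_drop_prob = 0.1):
    # boolean mask, True where the (text) conditioning is kept for that batch element
    return prob_mask_like((batch,), 1. - cond_drop_prob, device = torch.device('cpu'))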
# gaussian diffusion with continuous time helper functions and classes
# large part of this was thanks to @crowsonkb at https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py
@torch.jit.script
def beta_linear_log_snr(t):
return -torch.log(expm1(1e-4 + 10 * (t ** 2)))
@torch.jit.script
def alpha_cosine_log_snr(t, s: float = 0.008):
return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version
def log_snr_to_alpha_sigma(log_snr):
return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))
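# small sanity-check sketch (illustrative addition, not used elsewhere): for the variance
# preserving parameterization above, alpha ** 2 + sigma ** 2 == 1 at every log-SNR value
def _check_alpha_sigma_identity(num_points = 5):
    t = torch.linspace(0., 1., num_points)
    alpha, sigma = log_snr_to_alpha_sigma(alpha_cosine_log_snr(t))
    return torch.allclose(alpha ** 2 + sigma ** 2, torch.ones_like(t))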
class GaussianDiffusionContinuousTimes(nn.Module):
def __init__(self, *, noise_schedule, timesteps = 1000):
super().__init__()
if noise_schedule == "linear":
self.log_snr = beta_linear_log_snr
elif noise_schedule == "cosine":
self.log_snr = alpha_cosine_log_snr
else:
raise ValueError(f'invalid noise schedule {noise_schedule}')
self.num_timesteps = timesteps
def get_times(self, batch_size, noise_level, *, device):
return torch.full((batch_size,), noise_level, device = device, dtype = torch.float32)
def sample_random_times(self, batch_size, *, device):
return torch.zeros((batch_size,), device = device).float().uniform_(0, 1)
def get_condition(self, times):
return maybe(self.log_snr)(times)
def get_sampling_timesteps(self, batch, *, device):
times = torch.linspace(1., 0., self.num_timesteps + 1, device = device)
times = repeat(times, 't -> b t', b = batch)
times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)
times = times.unbind(dim = -1)
return times
def q_posterior(self, x_start, x_t, t, *, t_next = None):
t_next = default(t_next, lambda: (t - 1. / self.num_timesteps).clamp(min = 0.))
""" https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material """
log_snr = self.log_snr(t)
log_snr_next = self.log_snr(t_next)
log_snr, log_snr_next = map(partial(right_pad_dims_to, x_t), (log_snr, log_snr_next))
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)
# c - as defined near eq 33
c = -expm1(log_snr - log_snr_next)
posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)
# following (eq. 33)
posterior_variance = (sigma_next ** 2) * c
posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def q_sample(self, x_start, t, noise = None):
dtype = x_start.dtype
if isinstance(t, float):
batch = x_start.shape[0]
t = torch.full((batch,), t, device = x_start.device, dtype = dtype)
noise = default(noise, lambda: torch.randn_like(x_start))
log_snr = self.log_snr(t).type(dtype)
log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)
alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)
return alpha * x_start + sigma * noise, log_snr, alpha, sigma
def q_sample_from_to(self, x_from, from_t, to_t, noise = None):
shape, device, dtype = x_from.shape, x_from.device, x_from.dtype
batch = shape[0]
if isinstance(from_t, float):
from_t = torch.full((batch,), from_t, device = device, dtype = dtype)
if isinstance(to_t, float):
to_t = torch.full((batch,), to_t, device = device, dtype = dtype)
noise = default(noise, lambda: torch.randn_like(x_from))
log_snr = self.log_snr(from_t)
log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)
alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)
log_snr_to = self.log_snr(to_t)
log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)
alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)
return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha
def predict_start_from_v(self, x_t, t, v):
log_snr = self.log_snr(t)
log_snr = right_pad_dims_to(x_t, log_snr)
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
return alpha * x_t - sigma * v
def predict_start_from_noise(self, x_t, t, noise):
log_snr = self.log_snr(t)
log_snr = right_pad_dims_to(x_t, log_snr)
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)
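# minimal usage sketch (illustrative addition, assumes the class above): noise a batch at a
# fixed continuous time, then recover x_start by inverting with the known noise
def _demo_continuous_time_noising():
    schedule = GaussianDiffusionContinuousTimes(noise_schedule = 'cosine', timesteps = 1000)
    x_start = torch.randn(2, 3, 32, 32)
    times = schedule.get_times(2, 0.5, device = x_start.device)
    noise = torch.randn_like(x_start)
    x_noised, *_ = schedule.q_sample(x_start, times, noise = noise)
    recovered = schedule.predict_start_from_noise(x_noised, times, noise)
    return torch.allclose(recovered, x_start, atol = 1e-5)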
# norms and residuals
class LayerNorm(nn.Module):
def __init__(self, feats, stable = False, dim = -1):
super().__init__()
self.stable = stable
self.dim = dim
self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))
def forward(self, x):
dtype, dim = x.dtype, self.dim
if self.stable:
x = x / x.amax(dim = dim, keepdim = True).detach()
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = dim, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = dim, keepdim = True)
return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)
ChanLayerNorm = partial(LayerNorm, dim = -3)
class Always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class Parallel(nn.Module):
def __init__(self, *fns):
super().__init__()
self.fns = nn.ModuleList(fns)
def forward(self, x):
outputs = [fn(x) for fn in self.fns]
return sum(outputs)
# attention pooling
class PerceiverAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
scale = 8
):
super().__init__()
self.scale = scale
self.heads = heads
inner_dim = dim_head * heads
self.norm = nn.LayerNorm(dim)
self.norm_latents = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
nn.LayerNorm(dim)
)
def forward(self, x, latents, mask = None):
x = self.norm(x)
latents = self.norm_latents(latents)
b, h = x.shape[0], self.heads
q = self.to_q(latents)
# the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to
kv_input = torch.cat((x, latents), dim = -2)
k, v = self.to_kv(kv_input).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# qk rmsnorm
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# similarities and masking
sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale
if exists(mask):
max_neg_value = -torch.finfo(sim.dtype).max
mask = F.pad(mask, (0, latents.shape[-2]), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
# attention
attn = sim.softmax(dim = -1, dtype = torch.float32)
attn = attn.to(sim.dtype)
out = einsum('... i j, ... j d -> ... i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)', h = h)
return self.to_out(out)
class PerceiverResampler(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
num_latents = 64,
num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence
max_seq_len = 512,
ff_mult = 4
):
super().__init__()
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.latents = nn.Parameter(torch.randn(num_latents, dim))
self.to_latents_from_mean_pooled_seq = None
if num_latents_mean_pooled > 0:
self.to_latents_from_mean_pooled_seq = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, dim * num_latents_mean_pooled),
Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)
)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),
FeedForward(dim = dim, mult = ff_mult)
]))
def forward(self, x, mask = None):
n, device = x.shape[1], x.device
pos_emb = self.pos_emb(torch.arange(n, device = device))
x_with_pos = x + pos_emb
latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])
if exists(self.to_latents_from_mean_pooled_seq):
meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))
meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)
latents = torch.cat((meanpooled_latents, latents), dim = -2)
for attn, ff in self.layers:
latents = attn(x_with_pos, latents, mask = mask) + latents
latents = ff(latents) + latents
return latents
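# note: the resampler maps a variable-length token sequence (b, n, dim) to a fixed set of
# num_latents + num_latents_mean_pooled latent vectors, so downstream attention can condition
# on text of any length through a constant-size interface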
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
*,
dim_head = 64,
heads = 8,
context_dim = None,
scale = 8
):
super().__init__()
self.scale = scale
self.heads = heads
inner_dim = dim_head * heads
self.norm = LayerNorm(dim)
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
LayerNorm(dim)
)
def forward(self, x, context = None, mask = None, attn_bias = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> b 1 d', b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# add text conditioning, if present
if exists(context):
assert exists(self.to_context)
ck, cv = self.to_context(context).chunk(2, dim = -1)
k = torch.cat((ck, k), dim = -2)
v = torch.cat((cv, v), dim = -2)
# qk rmsnorm
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# calculate query / key similarities
sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale
# relative positional encoding (T5 style)
if exists(attn_bias):
sim = sim + attn_bias
# masking
max_neg_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
# attention
attn = sim.softmax(dim = -1, dtype = torch.float32)
attn = attn.to(sim.dtype)
# aggregate values
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# decoder
def Upsample(dim, dim_out = None):
dim_out = default(dim_out, dim)
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
nn.Conv2d(dim, dim_out, 3, padding = 1)
)
class PixelShuffleUpsample(nn.Module):
"""
    code shared by @MalumaDev at DALLE2-pytorch for addressing checkerboard artifacts
https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf
"""
def __init__(self, dim, dim_out = None):
super().__init__()
dim_out = default(dim_out, dim)
conv = nn.Conv2d(dim, dim_out * 4, 1)
self.net = nn.Sequential(
conv,
nn.SiLU(),
nn.PixelShuffle(2)
)
self.init_conv_(conv)
def init_conv_(self, conv):
o, i, h, w = conv.weight.shape
conv_weight = torch.empty(o // 4, i, h, w)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(self, x):
return self.net(x)
def Downsample(dim, dim_out = None):
# https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample
# named SP-conv in the paper, but basically a pixel unshuffle
dim_out = default(dim_out, dim)
return nn.Sequential(
Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),
nn.Conv2d(dim * 4, dim_out, 1)
)
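# shape sketch (illustrative addition): Downsample halves the spatial resolution via
# pixel-unshuffle + 1x1 conv, while Upsample / PixelShuffleUpsample double it
def _demo_resolution_change():
    x = torch.randn(1, 8, 16, 16)
    down = Downsample(8)(x)   # -> (1, 8, 8, 8)
    up = Upsample(8)(down)    # -> (1, 8, 16, 16)
    return down.shape, up.shape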
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)
emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')
return torch.cat((emb.sin(), emb.cos()), dim = -1)
class LearnedSinusoidalPosEmb(nn.Module):
""" following @crowsonkb 's lead with learned sinusoidal pos emb """
""" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 """
def __init__(self, dim):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim))
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
class Block(nn.Module):
def __init__(
self,
dim,
dim_out,
groups = 8,
norm = True
):
super().__init__()
self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()
self.activation = nn.SiLU()
self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)
def forward(self, x, scale_shift = None):
x = self.groupnorm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.activation(x)
return self.project(x)
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
*,
cond_dim = None,
time_cond_dim = None,
groups = 8,
linear_attn = False,
use_gca = False,
squeeze_excite = False,
**attn_kwargs
):
super().__init__()
self.time_mlp = None
if exists(time_cond_dim):
self.time_mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_cond_dim, dim_out * 2)
)
self.cross_attn = None
if exists(cond_dim):
attn_klass = CrossAttention if not linear_attn else LinearCrossAttention
self.cross_attn = attn_klass(
dim = dim_out,
context_dim = cond_dim,
**attn_kwargs
)
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()
def forward(self, x, time_emb = None, cond = None):
scale_shift = None
if exists(self.time_mlp) and exists(time_emb):
time_emb = self.time_mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x)
if exists(self.cross_attn):
assert exists(cond)
h = rearrange(h, 'b c h w -> b h w c')
h, ps = pack([h], 'b * c')
h = self.cross_attn(h, context = cond) + h
h, = unpack(h, ps, 'b * c')
h = rearrange(h, 'b h w c -> b c h w')
h = self.block2(h, scale_shift = scale_shift)
h = h * self.gca(h)
return h + self.res_conv(x)
class CrossAttention(nn.Module):
def __init__(
self,
dim,
*,
context_dim = None,
dim_head = 64,
heads = 8,
norm_context = False,
scale = 8
):
super().__init__()
self.scale = scale
self.heads = heads
inner_dim = dim_head * heads
context_dim = default(context_dim, dim)
self.norm = LayerNorm(dim)
self.norm_context = LayerNorm(context_dim) if norm_context else Identity()
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
LayerNorm(dim)
)
def forward(self, x, context, mask = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
context = self.norm_context(context)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> b h 1 d', h = self.heads, b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# cosine sim attention
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# similarities
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
# masking
max_neg_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
attn = sim.softmax(dim = -1, dtype = torch.float32)
attn = attn.to(sim.dtype)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class LinearCrossAttention(CrossAttention):
def forward(self, x, context, mask = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
context = self.norm_context(context)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = self.heads), (q, k, v))
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> (b h) 1 d', h = self.heads, b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# masking
max_neg_value = -torch.finfo(x.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b n -> b n 1')
k = k.masked_fill(~mask, max_neg_value)
v = v.masked_fill(~mask, 0.)
# linear attention
q = q.softmax(dim = -1)
k = k.softmax(dim = -2)
q = q * self.scale
context = einsum('b n d, b n e -> b d e', k, v)
out = einsum('b n d, b d e -> b n e', q, context)
out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)
return self.to_out(out)
class LinearAttention(nn.Module):
def __init__(
self,
dim,
dim_head = 32,
heads = 8,
dropout = 0.05,
context_dim = None,
**kwargs
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm = ChanLayerNorm(dim)
self.nonlin = nn.SiLU()
self.to_q = nn.Sequential(
nn.Dropout(dropout),
nn.Conv2d(dim, inner_dim, 1, bias = False),
nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_k = nn.Sequential(
nn.Dropout(dropout),
nn.Conv2d(dim, inner_dim, 1, bias = False),
nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_v = nn.Sequential(
nn.Dropout(dropout),
nn.Conv2d(dim, inner_dim, 1, bias = False),
nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None
self.to_out = nn.Sequential(
nn.Conv2d(inner_dim, dim, 1, bias = False),
ChanLayerNorm(dim)
)
def forward(self, fmap, context = None):
h, x, y = self.heads, *fmap.shape[-2:]
fmap = self.norm(fmap)
q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = h), (q, k, v))
if exists(context):
assert exists(self.to_context)
ck, cv = self.to_context(context).chunk(2, dim = -1)
ck, cv = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (ck, cv))
k = torch.cat((k, ck), dim = -2)
v = torch.cat((v, cv), dim = -2)
q = q.softmax(dim = -1)
k = k.softmax(dim = -2)
q = q * self.scale
context = einsum('b n d, b n e -> b d e', k, v)
out = einsum('b n d, b d e -> b n e', q, context)
out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)
out = self.nonlin(out)
return self.to_out(out)
class GlobalContext(nn.Module):
""" basically a superior form of squeeze-excitation that is attention-esque """
def __init__(
self,
*,
dim_in,
dim_out
):
super().__init__()
self.to_k = nn.Conv2d(dim_in, 1, 1)
hidden_dim = max(3, dim_out // 2)
self.net = nn.Sequential(
nn.Conv2d(dim_in, hidden_dim, 1),
nn.SiLU(),
nn.Conv2d(hidden_dim, dim_out, 1),
nn.Sigmoid()
)
def forward(self, x):
context = self.to_k(x)
x, context = map(lambda t: rearrange(t, 'b n ... -> b n (...)'), (x, context))
out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)
out = rearrange(out, '... -> ... 1')
return self.net(out)
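# Usage sketch for the gating above (illustrative values): GlobalContext squeezes the feature map
# into a single attention-pooled descriptor and returns a per-channel gate.
#   gca = GlobalContext(dim_in = 64, dim_out = 64)
#   feats = torch.randn(2, 64, 32, 32)
#   gate = gca(feats)                   # (2, 64, 1, 1), values in (0, 1) from the sigmoid
#   feats = feats * gate                # channel re-weighting, as done in ResnetBlock via self.gca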
def FeedForward(dim, mult = 2):
hidden_dim = int(dim * mult)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, hidden_dim, bias = False),
nn.GELU(),
LayerNorm(hidden_dim),
nn.Linear(hidden_dim, dim, bias = False)
)
def ChanFeedForward(dim, mult = 2): # in paper, it seems for self attention layers they did feedforwards with twice channel width
hidden_dim = int(dim * mult)
return nn.Sequential(
ChanLayerNorm(dim),
nn.Conv2d(dim, hidden_dim, 1, bias = False),
nn.GELU(),
ChanLayerNorm(hidden_dim),
nn.Conv2d(hidden_dim, dim, 1, bias = False)
)
class TransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
depth = 1,
heads = 8,
dim_head = 32,
ff_mult = 2,
context_dim = None
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),
FeedForward(dim = dim, mult = ff_mult)
]))
def forward(self, x, context = None):
x = rearrange(x, 'b c h w -> b h w c')
x, ps = pack([x], 'b * c')
for attn, ff in self.layers:
x = attn(x, context = context) + x
x = ff(x) + x
x, = unpack(x, ps, 'b * c')
x = rearrange(x, 'b h w c -> b c h w')
return x
class LinearAttentionTransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
depth = 1,
heads = 8,
dim_head = 32,
ff_mult = 2,
context_dim = None,
**kwargs
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
LinearAttention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),
ChanFeedForward(dim = dim, mult = ff_mult)
]))
def forward(self, x, context = None):
for attn, ff in self.layers:
x = attn(x, context = context) + x
x = ff(x) + x
return x
class CrossEmbedLayer(nn.Module):
def __init__(
self,
dim_in,
kernel_sizes,
dim_out = None,
stride = 2
):
super().__init__()
assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])
dim_out = default(dim_out, dim_in)
kernel_sizes = sorted(kernel_sizes)
num_scales = len(kernel_sizes)
# calculate the dimension at each scale
dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]
dim_scales = [*dim_scales, dim_out - sum(dim_scales)]
self.convs = nn.ModuleList([])
for kernel, dim_scale in zip(kernel_sizes, dim_scales):
self.convs.append(nn.Conv2d(dim_in, dim_scale, kernel, stride = stride, padding = (kernel - stride) // 2))
def forward(self, x):
fmaps = tuple(map(lambda conv: conv(x), self.convs))
return torch.cat(fmaps, dim = 1)
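# Worked example of the channel split above (assumed values): with dim_out = 128 and
# kernel_sizes = (3, 7, 15), dim_scales becomes [128 // 2, 128 // 4] = [64, 32] plus a
# remainder of 128 - 96 = 32, so the three parallel convs output 64 + 32 + 32 = 128
# channels, which forward() concatenates along the channel dimension.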
class UpsampleCombiner(nn.Module):
def __init__(
self,
dim,
*,
enabled = False,
dim_ins = tuple(),
dim_outs = tuple()
):
super().__init__()
dim_outs = cast_tuple(dim_outs, len(dim_ins))
assert len(dim_ins) == len(dim_outs)
self.enabled = enabled
if not self.enabled:
self.dim_out = dim
return
self.fmap_convs = nn.ModuleList([Block(dim_in, dim_out) for dim_in, dim_out in zip(dim_ins, dim_outs)])
self.dim_out = dim + (sum(dim_outs) if len(dim_outs) > 0 else 0)
def forward(self, x, fmaps = None):
target_size = x.shape[-1]
fmaps = default(fmaps, tuple())
if not self.enabled or len(fmaps) == 0 or len(self.fmap_convs) == 0:
return x
fmaps = [resize_image_to(fmap, target_size) for fmap in fmaps]
outs = [conv(fmap) for fmap, conv in zip(fmaps, self.fmap_convs)]
return torch.cat((x, *outs), dim = 1)
class Unet(nn.Module):
def __init__(
self,
*,
dim,
text_embed_dim = get_encoded_dim(DEFAULT_T5_NAME),
num_resnet_blocks = 1,
cond_dim = None,
num_image_tokens = 4,
num_time_tokens = 2,
learned_sinu_pos_emb_dim = 16,
out_dim = None,
dim_mults=(1, 2, 4, 8),
cond_images_channels = 0,
channels = 3,
channels_out = None,
attn_dim_head = 64,
attn_heads = 8,
ff_mult = 2.,
lowres_cond = False, # for cascading diffusion - https://cascaded-diffusion.github.io/
layer_attns = True,
layer_attns_depth = 1,
layer_mid_attns_depth = 1,
layer_attns_add_text_cond = True, # whether to condition the self-attention blocks with the text embeddings, as described in Appendix D.3.1
attend_at_middle = True, # whether to have a layer of attention at the bottleneck (can turn off for higher resolution in cascading DDPM, before bringing in efficient attention)
layer_cross_attns = True,
use_linear_attn = False,
use_linear_cross_attn = False,
cond_on_text = True,
max_text_len = 256,
init_dim = None,
resnet_groups = 8,
init_conv_kernel_size = 7, # kernel size of initial conv, if not using cross embed
init_cross_embed = True,
init_cross_embed_kernel_sizes = (3, 7, 15),
cross_embed_downsample = False,
cross_embed_downsample_kernel_sizes = (2, 4),
attn_pool_text = True,
attn_pool_num_latents = 32,
dropout = 0.,
memory_efficient = False,
init_conv_to_final_conv_residual = False,
use_global_context_attn = True,
scale_skip_connection = True,
final_resnet_block = True,
final_conv_kernel_size = 3,
self_cond = False,
resize_mode = 'nearest',
combine_upsample_fmaps = False, # combine feature maps from all upsample blocks, used in unet squared successfully
pixel_shuffle_upsample = True, # may address checkboard artifacts
):
super().__init__()
# guide researchers
assert attn_heads > 1, 'you need to have more than 1 attention head, ideally at least 4 or 8'
if dim < 128:
print_once('The base dimension of your u-net should ideally be no smaller than 128, as recommended by a professional DDPM trainer https://nonint.com/2022/05/04/friends-dont-let-friends-train-small-diffusion-models/')
# save locals to take care of some hyperparameters for cascading DDPM
self._locals = locals()
self._locals.pop('self', None)
self._locals.pop('__class__', None)
# determine dimensions
self.channels = channels
self.channels_out = default(channels_out, channels)
# (1) in cascading diffusion, one concats the low resolution image, blurred, for conditioning the higher resolution synthesis
# (2) in self conditioning, one appends the predict x0 (x_start)
init_channels = channels * (1 + int(lowres_cond) + int(self_cond))
init_dim = default(init_dim, dim)
self.self_cond = self_cond
# optional image conditioning
self.has_cond_image = cond_images_channels > 0
self.cond_images_channels = cond_images_channels
init_channels += cond_images_channels
# initial convolution
self.init_conv = CrossEmbedLayer(init_channels, dim_out = init_dim, kernel_sizes = init_cross_embed_kernel_sizes, stride = 1) if init_cross_embed else nn.Conv2d(init_channels, init_dim, init_conv_kernel_size, padding = init_conv_kernel_size // 2)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
# time conditioning
cond_dim = default(cond_dim, dim)
time_cond_dim = dim * 4 * (2 if lowres_cond else 1)
# embedding time for log(snr) noise from continuous version
sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim)
sinu_pos_emb_input_dim = learned_sinu_pos_emb_dim + 1
self.to_time_hiddens = nn.Sequential(
sinu_pos_emb,
nn.Linear(sinu_pos_emb_input_dim, time_cond_dim),
nn.SiLU()
)
self.to_time_cond = nn.Sequential(
nn.Linear(time_cond_dim, time_cond_dim)
)
# project to time tokens as well as time hiddens
self.to_time_tokens = nn.Sequential(
nn.Linear(time_cond_dim, cond_dim * num_time_tokens),
Rearrange('b (r d) -> b r d', r = num_time_tokens)
)
# low res aug noise conditioning
self.lowres_cond = lowres_cond
if lowres_cond:
self.to_lowres_time_hiddens = nn.Sequential(
LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim),
nn.Linear(learned_sinu_pos_emb_dim + 1, time_cond_dim),
nn.SiLU()
)
self.to_lowres_time_cond = nn.Sequential(
nn.Linear(time_cond_dim, time_cond_dim)
)
self.to_lowres_time_tokens = nn.Sequential(
nn.Linear(time_cond_dim, cond_dim * num_time_tokens),
Rearrange('b (r d) -> b r d', r = num_time_tokens)
)
# normalizations
self.norm_cond = nn.LayerNorm(cond_dim)
# text encoding conditioning (optional)
self.text_to_cond = None
if cond_on_text:
assert exists(text_embed_dim), 'text_embed_dim must be given to the unet if cond_on_text is True'
self.text_to_cond = nn.Linear(text_embed_dim, cond_dim)
# finer control over whether to condition on text encodings
self.cond_on_text = cond_on_text
# attention pooling
self.attn_pool = PerceiverResampler(dim = cond_dim, depth = 2, dim_head = attn_dim_head, heads = attn_heads, num_latents = attn_pool_num_latents) if attn_pool_text else None
# for classifier free guidance
self.max_text_len = max_text_len
self.null_text_embed = nn.Parameter(torch.randn(1, max_text_len, cond_dim))
self.null_text_hidden = nn.Parameter(torch.randn(1, time_cond_dim))
# for non-attention based text conditioning at all points in the network where time is also conditioned
self.to_text_non_attn_cond = None
if cond_on_text:
self.to_text_non_attn_cond = nn.Sequential(
nn.LayerNorm(cond_dim),
nn.Linear(cond_dim, time_cond_dim),
nn.SiLU(),
nn.Linear(time_cond_dim, time_cond_dim)
)
# attention related params
attn_kwargs = dict(heads = attn_heads, dim_head = attn_dim_head)
num_layers = len(in_out)
# resnet block klass
num_resnet_blocks = cast_tuple(num_resnet_blocks, num_layers)
resnet_groups = cast_tuple(resnet_groups, num_layers)
resnet_klass = partial(ResnetBlock, **attn_kwargs)
layer_attns = cast_tuple(layer_attns, num_layers)
layer_attns_depth = cast_tuple(layer_attns_depth, num_layers)
layer_cross_attns = cast_tuple(layer_cross_attns, num_layers)
use_linear_attn = cast_tuple(use_linear_attn, num_layers)
use_linear_cross_attn = cast_tuple(use_linear_cross_attn, num_layers)
assert all([layers == num_layers for layers in list(map(len, (resnet_groups, layer_attns, layer_cross_attns)))])
# downsample klass
downsample_klass = Downsample
if cross_embed_downsample:
downsample_klass = partial(CrossEmbedLayer, kernel_sizes = cross_embed_downsample_kernel_sizes)
# initial resnet block (for memory efficient unet)
self.init_resnet_block = resnet_klass(init_dim, init_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[0], use_gca = use_global_context_attn) if memory_efficient else None
# scale for resnet skip connections
self.skip_connect_scale = 1. if not scale_skip_connection else (2 ** -0.5)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
layer_params = [num_resnet_blocks, resnet_groups, layer_attns, layer_attns_depth, layer_cross_attns, use_linear_attn, use_linear_cross_attn]
reversed_layer_params = list(map(reversed, layer_params))
# downsampling layers
skip_connect_dims = [] # keep track of skip connection dimensions
for ind, ((dim_in, dim_out), layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, layer_use_linear_attn, layer_use_linear_cross_attn) in enumerate(zip(in_out, *layer_params)):
is_last = ind >= (num_resolutions - 1)
layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None
if layer_attn:
transformer_block_klass = TransformerBlock
elif layer_use_linear_attn:
transformer_block_klass = LinearAttentionTransformerBlock
else:
transformer_block_klass = Identity
current_dim = dim_in
# whether to pre-downsample, from memory efficient unet
pre_downsample = None
if memory_efficient:
pre_downsample = downsample_klass(dim_in, dim_out)
current_dim = dim_out
skip_connect_dims.append(current_dim)
# whether to do post-downsample, for non-memory efficient unet
post_downsample = None
if not memory_efficient:
post_downsample = downsample_klass(current_dim, dim_out) if not is_last else Parallel(nn.Conv2d(dim_in, dim_out, 3, padding = 1), nn.Conv2d(dim_in, dim_out, 1))
self.downs.append(nn.ModuleList([
pre_downsample,
resnet_klass(current_dim, current_dim, cond_dim = layer_cond_dim, linear_attn = layer_use_linear_cross_attn, time_cond_dim = time_cond_dim, groups = groups),
nn.ModuleList([ResnetBlock(current_dim, current_dim, time_cond_dim = time_cond_dim, groups = groups, use_gca = use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),
transformer_block_klass(dim = current_dim, depth = layer_attn_depth, ff_mult = ff_mult, context_dim = cond_dim, **attn_kwargs),
post_downsample
]))
# middle layers
mid_dim = dims[-1]
self.mid_block1 = ResnetBlock(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])
self.mid_attn = TransformerBlock(mid_dim, depth = layer_mid_attns_depth, **attn_kwargs) if attend_at_middle else None
self.mid_block2 = ResnetBlock(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])
# upsample klass
upsample_klass = Upsample if not pixel_shuffle_upsample else PixelShuffleUpsample
# upsampling layers
upsample_fmap_dims = []
for ind, ((dim_in, dim_out), layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, layer_use_linear_attn, layer_use_linear_cross_attn) in enumerate(zip(reversed(in_out), *reversed_layer_params)):
is_last = ind == (len(in_out) - 1)
layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None
if layer_attn:
transformer_block_klass = TransformerBlock
elif layer_use_linear_attn:
transformer_block_klass = LinearAttentionTransformerBlock
else:
transformer_block_klass = Identity
skip_connect_dim = skip_connect_dims.pop()
upsample_fmap_dims.append(dim_out)
self.ups.append(nn.ModuleList([
resnet_klass(dim_out + skip_connect_dim, dim_out, cond_dim = layer_cond_dim, linear_attn = layer_use_linear_cross_attn, time_cond_dim = time_cond_dim, groups = groups),
nn.ModuleList([ResnetBlock(dim_out + skip_connect_dim, dim_out, time_cond_dim = time_cond_dim, groups = groups, use_gca = use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),
transformer_block_klass(dim = dim_out, depth = layer_attn_depth, ff_mult = ff_mult, context_dim = cond_dim, **attn_kwargs),
upsample_klass(dim_out, dim_in) if not is_last or memory_efficient else Identity()
]))
# whether to combine feature maps from all upsample blocks before final resnet block out
self.upsample_combiner = UpsampleCombiner(
dim = dim,
enabled = combine_upsample_fmaps,
dim_ins = upsample_fmap_dims,
dim_outs = dim
)
# whether to do a final residual from initial conv to the final resnet block out
self.init_conv_to_final_conv_residual = init_conv_to_final_conv_residual
final_conv_dim = self.upsample_combiner.dim_out + (dim if init_conv_to_final_conv_residual else 0)
# final optional resnet block and convolution out
self.final_res_block = ResnetBlock(final_conv_dim, dim, time_cond_dim = time_cond_dim, groups = resnet_groups[0], use_gca = True) if final_resnet_block else None
final_conv_dim_in = dim if final_resnet_block else final_conv_dim
final_conv_dim_in += (channels if lowres_cond else 0)
self.final_conv = nn.Conv2d(final_conv_dim_in, self.channels_out, final_conv_kernel_size, padding = final_conv_kernel_size // 2)
zero_init_(self.final_conv)
# resize mode
self.resize_mode = resize_mode
# if the current settings for the unet are not correct
# for cascading DDPM, then reinit the unet with the right settings
def cast_model_parameters(
self,
*,
lowres_cond,
text_embed_dim,
channels,
channels_out,
cond_on_text
):
if lowres_cond == self.lowres_cond and \
channels == self.channels and \
cond_on_text == self.cond_on_text and \
text_embed_dim == self._locals['text_embed_dim'] and \
channels_out == self.channels_out:
return self
updated_kwargs = dict(
lowres_cond = lowres_cond,
text_embed_dim = text_embed_dim,
channels = channels,
channels_out = channels_out,
cond_on_text = cond_on_text
)
return self.__class__(**{**self._locals, **updated_kwargs})
# methods for returning the full unet config as well as its parameter state
def to_config_and_state_dict(self):
return self._locals, self.state_dict()
# class method for rehydrating the unet from its config and state dict
@classmethod
def from_config_and_state_dict(klass, config, state_dict):
unet = klass(**config)
unet.load_state_dict(state_dict)
return unet
# methods for persisting unet to disk
def persist_to_file(self, path):
path = Path(path)
path.parents[0].mkdir(exist_ok = True, parents = True)
config, state_dict = self.to_config_and_state_dict()
pkg = dict(config = config, state_dict = state_dict)
torch.save(pkg, str(path))
# class method for rehydrating the unet from file saved with `persist_to_file`
@classmethod
def hydrate_from_file(klass, path):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path))
assert 'config' in pkg and 'state_dict' in pkg
config, state_dict = pkg['config'], pkg['state_dict']
return Unet.from_config_and_state_dict(config, state_dict)
# forward with classifier free guidance
def forward_with_cond_scale(
self,
*args,
cond_scale = 1.,
**kwargs
):
logits = self.forward(*args, **kwargs)
if cond_scale == 1:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(
self,
x,
time,
*,
lowres_cond_img = None,
lowres_noise_times = None,
text_embeds = None,
text_mask = None,
cond_images = None,
self_cond = None,
cond_drop_prob = 0.
):
batch_size, device = x.shape[0], x.device
# condition on self
if self.self_cond:
self_cond = default(self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x, self_cond), dim = 1)
# add low resolution conditioning, if present
assert not (self.lowres_cond and not exists(lowres_cond_img)), 'low resolution conditioning image must be present'
assert not (self.lowres_cond and not exists(lowres_noise_times)), 'low resolution conditioning noise time must be present'
if exists(lowres_cond_img):
x = torch.cat((x, lowres_cond_img), dim = 1)
# condition on input image
assert not (self.has_cond_image ^ exists(cond_images)), 'you either requested to condition on an image on the unet, but the conditioning image is not supplied, or vice versa'
if exists(cond_images):
            assert cond_images.shape[1] == self.cond_images_channels, 'the number of channels on the conditioning image you are passing in does not match what you specified on initialization of the unet'
cond_images = resize_image_to(cond_images, x.shape[-1], mode = self.resize_mode)
x = torch.cat((cond_images, x), dim = 1)
# initial convolution
x = self.init_conv(x)
# init conv residual
if self.init_conv_to_final_conv_residual:
init_conv_residual = x.clone()
# time conditioning
time_hiddens = self.to_time_hiddens(time)
# derive time tokens
time_tokens = self.to_time_tokens(time_hiddens)
t = self.to_time_cond(time_hiddens)
# add lowres time conditioning to time hiddens
# and add lowres time tokens along sequence dimension for attention
if self.lowres_cond:
lowres_time_hiddens = self.to_lowres_time_hiddens(lowres_noise_times)
lowres_time_tokens = self.to_lowres_time_tokens(lowres_time_hiddens)
lowres_t = self.to_lowres_time_cond(lowres_time_hiddens)
t = t + lowres_t
time_tokens = torch.cat((time_tokens, lowres_time_tokens), dim = -2)
# text conditioning
text_tokens = None
if exists(text_embeds) and self.cond_on_text:
# conditional dropout
text_keep_mask = prob_mask_like((batch_size,), 1 - cond_drop_prob, device = device)
text_keep_mask_embed = rearrange(text_keep_mask, 'b -> b 1 1')
text_keep_mask_hidden = rearrange(text_keep_mask, 'b -> b 1')
# calculate text embeds
text_tokens = self.text_to_cond(text_embeds)
text_tokens = text_tokens[:, :self.max_text_len]
if exists(text_mask):
text_mask = text_mask[:, :self.max_text_len]
text_tokens_len = text_tokens.shape[1]
remainder = self.max_text_len - text_tokens_len
if remainder > 0:
text_tokens = F.pad(text_tokens, (0, 0, 0, remainder))
if exists(text_mask):
if remainder > 0:
text_mask = F.pad(text_mask, (0, remainder), value = False)
text_mask = rearrange(text_mask, 'b n -> b n 1')
text_keep_mask_embed = text_mask & text_keep_mask_embed
null_text_embed = self.null_text_embed.to(text_tokens.dtype) # for some reason pytorch AMP not working
text_tokens = torch.where(
text_keep_mask_embed,
text_tokens,
null_text_embed
)
if exists(self.attn_pool):
text_tokens = self.attn_pool(text_tokens)
# extra non-attention conditioning by projecting and then summing text embeddings to time
# termed as text hiddens
mean_pooled_text_tokens = text_tokens.mean(dim = -2)
text_hiddens = self.to_text_non_attn_cond(mean_pooled_text_tokens)
null_text_hidden = self.null_text_hidden.to(t.dtype)
text_hiddens = torch.where(
text_keep_mask_hidden,
text_hiddens,
null_text_hidden
)
t = t + text_hiddens
# main conditioning tokens (c)
c = time_tokens if not exists(text_tokens) else torch.cat((time_tokens, text_tokens), dim = -2)
# normalize conditioning tokens
c = self.norm_cond(c)
# initial resnet block (for memory efficient unet)
if exists(self.init_resnet_block):
x = self.init_resnet_block(x, t)
# go through the layers of the unet, down and up
hiddens = []
for pre_downsample, init_block, resnet_blocks, attn_block, post_downsample in self.downs:
if exists(pre_downsample):
x = pre_downsample(x)
x = init_block(x, t, c)
for resnet_block in resnet_blocks:
x = resnet_block(x, t)
hiddens.append(x)
x = attn_block(x, c)
hiddens.append(x)
if exists(post_downsample):
x = post_downsample(x)
x = self.mid_block1(x, t, c)
if exists(self.mid_attn):
x = self.mid_attn(x)
x = self.mid_block2(x, t, c)
add_skip_connection = lambda x: torch.cat((x, hiddens.pop() * self.skip_connect_scale), dim = 1)
up_hiddens = []
for init_block, resnet_blocks, attn_block, upsample in self.ups:
x = add_skip_connection(x)
x = init_block(x, t, c)
for resnet_block in resnet_blocks:
x = add_skip_connection(x)
x = resnet_block(x, t)
x = attn_block(x, c)
up_hiddens.append(x.contiguous())
x = upsample(x)
# whether to combine all feature maps from upsample blocks
x = self.upsample_combiner(x, up_hiddens)
# final top-most residual if needed
if self.init_conv_to_final_conv_residual:
x = torch.cat((x, init_conv_residual), dim = 1)
if exists(self.final_res_block):
x = self.final_res_block(x, t)
if exists(lowres_cond_img):
x = torch.cat((x, lowres_cond_img), dim = 1)
return self.final_conv(x)
# null unet
class NullUnet(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.lowres_cond = False
self.dummy_parameter = nn.Parameter(torch.tensor([0.]))
def cast_model_parameters(self, *args, **kwargs):
return self
def forward(self, x, *args, **kwargs):
return x
# predefined unets, with configs lining up with hyperparameters in appendix of paper
class BaseUnet64(Unet):
def __init__(self, *args, **kwargs):
default_kwargs = dict(
dim = 512,
dim_mults = (1, 2, 3, 4),
num_resnet_blocks = 3,
layer_attns = (False, True, True, True),
layer_cross_attns = (False, True, True, True),
attn_heads = 8,
ff_mult = 2.,
memory_efficient = False
)
super().__init__(*args, **{**default_kwargs, **kwargs})
class SRUnet256(Unet):
def __init__(self, *args, **kwargs):
default_kwargs = dict(
dim = 128,
dim_mults = (1, 2, 4, 8),
num_resnet_blocks = (2, 4, 8, 8),
layer_attns = (False, False, False, True),
layer_cross_attns = (False, False, False, True),
attn_heads = 8,
ff_mult = 2.,
memory_efficient = True
)
super().__init__(*args, **{**default_kwargs, **kwargs})
class SRUnet1024(Unet):
def __init__(self, *args, **kwargs):
default_kwargs = dict(
dim = 128,
dim_mults = (1, 2, 4, 8),
num_resnet_blocks = (2, 4, 8, 8),
layer_attns = False,
layer_cross_attns = (False, False, False, True),
attn_heads = 8,
ff_mult = 2.,
memory_efficient = True
)
super().__init__(*args, **{**default_kwargs, **kwargs})
# main starlight ddpm class, which is a cascading DDPM from Ho et al.
class Starlight(nn.Module):
def __init__(
self,
unets,
*,
image_sizes, # for cascading ddpm, image size at each stage
text_encoder_name = DEFAULT_T5_NAME,
text_embed_dim = None,
channels = 3,
timesteps = 1000,
cond_drop_prob = 0.1,
loss_type = 'l2',
noise_schedules = 'cosine',
pred_objectives = 'noise',
random_crop_sizes = None,
lowres_noise_schedule = 'linear',
lowres_sample_noise_level = 0.2, # in the paper, they present a new trick where they noise the lowres conditioning image, and at sample time, fix it to a certain level (0.1 or 0.3) - the unets are also made to be conditioned on this noise level
per_sample_random_aug_noise_level = False, # unclear when conditioning on augmentation noise level, whether each batch element receives a random aug noise value - turning off due to @marunine's find
condition_on_text = True,
auto_normalize_img = True, # whether to take care of normalizing the image from [0, 1] to [-1, 1] and back automatically - you can turn this off if you want to pass in the [-1, 1] ranged image yourself from the dataloader
dynamic_thresholding = True,
dynamic_thresholding_percentile = 0.95, # unsure what this was based on perusal of paper
only_train_unet_number = None,
temporal_downsample_factor = 1,
resize_cond_video_frames = True,
resize_mode = 'nearest',
min_snr_loss_weight = True, # https://arxiv.org/abs/2303.09556
min_snr_gamma = 5
):
super().__init__()
# loss
if loss_type == 'l1':
loss_fn = F.l1_loss
elif loss_type == 'l2':
loss_fn = F.mse_loss
elif loss_type == 'huber':
loss_fn = F.smooth_l1_loss
else:
raise NotImplementedError()
self.loss_type = loss_type
self.loss_fn = loss_fn
# conditioning hparams
self.condition_on_text = condition_on_text
self.unconditional = not condition_on_text
# channels
self.channels = channels
# automatically take care of ensuring that first unet is unconditional
# while the rest of the unets are conditioned on the low resolution image produced by previous unet
unets = cast_tuple(unets)
num_unets = len(unets)
# determine noise schedules per unet
timesteps = cast_tuple(timesteps, num_unets)
# make sure noise schedule defaults to 'cosine', 'cosine', and then 'linear' for rest of super-resoluting unets
noise_schedules = cast_tuple(noise_schedules)
noise_schedules = pad_tuple_to_length(noise_schedules, 2, 'cosine')
noise_schedules = pad_tuple_to_length(noise_schedules, num_unets, 'linear')
# construct noise schedulers
noise_scheduler_klass = GaussianDiffusionContinuousTimes
self.noise_schedulers = nn.ModuleList([])
for timestep, noise_schedule in zip(timesteps, noise_schedules):
noise_scheduler = noise_scheduler_klass(noise_schedule = noise_schedule, timesteps = timestep)
self.noise_schedulers.append(noise_scheduler)
# randomly cropping for upsampler training
self.random_crop_sizes = cast_tuple(random_crop_sizes, num_unets)
assert not exists(first(self.random_crop_sizes)), 'you should not need to randomly crop image during training for base unet, only for upsamplers - so pass in `random_crop_sizes = (None, 128, 256)` as example'
# lowres augmentation noise schedule
self.lowres_noise_schedule = GaussianDiffusionContinuousTimes(noise_schedule = lowres_noise_schedule)
# ddpm objectives - predicting noise by default
self.pred_objectives = cast_tuple(pred_objectives, num_unets)
# get text encoder
self.text_encoder_name = text_encoder_name
self.text_embed_dim = default(text_embed_dim, lambda: get_encoded_dim(text_encoder_name))
self.encode_text = partial(t5_encode_text, name = text_encoder_name)
# construct unets
self.unets = nn.ModuleList([])
self.unet_being_trained_index = -1 # keeps track of which unet is being trained at the moment
self.only_train_unet_number = only_train_unet_number
for ind, one_unet in enumerate(unets):
assert isinstance(one_unet, (Unet, Unet3D, NullUnet))
is_first = ind == 0
one_unet = one_unet.cast_model_parameters(
lowres_cond = not is_first,
cond_on_text = self.condition_on_text,
text_embed_dim = self.text_embed_dim if self.condition_on_text else None,
channels = self.channels,
channels_out = self.channels
)
self.unets.append(one_unet)
# unet image sizes
image_sizes = cast_tuple(image_sizes)
self.image_sizes = image_sizes
assert num_unets == len(image_sizes), f'you did not supply the correct number of u-nets ({len(unets)}) for resolutions {image_sizes}'
self.sample_channels = cast_tuple(self.channels, num_unets)
# determine whether we are training on images or video
is_video = any([isinstance(unet, Unet3D) for unet in self.unets])
self.is_video = is_video
self.right_pad_dims_to_datatype = partial(rearrange, pattern = ('b -> b 1 1 1' if not is_video else 'b -> b 1 1 1 1'))
self.resize_to = resize_video_to if is_video else resize_image_to
self.resize_to = partial(self.resize_to, mode = resize_mode)
# temporal interpolation
temporal_downsample_factor = cast_tuple(temporal_downsample_factor, num_unets)
self.temporal_downsample_factor = temporal_downsample_factor
self.resize_cond_video_frames = resize_cond_video_frames
self.temporal_downsample_divisor = temporal_downsample_factor[0]
assert temporal_downsample_factor[-1] == 1, 'downsample factor of last stage must be 1'
        assert tuple(sorted(temporal_downsample_factor, reverse = True)) == temporal_downsample_factor, 'temporal downsample factors must be given in descending order'
# cascading ddpm related stuff
lowres_conditions = tuple(map(lambda t: t.lowres_cond, self.unets))
assert lowres_conditions == (False, *((True,) * (num_unets - 1))), 'the first unet must be unconditioned (by low resolution image), and the rest of the unets must have `lowres_cond` set to True'
self.lowres_sample_noise_level = lowres_sample_noise_level
self.per_sample_random_aug_noise_level = per_sample_random_aug_noise_level
# classifier free guidance
self.cond_drop_prob = cond_drop_prob
self.can_classifier_guidance = cond_drop_prob > 0.
# normalize and unnormalize image functions
self.normalize_img = normalize_neg_one_to_one if auto_normalize_img else identity
self.unnormalize_img = unnormalize_zero_to_one if auto_normalize_img else identity
self.input_image_range = (0. if auto_normalize_img else -1., 1.)
# dynamic thresholding
self.dynamic_thresholding = cast_tuple(dynamic_thresholding, num_unets)
self.dynamic_thresholding_percentile = dynamic_thresholding_percentile
# min snr loss weight
min_snr_loss_weight = cast_tuple(min_snr_loss_weight, num_unets)
min_snr_gamma = cast_tuple(min_snr_gamma, num_unets)
assert len(min_snr_loss_weight) == len(min_snr_gamma) == num_unets
self.min_snr_gamma = tuple((gamma if use_min_snr else None) for use_min_snr, gamma in zip(min_snr_loss_weight, min_snr_gamma))
# one temp parameter for keeping track of device
self.register_buffer('_temp', torch.tensor([0.]), persistent = False)
# default to device of unets passed in
self.to(next(self.unets.parameters()).device)
def force_unconditional_(self):
self.condition_on_text = False
self.unconditional = True
for unet in self.unets:
unet.cond_on_text = False
@property
def device(self):
return self._temp.device
def get_unet(self, unet_number):
assert 0 < unet_number <= len(self.unets)
index = unet_number - 1
if isinstance(self.unets, nn.ModuleList):
unets_list = [unet for unet in self.unets]
delattr(self, 'unets')
self.unets = unets_list
if index != self.unet_being_trained_index:
for unet_index, unet in enumerate(self.unets):
unet.to(self.device if unet_index == index else 'cpu')
self.unet_being_trained_index = index
return self.unets[index]
def reset_unets_all_one_device(self, device = None):
device = default(device, self.device)
self.unets = nn.ModuleList([*self.unets])
self.unets.to(device)
self.unet_being_trained_index = -1
@contextmanager
def one_unet_in_gpu(self, unet_number = None, unet = None):
assert exists(unet_number) ^ exists(unet)
if exists(unet_number):
unet = self.unets[unet_number - 1]
cpu = torch.device('cpu')
devices = [module_device(unet) for unet in self.unets]
self.unets.to(cpu)
unet.to(self.device)
yield
for unet, device in zip(self.unets, devices):
unet.to(device)
# overriding state dict functions
def state_dict(self, *args, **kwargs):
self.reset_unets_all_one_device()
return super().state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
self.reset_unets_all_one_device()
return super().load_state_dict(*args, **kwargs)
# gaussian diffusion methods
def p_mean_variance(
self,
unet,
x,
t,
*,
noise_scheduler,
text_embeds = None,
text_mask = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
lowres_cond_img = None,
self_cond = None,
lowres_noise_times = None,
cond_scale = 1.,
model_output = None,
t_next = None,
pred_objective = 'noise',
dynamic_threshold = True
):
assert not (cond_scale != 1. and not self.can_classifier_guidance), 'starlight was not trained with conditional dropout, and thus one cannot use classifier free guidance (cond_scale anything other than 1)'
video_kwargs = dict()
if self.is_video:
video_kwargs = dict(
cond_video_frames = cond_video_frames,
post_cond_video_frames = post_cond_video_frames,
)
pred = default(model_output, lambda: unet.forward_with_cond_scale(
x,
noise_scheduler.get_condition(t),
text_embeds = text_embeds,
text_mask = text_mask,
cond_images = cond_images,
cond_scale = cond_scale,
lowres_cond_img = lowres_cond_img,
self_cond = self_cond,
lowres_noise_times = self.lowres_noise_schedule.get_condition(lowres_noise_times),
**video_kwargs
))
if pred_objective == 'noise':
x_start = noise_scheduler.predict_start_from_noise(x, t = t, noise = pred)
elif pred_objective == 'x_start':
x_start = pred
elif pred_objective == 'v':
x_start = noise_scheduler.predict_start_from_v(x, t = t, v = pred)
else:
raise ValueError(f'unknown objective {pred_objective}')
if dynamic_threshold:
# following pseudocode in appendix
# s is the dynamic threshold, determined by percentile of absolute values of reconstructed sample per batch element
s = torch.quantile(
rearrange(x_start, 'b ... -> b (...)').abs(),
self.dynamic_thresholding_percentile,
dim = -1
)
s.clamp_(min = 1.)
s = right_pad_dims_to(x_start, s)
x_start = x_start.clamp(-s, s) / s
else:
x_start.clamp_(-1., 1.)
mean_and_variance = noise_scheduler.q_posterior(x_start = x_start, x_t = x, t = t, t_next = t_next)
return mean_and_variance, x_start
@torch.no_grad()
def p_sample(
self,
unet,
x,
t,
*,
noise_scheduler,
t_next = None,
text_embeds = None,
text_mask = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
cond_scale = 1.,
self_cond = None,
lowres_cond_img = None,
lowres_noise_times = None,
pred_objective = 'noise',
dynamic_threshold = True
):
b, *_, device = *x.shape, x.device
video_kwargs = dict()
if self.is_video:
video_kwargs = dict(
cond_video_frames = cond_video_frames,
post_cond_video_frames = post_cond_video_frames,
)
(model_mean, _, model_log_variance), x_start = self.p_mean_variance(
unet,
x = x,
t = t,
t_next = t_next,
noise_scheduler = noise_scheduler,
text_embeds = text_embeds,
text_mask = text_mask,
cond_images = cond_images,
cond_scale = cond_scale,
lowres_cond_img = lowres_cond_img,
self_cond = self_cond,
lowres_noise_times = lowres_noise_times,
pred_objective = pred_objective,
dynamic_threshold = dynamic_threshold,
**video_kwargs
)
noise = torch.randn_like(x)
# no noise when t == 0
is_last_sampling_timestep = (t_next == 0) if isinstance(noise_scheduler, GaussianDiffusionContinuousTimes) else (t == 0)
nonzero_mask = (1 - is_last_sampling_timestep.float()).reshape(b, *((1,) * (len(x.shape) - 1)))
pred = model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
return pred, x_start
@torch.no_grad()
def p_sample_loop(
self,
unet,
shape,
*,
noise_scheduler,
lowres_cond_img = None,
lowres_noise_times = None,
text_embeds = None,
text_mask = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
inpaint_images = None,
inpaint_videos = None,
inpaint_masks = None,
inpaint_resample_times = 5,
init_images = None,
skip_steps = None,
cond_scale = 1,
pred_objective = 'noise',
dynamic_threshold = True,
use_tqdm = True
):
device = self.device
batch = shape[0]
img = torch.randn(shape, device = device)
# video
is_video = len(shape) == 5
frames = shape[-3] if is_video else None
resize_kwargs = dict(target_frames = frames) if exists(frames) else dict()
# for initialization with an image or video
if exists(init_images):
img += init_images
# keep track of x0, for self conditioning
x_start = None
# prepare inpainting
inpaint_images = default(inpaint_videos, inpaint_images)
has_inpainting = exists(inpaint_images) and exists(inpaint_masks)
resample_times = inpaint_resample_times if has_inpainting else 1
if has_inpainting:
inpaint_images = self.normalize_img(inpaint_images)
inpaint_images = self.resize_to(inpaint_images, shape[-1], **resize_kwargs)
inpaint_masks = self.resize_to(rearrange(inpaint_masks, 'b ... -> b 1 ...').float(), shape[-1], **resize_kwargs).bool()
# time
timesteps = noise_scheduler.get_sampling_timesteps(batch, device = device)
# whether to skip any steps
skip_steps = default(skip_steps, 0)
timesteps = timesteps[skip_steps:]
# video conditioning kwargs
video_kwargs = dict()
if self.is_video:
video_kwargs = dict(
cond_video_frames = cond_video_frames,
post_cond_video_frames = post_cond_video_frames,
)
for times, times_next in tqdm(timesteps, desc = 'sampling loop time step', total = len(timesteps), disable = not use_tqdm):
is_last_timestep = times_next == 0
for r in reversed(range(resample_times)):
is_last_resample_step = r == 0
if has_inpainting:
noised_inpaint_images, *_ = noise_scheduler.q_sample(inpaint_images, t = times)
img = img * ~inpaint_masks + noised_inpaint_images * inpaint_masks
self_cond = x_start if unet.self_cond else None
img, x_start = self.p_sample(
unet,
img,
times,
t_next = times_next,
text_embeds = text_embeds,
text_mask = text_mask,
cond_images = cond_images,
cond_scale = cond_scale,
self_cond = self_cond,
lowres_cond_img = lowres_cond_img,
lowres_noise_times = lowres_noise_times,
noise_scheduler = noise_scheduler,
pred_objective = pred_objective,
dynamic_threshold = dynamic_threshold,
**video_kwargs
)
if has_inpainting and not (is_last_resample_step or torch.all(is_last_timestep)):
renoised_img = noise_scheduler.q_sample_from_to(img, times_next, times)
img = torch.where(
self.right_pad_dims_to_datatype(is_last_timestep),
img,
renoised_img
)
img.clamp_(-1., 1.)
# final inpainting
if has_inpainting:
img = img * ~inpaint_masks + inpaint_images * inpaint_masks
unnormalize_img = self.unnormalize_img(img)
return unnormalize_img
@torch.no_grad()
@eval_decorator
@beartype
def sample(
self,
texts: List[str] = None,
text_masks = None,
text_embeds = None,
video_frames = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
inpaint_videos = None,
inpaint_images = None,
inpaint_masks = None,
inpaint_resample_times = 5,
init_images = None,
skip_steps = None,
batch_size = 1,
cond_scale = 1.,
lowres_sample_noise_level = None,
start_at_unet_number = 1,
start_image_or_video = None,
stop_at_unet_number = None,
return_all_unet_outputs = False,
return_pil_images = False,
device = None,
use_tqdm = True,
use_one_unet_in_gpu = True
):
device = default(device, self.device)
self.reset_unets_all_one_device(device = device)
cond_images = maybe(cast_uint8_images_to_float)(cond_images)
if exists(texts) and not exists(text_embeds) and not self.unconditional:
assert all([*map(len, texts)]), 'text cannot be empty'
with autocast(enabled = False):
text_embeds, text_masks = self.encode_text(texts, return_attn_mask = True)
text_embeds, text_masks = map(lambda t: t.to(device), (text_embeds, text_masks))
if not self.unconditional:
            assert exists(text_embeds), 'text or text embeddings must be passed in, since the network was trained with text conditioning (set `condition_on_text = False` at init to sample unconditionally)'
text_masks = default(text_masks, lambda: torch.any(text_embeds != 0., dim = -1))
batch_size = text_embeds.shape[0]
# inpainting
inpaint_images = default(inpaint_videos, inpaint_images)
if exists(inpaint_images):
if self.unconditional:
if batch_size == 1: # assume researcher wants to broadcast along inpainted images
batch_size = inpaint_images.shape[0]
            assert inpaint_images.shape[0] == batch_size, 'number of inpainting images must be equal to the specified batch size on sample, i.e. `sample(batch_size=<int>)`'
assert not (self.condition_on_text and inpaint_images.shape[0] != text_embeds.shape[0]), 'number of inpainting images must be equal to the number of text to be conditioned on'
assert not (self.condition_on_text and not exists(text_embeds)), 'text or text encodings must be passed into starlight if specified'
assert not (not self.condition_on_text and exists(text_embeds)), 'starlight specified not to be conditioned on text, yet it is presented'
assert not (exists(text_embeds) and text_embeds.shape[-1] != self.text_embed_dim), f'invalid text embedding dimension being passed in (should be {self.text_embed_dim})'
assert not (exists(inpaint_images) ^ exists(inpaint_masks)), 'inpaint images and masks must be both passed in to do inpainting'
outputs = []
is_cuda = next(self.parameters()).is_cuda
device = next(self.parameters()).device
lowres_sample_noise_level = default(lowres_sample_noise_level, self.lowres_sample_noise_level)
num_unets = len(self.unets)
# condition scaling
cond_scale = cast_tuple(cond_scale, num_unets)
# add frame dimension for video
if self.is_video and exists(inpaint_images):
video_frames = inpaint_images.shape[2]
if inpaint_masks.ndim == 3:
inpaint_masks = repeat(inpaint_masks, 'b h w -> b f h w', f = video_frames)
assert inpaint_masks.shape[1] == video_frames
assert not (self.is_video and not exists(video_frames)), 'video_frames must be passed in on sample time if training on video'
all_frame_dims = calc_all_frame_dims(self.temporal_downsample_factor, video_frames)
frames_to_resize_kwargs = lambda frames: dict(target_frames = frames) if exists(frames) else dict()
# for initial image and skipping steps
init_images = cast_tuple(init_images, num_unets)
init_images = [maybe(self.normalize_img)(init_image) for init_image in init_images]
skip_steps = cast_tuple(skip_steps, num_unets)
        # handle starting at a unet greater than 1, for upscaler-only training
        if start_at_unet_number > 1:
            assert start_at_unet_number <= num_unets, 'must start at a unet number that does not exceed the total number of unets'
assert not exists(stop_at_unet_number) or start_at_unet_number <= stop_at_unet_number
assert exists(start_image_or_video), 'starting image or video must be supplied if only doing upscaling'
prev_image_size = self.image_sizes[start_at_unet_number - 2]
prev_frame_size = all_frame_dims[start_at_unet_number - 2][0] if self.is_video else None
img = self.resize_to(start_image_or_video, prev_image_size, **frames_to_resize_kwargs(prev_frame_size))
# go through each unet in cascade
for unet_number, unet, channel, image_size, frame_dims, noise_scheduler, pred_objective, dynamic_threshold, unet_cond_scale, unet_init_images, unet_skip_steps in tqdm(zip(range(1, num_unets + 1), self.unets, self.sample_channels, self.image_sizes, all_frame_dims, self.noise_schedulers, self.pred_objectives, self.dynamic_thresholding, cond_scale, init_images, skip_steps), disable = not use_tqdm):
if unet_number < start_at_unet_number:
continue
assert not isinstance(unet, NullUnet), 'one cannot sample from null / placeholder unets'
context = self.one_unet_in_gpu(unet = unet) if is_cuda and use_one_unet_in_gpu else nullcontext()
with context:
# video kwargs
video_kwargs = dict()
if self.is_video:
video_kwargs = dict(
cond_video_frames = cond_video_frames,
post_cond_video_frames = post_cond_video_frames,
)
video_kwargs = compact(video_kwargs)
if self.is_video and self.resize_cond_video_frames:
downsample_scale = self.temporal_downsample_factor[unet_number - 1]
temporal_downsample_fn = partial(scale_video_time, downsample_scale = downsample_scale)
video_kwargs = maybe_transform_dict_key(video_kwargs, 'cond_video_frames', temporal_downsample_fn)
video_kwargs = maybe_transform_dict_key(video_kwargs, 'post_cond_video_frames', temporal_downsample_fn)
# low resolution conditioning
lowres_cond_img = lowres_noise_times = None
shape = (batch_size, channel, *frame_dims, image_size, image_size)
resize_kwargs = dict(target_frames = frame_dims[0]) if self.is_video else dict()
if unet.lowres_cond:
lowres_noise_times = self.lowres_noise_schedule.get_times(batch_size, lowres_sample_noise_level, device = device)
lowres_cond_img = self.resize_to(img, image_size, **resize_kwargs)
lowres_cond_img = self.normalize_img(lowres_cond_img)
lowres_cond_img, *_ = self.lowres_noise_schedule.q_sample(x_start = lowres_cond_img, t = lowres_noise_times, noise = torch.randn_like(lowres_cond_img))
# init images or video
if exists(unet_init_images):
unet_init_images = self.resize_to(unet_init_images, image_size, **resize_kwargs)
# shape of stage
shape = (batch_size, self.channels, *frame_dims, image_size, image_size)
img = self.p_sample_loop(
unet,
shape,
text_embeds = text_embeds,
text_mask = text_masks,
cond_images = cond_images,
inpaint_images = inpaint_images,
inpaint_masks = inpaint_masks,
inpaint_resample_times = inpaint_resample_times,
init_images = unet_init_images,
skip_steps = unet_skip_steps,
cond_scale = unet_cond_scale,
lowres_cond_img = lowres_cond_img,
lowres_noise_times = lowres_noise_times,
noise_scheduler = noise_scheduler,
pred_objective = pred_objective,
dynamic_threshold = dynamic_threshold,
use_tqdm = use_tqdm,
**video_kwargs
)
outputs.append(img)
if exists(stop_at_unet_number) and stop_at_unet_number == unet_number:
break
output_index = -1 if not return_all_unet_outputs else slice(None) # either return last unet output or all unet outputs
if not return_pil_images:
return outputs[output_index]
if not return_all_unet_outputs:
outputs = outputs[-1:]
assert not self.is_video, 'converting sampled video tensor to video file is not supported yet'
pil_images = list(map(lambda img: list(map(T.ToPILImage(), img.unbind(dim = 0))), outputs))
return pil_images[output_index] # now you have a bunch of pillow images you can just .save(/where/ever/you/want.png)
@beartype
def p_losses(
self,
unet: Union[Unet, Unet3D, NullUnet, DistributedDataParallel],
x_start,
times,
*,
noise_scheduler,
lowres_cond_img = None,
lowres_aug_times = None,
text_embeds = None,
text_mask = None,
cond_images = None,
noise = None,
times_next = None,
pred_objective = 'noise',
min_snr_gamma = None,
random_crop_size = None,
**kwargs
):
is_video = x_start.ndim == 5
noise = default(noise, lambda: torch.randn_like(x_start))
# normalize to [-1, 1]
x_start = self.normalize_img(x_start)
lowres_cond_img = maybe(self.normalize_img)(lowres_cond_img)
# random cropping during training
# for upsamplers
if exists(random_crop_size):
if is_video:
frames = x_start.shape[2]
x_start, lowres_cond_img, noise = map(lambda t: rearrange(t, 'b c f h w -> (b f) c h w'), (x_start, lowres_cond_img, noise))
aug = K.RandomCrop((random_crop_size, random_crop_size), p = 1.)
# make sure low res conditioner and image both get augmented the same way
# detailed https://kornia.readthedocs.io/en/latest/augmentation.module.html?highlight=randomcrop#kornia.augmentation.RandomCrop
x_start = aug(x_start)
lowres_cond_img = aug(lowres_cond_img, params = aug._params)
noise = aug(noise, params = aug._params)
if is_video:
x_start, lowres_cond_img, noise = map(lambda t: rearrange(t, '(b f) c h w -> b c f h w', f = frames), (x_start, lowres_cond_img, noise))
# get x_t
x_noisy, log_snr, alpha, sigma = noise_scheduler.q_sample(x_start = x_start, t = times, noise = noise)
# also noise the lowres conditioning image
# at sample time, they then fix the noise level of 0.1 - 0.3
lowres_cond_img_noisy = None
if exists(lowres_cond_img):
lowres_aug_times = default(lowres_aug_times, times)
lowres_cond_img_noisy, *_ = self.lowres_noise_schedule.q_sample(x_start = lowres_cond_img, t = lowres_aug_times, noise = torch.randn_like(lowres_cond_img))
# time condition
noise_cond = noise_scheduler.get_condition(times)
# unet kwargs
unet_kwargs = dict(
text_embeds = text_embeds,
text_mask = text_mask,
cond_images = cond_images,
lowres_noise_times = self.lowres_noise_schedule.get_condition(lowres_aug_times),
lowres_cond_img = lowres_cond_img_noisy,
cond_drop_prob = self.cond_drop_prob,
**kwargs
)
# self condition if needed
# Because 'unet' can be an instance of DistributedDataParallel coming from the
# StarlightTrainer.unet_being_trained when invoking StarlightTrainer.forward(), we need to
# access the member 'module' of the wrapped unet instance.
self_cond = unet.module.self_cond if isinstance(unet, DistributedDataParallel) else unet.self_cond
if self_cond and random() < 0.5:
with torch.no_grad():
pred = unet.forward(
x_noisy,
noise_cond,
**unet_kwargs
).detach()
x_start = noise_scheduler.predict_start_from_noise(x_noisy, t = times, noise = pred) if pred_objective == 'noise' else pred
unet_kwargs = {**unet_kwargs, 'self_cond': x_start}
# get prediction
pred = unet.forward(
x_noisy,
noise_cond,
**unet_kwargs
)
# prediction objective
if pred_objective == 'noise':
target = noise
elif pred_objective == 'x_start':
target = x_start
elif pred_objective == 'v':
# derivation detailed in Appendix D of Progressive Distillation paper
# https://arxiv.org/abs/2202.00512
            # this makes distillation viable as well as solving an issue with color shifting in upsampling unets, noted in starlight-video
target = alpha * noise - sigma * x_start
else:
raise ValueError(f'unknown objective {pred_objective}')
# losses
losses = self.loss_fn(pred, target, reduction = 'none')
losses = reduce(losses, 'b ... -> b', 'mean')
# min snr loss reweighting
snr = log_snr.exp()
maybe_clipped_snr = snr.clone()
if exists(min_snr_gamma):
maybe_clipped_snr.clamp_(max = min_snr_gamma)
if pred_objective == 'noise':
loss_weight = maybe_clipped_snr / snr
elif pred_objective == 'x_start':
loss_weight = maybe_clipped_snr
elif pred_objective == 'v':
loss_weight = maybe_clipped_snr / (snr + 1)
losses = losses * loss_weight
return losses.mean()
@beartype
def forward(
self,
images, # rename to images or video
unet: Union[Unet, Unet3D, NullUnet, DistributedDataParallel] = None,
texts: List[str] = None,
text_embeds = None,
text_masks = None,
unet_number = None,
cond_images = None,
**kwargs
):
if self.is_video and images.ndim == 4:
images = rearrange(images, 'b c h w -> b c 1 h w')
kwargs.update(ignore_time = True)
        assert images.shape[-1] == images.shape[-2], f'the images you pass in must be a square, but received dimensions of {images.shape[-2]}, {images.shape[-1]}'
assert not (len(self.unets) > 1 and not exists(unet_number)), f'you must specify which unet you want trained, from a range of 1 to {len(self.unets)}, if you are training cascading DDPM (multiple unets)'
unet_number = default(unet_number, 1)
        assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, f'you can only train on unet #{self.only_train_unet_number}'
images = cast_uint8_images_to_float(images)
cond_images = maybe(cast_uint8_images_to_float)(cond_images)
assert images.dtype == torch.float or images.dtype == torch.half, f'images tensor needs to be floats but {images.dtype} dtype found instead'
unet_index = unet_number - 1
unet = default(unet, lambda: self.get_unet(unet_number))
assert not isinstance(unet, NullUnet), 'null unet cannot and should not be trained'
noise_scheduler = self.noise_schedulers[unet_index]
min_snr_gamma = self.min_snr_gamma[unet_index]
pred_objective = self.pred_objectives[unet_index]
target_image_size = self.image_sizes[unet_index]
random_crop_size = self.random_crop_sizes[unet_index]
prev_image_size = self.image_sizes[unet_index - 1] if unet_index > 0 else None
b, c, *_, h, w, device, is_video = *images.shape, images.device, images.ndim == 5
assert images.shape[1] == self.channels
assert h >= target_image_size and w >= target_image_size
frames = images.shape[2] if is_video else None
all_frame_dims = tuple(safe_get_tuple_index(el, 0) for el in calc_all_frame_dims(self.temporal_downsample_factor, frames))
ignore_time = kwargs.get('ignore_time', False)
target_frame_size = all_frame_dims[unet_index] if is_video and not ignore_time else None
prev_frame_size = all_frame_dims[unet_index - 1] if is_video and not ignore_time and unet_index > 0 else None
frames_to_resize_kwargs = lambda frames: dict(target_frames = frames) if exists(frames) else dict()
times = noise_scheduler.sample_random_times(b, device = device)
if exists(texts) and not exists(text_embeds) and not self.unconditional:
assert all([*map(len, texts)]), 'text cannot be empty'
assert len(texts) == len(images), 'number of text captions does not match up with the number of images given'
with autocast(enabled = False):
text_embeds, text_masks = self.encode_text(texts, return_attn_mask = True)
text_embeds, text_masks = map(lambda t: t.to(images.device), (text_embeds, text_masks))
if not self.unconditional:
text_masks = default(text_masks, lambda: torch.any(text_embeds != 0., dim = -1))
assert not (self.condition_on_text and not exists(text_embeds)), 'text or text encodings must be passed into decoder if specified'
assert not (not self.condition_on_text and exists(text_embeds)), 'decoder specified not to be conditioned on text, yet it is presented'
assert not (exists(text_embeds) and text_embeds.shape[-1] != self.text_embed_dim), f'invalid text embedding dimension being passed in (should be {self.text_embed_dim})'
# handle video frame conditioning
if self.is_video and self.resize_cond_video_frames:
downsample_scale = self.temporal_downsample_factor[unet_index]
temporal_downsample_fn = partial(scale_video_time, downsample_scale = downsample_scale)
kwargs = maybe_transform_dict_key(kwargs, 'cond_video_frames', temporal_downsample_fn)
kwargs = maybe_transform_dict_key(kwargs, 'post_cond_video_frames', temporal_downsample_fn)
# handle low resolution conditioning
lowres_cond_img = lowres_aug_times = None
if exists(prev_image_size):
lowres_cond_img = self.resize_to(images, prev_image_size, **frames_to_resize_kwargs(prev_frame_size), clamp_range = self.input_image_range)
lowres_cond_img = self.resize_to(lowres_cond_img, target_image_size, **frames_to_resize_kwargs(target_frame_size), clamp_range = self.input_image_range)
if self.per_sample_random_aug_noise_level:
lowres_aug_times = self.lowres_noise_schedule.sample_random_times(b, device = device)
else:
lowres_aug_time = self.lowres_noise_schedule.sample_random_times(1, device = device)
lowres_aug_times = repeat(lowres_aug_time, '1 -> b', b = b)
images = self.resize_to(images, target_image_size, **frames_to_resize_kwargs(target_frame_size))
return self.p_losses(unet, images, times, text_embeds = text_embeds, text_mask = text_masks, cond_images = cond_images, noise_scheduler = noise_scheduler, lowres_cond_img = lowres_cond_img, lowres_aug_times = lowres_aug_times, pred_objective = pred_objective, min_snr_gamma = min_snr_gamma, random_crop_size = random_crop_size, **kwargs) | StarlightVision-master | starlight_vision/core/gen2.py |
import torch
import torch.nn as nn
from torchvision.transforms import Compose, Resize, Normalize, ToTensor
from torch.utils.data import DataLoader
from transformers import DPTImageProcessor, DPTForDepthEstimation  # only the DPT depth-estimation classes are used in this module
from torchvision.transforms import GaussianBlur
import torch.nn.functional as F
import math
#spatial transformer
class SpatialTransformer(nn.Module):
def __init__(self, in_channels, num_heads):
super(SpatialTransformer, self).__init__()
self.in_channels = in_channels
self.num_heads = num_heads
self.key_proj = nn.Linear(in_channels, in_channels)
self.value_proj = nn.Linear(in_channels, in_channels)
self.query_proj = nn.Linear(in_channels, in_channels)
self.softmax = nn.Softmax(dim=-1)
self.output_proj = nn.Linear(in_channels, in_channels)
def forward(self, x, content_embeddings):
#compute keys and values from content embedding
keys = self.key_proj(content_embeddings)
values = self.value_proj(content_embeddings)
#compute queries from input
queries = self.query_proj(x)
        # compute attention scores
        attention_scores = torch.matmul(queries, keys.transpose(-1, -2)) / (self.in_channels ** 0.5)
        attention_scores = self.softmax(attention_scores)
        # compute attended values
        attended_values = torch.matmul(attention_scores, values)
        # add residual connection and apply output projection
        out = x + attended_values
        out = self.output_proj(out)
return out
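# Usage sketch (illustrative; not part of the original module): the spatial transformer
# cross-attends a token sequence x against content embeddings of the same channel width.
def _spatial_transformer_example():
    attn = SpatialTransformer(in_channels=64, num_heads=8)
    x = torch.randn(2, 16, 64)                   # (batch, tokens, channels)
    content_embeddings = torch.randn(2, 16, 64)  # (batch, tokens, channels)
    return attn(x, content_embeddings).shape     # -> torch.Size([2, 16, 64])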
class Midas(nn.Module):
def __init__(self):
super(Midas, self).__init__()
self.processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
self.model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
def forward(self, x):
#prepare images for the model
inputs = self.processor(images=x, return_tensors="pt")
with torch.no_grad():
outputs = self.model(**inputs)
predicted_depth = outputs.predicted_depth
        # interpolate back to the original spatial size
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1),
            size=x.shape[-2:],
            mode="bicubic",
            align_corners=False,
        )
return prediction.squeeze()
# sinusoidal embeddings
class SinusoidalEmbedding(nn.Module):
def __init__(self, num_channels):
super(SinusoidalEmbedding, self).__init__()
self.num_channels = num_channels
    def forward(self, ts, T):
        # standard sinusoidal embedding of timestep ts; T (total steps) is kept for API compatibility
        ts = torch.Tensor([ts])
        ts_embed = torch.zeros(self.num_channels)
        div_term = torch.exp(torch.arange(0, self.num_channels, 2) * -(math.log(10000.0) / self.num_channels))
        ts_embed[0::2] = torch.sin(ts * div_term)
        ts_embed[1::2] = torch.cos(ts * div_term)
return ts_embed
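# Usage sketch (illustrative; not part of the original module): embed diffusion step ts
# out of T total steps into a fixed 1-d sinusoidal vector.
def _sinusoidal_embedding_example():
    emb = SinusoidalEmbedding(num_channels=8)
    return emb(ts=3, T=50)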
#unet
class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels, temporal=False):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # project the skip connection when the channel count changes, so the residual add is valid
        self.shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1) if in_channels != out_channels else nn.Identity()
        self.temporal = temporal
        if temporal:
            self.temporal_conv = nn.Conv1d(out_channels, out_channels, kernel_size=3, padding=1)
            self.bn_temporal = nn.BatchNorm1d(out_channels)
    def forward(self, x):
        residual = self.shortcut(x)
        out = self.relu(self.bn1(self.conv1(x)))
        if self.temporal:
            # minimal shape fix (assumption): run the 1d conv over the flattened spatial grid,
            # since this block receives (b, c, h, w) inputs with no explicit frame dimension
            b, c, h, w = out.shape
            out = self.bn_temporal(self.temporal_conv(out.flatten(2)))
            out = out.view(b, c, h, w)
        out = self.bn2(self.conv2(out))
        out += residual
        out = self.relu(out)
        return out
class TransformerBlock(nn.Module):
def __init__(self, in_channels, temporal=False):
super(TransformerBlock, self).__init__()
        self.spatial_transformer = nn.TransformerEncoderLayer(d_model=in_channels, nhead=8)
self.temporal = temporal
if temporal:
self.temporal_transformer = nn.TransformerEncoderLayer(d_model=in_channels, nhead=8)
def forward(self, x):
out = self.spatial_transformer(x)
if self.temporal:
out = self.temporal_transformer(out)
return out
class Unet(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(Unet, self).__init__()
        # encoder (left as a placeholder in this sketch)
        self.encoder = ...
        # decoder (left as a placeholder in this sketch)
        self.decoder = ...
        # build the unet architecture using residual blocks and transformers
self.layers = nn.Sequential(
ResidualBlock(in_channels, 64, temporal=False),
TransformerBlock(64, temporal=False)
)
def forward(self, x):
#pass input through the encoder
z = self.encoder(x)
#pass the latent representations through the unet layers
z = self.layers(z)
#pass output to decoder
out = self.decoder(z)
return out
#extended unet
class ExtendedUNet(Unet):
def __init__(self, in_channels, out_channels, blur_kernel_size=3, blur_sigma=1.0):
super(ExtendedUNet, self).__init__(in_channels, out_channels)
self.midas_dpt_large = Midas() # Load the MiDaS DPT-Large model
self.clip_model = ... # Load the CLIP model
self.blur = GaussianBlur(blur_kernel_size, sigma=blur_sigma)
    def process_structure(self, x, ts):
depth_maps = self.midas_dpt_large(x)
for _ in range(ts):
depth_maps = self.blur(depth_maps)
depth_maps = F.interpolate(depth_maps, scale_factor=0.5)
depth_maps = F.interpolate(depth_maps, size=x.shape[-2:])
z_structure = self.encoder(depth_maps)
return z_structure
def process_content(self, x):
content_repr = self.clip_model.encode_image(x)
return content_repr
def sample(self, x, t, c, ts, guidance_scale=1.0, temporal_scale=1.0):
zt = self.encoder(x)
        z_structure = self.process_structure(x, ts)
zt = torch.cat([zt, z_structure], dim=1)
content_repr = self.process_content(c)
# Apply the spatial transformer and cross-attention conditioning
out = self.layers(zt, content_repr)
        # Compute the adjusted predictions (classifier-free guidance). Note: this sketch decodes
        # the same features for both branches; a full implementation would drop the content
        # conditioning when producing the unconditional prediction.
        unconditional_pred = self.decoder(out)
        conditional_pred = self.decoder(out)
        adjusted_pred = unconditional_pred + guidance_scale * (conditional_pred - unconditional_pred)
# Control temporal consistency
image_model_pred = ... # Compute the prediction of the image model applied to each frame individually
adjusted_pred = image_model_pred + temporal_scale * (adjusted_pred - image_model_pred)
return adjusted_pred
class Starlight(nn.Module):
def __init__(self, Ts):
super(Starlight, self).__init__()
# self.midas = Midas.from_pretrained("midas/dpt_large")
# self.clip_model = ClipModel.from_pretrained("openai/clip-vit-base-patch32")
# self.spatial = SpatialTransformer()
# self.extended_unet = ExtendedUNet(in_channels=3, out_channels=3)
# self.clip_model = ClipModel.from_pretrained("openai/clip-vit-base-patch32")
        self.unet = ExtendedUNet(in_channels=3, out_channels=3)
self.sinusoidal_embedding = SinusoidalEmbedding(4)
self.Ts = Ts
def forward(self, x, s, c, ts):
# #compute content representation
# content_embedding = self.clip_model.encode_image(x)
content_embedding = self.unet.process_content(x)
# #compute structure representation
depth_maps = self.unet.midas_dpt_large(x)
# structure_embedding = self.encode_structure(depth_maps)
structure_embedding = self.process_structure(depth_maps, ts)
zt = torch.cat([x, structure_embedding], dim=1)
#add sinusoidal embedding of ts
ts_embed = self.sinusoidal_embedding(ts, self.Ts).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
ts_embed = ts_embed.repeat(x.size(0), 1, x.size(2), x.size(3))
zt = torch.cat([zt, ts_embed], dim=1)
        # apply cross-attention for content conditioning (assumes the unet exposes a SpatialTransformer as `spatial_transformer`)
cross_attended_input = self.unet.spatial_transformer(zt, content_embedding)
#apply unet to predict means e
means = self.unet(cross_attended_input)
return means
    def process_structure(self, depth_maps, ts):
        orig_size = depth_maps.shape[-2:]
        for _ in range(ts):
            depth_maps = self.unet.blur(depth_maps)
            depth_maps = F.interpolate(depth_maps, scale_factor=0.5)
            depth_maps = F.interpolate(depth_maps, size=orig_size)
        z_structure = self.unet.encoder(depth_maps)
        return z_structure
#init starlight model
model = Starlight(Ts=1000)  # Ts: total number of diffusion steps (example value)
#d | StarlightVision-master | starlight_vision/core/starlight.py |
import math
import copy
import operator
import functools
from typing import List
from tqdm.auto import tqdm
from functools import partial, wraps
from contextlib import contextmanager, nullcontext
from collections import namedtuple
from pathlib import Path
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from einops_exts.torch import EinopsToAndFrom
from starlight_vision.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME
# helper functions
def exists(val):
return val is not None
def identity(t, *args, **kwargs):
return t
def first(arr, d = None):
if len(arr) == 0:
return d
return arr[0]
def divisible_by(numer, denom):
return (numer % denom) == 0
def maybe(fn):
@wraps(fn)
def inner(x):
if not exists(x):
return x
return fn(x)
return inner
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cast_tuple(val, length = None):
if isinstance(val, list):
val = tuple(val)
output = val if isinstance(val, tuple) else ((val,) * default(length, 1))
if exists(length):
assert len(output) == length
return output
def cast_uint8_images_to_float(images):
if not images.dtype == torch.uint8:
return images
return images / 255
def module_device(module):
return next(module.parameters()).device
def zero_init_(m):
nn.init.zeros_(m.weight)
if exists(m.bias):
nn.init.zeros_(m.bias)
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def pad_tuple_to_length(t, length, fillvalue = None):
remain_length = length - len(t)
if remain_length <= 0:
return t
return (*t, *((fillvalue,) * remain_length))
# helper classes
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, *args, **kwargs):
return x
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# tensor helpers
def log(t, eps: float = 1e-12):
return torch.log(t.clamp(min = eps))
def l2norm(t):
return F.normalize(t, dim = -1)
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
def masked_mean(t, *, dim, mask = None):
if not exists(mask):
return t.mean(dim = dim)
denom = mask.sum(dim = dim, keepdim = True)
mask = rearrange(mask, 'b n -> b n 1')
masked_t = t.masked_fill(~mask, 0.)
return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)
def resize_video_to(
video,
target_image_size,
target_frames = None,
clamp_range = None,
mode = 'nearest'
):
orig_video_size = video.shape[-1]
frames = video.shape[2]
target_frames = default(target_frames, frames)
target_shape = (target_frames, target_image_size, target_image_size)
if tuple(video.shape[-3:]) == target_shape:
return video
out = F.interpolate(video, target_shape, mode = mode)
if exists(clamp_range):
out = out.clamp(*clamp_range)
return out
def scale_video_time(
video,
downsample_scale = 1,
mode = 'nearest'
):
if downsample_scale == 1:
return video
image_size, frames = video.shape[-1], video.shape[-3]
assert divisible_by(frames, downsample_scale), f'trying to temporally downsample a conditioning video frames of length {frames} by {downsample_scale}, however it is not neatly divisible'
target_frames = frames // downsample_scale
resized_video = resize_video_to(
video,
image_size,
target_frames = target_frames,
mode = mode
)
return resized_video
# classifier free guidance functions
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
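# Illustrative use (not part of the original module): for classifier free guidance, a keep-mask
# is drawn per sample; with cond_drop_prob = 0.1, roughly 90% of the batch keeps its conditioning.
def _cond_dropout_example(batch_size = 8, cond_drop_prob = 0.1, device = 'cpu'):
    text_keep_mask = prob_mask_like((batch_size,), 1 - cond_drop_prob, device = device)
    return text_keep_mask  # bool tensor of shape (batch_size,)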
# norms and residuals
class LayerNorm(nn.Module):
def __init__(self, dim, stable = False):
super().__init__()
self.stable = stable
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
if self.stable:
x = x / x.amax(dim = -1, keepdim = True).detach()
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = -1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = -1, keepdim = True)
return (x - mean) * (var + eps).rsqrt() * self.g
class ChanLayerNorm(nn.Module):
def __init__(self, dim, stable = False):
super().__init__()
self.stable = stable
self.g = nn.Parameter(torch.ones(1, dim, 1, 1, 1))
def forward(self, x):
if self.stable:
x = x / x.amax(dim = 1, keepdim = True).detach()
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) * (var + eps).rsqrt() * self.g
class Always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class Parallel(nn.Module):
def __init__(self, *fns):
super().__init__()
self.fns = nn.ModuleList(fns)
def forward(self, x):
outputs = [fn(x) for fn in self.fns]
return sum(outputs)
# rearranging
class RearrangeTimeCentric(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
x = rearrange(x, 'b c f ... -> b ... f c')
x, ps = pack([x], '* f c')
x = self.fn(x)
x, = unpack(x, ps, '* f c')
x = rearrange(x, 'b ... f c -> b c f ...')
return x
# attention pooling
class PerceiverAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
scale = 8
):
super().__init__()
self.scale = scale
self.heads = heads
inner_dim = dim_head * heads
self.norm = nn.LayerNorm(dim)
self.norm_latents = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
nn.LayerNorm(dim)
)
def forward(self, x, latents, mask = None):
x = self.norm(x)
latents = self.norm_latents(latents)
b, h = x.shape[0], self.heads
q = self.to_q(latents)
# the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to
kv_input = torch.cat((x, latents), dim = -2)
k, v = self.to_kv(kv_input).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# qk rmsnorm
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# similarities and masking
sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale
if exists(mask):
max_neg_value = -torch.finfo(sim.dtype).max
mask = F.pad(mask, (0, latents.shape[-2]), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
# attention
attn = sim.softmax(dim = -1)
out = einsum('... i j, ... j d -> ... i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)', h = h)
return self.to_out(out)
class PerceiverResampler(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
num_latents = 64,
num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence
max_seq_len = 512,
ff_mult = 4
):
super().__init__()
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.latents = nn.Parameter(torch.randn(num_latents, dim))
self.to_latents_from_mean_pooled_seq = None
if num_latents_mean_pooled > 0:
self.to_latents_from_mean_pooled_seq = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, dim * num_latents_mean_pooled),
Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)
)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),
FeedForward(dim = dim, mult = ff_mult)
]))
def forward(self, x, mask = None):
n, device = x.shape[1], x.device
pos_emb = self.pos_emb(torch.arange(n, device = device))
x_with_pos = x + pos_emb
latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])
if exists(self.to_latents_from_mean_pooled_seq):
meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))
meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)
latents = torch.cat((meanpooled_latents, latents), dim = -2)
for attn, ff in self.layers:
latents = attn(x_with_pos, latents, mask = mask) + latents
latents = ff(latents) + latents
return latents
# main contribution from make-a-video - pseudo conv3d
# axial space-time convolutions, but made causal to keep in line with the design decisions of starlight-video paper
class Conv3d(nn.Module):
def __init__(
self,
dim,
dim_out = None,
kernel_size = 3,
*,
temporal_kernel_size = None,
**kwargs
):
super().__init__()
dim_out = default(dim_out, dim)
temporal_kernel_size = default(temporal_kernel_size, kernel_size)
self.spatial_conv = nn.Conv2d(dim, dim_out, kernel_size = kernel_size, padding = kernel_size // 2)
self.temporal_conv = nn.Conv1d(dim_out, dim_out, kernel_size = temporal_kernel_size) if kernel_size > 1 else None
self.kernel_size = kernel_size
if exists(self.temporal_conv):
nn.init.dirac_(self.temporal_conv.weight.data) # initialized to be identity
nn.init.zeros_(self.temporal_conv.bias.data)
def forward(
self,
x,
ignore_time = False
):
b, c, *_, h, w = x.shape
is_video = x.ndim == 5
ignore_time &= is_video
if is_video:
x = rearrange(x, 'b c f h w -> (b f) c h w')
x = self.spatial_conv(x)
if is_video:
x = rearrange(x, '(b f) c h w -> b c f h w', b = b)
if ignore_time or not exists(self.temporal_conv):
return x
x = rearrange(x, 'b c f h w -> (b h w) c f')
# causal temporal convolution - time is causal in starlight-video
if self.kernel_size > 1:
x = F.pad(x, (self.kernel_size - 1, 0))
x = self.temporal_conv(x)
x = rearrange(x, '(b h w) c f -> b c f h w', h = h, w = w)
return x
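# Usage sketch (illustrative; not part of the original module): the pseudo conv3d takes video
# tensors (batch, channels, frames, height, width), applies the spatial conv frame by frame,
# then a causally padded temporal conv over the frame axis; the output shape is preserved
# apart from the channel count.
def _pseudo_conv3d_example():
    conv = Conv3d(8, 16, kernel_size = 3)
    video = torch.randn(1, 8, 4, 32, 32)
    return conv(video).shape  # -> torch.Size([1, 16, 4, 32, 32])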
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
*,
dim_head = 64,
heads = 8,
causal = False,
context_dim = None,
rel_pos_bias = False,
rel_pos_bias_mlp_depth = 2,
init_zero = False,
scale = 8
):
super().__init__()
self.scale = scale
self.causal = causal
self.rel_pos_bias = DynamicPositionBias(dim = dim, heads = heads, depth = rel_pos_bias_mlp_depth) if rel_pos_bias else None
self.heads = heads
inner_dim = dim_head * heads
self.norm = LayerNorm(dim)
self.null_attn_bias = nn.Parameter(torch.randn(heads))
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
LayerNorm(dim)
)
if init_zero:
nn.init.zeros_(self.to_out[-1].g)
def forward(
self,
x,
context = None,
mask = None,
attn_bias = None
):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> b 1 d', b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# add text conditioning, if present
if exists(context):
assert exists(self.to_context)
ck, cv = self.to_context(context).chunk(2, dim = -1)
k = torch.cat((ck, k), dim = -2)
v = torch.cat((cv, v), dim = -2)
# qk rmsnorm
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# calculate query / key similarities
sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale
# relative positional encoding (T5 style)
if not exists(attn_bias) and exists(self.rel_pos_bias):
attn_bias = self.rel_pos_bias(n, device = device, dtype = q.dtype)
if exists(attn_bias):
null_attn_bias = repeat(self.null_attn_bias, 'h -> h n 1', n = n)
attn_bias = torch.cat((null_attn_bias, attn_bias), dim = -1)
sim = sim + attn_bias
# masking
max_neg_value = -torch.finfo(sim.dtype).max
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, max_neg_value)
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
# attention
attn = sim.softmax(dim = -1)
# aggregate values
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# pseudo conv2d that uses conv3d but with kernel size of 1 across frames dimension
def Conv2d(dim_in, dim_out, kernel, stride = 1, padding = 0, **kwargs):
kernel = cast_tuple(kernel, 2)
stride = cast_tuple(stride, 2)
padding = cast_tuple(padding, 2)
if len(kernel) == 2:
kernel = (1, *kernel)
if len(stride) == 2:
stride = (1, *stride)
if len(padding) == 2:
padding = (0, *padding)
return nn.Conv3d(dim_in, dim_out, kernel, stride = stride, padding = padding, **kwargs)
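# Illustrative shape check (not part of the original module): the "2d" conv above is really an
# nn.Conv3d whose kernel spans a single frame, so it applies frame-wise to video tensors.
def _pseudo_conv2d_example():
    conv = Conv2d(8, 16, 3, padding = 1)
    video = torch.randn(1, 8, 4, 32, 32)  # (batch, channels, frames, height, width)
    return conv(video).shape  # -> torch.Size([1, 16, 4, 32, 32])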
class Pad(nn.Module):
def __init__(self, padding, value = 0.):
super().__init__()
self.padding = padding
self.value = value
def forward(self, x):
return F.pad(x, self.padding, value = self.value)
# decoder
def Upsample(dim, dim_out = None):
dim_out = default(dim_out, dim)
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
Conv2d(dim, dim_out, 3, padding = 1)
)
class PixelShuffleUpsample(nn.Module):
def __init__(self, dim, dim_out = None):
super().__init__()
dim_out = default(dim_out, dim)
conv = Conv2d(dim, dim_out * 4, 1)
self.net = nn.Sequential(
conv,
nn.SiLU()
)
self.pixel_shuffle = nn.PixelShuffle(2)
self.init_conv_(conv)
def init_conv_(self, conv):
o, i, f, h, w = conv.weight.shape
conv_weight = torch.empty(o // 4, i, f, h, w)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(self, x):
out = self.net(x)
frames = x.shape[2]
out = rearrange(out, 'b c f h w -> (b f) c h w')
out = self.pixel_shuffle(out)
return rearrange(out, '(b f) c h w -> b c f h w', f = frames)
def Downsample(dim, dim_out = None):
dim_out = default(dim_out, dim)
return nn.Sequential(
Rearrange('b c f (h p1) (w p2) -> b (c p1 p2) f h w', p1 = 2, p2 = 2),
Conv2d(dim * 4, dim_out, 1)
)
# temporal up and downsamples
class TemporalPixelShuffleUpsample(nn.Module):
def __init__(self, dim, dim_out = None, stride = 2):
super().__init__()
self.stride = stride
dim_out = default(dim_out, dim)
conv = nn.Conv1d(dim, dim_out * stride, 1)
self.net = nn.Sequential(
conv,
nn.SiLU()
)
self.pixel_shuffle = Rearrange('b (c r) n -> b c (n r)', r = stride)
self.init_conv_(conv)
def init_conv_(self, conv):
o, i, f = conv.weight.shape
conv_weight = torch.empty(o // self.stride, i, f)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o r) ...', r = self.stride)
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(self, x):
b, c, f, h, w = x.shape
x = rearrange(x, 'b c f h w -> (b h w) c f')
out = self.net(x)
out = self.pixel_shuffle(out)
return rearrange(out, '(b h w) c f -> b c f h w', h = h, w = w)
def TemporalDownsample(dim, dim_out = None, stride = 2):
dim_out = default(dim_out, dim)
return nn.Sequential(
Rearrange('b c (f p) h w -> b (c p) f h w', p = stride),
Conv2d(dim * stride, dim_out, 1)
)
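# Illustrative shape check (not part of the original module): TemporalDownsample folds frames
# into channels via a rearrange and projects back, halving the frame count for stride = 2.
def _temporal_downsample_example():
    down = TemporalDownsample(16, stride = 2)
    video = torch.randn(1, 16, 8, 32, 32)
    return down(video).shape  # -> torch.Size([1, 16, 4, 32, 32])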
# positional embedding
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)
emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')
return torch.cat((emb.sin(), emb.cos()), dim = -1)
class LearnedSinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim))
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
class Block(nn.Module):
def __init__(
self,
dim,
dim_out,
groups = 8,
norm = True
):
super().__init__()
self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()
self.activation = nn.SiLU()
self.project = Conv3d(dim, dim_out, 3, padding = 1)
def forward(
self,
x,
scale_shift = None,
ignore_time = False
):
x = self.groupnorm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.activation(x)
return self.project(x, ignore_time = ignore_time)
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
*,
cond_dim = None,
time_cond_dim = None,
groups = 8,
linear_attn = False,
use_gca = False,
squeeze_excite = False,
**attn_kwargs
):
super().__init__()
self.time_mlp = None
if exists(time_cond_dim):
self.time_mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_cond_dim, dim_out * 2)
)
self.cross_attn = None
if exists(cond_dim):
attn_klass = CrossAttention if not linear_attn else LinearCrossAttention
self.cross_attn = attn_klass(
dim = dim_out,
context_dim = cond_dim,
**attn_kwargs
)
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)
self.res_conv = Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()
def forward(
self,
x,
time_emb = None,
cond = None,
ignore_time = False
):
scale_shift = None
if exists(self.time_mlp) and exists(time_emb):
time_emb = self.time_mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, ignore_time = ignore_time)
if exists(self.cross_attn):
assert exists(cond)
h = rearrange(h, 'b c ... -> b ... c')
h, ps = pack([h], 'b * c')
h = self.cross_attn(h, context = cond) + h
h, = unpack(h, ps, 'b * c')
h = rearrange(h, 'b ... c -> b c ...')
h = self.block2(h, scale_shift = scale_shift, ignore_time = ignore_time)
h = h * self.gca(h)
return h + self.res_conv(x)
class CrossAttention(nn.Module):
def __init__(
self,
dim,
*,
context_dim = None,
dim_head = 64,
heads = 8,
norm_context = False,
scale = 8
):
super().__init__()
self.scale = scale
self.heads = heads
inner_dim = dim_head * heads
context_dim = default(context_dim, dim)
self.norm = LayerNorm(dim)
self.norm_context = LayerNorm(context_dim) if norm_context else Identity()
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
LayerNorm(dim)
)
def forward(self, x, context, mask = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
context = self.norm_context(context)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> b h 1 d', h = self.heads, b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# qk rmsnorm
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# similarities
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
# masking
max_neg_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
attn = sim.softmax(dim = -1, dtype = torch.float32)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class LinearCrossAttention(CrossAttention):
def forward(self, x, context, mask = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
context = self.norm_context(context)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = self.heads), (q, k, v))
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> (b h) 1 d', h = self.heads, b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# masking
max_neg_value = -torch.finfo(x.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b n -> b n 1')
k = k.masked_fill(~mask, max_neg_value)
v = v.masked_fill(~mask, 0.)
# linear attention
q = q.softmax(dim = -1)
k = k.softmax(dim = -2)
q = q * self.scale
context = einsum('b n d, b n e -> b d e', k, v)
out = einsum('b n d, b d e -> b n e', q, context)
out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)
return self.to_out(out)
class LinearAttention(nn.Module):
def __init__(
self,
dim,
dim_head = 32,
heads = 8,
dropout = 0.05,
context_dim = None,
**kwargs
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm = ChanLayerNorm(dim)
self.nonlin = nn.SiLU()
self.to_q = nn.Sequential(
nn.Dropout(dropout),
Conv2d(dim, inner_dim, 1, bias = False),
Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_k = nn.Sequential(
nn.Dropout(dropout),
Conv2d(dim, inner_dim, 1, bias = False),
Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_v = nn.Sequential(
nn.Dropout(dropout),
Conv2d(dim, inner_dim, 1, bias = False),
Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None
self.to_out = nn.Sequential(
Conv2d(inner_dim, dim, 1, bias = False),
ChanLayerNorm(dim)
)
def forward(self, fmap, context = None):
h, x, y = self.heads, *fmap.shape[-2:]
fmap = self.norm(fmap)
q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = h), (q, k, v))
if exists(context):
assert exists(self.to_context)
ck, cv = self.to_context(context).chunk(2, dim = -1)
ck, cv = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (ck, cv))
k = torch.cat((k, ck), dim = -2)
v = torch.cat((v, cv), dim = -2)
q = q.softmax(dim = -1)
k = k.softmax(dim = -2)
q = q * self.scale
context = einsum('b n d, b n e -> b d e', k, v)
out = einsum('b n d, b d e -> b n e', q, context)
out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)
out = self.nonlin(out)
return self.to_out(out)
class GlobalContext(nn.Module):
""" basically a superior form of squeeze-excitation that is attention-esque """
def __init__(
self,
*,
dim_in,
dim_out
):
super().__init__()
self.to_k = Conv2d(dim_in, 1, 1)
hidden_dim = max(3, dim_out // 2)
self.net = nn.Sequential(
Conv2d(dim_in, hidden_dim, 1),
nn.SiLU(),
Conv2d(hidden_dim, dim_out, 1),
nn.Sigmoid()
)
def forward(self, x):
context = self.to_k(x)
x, context = map(lambda t: rearrange(t, 'b n ... -> b n (...)'), (x, context))
out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)
out = rearrange(out, '... -> ... 1 1')
return self.net(out)
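# Illustrative shape check (not part of the original module): GlobalContext pools the whole
# spatio-temporal grid into one context vector per channel and returns per-channel gates,
# which ResnetBlock multiplies back onto its feature map.
def _global_context_example():
    gca = GlobalContext(dim_in = 16, dim_out = 16)
    video = torch.randn(1, 16, 2, 8, 8)
    return gca(video).shape  # -> torch.Size([1, 16, 1, 1, 1])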
def FeedForward(dim, mult = 2):
hidden_dim = int(dim * mult)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, hidden_dim, bias = False),
nn.GELU(),
LayerNorm(hidden_dim),
nn.Linear(hidden_dim, dim, bias = False)
)
class TimeTokenShift(nn.Module):
def forward(self, x):
if x.ndim != 5:
return x
x, x_shift = x.chunk(2, dim = 1)
x_shift = F.pad(x_shift, (0, 0, 0, 0, 1, -1), value = 0.)
return torch.cat((x, x_shift), dim = 1)
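# Illustrative check (not part of the original module): the token shift splits the channels in
# half and delays one half by a single frame (causal shift); 4-d image inputs pass through unchanged.
def _time_token_shift_example():
    shift = TimeTokenShift()
    video = torch.randn(1, 4, 3, 8, 8)  # (batch, channels, frames, height, width)
    return shift(video).shape  # -> torch.Size([1, 4, 3, 8, 8])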
def ChanFeedForward(dim, mult = 2, time_token_shift = True): # in paper, it seems for self attention layers they did feedforwards with twice channel width
hidden_dim = int(dim * mult)
return Sequential(
ChanLayerNorm(dim),
Conv2d(dim, hidden_dim, 1, bias = False),
nn.GELU(),
TimeTokenShift() if time_token_shift else None,
ChanLayerNorm(hidden_dim),
Conv2d(hidden_dim, dim, 1, bias = False)
)
class TransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
depth = 1,
heads = 8,
dim_head = 32,
ff_mult = 2,
ff_time_token_shift = True,
context_dim = None
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),
ChanFeedForward(dim = dim, mult = ff_mult, time_token_shift = ff_time_token_shift)
]))
def forward(self, x, context = None):
for attn, ff in self.layers:
x = rearrange(x, 'b c ... -> b ... c')
x, ps = pack([x], 'b * c')
x = attn(x, context = context) + x
x, = unpack(x, ps, 'b * c')
x = rearrange(x, 'b ... c -> b c ...')
x = ff(x) + x
return x
class LinearAttentionTransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
depth = 1,
heads = 8,
dim_head = 32,
ff_mult = 2,
ff_time_token_shift = True,
context_dim = None,
**kwargs
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
LinearAttention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),
ChanFeedForward(dim = dim, mult = ff_mult, time_token_shift = ff_time_token_shift)
]))
def forward(self, x, context = None):
for attn, ff in self.layers:
x = attn(x, context = context) + x
x = ff(x) + x
return x
class CrossEmbedLayer(nn.Module):
def __init__(
self,
dim_in,
kernel_sizes,
dim_out = None,
stride = 2
):
super().__init__()
assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])
dim_out = default(dim_out, dim_in)
kernel_sizes = sorted(kernel_sizes)
num_scales = len(kernel_sizes)
# calculate the dimension at each scale
dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]
dim_scales = [*dim_scales, dim_out - sum(dim_scales)]
self.convs = nn.ModuleList([])
for kernel, dim_scale in zip(kernel_sizes, dim_scales):
self.convs.append(Conv2d(dim_in, dim_scale, kernel, stride = stride, padding = (kernel - stride) // 2))
def forward(self, x):
fmaps = tuple(map(lambda conv: conv(x), self.convs))
return torch.cat(fmaps, dim = 1)
class UpsampleCombiner(nn.Module):
def __init__(
self,
dim,
*,
enabled = False,
dim_ins = tuple(),
dim_outs = tuple()
):
super().__init__()
dim_outs = cast_tuple(dim_outs, len(dim_ins))
assert len(dim_ins) == len(dim_outs)
self.enabled = enabled
if not self.enabled:
self.dim_out = dim
return
self.fmap_convs = nn.ModuleList([Block(dim_in, dim_out) for dim_in, dim_out in zip(dim_ins, dim_outs)])
self.dim_out = dim + (sum(dim_outs) if len(dim_outs) > 0 else 0)
def forward(self, x, fmaps = None):
target_size = x.shape[-1]
fmaps = default(fmaps, tuple())
if not self.enabled or len(fmaps) == 0 or len(self.fmap_convs) == 0:
return x
fmaps = [resize_video_to(fmap, target_size) for fmap in fmaps]
outs = [conv(fmap) for fmap, conv in zip(fmaps, self.fmap_convs)]
return torch.cat((x, *outs), dim = 1)
class DynamicPositionBias(nn.Module):
def __init__(
self,
dim,
*,
heads,
depth
):
super().__init__()
self.mlp = nn.ModuleList([])
self.mlp.append(nn.Sequential(
nn.Linear(1, dim),
LayerNorm(dim),
nn.SiLU()
))
for _ in range(max(depth - 1, 0)):
self.mlp.append(nn.Sequential(
nn.Linear(dim, dim),
LayerNorm(dim),
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
def forward(self, n, device, dtype):
i = torch.arange(n, device = device)
j = torch.arange(n, device = device)
indices = rearrange(i, 'i -> i 1') - rearrange(j, 'j -> 1 j')
indices += (n - 1)
pos = torch.arange(-n + 1, n, device = device, dtype = dtype)
pos = rearrange(pos, '... -> ... 1')
for layer in self.mlp:
pos = layer(pos)
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
class Unet3D(nn.Module):
def __init__(
self,
*,
dim,
text_embed_dim = get_encoded_dim(DEFAULT_T5_NAME),
num_resnet_blocks = 1,
cond_dim = None,
num_image_tokens = 4,
num_time_tokens = 2,
learned_sinu_pos_emb_dim = 16,
out_dim = None,
dim_mults = (1, 2, 4, 8),
temporal_strides = 1,
cond_images_channels = 0,
channels = 3,
channels_out = None,
attn_dim_head = 64,
attn_heads = 8,
ff_mult = 2.,
ff_time_token_shift = True, # this would do a token shift along time axis, at the hidden layer within feedforwards - from successful use in RWKV (Peng et al), and other token shift video transformer works
lowres_cond = False, # for cascading diffusion - https://cascaded-diffusion.github.io/
layer_attns = False,
layer_attns_depth = 1,
layer_attns_add_text_cond = True, # whether to condition the self-attention blocks with the text embeddings, as described in Appendix D.3.1
attend_at_middle = True, # whether to have a layer of attention at the bottleneck (can turn off for higher resolution in cascading DDPM, before bringing in efficient attention)
time_rel_pos_bias_depth = 2,
time_causal_attn = True,
layer_cross_attns = True,
use_linear_attn = False,
use_linear_cross_attn = False,
cond_on_text = True,
max_text_len = 256,
init_dim = None,
resnet_groups = 8,
init_conv_kernel_size = 7, # kernel size of initial conv, if not using cross embed
init_cross_embed = True,
init_cross_embed_kernel_sizes = (3, 7, 15),
cross_embed_downsample = False,
cross_embed_downsample_kernel_sizes = (2, 4),
attn_pool_text = True,
attn_pool_num_latents = 32,
dropout = 0.,
memory_efficient = False,
init_conv_to_final_conv_residual = False,
use_global_context_attn = True,
scale_skip_connection = True,
final_resnet_block = True,
final_conv_kernel_size = 3,
self_cond = False,
combine_upsample_fmaps = False, # combine feature maps from all upsample blocks, used in unet squared successfully
        pixel_shuffle_upsample = True,          # may address checkerboard artifacts
resize_mode = 'nearest'
):
super().__init__()
# guide researchers
assert attn_heads > 1, 'you need to have more than 1 attention head, ideally at least 4 or 8'
if dim < 128:
print_once('The base dimension of your u-net should ideally be no smaller than 128, as recommended by a professional DDPM trainer https://nonint.com/2022/05/04/friends-dont-let-friends-train-small-diffusion-models/')
# save locals to take care of some hyperparameters for cascading DDPM
self._locals = locals()
self._locals.pop('self', None)
self._locals.pop('__class__', None)
self.self_cond = self_cond
# determine dimensions
self.channels = channels
self.channels_out = default(channels_out, channels)
# (1) in cascading diffusion, one concats the low resolution image, blurred, for conditioning the higher resolution synthesis
# (2) in self conditioning, one appends the predict x0 (x_start)
init_channels = channels * (1 + int(lowres_cond) + int(self_cond))
init_dim = default(init_dim, dim)
# optional image conditioning
self.has_cond_image = cond_images_channels > 0
self.cond_images_channels = cond_images_channels
init_channels += cond_images_channels
# initial convolution
self.init_conv = CrossEmbedLayer(init_channels, dim_out = init_dim, kernel_sizes = init_cross_embed_kernel_sizes, stride = 1) if init_cross_embed else Conv2d(init_channels, init_dim, init_conv_kernel_size, padding = init_conv_kernel_size // 2)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
# time conditioning
cond_dim = default(cond_dim, dim)
time_cond_dim = dim * 4 * (2 if lowres_cond else 1)
# embedding time for log(snr) noise from continuous version
sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim)
sinu_pos_emb_input_dim = learned_sinu_pos_emb_dim + 1
self.to_time_hiddens = nn.Sequential(
sinu_pos_emb,
nn.Linear(sinu_pos_emb_input_dim, time_cond_dim),
nn.SiLU()
)
self.to_time_cond = nn.Sequential(
nn.Linear(time_cond_dim, time_cond_dim)
)
# project to time tokens as well as time hiddens
self.to_time_tokens = nn.Sequential(
nn.Linear(time_cond_dim, cond_dim * num_time_tokens),
Rearrange('b (r d) -> b r d', r = num_time_tokens)
)
# low res aug noise conditioning
self.lowres_cond = lowres_cond
if lowres_cond:
self.to_lowres_time_hiddens = nn.Sequential(
LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim),
nn.Linear(learned_sinu_pos_emb_dim + 1, time_cond_dim),
nn.SiLU()
)
self.to_lowres_time_cond = nn.Sequential(
nn.Linear(time_cond_dim, time_cond_dim)
)
self.to_lowres_time_tokens = nn.Sequential(
nn.Linear(time_cond_dim, cond_dim * num_time_tokens),
Rearrange('b (r d) -> b r d', r = num_time_tokens)
)
# normalizations
self.norm_cond = nn.LayerNorm(cond_dim)
# text encoding conditioning (optional)
self.text_to_cond = None
if cond_on_text:
assert exists(text_embed_dim), 'text_embed_dim must be given to the unet if cond_on_text is True'
self.text_to_cond = nn.Linear(text_embed_dim, cond_dim)
# finer control over whether to condition on text encodings
self.cond_on_text = cond_on_text
# attention pooling
self.attn_pool = PerceiverResampler(dim = cond_dim, depth = 2, dim_head = attn_dim_head, heads = attn_heads, num_latents = attn_pool_num_latents) if attn_pool_text else None
# for classifier free guidance
self.max_text_len = max_text_len
self.null_text_embed = nn.Parameter(torch.randn(1, max_text_len, cond_dim))
self.null_text_hidden = nn.Parameter(torch.randn(1, time_cond_dim))
# for non-attention based text conditioning at all points in the network where time is also conditioned
self.to_text_non_attn_cond = None
if cond_on_text:
self.to_text_non_attn_cond = nn.Sequential(
nn.LayerNorm(cond_dim),
nn.Linear(cond_dim, time_cond_dim),
nn.SiLU(),
nn.Linear(time_cond_dim, time_cond_dim)
)
# attention related params
attn_kwargs = dict(heads = attn_heads, dim_head = attn_dim_head)
num_layers = len(in_out)
# temporal attention - attention across video frames
temporal_peg_padding = (0, 0, 0, 0, 2, 0) if time_causal_attn else (0, 0, 0, 0, 1, 1)
temporal_peg = lambda dim: Residual(nn.Sequential(Pad(temporal_peg_padding), nn.Conv3d(dim, dim, (3, 1, 1), groups = dim)))
temporal_attn = lambda dim: RearrangeTimeCentric(Residual(Attention(dim, **{**attn_kwargs, 'causal': time_causal_attn, 'init_zero': True, 'rel_pos_bias': True})))
# resnet block klass
num_resnet_blocks = cast_tuple(num_resnet_blocks, num_layers)
resnet_groups = cast_tuple(resnet_groups, num_layers)
resnet_klass = partial(ResnetBlock, **attn_kwargs)
layer_attns = cast_tuple(layer_attns, num_layers)
layer_attns_depth = cast_tuple(layer_attns_depth, num_layers)
layer_cross_attns = cast_tuple(layer_cross_attns, num_layers)
assert all([layers == num_layers for layers in list(map(len, (resnet_groups, layer_attns, layer_cross_attns)))])
# temporal downsample config
temporal_strides = cast_tuple(temporal_strides, num_layers)
self.total_temporal_divisor = functools.reduce(operator.mul, temporal_strides, 1)
# downsample klass
downsample_klass = Downsample
if cross_embed_downsample:
downsample_klass = partial(CrossEmbedLayer, kernel_sizes = cross_embed_downsample_kernel_sizes)
# initial resnet block (for memory efficient unet)
self.init_resnet_block = resnet_klass(init_dim, init_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[0], use_gca = use_global_context_attn) if memory_efficient else None
self.init_temporal_peg = temporal_peg(init_dim)
self.init_temporal_attn = temporal_attn(init_dim)
# scale for resnet skip connections
self.skip_connect_scale = 1. if not scale_skip_connection else (2 ** -0.5)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
layer_params = [num_resnet_blocks, resnet_groups, layer_attns, layer_attns_depth, layer_cross_attns, temporal_strides]
reversed_layer_params = list(map(reversed, layer_params))
# downsampling layers
skip_connect_dims = [] # keep track of skip connection dimensions
for ind, ((dim_in, dim_out), layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, temporal_stride) in enumerate(zip(in_out, *layer_params)):
is_last = ind >= (num_resolutions - 1)
layer_use_linear_cross_attn = not layer_cross_attn and use_linear_cross_attn
layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None
transformer_block_klass = TransformerBlock if layer_attn else (LinearAttentionTransformerBlock if use_linear_attn else Identity)
current_dim = dim_in
# whether to pre-downsample, from memory efficient unet
pre_downsample = None
if memory_efficient:
pre_downsample = downsample_klass(dim_in, dim_out)
current_dim = dim_out
skip_connect_dims.append(current_dim)
# whether to do post-downsample, for non-memory efficient unet
post_downsample = None
if not memory_efficient:
post_downsample = downsample_klass(current_dim, dim_out) if not is_last else Parallel(Conv2d(dim_in, dim_out, 3, padding = 1), Conv2d(dim_in, dim_out, 1))
self.downs.append(nn.ModuleList([
pre_downsample,
resnet_klass(current_dim, current_dim, cond_dim = layer_cond_dim, linear_attn = layer_use_linear_cross_attn, time_cond_dim = time_cond_dim, groups = groups),
nn.ModuleList([ResnetBlock(current_dim, current_dim, time_cond_dim = time_cond_dim, groups = groups, use_gca = use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),
transformer_block_klass(dim = current_dim, depth = layer_attn_depth, ff_mult = ff_mult, ff_time_token_shift = ff_time_token_shift, context_dim = cond_dim, **attn_kwargs),
temporal_peg(current_dim),
temporal_attn(current_dim),
TemporalDownsample(current_dim, stride = temporal_stride) if temporal_stride > 1 else None,
post_downsample
]))
# middle layers
mid_dim = dims[-1]
self.mid_block1 = ResnetBlock(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])
self.mid_attn = EinopsToAndFrom('b c f h w', 'b (f h w) c', Residual(Attention(mid_dim, **attn_kwargs))) if attend_at_middle else None
self.mid_temporal_peg = temporal_peg(mid_dim)
self.mid_temporal_attn = temporal_attn(mid_dim)
self.mid_block2 = ResnetBlock(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])
# upsample klass
upsample_klass = Upsample if not pixel_shuffle_upsample else PixelShuffleUpsample
# upsampling layers
upsample_fmap_dims = []
for ind, ((dim_in, dim_out), layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, temporal_stride) in enumerate(zip(reversed(in_out), *reversed_layer_params)):
is_last = ind == (len(in_out) - 1)
layer_use_linear_cross_attn = not layer_cross_attn and use_linear_cross_attn
layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None
transformer_block_klass = TransformerBlock if layer_attn else (LinearAttentionTransformerBlock if use_linear_attn else Identity)
skip_connect_dim = skip_connect_dims.pop()
upsample_fmap_dims.append(dim_out)
self.ups.append(nn.ModuleList([
resnet_klass(dim_out + skip_connect_dim, dim_out, cond_dim = layer_cond_dim, linear_attn = layer_use_linear_cross_attn, time_cond_dim = time_cond_dim, groups = groups),
nn.ModuleList([ResnetBlock(dim_out + skip_connect_dim, dim_out, time_cond_dim = time_cond_dim, groups = groups, use_gca = use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),
transformer_block_klass(dim = dim_out, depth = layer_attn_depth, ff_mult = ff_mult, ff_time_token_shift = ff_time_token_shift, context_dim = cond_dim, **attn_kwargs),
temporal_peg(dim_out),
temporal_attn(dim_out),
TemporalPixelShuffleUpsample(dim_out, stride = temporal_stride) if temporal_stride > 1 else None,
upsample_klass(dim_out, dim_in) if not is_last or memory_efficient else Identity()
]))
# whether to combine feature maps from all upsample blocks before final resnet block out
self.upsample_combiner = UpsampleCombiner(
dim = dim,
enabled = combine_upsample_fmaps,
dim_ins = upsample_fmap_dims,
dim_outs = dim
)
# whether to do a final residual from initial conv to the final resnet block out
self.init_conv_to_final_conv_residual = init_conv_to_final_conv_residual
final_conv_dim = self.upsample_combiner.dim_out + (dim if init_conv_to_final_conv_residual else 0)
# final optional resnet block and convolution out
self.final_res_block = ResnetBlock(final_conv_dim, dim, time_cond_dim = time_cond_dim, groups = resnet_groups[0], use_gca = True) if final_resnet_block else None
final_conv_dim_in = dim if final_resnet_block else final_conv_dim
final_conv_dim_in += (channels if lowres_cond else 0)
self.final_conv = Conv2d(final_conv_dim_in, self.channels_out, final_conv_kernel_size, padding = final_conv_kernel_size // 2)
zero_init_(self.final_conv)
# resize mode
self.resize_mode = resize_mode
# if the current settings for the unet are not correct
# for cascading DDPM, then reinit the unet with the right settings
def cast_model_parameters(
self,
*,
lowres_cond,
text_embed_dim,
channels,
channels_out,
cond_on_text
):
if lowres_cond == self.lowres_cond and \
channels == self.channels and \
cond_on_text == self.cond_on_text and \
text_embed_dim == self._locals['text_embed_dim'] and \
channels_out == self.channels_out:
return self
updated_kwargs = dict(
lowres_cond = lowres_cond,
text_embed_dim = text_embed_dim,
channels = channels,
channels_out = channels_out,
cond_on_text = cond_on_text
)
return self.__class__(**{**self._locals, **updated_kwargs})
# methods for returning the full unet config as well as its parameter state
def to_config_and_state_dict(self):
return self._locals, self.state_dict()
# class method for rehydrating the unet from its config and state dict
@classmethod
def from_config_and_state_dict(klass, config, state_dict):
unet = klass(**config)
unet.load_state_dict(state_dict)
return unet
# methods for persisting unet to disk
def persist_to_file(self, path):
path = Path(path)
path.parents[0].mkdir(exist_ok = True, parents = True)
config, state_dict = self.to_config_and_state_dict()
pkg = dict(config = config, state_dict = state_dict)
torch.save(pkg, str(path))
# class method for rehydrating the unet from file saved with `persist_to_file`
@classmethod
def hydrate_from_file(klass, path):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path))
assert 'config' in pkg and 'state_dict' in pkg
config, state_dict = pkg['config'], pkg['state_dict']
return Unet.from_config_and_state_dict(config, state_dict)
# forward with classifier free guidance
def forward_with_cond_scale(
self,
*args,
cond_scale = 1.,
**kwargs
):
logits = self.forward(*args, **kwargs)
if cond_scale == 1:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(
self,
x,
time,
*,
lowres_cond_img = None,
lowres_noise_times = None,
text_embeds = None,
text_mask = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
self_cond = None,
cond_drop_prob = 0.,
ignore_time = False
):
assert x.ndim == 5, 'input to 3d unet must have 5 dimensions (batch, channels, time, height, width)'
batch_size, frames, device, dtype = x.shape[0], x.shape[2], x.device, x.dtype
assert ignore_time or divisible_by(frames, self.total_temporal_divisor), f'number of input frames {frames} must be divisible by {self.total_temporal_divisor}'
# add self conditioning if needed
if self.self_cond:
self_cond = default(self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x, self_cond), dim = 1)
# add low resolution conditioning, if present
assert not (self.lowres_cond and not exists(lowres_cond_img)), 'low resolution conditioning image must be present'
assert not (self.lowres_cond and not exists(lowres_noise_times)), 'low resolution conditioning noise time must be present'
if exists(lowres_cond_img):
x = torch.cat((x, lowres_cond_img), dim = 1)
if exists(cond_video_frames):
lowres_cond_img = torch.cat((cond_video_frames, lowres_cond_img), dim = 2)
cond_video_frames = torch.cat((cond_video_frames, cond_video_frames), dim = 1)
if exists(post_cond_video_frames):
lowres_cond_img = torch.cat((lowres_cond_img, post_cond_video_frames), dim = 2)
post_cond_video_frames = torch.cat((post_cond_video_frames, post_cond_video_frames), dim = 1)
# conditioning on video frames as a prompt
num_preceding_frames = 0
if exists(cond_video_frames):
cond_video_frames_len = cond_video_frames.shape[2]
assert divisible_by(cond_video_frames_len, self.total_temporal_divisor)
cond_video_frames = resize_video_to(cond_video_frames, x.shape[-1])
x = torch.cat((cond_video_frames, x), dim = 2)
num_preceding_frames = cond_video_frames_len
# conditioning on video frames as a prompt
num_succeeding_frames = 0
if exists(post_cond_video_frames):
cond_video_frames_len = post_cond_video_frames.shape[2]
assert divisible_by(cond_video_frames_len, self.total_temporal_divisor)
post_cond_video_frames = resize_video_to(post_cond_video_frames, x.shape[-1])
x = torch.cat((post_cond_video_frames, x), dim = 2)
num_succeeding_frames = cond_video_frames_len
# condition on input image
assert not (self.has_cond_image ^ exists(cond_images)), 'you either requested to condition on an image on the unet, but the conditioning image is not supplied, or vice versa'
if exists(cond_images):
assert cond_images.ndim == 4, 'conditioning images must have 4 dimensions only, if you want to condition on frames of video, use `cond_video_frames` instead'
            assert cond_images.shape[1] == self.cond_images_channels, 'the number of channels on the conditioning image you are passing in does not match what you specified on initialization of the unet'
cond_images = repeat(cond_images, 'b c h w -> b c f h w', f = x.shape[2])
cond_images = resize_video_to(cond_images, x.shape[-1], mode = self.resize_mode)
x = torch.cat((cond_images, x), dim = 1)
# ignoring time in pseudo 3d resnet blocks
conv_kwargs = dict(
ignore_time = ignore_time
)
# initial convolution
x = self.init_conv(x)
if not ignore_time:
x = self.init_temporal_peg(x)
x = self.init_temporal_attn(x)
# init conv residual
if self.init_conv_to_final_conv_residual:
init_conv_residual = x.clone()
# time conditioning
time_hiddens = self.to_time_hiddens(time)
# derive time tokens
time_tokens = self.to_time_tokens(time_hiddens)
t = self.to_time_cond(time_hiddens)
# add lowres time conditioning to time hiddens
# and add lowres time tokens along sequence dimension for attention
if self.lowres_cond:
lowres_time_hiddens = self.to_lowres_time_hiddens(lowres_noise_times)
lowres_time_tokens = self.to_lowres_time_tokens(lowres_time_hiddens)
lowres_t = self.to_lowres_time_cond(lowres_time_hiddens)
t = t + lowres_t
time_tokens = torch.cat((time_tokens, lowres_time_tokens), dim = -2)
# text conditioning
text_tokens = None
if exists(text_embeds) and self.cond_on_text:
# conditional dropout
text_keep_mask = prob_mask_like((batch_size,), 1 - cond_drop_prob, device = device)
text_keep_mask_embed = rearrange(text_keep_mask, 'b -> b 1 1')
text_keep_mask_hidden = rearrange(text_keep_mask, 'b -> b 1')
# calculate text embeds
text_tokens = self.text_to_cond(text_embeds)
text_tokens = text_tokens[:, :self.max_text_len]
if exists(text_mask):
text_mask = text_mask[:, :self.max_text_len]
text_tokens_len = text_tokens.shape[1]
remainder = self.max_text_len - text_tokens_len
if remainder > 0:
text_tokens = F.pad(text_tokens, (0, 0, 0, remainder))
if exists(text_mask):
if remainder > 0:
text_mask = F.pad(text_mask, (0, remainder), value = False)
text_mask = rearrange(text_mask, 'b n -> b n 1')
text_keep_mask_embed = text_mask & text_keep_mask_embed
null_text_embed = self.null_text_embed.to(text_tokens.dtype) # for some reason pytorch AMP not working
text_tokens = torch.where(
text_keep_mask_embed,
text_tokens,
null_text_embed
)
if exists(self.attn_pool):
text_tokens = self.attn_pool(text_tokens)
# extra non-attention conditioning by projecting and then summing text embeddings to time
# termed as text hiddens
mean_pooled_text_tokens = text_tokens.mean(dim = -2)
text_hiddens = self.to_text_non_attn_cond(mean_pooled_text_tokens)
null_text_hidden = self.null_text_hidden.to(t.dtype)
text_hiddens = torch.where(
text_keep_mask_hidden,
text_hiddens,
null_text_hidden
)
t = t + text_hiddens
# main conditioning tokens (c)
c = time_tokens if not exists(text_tokens) else torch.cat((time_tokens, text_tokens), dim = -2)
# normalize conditioning tokens
c = self.norm_cond(c)
# initial resnet block (for memory efficient unet)
if exists(self.init_resnet_block):
x = self.init_resnet_block(x, t, **conv_kwargs)
# go through the layers of the unet, down and up
hiddens = []
for pre_downsample, init_block, resnet_blocks, attn_block, temporal_peg, temporal_attn, temporal_downsample, post_downsample in self.downs:
if exists(pre_downsample):
x = pre_downsample(x)
x = init_block(x, t, c, **conv_kwargs)
for resnet_block in resnet_blocks:
x = resnet_block(x, t, **conv_kwargs)
hiddens.append(x)
x = attn_block(x, c)
if not ignore_time:
x = temporal_peg(x)
x = temporal_attn(x)
hiddens.append(x)
if exists(temporal_downsample) and not ignore_time:
x = temporal_downsample(x)
if exists(post_downsample):
x = post_downsample(x)
x = self.mid_block1(x, t, c, **conv_kwargs)
if exists(self.mid_attn):
x = self.mid_attn(x)
if not ignore_time:
x = self.mid_temporal_peg(x)
x = self.mid_temporal_attn(x)
x = self.mid_block2(x, t, c, **conv_kwargs)
add_skip_connection = lambda x: torch.cat((x, hiddens.pop() * self.skip_connect_scale), dim = 1)
up_hiddens = []
for init_block, resnet_blocks, attn_block, temporal_peg, temporal_attn, temporal_upsample, upsample in self.ups:
if exists(temporal_upsample) and not ignore_time:
x = temporal_upsample(x)
x = add_skip_connection(x)
x = init_block(x, t, c, **conv_kwargs)
for resnet_block in resnet_blocks:
x = add_skip_connection(x)
x = resnet_block(x, t, **conv_kwargs)
x = attn_block(x, c)
if not ignore_time:
x = temporal_peg(x)
x = temporal_attn(x)
up_hiddens.append(x.contiguous())
x = upsample(x)
# whether to combine all feature maps from upsample blocks
x = self.upsample_combiner(x, up_hiddens)
# final top-most residual if needed
if self.init_conv_to_final_conv_residual:
x = torch.cat((x, init_conv_residual), dim = 1)
if exists(self.final_res_block):
x = self.final_res_block(x, t, **conv_kwargs)
if exists(lowres_cond_img):
x = torch.cat((x, lowres_cond_img), dim = 1)
out = self.final_conv(x)
if num_preceding_frames > 0:
out = out[:, :, num_preceding_frames:]
if num_succeeding_frames > 0:
out = out[:, :, :-num_succeeding_frames]
return out | StarlightVision-master | starlight_vision/core/gen2_video.py |
from setuptools import setup, find_packages
setup(
name = 'VisualNexus',
packages = find_packages(exclude=['examples']),
version = '0.0.1',
license='MIT',
description = 'VisualNexus - Pytorch',
author = 'Kye Gomez',
author_email = '[email protected]',
url = 'https://github.com/kyegomez/VisualNexus',
long_description_content_type = 'text/markdown',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers'
'vision'
],
install_requires=[
'torch>=1.7.0',
# Base-----------------------------------
'matplotlib>=3.2.2',
'opencv-python>=4.6.0',
'Pillow>=7.1.2',
'PyYAML>=5.3.1',
'requests>=2.23.0',
'scipy>=1.4.1',
'torchvision>=0.8.1',
'tqdm>=4.64.0',
'pandas>=1.1.4',
'seaborn>=0.11.0',
'gradio==3.35.2',
# Ultralytics-----------------------------------
'ultralytics == 8.0.120',
        'metaseg'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | VisualNexus-master | setup.py |
from datasets import Dataset, concatenate_datasets
import pandas as pd
from models.sag_img import SAG_IMG
from models.sag_video import SAG_VID
import os
from datasets import load_dataset
def load_hf_dataset(dataset_name):
    # load the train split from the Hugging Face hub and return the list of
    # media file paths stored in its 'image' column (mirrors sag_img.load_hf_dataset)
    dataset = load_dataset(dataset_name, split='train')
    return [example['image'] for example in dataset]
class SAG_MEDIA:
"""
SAG_MEDIA: Segment Anything for Image and Video.
This class handles the process of iterating over a dataset of images and videos, segmenting them, and outputting a structured
dataset ready for pre-training a model.
"""
def __init__(self, image_file_paths, video_file_paths, model_path='./weights/FastSAM.pt', imgsz=1024, iou=0.9, text_prompt=None, conf=0.4,
output='./output/', randomcolor=True, point_prompt="[[0,0]]", point_label="[0]", box_prompt="[0,0,0,0]", better_quality=False,
device=None, retina=True, withContours=False):
# Initialize SAG_IMG and SAG_VID with the provided parameters
self.sag_img = SAG_IMG(
image_file_paths=image_file_paths,
model_path=model_path,
imgsz=imgsz,
iou=iou,
text_prompt=text_prompt,
conf=conf,
output=output,
randomcolor=randomcolor,
point_prompt=point_prompt,
point_label=point_label,
box_prompt=box_prompt,
better_quality=better_quality,
device=device,
retina=retina,
withContours=withContours
)
self.sag_vid = SAG_VID(
video_file_paths=video_file_paths,
model_path=model_path,
imgsz=imgsz,
iou=iou,
text_prompt=text_prompt,
conf=conf,
output=output,
randomcolor=randomcolor,
point_prompt=point_prompt,
point_label=point_label,
box_prompt=box_prompt,
better_quality=better_quality,
device=device,
retina=retina,
withContours=withContours
)
def segment(self):
# Perform segmentation for images and videos
self.sag_img.segment()
self.sag_vid.segment()
def create_dataset(self):
"""
Create a Huggingface dataset from the segmented images and videos and save it to disk.
This dataset includes the original media file path, segmented media file path, and optional text.
"""
# Create datasets for images and videos
self.sag_img.create_dataset()
self.sag_vid.create_dataset()
# Merge the datasets
img_dataset = Dataset.load_from_disk(os.path.join(str(self.sag_img.output), 'segmented_dataset'))
vid_dataset = Dataset.load_from_disk(os.path.join(str(self.sag_vid.output), 'segmented_dataset'))
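        # note: concatenate_datasets requires both datasets to share the same column schema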
        merged_dataset = concatenate_datasets([img_dataset, vid_dataset])
# Save the merged dataset to disk
merged_dataset.save_to_disk(os.path.join(str(self.sag_img.output), 'merged_segmented_dataset'))
if __name__ == "__main__":
dataset_name = "echarlaix/vqa" # dataset name
image_file_paths = load_hf_dataset(dataset_name)
video_file_paths = load_hf_dataset(dataset_name)
media_seg = SAG_MEDIA(image_file_paths, video_file_paths)
media_seg.segment()
media_seg.create_dataset() | VisualNexus-master | VisualNexus/models/sag_both.py |
from datasets import load_dataset
from metaseg import SegAutoMaskPredictor
import os
from datasets import Dataset
import pandas as pd
class SAG_VID:
    def __init__(self, model_type='vit_l', points_per_side=16, points_per_batch=64, min_area=1000, output_dir='./output'):
        """
        Segment anything for video class
        Args:
            model_type (str): type of the SAM model to use for segmentation. Options: 'vit_l', 'vit_h', 'vit_b'
            points_per_side (int): number of points per side
            points_per_batch (int): number of points per batch
            min_area (int): minimum mask area
            output_dir (str): directory to save output files
        """
self.model_type = model_type
self.points_per_side = points_per_side
self.points_per_batch = points_per_batch
self.min_area = min_area
self.output_dir = output_dir
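        # metaseg's SegAutoMaskPredictor runs SAM's automatic mask generation over video frames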
self.predictor = SegAutoMaskPredictor()
def segment_video(self, video_path):
"""
Segment a video file
Args:
video_path (str): path to the video file
"""
output_path = os.path.join(self.output_dir, os.path.basename(video_path))
        self.predictor.video_predict(
            source=video_path,
            model_type=self.model_type,
            points_per_side=self.points_per_side,
            points_per_batch=self.points_per_batch,
            min_area=self.min_area,
            output_path=output_path,
        )
    def load_and_segment_dataset(self, dataset_name):
"""
Load a video dataset from hf datasets library and segment it
Args:
dataset_name (str): Name of the dataset in hf datasets
"""
#load the train split of the dataset from huggingface datasets
dataset = load_dataset(dataset_name, split='train')
#iterate over the dataset
for item in dataset:
#assuming file path is the key for the video file paths
video_path = item['file_path']
            # segment the video
self.segment_video(video_path)
def create_dataset(self):
"""
Create a Huggingface dataset from the segmented videos and save it to disk.
"""
# List to store the dataset examples
examples = []
# Iterate over the output directory
for file_name in os.listdir(self.output_dir):
if file_name.endswith('.mp4'): # Assuming segmented videos are in mp4 format
# Construct the original video path
video_path = '...' # Replace this with the logic to construct original video path from segmented video path
# Construct the segmented video path
segmented_video_path = os.path.join(self.output_dir, file_name)
# Append the example to the list
examples.append({
'video_path': video_path,
'segmented_video_path': segmented_video_path,
})
# Convert the list of examples into a pandas DataFrame
df = pd.DataFrame(examples)
# Convert the DataFrame into a Huggingface dataset
dataset = Dataset.from_pandas(df)
# Save the dataset to disk
dataset.save_to_disk(os.path.join(self.output_dir, 'segmented_dataset'))
if __name__ == "__main__":
vid_seg = SAG_VID(output_dir='./output_videos')
vid_seg.load_and_segment_dataset('dataset_name')
vid_seg.create_dataset() | VisualNexus-master | VisualNexus/models/sag_video.py |
import os
import pandas as pd
from pathlib import Path
from datasets import Dataset
from mobile_sam import sam_model_registry, SamAutomaticMaskGenerator
from PIL import Image
import numpy as np
class MobileSAM:
    def __init__(self, img_path: str, output: str, hf_dataset, text_prompt=None, checkpoint_path='./weights/mobile_sam.pt'):
        self.img_path = img_path
        self.output = output
        self.hf_dataset = hf_dataset
        self.text_prompt = text_prompt
        # Initialize the SAM generator: build the MobileSAM (ViT-Tiny) backbone
        # from its checkpoint and wrap it in the automatic mask generator
        sam = sam_model_registry['vit_t'](checkpoint=checkpoint_path)
        self.mask_generator = SamAutomaticMaskGenerator(sam)
    def segment_images(self):
        for idx in range(len(self.hf_dataset)):
            # assuming the dataset's image column decodes to PIL images (the default for
            # datasets Image features), convert to the RGB numpy array SAM expects
            image = np.array(self.hf_dataset[idx]["image"].convert("RGB"))
            masks = self.mask_generator.generate(image)
            # Save each binary segmentation mask as an image file
            for i, mask in enumerate(masks):
                segmented_image_path = os.path.join(self.output, f'{idx}_{i}.jpg')
                Image.fromarray((mask['segmentation'] * 255).astype(np.uint8)).save(segmented_image_path)
def create_dataset(self):
"""
Create a Huggingface dataset from the segmented images and save it to disk.
This dataset includes the original image path, segmented image path, and optional text.
"""
# List to store the dataset examples
examples = []
# Iterate over the output directory
for file_name in os.listdir(self.output):
if file_name.endswith('.jpg'): # Assuming segmented images are in jpg format
# Construct the original image path
image_path = os.path.join(str(Path(self.img_path).parent), file_name)
# Construct the segmented image path
segmented_image_path = os.path.join(str(self.output), file_name)
# Append the example to the list
examples.append({
'image_path': image_path,
'segmented_image_path': segmented_image_path,
'text_prompt': self.text_prompt if self.text_prompt else None,
})
# Convert the list of examples into a pandas DataFrame
df = pd.DataFrame(examples)
# Convert the DataFrame into a Huggingface dataset
dataset = Dataset.from_pandas(df)
# Save the dataset to disk
dataset.save_to_disk(os.path.join(str(self.output), 'segmented_dataset'))
def process(self):
self.segment_images()
self.create_dataset()
# from datasets import load_dataset
# # Assuming you have images in 'images' directory and want output in 'output' directory
# mobile_sam = MobileSAM('images', 'output', load_dataset('your_hf_dataset'))
# mobile_sam.process()
| VisualNexus-master | VisualNexus/models/mobile_sam.py |
from VisualNexus.models.sag_img import SAG_IMG
from VisualNexus.models.sag_video import SAG_VID
| VisualNexus-master | VisualNexus/models/__init__.py |
import os
from pathlib import Path
from ultralytics import YOLO
from FastSAM.utils.tools import fast_process, convert_box_xywh_to_xyxy, format_results, box_prompt, point_prompt, text_prompt
import ast
import torch
import cv2
import numpy as np
from datasets import Dataset, load_dataset
import pandas as pd
def load_hf_dataset(dataset_name):
    # load a dataset from the Hugging Face datasets library and
    # return a list of file paths for the images in its train split
dataset = load_dataset(dataset_name)
file_paths = []
for example in dataset['train']:
file_path = example['image']
file_paths.append(file_path)
return file_paths
class SAG_IMG:
"""
SAG_IMG: Segment Anything for Image.
This class handles the process of iterating over a dataset of images, segmenting them, and outputting a structured
dataset ready for pre-training a model.
...
Attributes
----------
    image_file_paths : list
        List of paths to the images to be segmented.
model_path : str
Path to the model to be used for segmentation (default is './weights/FastSAM.pt')
imgsz : int
Size of the image (default is 1024)
iou : float
IoU threshold for filtering the annotations (default is 0.9)
text_prompt : str
Text prompt to be used in the segmentation process (default is None)
conf : float
Object confidence threshold (default is 0.4)
output : str
Path to save the output (default is './output/')
randomcolor : bool
Indicates if mask random color should be used (default is True)
point_prompt : str
Point prompt for the segmentation process (default is '[[0,0]]')
point_label : str
Point label for the segmentation process (default is '[0]')
box_prompt : str
Box prompt for the segmentation process (default is '[0,0,0,0]')
better_quality : bool
Indicates if better quality using morphologyEx should be used (default is False)
device : str
Device to be used for processing (default is None, which leads to automatic selection of 'cuda' or 'cpu')
retina : bool
Indicates if high-resolution segmentation masks should be drawn (default is True)
withContours : bool
Indicates if the edges of the masks should be drawn (default is False)
Methods
-------
segment():
Processes all images in the dataset folder and saves the segmentation results to the output directory.
    prompt(results, box=None, point=None, text=None):
        Prompts the segmentation process based on the provided results and prompt type.
fast_process(annotations, mask_random_color):
Performs the fast process function from FastSAM.utils.tools on the given annotations.
"""
def __init__(self, image_file_paths, model_path='./weights/FastSAM.pt', imgsz=1024, iou=0.9, text_prompt=None, conf=0.4,
output='./output/', randomcolor=True, point_prompt="[[0,0]]", point_label="[0]", box_prompt="[0,0,0,0]", better_quality=False,
device=None, retina=True, withContours=False):
self.image_file_paths = image_file_paths
self.model_path = model_path
self.imgsz = imgsz
self.iou = iou
self.text_prompt = text_prompt
self.conf = conf
self.output = Path(output)
self.randomcolor = randomcolor
self.point_prompt = ast.literal_eval(point_prompt)
self.point_label = ast.literal_eval(point_label)
self.box_prompt = ast.literal_eval(box_prompt)
self.better_quality = better_quality
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if device is None else device
self.retina = retina
self.withContours = withContours
def segment(self):
# Load the YOLO model from the specified path
model = YOLO(self.model_path)
# Iterate over the list of file paths
for img_path in self.image_file_paths:
self.img_path = img_path
# Process the image with the YOLO model and get the results
results = model(
self.img_path,
imgsz=self.imgsz,
device=self.device,
retina_masks=self.retina,
iou=self.iou,
conf=self.conf,
max_det=100,
)
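            # results[0].masks.data holds one mask per detected region (ultralytics Results API)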
if self.box_prompt[2] != 0 and self.box_prompt[3] != 0:
annotations = self.prompt(results, box=True)
elif self.text_prompt is not None:
results = format_results(results[0], 0)
annotations = self.prompt(results, text=True)
elif self.point_prompt[0] != [0, 0]:
results = format_results(results[0], 0)
annotations = self.prompt(results, point=True)
else:
annotations = results[0].masks.data
annotations = np.array([annotations])
self.fast_process(
annotations=annotations,
mask_random_color=self.randomcolor,
)
    def prompt(self, results, box=None, point=None, text=None):
        ori_img = cv2.imread(self.img_path)
        ori_h = ori_img.shape[0]
        ori_w = ori_img.shape[1]
        if box:
            mask, idx = box_prompt(
                results[0].masks.data,
                convert_box_xywh_to_xyxy(self.box_prompt),
                ori_h,
                ori_w,
            )
        elif point:
            mask, idx = point_prompt(
                results, self.point_prompt, self.point_label, ori_h, ori_w
            )
        elif text:
            mask, idx = text_prompt(results, self.text_prompt, self.img_path, self.device)
        else:
            return None
        return mask
def fast_process(self, annotations, mask_random_color):
output_path = self.output / Path(self.img_path).name
fast_process(
annotations=annotations,
args=self,
mask_random_color=mask_random_color,
output_path=str(output_path),
)
def create_dataset(self):
"""
Create a Huggingface dataset from the segmented images and save it to disk.
This dataset includes the original image path, segmented image path, and optional text.
"""
# List to store the dataset examples
examples = []
# Iterate over the output directory
for file_name in os.listdir(self.output):
if file_name.endswith('.jpg'): # Assuming segmented images are in jpg format
# Construct the original image path
image_path = os.path.join(str(Path(self.img_path).parent), file_name)
# Construct the segmented image path
segmented_image_path = os.path.join(str(self.output), file_name)
# Append the example to the list
examples.append({
'image_path': image_path,
'segmented_image_path': segmented_image_path,
'text_prompt': self.text_prompt if self.text_prompt else None,
})
# Convert the list of examples into a pandas DataFrame
df = pd.DataFrame(examples)
# Convert the DataFrame into a Huggingface dataset
dataset = Dataset.from_pandas(df)
# Save the dataset to disk
dataset.save_to_disk(os.path.join(str(self.output), 'segmented_dataset'))
if __name__ == "__main__":
dataset_name="echarlaix/vqa" #dataset name
image_file_paths = load_hf_dataset(dataset_name)
img_seg = SAG_IMG(image_file_paths)
img_seg.segment()
img_seg.create_dataset()
| VisualNexus-master | VisualNexus/models/sag_img.py |
from ultralytics import YOLO
import gradio as gr
import torch
from utils.tools_gradio import fast_process, format_results, box_prompt, point_prompt
from PIL import ImageDraw
import numpy as np
# Load the pre-trained model
model = YOLO('./weights/FastSAM.pt')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Description
title = "<center><strong><font size='8'>🏃 Fast Segment Anything 🤗</font></strong></center>"
news = """ # 📖 News
🔥 2023/06/24: Added the "Advanced options" in Everything mode for more detailed adjustment.
🔥 2023/06/26: Added support for the points mode. (Better and faster interaction will come soon!)
"""
description_e = """This is a demo on Github project 🏃 [Fast Segment Anything Model](https://github.com/CASIA-IVA-Lab/FastSAM).
🎯 Upload an Image, segment it with Fast Segment Anything (Everything mode). The other modes will come soon.
⌛️ It takes about 6 seconds to generate segmentation results. The queue's concurrency_count is 1, so please wait a moment when it is crowded.
🚀 To get faster results, you can use a smaller input size and leave better_visual_quality unchecked.
📣 You can also obtain the segmentation results of any Image through this Colab: [](https://colab.research.google.com/drive/1oX14f6IneGGw612WgVlAiy91UHwFAvr9?usp=sharing)
😚 A huge thanks goes out to the @HuggingFace Team for supporting us with GPU grant.
🏠 Check out our [Model Card 🏃](https://huggingface.co/An-619/FastSAM)
"""
description_p = """This is a demo on Github project 🏃 [Fast Segment Anything Model](https://github.com/CASIA-IVA-Lab/FastSAM).
🎯 Upload an Image, add points and segment it with Fast Segment Anything (Points mode).
⌛️ It takes about 6 seconds to generate segmentation results. The queue's concurrency_count is 1, so please wait a moment when it is crowded.
🚀 To get faster results, you can use a smaller input size and leave better_visual_quality unchecked.
📣 You can also obtain the segmentation results of any Image through this Colab: [](https://colab.research.google.com/drive/1oX14f6IneGGw612WgVlAiy91UHwFAvr9?usp=sharing)
😚 A huge thanks goes out to the @HuggingFace Team for supporting us with GPU grant.
🏠 Check out our [Model Card 🏃](https://huggingface.co/An-619/FastSAM)
"""
examples = [["examples/sa_8776.jpg"], ["examples/sa_414.jpg"], ["examples/sa_1309.jpg"], ["examples/sa_11025.jpg"],
["examples/sa_561.jpg"], ["examples/sa_192.jpg"], ["examples/sa_10039.jpg"], ["examples/sa_862.jpg"]]
default_example = examples[0]
css = "h1 { text-align: center } .about { text-align: justify; padding-left: 10%; padding-right: 10%; }"
def segment_everything(
input,
input_size=1024,
iou_threshold=0.7,
conf_threshold=0.25,
better_quality=False,
withContours=True,
use_retina=True,
mask_random_color=True,
):
    input_size = int(input_size)  # ensure imgsz is an integer
# Thanks for the suggestion by hysts in HuggingFace.
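    # resize so the longer side equals input_size while preserving the aspect ratio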
w, h = input.size
scale = input_size / max(w, h)
new_w = int(w * scale)
new_h = int(h * scale)
input = input.resize((new_w, new_h))
results = model(input,
device=device,
retina_masks=True,
iou=iou_threshold,
conf=conf_threshold,
imgsz=input_size,)
fig = fast_process(annotations=results[0].masks.data,
image=input,
device=device,
scale=(1024 // input_size),
better_quality=better_quality,
mask_random_color=mask_random_color,
bbox=None,
use_retina=use_retina,
withContours=withContours,)
return fig
def segment_with_points(
input,
input_size=1024,
iou_threshold=0.7,
conf_threshold=0.25,
better_quality=False,
withContours=True,
mask_random_color=True,
use_retina=True,
):
global global_points
global global_point_label
    input_size = int(input_size)  # ensure imgsz is an integer
# Thanks for the suggestion by hysts in HuggingFace.
w, h = input.size
scale = input_size / max(w, h)
new_w = int(w * scale)
new_h = int(h * scale)
input = input.resize((new_w, new_h))
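    # map the clicked points into the resized image's coordinate frame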
scaled_points = [[int(x * scale) for x in point] for point in global_points]
results = model(input,
device=device,
retina_masks=True,
iou=iou_threshold,
conf=conf_threshold,
imgsz=input_size,)
results = format_results(results[0], 0)
annotations, _ = point_prompt(results, scaled_points, global_point_label, new_h, new_w)
annotations = np.array([annotations])
fig = fast_process(annotations=annotations,
image=input,
device=device,
scale=(1024 // input_size),
better_quality=better_quality,
mask_random_color=mask_random_color,
bbox=None,
use_retina=use_retina,
withContours=withContours,)
global_points = []
global_point_label = []
return fig, None
def get_points_with_draw(image, label, evt: gr.SelectData):
x, y = evt.index[0], evt.index[1]
point_radius, point_color = 15, (255, 255, 0) if label == 'Add Mask' else (255, 0, 255)
global global_points
global global_point_label
print((x, y))
global_points.append([x, y])
global_point_label.append(1 if label == 'Add Mask' else 0)
    # create a drawing object for the image
draw = ImageDraw.Draw(image)
draw.ellipse([(x - point_radius, y - point_radius), (x + point_radius, y + point_radius)], fill=point_color)
return image
# input_size=1024
# high_quality_visual=True
# inp = 'examples/sa_192.jpg'
# input = Image.open(inp)
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
# input_size = int(input_size) # 确保 imgsz 是整数
# results = model(input, device=device, retina_masks=True, iou=0.7, conf=0.25, imgsz=input_size)
# pil_image = fast_process(annotations=results[0].masks.data,
# image=input, high_quality=high_quality_visual, device=device)
cond_img_e = gr.Image(label="Input", value=default_example[0], type='pil')
cond_img_p = gr.Image(label="Input with points", value=default_example[0], type='pil')
segm_img_e = gr.Image(label="Segmented Image", interactive=False, type='pil')
segm_img_p = gr.Image(label="Segmented Image with points", interactive=False, type='pil')
global_points = []
global_point_label = [] # TODO:Clear points each image
input_size_slider = gr.components.Slider(minimum=512,
maximum=1024,
value=1024,
step=64,
label='Input_size',
info='Our model was trained on a size of 1024')
with gr.Blocks(css=css, title='Fast Segment Anything') as demo:
with gr.Row():
with gr.Column(scale=1):
# Title
gr.Markdown(title)
with gr.Column(scale=1):
# News
gr.Markdown(news)
with gr.Tab("Everything mode"):
# Images
with gr.Row(variant="panel"):
with gr.Column(scale=1):
cond_img_e.render()
with gr.Column(scale=1):
segm_img_e.render()
# Submit & Clear
with gr.Row():
with gr.Column():
input_size_slider.render()
with gr.Row():
contour_check = gr.Checkbox(value=True, label='withContours', info='draw the edges of the masks')
with gr.Column():
segment_btn_e = gr.Button("Segment Everything", variant='primary')
clear_btn_e = gr.Button("Clear", variant="secondary")
gr.Markdown("Try some of the examples below ⬇️")
gr.Examples(examples=examples,
inputs=[cond_img_e],
outputs=segm_img_e,
fn=segment_everything,
cache_examples=True,
examples_per_page=4)
with gr.Column():
with gr.Accordion("Advanced options", open=False):
iou_threshold = gr.Slider(0.1, 0.9, 0.7, step=0.1, label='iou', info='iou threshold for filtering the annotations')
conf_threshold = gr.Slider(0.1, 0.9, 0.25, step=0.05, label='conf', info='object confidence threshold')
with gr.Row():
mor_check = gr.Checkbox(value=False, label='better_visual_quality', info='better quality using morphologyEx')
with gr.Column():
retina_check = gr.Checkbox(value=True, label='use_retina', info='draw high-resolution segmentation masks')
# Description
gr.Markdown(description_e)
with gr.Tab("Points mode"):
# Images
with gr.Row(variant="panel"):
with gr.Column(scale=1):
cond_img_p.render()
with gr.Column(scale=1):
segm_img_p.render()
# Submit & Clear
with gr.Row():
with gr.Column():
with gr.Row():
add_or_remove = gr.Radio(["Add Mask", "Remove Area"], value="Add Mask", label="Point_label (foreground/background)")
with gr.Column():
segment_btn_p = gr.Button("Segment with points prompt", variant='primary')
clear_btn_p = gr.Button("Clear points", variant='secondary')
gr.Markdown("Try some of the examples below ⬇️")
gr.Examples(examples=examples,
inputs=[cond_img_p],
outputs=segm_img_p,
fn=segment_with_points,
# cache_examples=True,
examples_per_page=4)
with gr.Column():
# Description
gr.Markdown(description_p)
cond_img_p.select(get_points_with_draw, [cond_img_p, add_or_remove], cond_img_p)
segment_btn_e.click(segment_everything,
inputs=[cond_img_e, input_size_slider, iou_threshold, conf_threshold, mor_check, contour_check, retina_check],
outputs=segm_img_e)
segment_btn_p.click(segment_with_points,
inputs=[cond_img_p],
outputs=[segm_img_p, cond_img_p])
def clear():
return None, None
clear_btn_e.click(clear, outputs=[cond_img_e, segm_img_e])
clear_btn_p.click(clear, outputs=[cond_img_p, segm_img_p])
demo.queue()
demo.launch()
| VisualNexus-master | VisualNexus/models/FastSAM/app_gradio.py |
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md
# Thanks for chenxwh.
import argparse
import cv2
import shutil
import ast
from cog import BasePredictor, Input, Path
from ultralytics import YOLO
from utils.tools import *
class Predictor(BasePredictor):
def setup(self):
"""Load the model into memory to make running multiple predictions efficient"""
self.models = {k: YOLO(f"{k}.pt") for k in ["FastSAM-s", "FastSAM-x"]}
def predict(
self,
input_image: Path = Input(description="Input image"),
model_name: str = Input(
description="choose a model",
choices=["FastSAM-x", "FastSAM-s"],
default="FastSAM-x",
),
iou: float = Input(
description="iou threshold for filtering the annotations", default=0.9
),
text_prompt: str = Input(
description='use text prompt eg: "a dog"', default=None
),
conf: float = Input(description="object confidence threshold", default=0.4),
retina: bool = Input(
description="draw high-resolution segmentation masks", default=True
),
box_prompt: str = Input(default="[0,0,0,0]", description="[x,y,w,h]"),
point_prompt: str = Input(default="[[0,0]]", description="[[x1,y1],[x2,y2]]"),
point_label: str = Input(default="[0]", description="[1,0] 0:background, 1:foreground"),
withContours: bool = Input(
description="draw the edges of the masks", default=False
),
better_quality: bool = Input(
description="better quality using morphologyEx", default=False
),
) -> Path:
"""Run a single prediction on the model"""
# default params
out_path = "output"
if os.path.exists(out_path):
shutil.rmtree(out_path)
os.makedirs(out_path, exist_ok=True)
args = argparse.Namespace(
better_quality=better_quality,
box_prompt=box_prompt,
conf=conf,
device=torch.device("cuda"),
img_path=str(input_image),
imgsz=1024,
iou=iou,
model_path="FastSAM-x.pt",
output=out_path,
point_label=point_label,
point_prompt=point_prompt,
randomcolor=True,
retina=retina,
text_prompt=text_prompt,
withContours=withContours,
)
args.point_prompt = ast.literal_eval(args.point_prompt)
args.box_prompt = ast.literal_eval(args.box_prompt)
args.point_label = ast.literal_eval(args.point_label)
model = self.models[model_name]
results = model(
str(input_image),
imgsz=args.imgsz,
device=args.device,
retina_masks=args.retina,
iou=args.iou,
conf=args.conf,
max_det=100,
)
if args.box_prompt[2] != 0 and args.box_prompt[3] != 0:
annotations = prompt(results, args, box=True)
annotations = np.array([annotations])
fast_process(
annotations=annotations,
args=args,
mask_random_color=args.randomcolor,
bbox=convert_box_xywh_to_xyxy(args.box_prompt),
)
        elif args.text_prompt is not None:
results = format_results(results[0], 0)
annotations = prompt(results, args, text=True)
annotations = np.array([annotations])
fast_process(
annotations=annotations, args=args, mask_random_color=args.randomcolor
)
elif args.point_prompt[0] != [0, 0]:
results = format_results(results[0], 0)
annotations = prompt(results, args, point=True)
# list to numpy
annotations = np.array([annotations])
fast_process(
annotations=annotations,
args=args,
mask_random_color=args.randomcolor,
points=args.point_prompt,
)
else:
fast_process(
annotations=results[0].masks.data,
args=args,
mask_random_color=args.randomcolor,
)
out = "/tmp.out.png"
shutil.copy(os.path.join(out_path, os.listdir(out_path)[0]), out)
return Path(out)
def prompt(results, args, box=None, point=None, text=None):
ori_img = cv2.imread(args.img_path)
ori_h = ori_img.shape[0]
ori_w = ori_img.shape[1]
if box:
mask, idx = box_prompt(
results[0].masks.data,
convert_box_xywh_to_xyxy(args.box_prompt),
ori_h,
ori_w,
)
elif point:
mask, idx = point_prompt(
results, args.point_prompt, args.point_label, ori_h, ori_w
)
elif text:
mask, idx = text_prompt(results, args.text_prompt, args.img_path, args.device)
else:
return None
return mask
| VisualNexus-master | VisualNexus/models/FastSAM/predict.py |