python_code | repo_name | file_path |
---|---|---|
from .tin_lr_hook import TINLrUpdaterHook
__all__ = ['TINLrUpdaterHook']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/lr/__init__.py |
from mmcv.runner import HOOKS, LrUpdaterHook
from mmcv.runner.hooks.lr_updater import annealing_cos
@HOOKS.register_module()
class TINLrUpdaterHook(LrUpdaterHook):
def __init__(self, min_lr, **kwargs):
self.min_lr = min_lr
super(TINLrUpdaterHook, self).__init__(**kwargs)
def get_warmup_lr(self, cur_iters):
if self.warmup == 'linear':
# 'linear' warmup is rewritten according to TIN repo:
# https://github.com/deepcs233/TIN/blob/master/main.py#L409-L412
k = (cur_iters / self.warmup_iters) * (
1 - self.warmup_ratio) + self.warmup_ratio
warmup_lr = [_lr * k for _lr in self.regular_lr]
elif self.warmup == 'constant':
warmup_lr = [_lr * self.warmup_ratio for _lr in self.regular_lr]
elif self.warmup == 'exp':
k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
warmup_lr = [_lr * k for _lr in self.regular_lr]
return warmup_lr
def get_lr(self, runner, base_lr):
if self.by_epoch:
progress = runner.epoch
max_progress = runner.max_epochs
else:
progress = runner.iter
max_progress = runner.max_iters
target_lr = self.min_lr
if self.warmup is not None:
progress = progress - self.warmup_iters
max_progress = max_progress - self.warmup_iters
factor = progress / max_progress
return annealing_cos(base_lr, target_lr, factor)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/lr/tin_lr_hook.py |
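A minimal usage sketch for the TINLrUpdaterHook above (not from the repo): in mmcv-style configs the learning-rate policy name is usually the class name with the 'LrUpdaterHook' suffix stripped, so the hook could be selected as below; all field values are illustrative assumptions.
# Hypothetical config snippet; values are assumptions for illustration.
lr_config = dict(
    policy='TIN',         # assumed to resolve to TINLrUpdaterHook via the HOOKS registry
    min_lr=0.0,           # target learning rate for the cosine annealing in get_lr()
    warmup='linear',      # 'linear', 'constant' or 'exp', as handled in get_warmup_lr()
    warmup_iters=500,
    warmup_ratio=0.1,
    by_epoch=False)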
import os.path as osp
import torch
from .base import BaseDataset
from .registry import DATASETS
@DATASETS.register_module()
class AudioDataset(BaseDataset):
"""Audio dataset for video recognition. Extracts the audio feature on-the-
fly. Annotation file can be that of the rawframe dataset, or:
.. code-block:: txt
some/directory-1.wav 163 1
some/directory-2.wav 122 1
some/directory-3.wav 258 2
some/directory-4.wav 234 2
some/directory-5.wav 295 3
some/directory-6.wav 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
suffix (str): The suffix of the audio file. Default: '.wav'.
kwargs (dict): Other keyword args for `BaseDataset`.
"""
def __init__(self, ann_file, pipeline, suffix='.wav', **kwargs):
self.suffix = suffix
super().__init__(ann_file, pipeline, modality='Audio', **kwargs)
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
filename = line_split[idx]
if self.data_prefix is not None:
if not filename.endswith(self.suffix):
filename = osp.join(self.data_prefix,
filename + self.suffix)
else:
filename = osp.join(self.data_prefix, filename)
video_info['audio_path'] = filename
idx += 1
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
onehot = torch.zeros(self.num_classes)
onehot[label] = 1.0
video_info['label'] = onehot
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_dataset.py |
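A small, self-contained sketch of how one annotation line is turned into a record by AudioDataset.load_annotations above; the data_prefix and paths are hypothetical.
import os.path as osp

def parse_audio_line(line, data_prefix='data/audio_wav', suffix='.wav'):
    # Mirrors the single-label branch of AudioDataset.load_annotations.
    rel_path, total_frames, label = line.strip().split()
    if not rel_path.endswith(suffix):
        rel_path += suffix
    return dict(audio_path=osp.join(data_prefix, rel_path),
                total_frames=int(total_frames), label=int(label))

print(parse_audio_line('some/directory-1 163 1'))
# {'audio_path': 'data/audio_wav/some/directory-1.wav', 'total_frames': 163, 'label': 1}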
import copy
import os
import os.path as osp
import warnings
import mmcv
import numpy as np
from ..core import average_recall_at_avg_proposals
from .base import BaseDataset
from .registry import DATASETS
@DATASETS.register_module()
class ActivityNetDataset(BaseDataset):
"""ActivityNet dataset for temporal action localization.
The dataset loads raw features and applies specified transforms to return
a dict containing the frame tensors and other information.
The ann_file is a JSON file with multiple objects; each key is a video
name and the corresponding value records the video's total frames, total
seconds, annotations, feature frames (frames covered by features), fps
and rfps. Example of an annotation file:
.. code-block:: JSON
{
"v_--1DO2V4K74": {
"duration_second": 211.53,
"duration_frame": 6337,
"annotations": [
{
"segment": [
30.025882995319815,
205.2318595943838
],
"label": "Rock climbing"
}
],
"feature_frame": 6336,
"fps": 30.0,
"rfps": 29.9579255898
},
"v_--6bJUbfpnQ": {
"duration_second": 26.75,
"duration_frame": 647,
"annotations": [
{
"segment": [
2.578755070202808,
24.914101404056165
],
"label": "Drinking beer"
}
],
"feature_frame": 624,
"fps": 24.0,
"rfps": 24.1869158879
},
...
}
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
data_prefix (str | None): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
"""
def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False):
super().__init__(ann_file, pipeline, data_prefix, test_mode)
def load_annotations(self):
"""Load the annotation according to ann_file into video_infos."""
video_infos = []
anno_database = mmcv.load(self.ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['data_prefix'] = self.data_prefix
return self.pipeline(results)
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['data_prefix'] = self.data_prefix
return self.pipeline(results)
def __len__(self):
"""Get the size of the dataset."""
return len(self.video_infos)
def _import_ground_truth(self):
"""Read ground truth data from video_infos."""
ground_truth = {}
for video_info in self.video_infos:
video_id = video_info['video_name'][2:]
this_video_ground_truths = []
for ann in video_info['annotations']:
t_start, t_end = ann['segment']
label = ann['label']
this_video_ground_truths.append([t_start, t_end, label])
ground_truth[video_id] = np.array(this_video_ground_truths)
return ground_truth
@staticmethod
def proposals2json(results, show_progress=False):
"""Convert all proposals to a final dict(json) format.
Args:
results (list[dict]): All proposals.
show_progress (bool): Whether to show the progress bar.
Defaults: False.
Returns:
dict: The final result dict. E.g.
.. code-block:: Python
dict(video-1=[dict(segment=[1.1, 2.0], score=0.9),
dict(segment=[50.1, 129.3], score=0.6)])
"""
result_dict = {}
print('Convert proposals to json format')
if show_progress:
prog_bar = mmcv.ProgressBar(len(results))
for result in results:
video_name = result['video_name']
result_dict[video_name[2:]] = result['proposal_list']
if show_progress:
prog_bar.update()
return result_dict
@staticmethod
def _import_proposals(results):
"""Read predictions from results."""
proposals = {}
num_proposals = 0
for result in results:
video_id = result['video_name'][2:]
this_video_proposals = []
for proposal in result['proposal_list']:
t_start, t_end = proposal['segment']
score = proposal['score']
this_video_proposals.append([t_start, t_end, score])
num_proposals += 1
proposals[video_id] = np.array(this_video_proposals)
return proposals, num_proposals
def dump_results(self, results, out, output_format, version='VERSION 1.3'):
"""Dump data to json/csv files."""
if output_format == 'json':
result_dict = self.proposals2json(results)
output_dict = {
'version': version,
'results': result_dict,
'external_data': {}
}
mmcv.dump(output_dict, out)
elif output_format == 'csv':
# TODO: add csv handler to mmcv and use mmcv.dump
os.makedirs(out, exist_ok=True)
header = 'action,start,end,tmin,tmax'
for result in results:
video_name, outputs = result
output_path = osp.join(out, video_name + '.csv')
np.savetxt(
output_path,
outputs,
header=header,
delimiter=',',
comments='')
else:
raise ValueError(
f'The output format {output_format} is not supported.')
def evaluate(
self,
results,
metrics='AR@AN',
metric_options={
'AR@AN':
dict(
max_avg_proposals=100,
temporal_iou_thresholds=np.linspace(0.5, 0.95, 10))
},
logger=None,
**deprecated_kwargs):
"""Evaluation in feature dataset.
Args:
results (list[dict]): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'AR@AN'.
metric_options (dict): Dict for metric options. Options are
``max_avg_proposals``, ``temporal_iou_thresholds`` for
``AR@AN``.
default: ``{'AR@AN': dict(max_avg_proposals=100,
temporal_iou_thresholds=np.linspace(0.5, 0.95, 10))}``.
logger (logging.Logger | None): Training logger. Defaults: None.
deprecated_kwargs (dict): Used for containing deprecated arguments.
See 'https://github.com/open-mmlab/mmaction2/pull/286'.
Returns:
dict: Evaluation results for evaluation metrics.
"""
# Protect ``metric_options`` since it uses mutable value as default
metric_options = copy.deepcopy(metric_options)
if deprecated_kwargs != {}:
warnings.warn(
'Option arguments for metrics have been changed to '
"`metric_options`, See 'https://github.com/open-mmlab/mmaction2/pull/286' " # noqa: E501
'for more details')
metric_options['AR@AN'] = dict(metric_options['AR@AN'],
**deprecated_kwargs)
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
f'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
allowed_metrics = ['AR@AN']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
eval_results = {}
ground_truth = self._import_ground_truth()
proposal, num_proposals = self._import_proposals(results)
for metric in metrics:
if metric == 'AR@AN':
temporal_iou_thresholds = metric_options.setdefault(
'AR@AN', {}).setdefault('temporal_iou_thresholds',
np.linspace(0.5, 0.95, 10))
max_avg_proposals = metric_options.setdefault(
'AR@AN', {}).setdefault('max_avg_proposals', 100)
if isinstance(temporal_iou_thresholds, list):
temporal_iou_thresholds = np.array(temporal_iou_thresholds)
recall, _, _, auc = (
average_recall_at_avg_proposals(
ground_truth,
proposal,
num_proposals,
max_avg_proposals=max_avg_proposals,
temporal_iou_thresholds=temporal_iou_thresholds))
eval_results['auc'] = auc
eval_results['AR@1'] = np.mean(recall[:, 0])
eval_results['AR@5'] = np.mean(recall[:, 4])
eval_results['AR@10'] = np.mean(recall[:, 9])
eval_results['AR@100'] = np.mean(recall[:, 99])
return eval_results
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/activitynet_dataset.py |
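A brief sketch (fabricated score, segment values rounded from the docstring example) of the `results` format consumed by `proposals2json` and `_import_proposals` above; note the leading 'v_' of ActivityNet video names is dropped via `video_name[2:]`.
results = [dict(
    video_name='v_--1DO2V4K74',
    proposal_list=[dict(segment=[30.03, 205.23], score=0.9)])]
# ActivityNetDataset.proposals2json(results) would yield:
# {'--1DO2V4K74': [{'segment': [30.03, 205.23], 'score': 0.9}]}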
import copy
import os.path as osp
import numpy as np
import torch
from .base import BaseDataset
from .registry import DATASETS
@DATASETS.register_module()
class RawframeDataset(BaseDataset):
"""Rawframe dataset for action recognition.
The dataset loads raw frames and applies specified transforms to return a
dict containing the frame tensors and other information.
The ann_file is a text file with multiple lines, and each line indicates
the frame directory of a video, the total frames of the video and
the label of the video, split by a whitespace.
Example of an annotation file:
.. code-block:: txt
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
some/directory-4 234 2
some/directory-5 295 3
some/directory-6 121 3
Example of a multi-class annotation file:
.. code-block:: txt
some/directory-1 163 1 3 5
some/directory-2 122 1 2
some/directory-3 258 2
some/directory-4 234 2 4 6 8
some/directory-5 295 3
some/directory-6 121 3
Example of a with_offset annotation file (clips from long videos), where
each line indicates the frame directory of a video clip, the index of its
start frame, the total frames of the clip and the label of the clip,
split by a whitespace.
.. code-block:: txt
some/directory-1 12 163 3
some/directory-2 213 122 4
some/directory-3 100 258 5
some/directory-4 98 234 2
some/directory-5 0 295 3
some/directory-6 50 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
data_prefix (str | None): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
filename_tmpl (str): Template for each filename.
Default: 'img_{:05}.jpg'.
with_offset (bool): Determines whether the offset information is in
ann_file. Default: False.
multi_class (bool): Determines whether it is a multi-class
recognition dataset. Default: False.
num_classes (int | None): Number of classes in the dataset.
Default: None.
start_index (int): Specify a start index for frames in consideration of
different filename format. Default: 1.
modality (str): Modality of data. Support 'RGB', 'Flow'.
Default: 'RGB'.
sample_by_class (bool): Sampling by class, should be set `True` when
performing inter-class data balancing. Only compatible with
`multi_class == False`. Only applies for training. Default: False.
power (float | None): We support sampling data with the probability
proportional to the power of its label frequency (freq ^ power)
when sampling data. `power == 1` indicates uniformly sampling all
data; `power == 0` indicates uniformly sampling all classes.
Default: None.
"""
def __init__(self,
ann_file,
pipeline,
data_prefix=None,
test_mode=False,
filename_tmpl='img_{:05}.jpg',
with_offset=False,
multi_class=False,
num_classes=None,
start_index=1,
modality='RGB',
sample_by_class=False,
power=None):
self.filename_tmpl = filename_tmpl
self.with_offset = with_offset
super().__init__(
ann_file,
pipeline,
data_prefix,
test_mode,
multi_class,
num_classes,
start_index,
modality,
sample_by_class=sample_by_class,
power=power)
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
# idx for frame_dir
frame_dir = line_split[idx]
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
video_info['frame_dir'] = frame_dir
idx += 1
if self.with_offset:
# idx for offset and total_frames
video_info['offset'] = int(line_split[idx])
video_info['total_frames'] = int(line_split[idx + 1])
idx += 2
else:
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
video_info['label'] = label
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
if self.sample_by_class:
# Then, the idx is the class index
samples = self.video_infos_by_class[idx]
results = copy.deepcopy(np.random.choice(samples))
else:
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
if self.multi_class:
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
if self.sample_by_class:
# Then, the idx is the class index
samples = self.video_infos_by_class[idx]
results = copy.deepcopy(np.random.choice(samples))
else:
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
if self.multi_class:
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/rawframe_dataset.py |
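A small sketch of how RawframeDataset.load_annotations above parses a `with_offset` line; the data_prefix is hypothetical.
import os.path as osp

line_split = 'some/directory-1 12 163 3'.split()  # frame_dir offset total_frames label
video_info = dict(
    frame_dir=osp.join('data/rawframes', line_split[0]),
    offset=int(line_split[1]),
    total_frames=int(line_split[2]),
    label=int(line_split[3]))
print(video_info)
# {'frame_dir': 'data/rawframes/some/directory-1', 'offset': 12, 'total_frames': 163, 'label': 3}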
import copy
import os.path as osp
import mmcv
import numpy as np
from mmcv.utils import print_log
from ..core import mean_average_precision
from .base import BaseDataset
from .registry import DATASETS
@DATASETS.register_module()
class HVUDataset(BaseDataset):
"""HVU dataset, which supports the recognition tags of multiple categories.
Accept both video annotation files or rawframe annotation files.
The dataset loads videos or raw frames and applies specified transforms to
return a dict containing the frame tensors and other information.
The ann_file is a json file with multiple dictionaries, and each dictionary
indicates a sample video with the filename and tags, the tags are organized
as different categories. Example of a video dictionary:
.. code-block:: txt
{
'filename': 'gD_G1b0wV5I_001015_001035.mp4',
'label': {
'concept': [250, 131, 42, 51, 57, 155, 122],
'object': [1570, 508],
'event': [16],
'action': [180],
'scene': [206]
}
}
Example of a rawframe dictionary:
.. code-block:: txt
{
'frame_dir': 'gD_G1b0wV5I_001015_001035',
'total_frames': 61,
'label': {
'concept': [250, 131, 42, 51, 57, 155, 122],
'object': [1570, 508],
'event': [16],
'action': [180],
'scene': [206]
}
}
Args:
ann_file (str): Path to the annotation file, should be a json file.
pipeline (list[dict | callable]): A sequence of data transforms.
tag_categories (list[str]): List of category names of tags.
tag_category_nums (list[int]): List of number of tags in each category.
filename_tmpl (str | None): Template for each filename. If set to None,
video dataset is used. Default: None.
**kwargs: Keyword arguments for ``BaseDataset``.
"""
def __init__(self,
ann_file,
pipeline,
tag_categories,
tag_category_nums,
filename_tmpl=None,
**kwargs):
assert len(tag_categories) == len(tag_category_nums)
self.tag_categories = tag_categories
self.tag_category_nums = tag_category_nums
self.filename_tmpl = filename_tmpl
self.num_categories = len(self.tag_categories)
self.num_tags = sum(self.tag_category_nums)
self.category2num = dict(zip(tag_categories, tag_category_nums))
self.start_idx = [0]
for i in range(self.num_categories - 1):
self.start_idx.append(self.start_idx[-1] +
self.tag_category_nums[i])
self.category2startidx = dict(zip(tag_categories, self.start_idx))
self.start_index = kwargs.pop('start_index', 0)
self.dataset_type = None
super().__init__(
ann_file, pipeline, start_index=self.start_index, **kwargs)
def load_annotations(self):
"""Load annotation file to get video information."""
assert self.ann_file.endswith('.json')
return self.load_json_annotations()
def load_json_annotations(self):
video_infos = mmcv.load(self.ann_file)
num_videos = len(video_infos)
video_info0 = video_infos[0]
assert ('filename' in video_info0) != ('frame_dir' in video_info0)
path_key = 'filename' if 'filename' in video_info0 else 'frame_dir'
self.dataset_type = 'video' if path_key == 'filename' else 'rawframe'
if self.dataset_type == 'rawframe':
assert self.filename_tmpl is not None
for i in range(num_videos):
path_value = video_infos[i][path_key]
if self.data_prefix is not None:
path_value = osp.join(self.data_prefix, path_value)
video_infos[i][path_key] = path_value
# We will convert label to torch tensors in the pipeline
video_infos[i]['categories'] = self.tag_categories
video_infos[i]['category_nums'] = self.tag_category_nums
if self.dataset_type == 'rawframe':
video_infos[i]['filename_tmpl'] = self.filename_tmpl
video_infos[i]['start_index'] = self.start_index
video_infos[i]['modality'] = self.modality
return video_infos
@staticmethod
def label2array(num, label):
arr = np.zeros(num, dtype=np.float32)
arr[label] = 1.
return arr
def evaluate(self,
results,
metrics='mean_average_precision',
metric_options=None,
logger=None):
"""Evaluation in HVU Video Dataset. We only support evaluating mAP for
each tag categories. Since some tag categories are missing for some
videos, we can not evaluate mAP for all tags.
Args:
results (list): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'mean_average_precision'.
metric_options (dict | None): Dict for metric options.
Default: None.
logger (logging.Logger | None): Logger for recording.
Default: None.
Returns:
dict: Evaluation results dict.
"""
# Protect ``metric_options`` since it uses mutable value as default
metric_options = copy.deepcopy(metric_options)
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
f'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
# There should be only one metric in the metrics list:
# 'mean_average_precision'
assert len(metrics) == 1
metric = metrics[0]
assert metric == 'mean_average_precision'
gt_labels = [ann['label'] for ann in self.video_infos]
eval_results = {}
for category in self.tag_categories:
start_idx = self.category2startidx[category]
num = self.category2num[category]
preds = [
result[start_idx:start_idx + num]
for video_idx, result in enumerate(results)
if category in gt_labels[video_idx]
]
gts = [
gt_label[category] for gt_label in gt_labels
if category in gt_label
]
gts = [self.label2array(num, item) for item in gts]
mAP = mean_average_precision(preds, gts)
eval_results[f'{category}_mAP'] = mAP
log_msg = f'\n{category}_mAP\t{mAP:.4f}'
print_log(log_msg, logger=logger)
return eval_results
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/hvu_dataset.py |
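A toy sketch (category names and counts are made up) of the flat score layout HVUDataset.evaluate above relies on: each tag category owns a contiguous slice of the prediction vector starting at category2startidx[category].
import numpy as np

tag_categories = ['action', 'scene', 'object']
tag_category_nums = [3, 2, 4]
start_idx = [0]
for num in tag_category_nums[:-1]:
    start_idx.append(start_idx[-1] + num)
print(dict(zip(tag_categories, start_idx)))   # {'action': 0, 'scene': 3, 'object': 5}

# label2array builds the one-hot target for a single category slice.
arr = np.zeros(2, dtype=np.float32)
arr[[1]] = 1.
print(arr)                                    # [0. 1.]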
from .registry import DATASETS
from .video_dataset import VideoDataset
@DATASETS.register_module()
class ImageDataset(VideoDataset):
"""Image dataset for action recognition, used in the Project OmniSource.
The dataset loads image list and apply specified transforms to return a
dict containing the image tensors and other information. For the
ImageDataset
The ann_file is a text file with multiple lines, and each line indicates
the image path and the image label, which are split with a whitespace.
Example of a annotation file:
.. code-block:: txt
path/to/image1.jpg 1
path/to/image2.jpg 1
path/to/image3.jpg 2
path/to/image4.jpg 2
path/to/image5.jpg 3
path/to/image6.jpg 3
Example of a multi-class annotation file:
.. code-block:: txt
path/to/image1.jpg 1 3 5
path/to/image2.jpg 1 2
path/to/image3.jpg 2
path/to/image4.jpg 2 4 6 8
path/to/image5.jpg 3
path/to/image6.jpg 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
**kwargs: Keyword arguments for ``BaseDataset``.
"""
def __init__(self, ann_file, pipeline, **kwargs):
super().__init__(ann_file, pipeline, start_index=None, **kwargs)
# use `start_index=None` to indicate it is for `ImageDataset`
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/image_dataset.py |
from mmcv.utils import Registry
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/registry.py |
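A minimal sketch of the registry pattern used throughout this package: classes are registered with a decorator and later instantiated from config dicts via mmcv's build_from_cfg. The toy registry and class below are illustrative only.
from mmcv.utils import Registry, build_from_cfg

TOY_DATASETS = Registry('toy_dataset')

@TOY_DATASETS.register_module()
class ToyDataset:
    def __init__(self, ann_file):
        self.ann_file = ann_file

ds = build_from_cfg(dict(type='ToyDataset', ann_file='train_list.txt'), TOY_DATASETS)
print(type(ds).__name__, ds.ann_file)   # ToyDataset train_list.txt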
import os.path as osp
import torch
from .base import BaseDataset
from .registry import DATASETS
@DATASETS.register_module()
class AudioFeatureDataset(BaseDataset):
"""Audio feature dataset for video recognition. Reads the features
extracted off-line. Annotation file can be that of the rawframe dataset,
or:
.. code-block:: txt
some/directory-1.npy 163 1
some/directory-2.npy 122 1
some/directory-3.npy 258 2
some/directory-4.npy 234 2
some/directory-5.npy 295 3
some/directory-6.npy 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
suffix (str): The suffix of the audio feature file. Default: '.npy'.
kwargs (dict): Other keyword args for `BaseDataset`.
"""
def __init__(self, ann_file, pipeline, suffix='.npy', **kwargs):
self.suffix = suffix
super().__init__(ann_file, pipeline, modality='Audio', **kwargs)
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
filename = line_split[idx]
if self.data_prefix is not None:
if not filename.endswith(self.suffix):
filename = osp.join(self.data_prefix,
filename) + self.suffix
else:
filename = osp.join(self.data_prefix, filename)
video_info['audio_path'] = filename
idx += 1
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
onehot = torch.zeros(self.num_classes)
onehot[label] = 1.0
video_info['label'] = onehot
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_feature_dataset.py |
from .activitynet_dataset import ActivityNetDataset
from .audio_dataset import AudioDataset
from .audio_feature_dataset import AudioFeatureDataset
from .audio_visual_dataset import AudioVisualDataset
from .ava_dataset import AVADataset
from .base import BaseDataset
from .builder import build_dataloader, build_dataset
from .dataset_wrappers import RepeatDataset
from .hvu_dataset import HVUDataset
from .image_dataset import ImageDataset
from .rawframe_dataset import RawframeDataset
from .rawvideo_dataset import RawVideoDataset
from .ssn_dataset import SSNDataset
from .video_dataset import VideoDataset
__all__ = [
'VideoDataset', 'build_dataloader', 'build_dataset', 'RepeatDataset',
'RawframeDataset', 'BaseDataset', 'ActivityNetDataset', 'SSNDataset',
'HVUDataset', 'AudioDataset', 'AudioFeatureDataset', 'ImageDataset',
'RawVideoDataset', 'AVADataset', 'AudioVisualDataset'
]
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/__init__.py |
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import build_from_cfg
from torch.utils.data import DataLoader
from .dataset_wrappers import RepeatDataset
from .registry import DATASETS
from .samplers import DistributedPowerSampler, DistributedSampler
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
hard_limit = rlimit[1]
soft_limit = min(4096, hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
def build_dataset(cfg, default_args=None):
"""Build a dataset from config dict.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
default_args (dict | None, optional): Default initialization arguments.
Default: None.
Returns:
Dataset: The constructed dataset.
"""
if cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
def build_dataloader(dataset,
videos_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
drop_last=False,
pin_memory=True,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (:obj:`Dataset`): A PyTorch dataset.
videos_per_gpu (int): Number of videos on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data
loading for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed
training. Default: 1.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
seed (int | None): Seed to be used. Default: None.
drop_last (bool): Whether to drop the last incomplete batch in epoch.
Default: False
pin_memory (bool): Whether to use pin_memory in DataLoader.
Default: True
kwargs (dict, optional): Any keyword argument to be used to initialize
DataLoader.
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
sample_by_class = getattr(dataset, 'sample_by_class', False)
power = getattr(dataset, 'power', None)
if dist:
if sample_by_class:
assert power is not None
sampler = DistributedPowerSampler(dataset, world_size, rank, power)
else:
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=shuffle)
shuffle = False
batch_size = videos_per_gpu
num_workers = workers_per_gpu
else:
sampler = None
batch_size = num_gpus * videos_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=videos_per_gpu),
pin_memory=pin_memory,
shuffle=shuffle,
worker_init_fn=init_fn,
drop_last=drop_last,
**kwargs)
return data_loader
def worker_init_fn(worker_id, num_workers, rank, seed):
"""Init the random seed for various workers."""
# The seed of each worker is
# num_workers * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/builder.py |
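A short sketch of the per-worker seeding scheme implemented by worker_init_fn above; the numbers are illustrative.
num_workers, rank, user_seed = 4, 1, 42
for worker_id in range(num_workers):
    worker_seed = num_workers * rank + worker_id + user_seed
    print(worker_id, worker_seed)   # 0 46 / 1 47 / 2 48 / 3 49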
import os.path as osp
from .rawframe_dataset import RawframeDataset
from .registry import DATASETS
@DATASETS.register_module()
class AudioVisualDataset(RawframeDataset):
"""Dataset that reads both audio and visual data, supporting both rawframes
and videos. The annotation file is the same as that of the rawframe dataset,
such as:
.. code-block:: txt
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
some/directory-4 234 2
some/directory-5 295 3
some/directory-6 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
audio_prefix (str): Directory of the audio files.
kwargs (dict): Other keyword args for `RawframeDataset`. `video_prefix`
is also allowed if pipeline is designed for videos.
"""
def __init__(self, ann_file, pipeline, audio_prefix, **kwargs):
self.audio_prefix = audio_prefix
self.video_prefix = kwargs.pop('video_prefix', None)
self.data_prefix = kwargs.get('data_prefix', None)
super().__init__(ann_file, pipeline, **kwargs)
def load_annotations(self):
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
# idx for frame_dir
frame_dir = line_split[idx]
if self.audio_prefix is not None:
audio_path = osp.join(self.audio_prefix,
frame_dir + '.npy')
video_info['audio_path'] = audio_path
if self.video_prefix:
video_path = osp.join(self.video_prefix,
frame_dir + '.mp4')
video_info['filename'] = video_path
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
video_info['frame_dir'] = frame_dir
idx += 1
if self.with_offset:
# idx for offset and total_frames
video_info['offset'] = int(line_split[idx])
video_info['total_frames'] = int(line_split[idx + 1])
idx += 2
else:
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert len(label), f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
video_info['label'] = label
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_visual_dataset.py |
from .registry import DATASETS
@DATASETS.register_module()
class RepeatDataset:
"""A wrapper of repeated dataset.
The length of repeated dataset will be ``times`` larger than the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
"""Get data."""
return self.dataset[idx % self._ori_len]
def __len__(self):
"""Length after repetition."""
return self.times * self._ori_len
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/dataset_wrappers.py |
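A tiny sketch of RepeatDataset's indexing semantics, using a plain list as a stand-in dataset (assuming the class above is importable from mmaction.datasets).
from mmaction.datasets import RepeatDataset  # assumption: package is installed

repeated = RepeatDataset(['a', 'b', 'c'], times=3)
print(len(repeated))   # 9
print(repeated[4])     # 'b', since 4 % 3 == 1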
import os.path as osp
import torch
from .base import BaseDataset
from .registry import DATASETS
@DATASETS.register_module()
class VideoDataset(BaseDataset):
"""Video dataset for action recognition.
The dataset loads raw videos and applies specified transforms to return a
dict containing the frame tensors and other information.
The ann_file is a text file with multiple lines, and each line indicates
a sample video with the filepath and label, split by a whitespace.
Example of an annotation file:
.. code-block:: txt
some/path/000.mp4 1
some/path/001.mp4 1
some/path/002.mp4 2
some/path/003.mp4 2
some/path/004.mp4 3
some/path/005.mp4 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
start_index (int): Specify a start index for frames in consideration of
different filename format. However, when taking videos as input,
it should be set to 0, since frames loaded from videos count
from 0. Default: 0.
**kwargs: Keyword arguments for ``BaseDataset``.
"""
def __init__(self, ann_file, pipeline, start_index=0, **kwargs):
super().__init__(ann_file, pipeline, start_index=start_index, **kwargs)
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
if self.multi_class:
assert self.num_classes is not None
filename, label = line_split[0], line_split[1:]
label = list(map(int, label))
onehot = torch.zeros(self.num_classes)
onehot[label] = 1.0
else:
filename, label = line_split
label = int(label)
if self.data_prefix is not None:
filename = osp.join(self.data_prefix, filename)
video_infos.append(
dict(
filename=filename,
label=onehot if self.multi_class else label))
return video_infos
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/video_dataset.py |
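A small sketch of the multi-class branch in VideoDataset.load_annotations above; num_classes=5 is an assumption for illustration.
import torch

line_split = 'some/path/000.mp4 1 3'.split()
filename, label = line_split[0], list(map(int, line_split[1:]))
onehot = torch.zeros(5)
onehot[label] = 1.0
print(onehot)   # tensor([0., 1., 0., 1., 0.])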
import copy
import os.path as osp
import warnings
import mmcv
import numpy as np
from torch.nn.modules.utils import _pair
from ..core import softmax
from ..localization import (eval_ap, load_localize_proposal_file,
perform_regression, temporal_iou, temporal_nms)
from ..utils import get_root_logger
from .base import BaseDataset
from .registry import DATASETS
class SSNInstance:
"""Proposal instance of SSN.
Args:
start_frame (int): Index of the proposal's start frame.
end_frame (int): Index of the proposal's end frame.
num_video_frames (int): Total frames of the video.
label (int | None): The category label of the proposal. Default: None.
best_iou (float): The highest IOU with the groundtruth instance.
Default: 0.
overlap_self (float): Percent of the proposal's own span contained
in a groundtruth instance. Default: 0.
"""
def __init__(self,
start_frame,
end_frame,
num_video_frames,
label=None,
best_iou=0,
overlap_self=0):
self.start_frame = start_frame
self.end_frame = min(end_frame, num_video_frames)
self.num_video_frames = num_video_frames
self.label = label if label is not None else -1
self.coverage = (end_frame - start_frame) / num_video_frames
self.best_iou = best_iou
self.overlap_self = overlap_self
self.loc_reg = None
self.size_reg = None
self.regression_targets = [0., 0.]
def compute_regression_targets(self, gt_list):
"""Compute regression targets of positive proposals.
Args:
gt_list (list): The list of groundtruth instances.
"""
# Find the groundtruth instance with the highest IOU.
ious = [
temporal_iou(self.start_frame, self.end_frame, gt.start_frame,
gt.end_frame) for gt in gt_list
]
best_gt = gt_list[np.argmax(ious)]
# interval: [start_frame, end_frame)
proposal_center = (self.start_frame + self.end_frame - 1) / 2
gt_center = (best_gt.start_frame + best_gt.end_frame - 1) / 2
proposal_size = self.end_frame - self.start_frame
gt_size = best_gt.end_frame - best_gt.start_frame
# Get regression targets:
# (1). Localization regression target:
# center shift proportional to the proposal duration
# (2). Duration/Size regression target:
# logarithm of the groundtruth duration over proposal duration
self.loc_reg = (gt_center - proposal_center) / proposal_size
self.size_reg = np.log(gt_size / proposal_size)
self.regression_targets = ([self.loc_reg, self.size_reg]
if self.loc_reg is not None else [0., 0.])
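# Worked example for compute_regression_targets (illustrative numbers, not from
# any dataset): for a proposal spanning frames [100, 200) whose best-matching
# groundtruth spans [120, 220):
#   proposal_center = (100 + 200 - 1) / 2 = 149.5
#   gt_center       = (120 + 220 - 1) / 2 = 169.5
#   proposal_size   = 100, gt_size = 100
#   loc_reg  = (169.5 - 149.5) / 100 = 0.2
#   size_reg = log(100 / 100)        = 0.0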
@DATASETS.register_module()
class SSNDataset(BaseDataset):
"""Proposal frame dataset for Structured Segment Networks.
Based on proposal information, the dataset loads raw frames and applies
specified transforms to return a dict containing the frame tensors and
other information.
The ann_file is a text file with multiple lines, and each video's
information takes up several lines. This file can be a normalized file
with percentages or a standard file with specific frame indexes. If the
file is a normalized file, it will be converted into a standard file first.
Template information of a video in a standard file:
.. code-block:: txt
# index
video_id
num_frames
fps
num_gts
label, start_frame, end_frame
label, start_frame, end_frame
...
num_proposals
label, best_iou, overlap_self, start_frame, end_frame
label, best_iou, overlap_self, start_frame, end_frame
...
Example of a standard annotation file:
.. code-block:: txt
# 0
video_validation_0000202
5666
1
3
8 130 185
8 832 1136
8 1303 1381
5
8 0.0620 0.0620 790 5671
8 0.1656 0.1656 790 2619
8 0.0833 0.0833 3945 5671
8 0.0960 0.0960 4173 5671
8 0.0614 0.0614 3327 5671
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
train_cfg (dict): Config for training.
test_cfg (dict): Config for testing.
data_prefix (str): Path to a directory where videos are held.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
filename_tmpl (str): Template for each filename.
Default: 'img_{:05}.jpg'.
start_index (int): Specify a start index for frames in consideration of
different filename format. Default: 1.
modality (str): Modality of data. Support 'RGB', 'Flow'.
Default: 'RGB'.
video_centric (bool): Whether to sample proposals just from
this video or sample proposals randomly from the entire dataset.
Default: True.
reg_normalize_constants (list): Regression target normalized constants,
including mean and standard deviation of location and duration.
body_segments (int): Number of segments in course period.
Default: 5.
aug_segments (list[int]): Number of segments in starting and
ending period. Default: (2, 2).
aug_ratio (int | float | tuple[int | float]): The ratio of the length
of augmentation to that of the proposal. Default: (0.5, 0.5).
clip_len (int): Frames of each sampled output clip.
Default: 1.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
filter_gt (bool): Whether to filter videos with no annotation
during training. Default: True.
use_regression (bool): Whether to perform regression. Default: True.
verbose (bool): Whether to print full information or not.
Default: False.
"""
def __init__(self,
ann_file,
pipeline,
train_cfg,
test_cfg,
data_prefix,
test_mode=False,
filename_tmpl='img_{:05d}.jpg',
start_index=1,
modality='RGB',
video_centric=True,
reg_normalize_constants=None,
body_segments=5,
aug_segments=(2, 2),
aug_ratio=(0.5, 0.5),
clip_len=1,
frame_interval=1,
filter_gt=True,
use_regression=True,
verbose=False):
self.logger = get_root_logger()
super().__init__(
ann_file,
pipeline,
data_prefix=data_prefix,
test_mode=test_mode,
start_index=start_index,
modality=modality)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.assigner = train_cfg.ssn.assigner
self.sampler = train_cfg.ssn.sampler
self.evaluater = test_cfg.ssn.evaluater
self.verbose = verbose
self.filename_tmpl = filename_tmpl
if filter_gt or not test_mode:
valid_inds = [
i for i, video_info in enumerate(self.video_infos)
if len(video_info['gts']) > 0
]
self.logger.info(f'{len(valid_inds)} out of {len(self.video_infos)} '
f'videos are valid.')
self.video_infos = [self.video_infos[i] for i in valid_inds]
# construct three pools:
# 1. Positive(Foreground)
# 2. Background
# 3. Incomplete
self.positive_pool = []
self.background_pool = []
self.incomplete_pool = []
self.construct_proposal_pools()
if reg_normalize_constants is None:
self.reg_norm_consts = self._compute_reg_normalize_constants()
else:
self.reg_norm_consts = reg_normalize_constants
self.video_centric = video_centric
self.body_segments = body_segments
self.aug_segments = aug_segments
self.aug_ratio = _pair(aug_ratio)
if not mmcv.is_tuple_of(self.aug_ratio, (int, float)):
raise TypeError(f'aug_ratio should be int, float '
f'or tuple of int and float, '
f'but got {type(aug_ratio)}')
assert len(self.aug_ratio) == 2
total_ratio = (
self.sampler.positive_ratio + self.sampler.background_ratio +
self.sampler.incomplete_ratio)
self.positive_per_video = int(
self.sampler.num_per_video *
(self.sampler.positive_ratio / total_ratio))
self.background_per_video = int(
self.sampler.num_per_video *
(self.sampler.background_ratio / total_ratio))
self.incomplete_per_video = (
self.sampler.num_per_video - self.positive_per_video -
self.background_per_video)
self.test_interval = self.test_cfg.ssn.sampler.test_interval
# number of consecutive frames
self.clip_len = clip_len
# number of steps (sparse sampling for efficiency of io)
self.frame_interval = frame_interval
# whether to filter out videos without groundtruth during training
self.filter_gt = filter_gt
self.use_regression = use_regression
self.test_mode = test_mode
# yapf: disable
if self.verbose:
self.logger.info(f"""
SSNDataset: proposal file {self.proposal_file} parsed.
There are {len(self.positive_pool) + len(self.background_pool) +
len(self.incomplete_pool)} usable proposals from {len(self.video_infos)} videos.
{len(self.positive_pool)} positive proposals
{len(self.incomplete_pool)} incomplete proposals
{len(self.background_pool)} background proposals
Sample config:
FG/BG/INCOMP: {self.positive_per_video}/{self.background_per_video}/{self.incomplete_per_video} # noqa:E501
Video Centric: {self.video_centric}
Regression Normalization Constants:
Location: mean {self.reg_norm_consts[0][0]:.05f} std {self.reg_norm_consts[1][0]:.05f} # noqa: E501
Duration: mean {self.reg_norm_consts[0][1]:.05f} std {self.reg_norm_consts[1][1]:.05f} # noqa: E501
""")
# yapf: enable
else:
self.logger.info(
f'SSNDataset: proposal file {self.proposal_file} parsed.')
def load_annotations(self):
"""Load annotation file to get video information."""
video_infos = []
if 'normalized_' in self.ann_file:
self.proposal_file = self.ann_file.replace('normalized_', '')
if not osp.exists(self.proposal_file):
raise Exception(f'Please refer to `$MMACTION2/tools/data` to '
f'denormalize {self.ann_file}.')
else:
self.proposal_file = self.ann_file
proposal_infos = load_localize_proposal_file(self.proposal_file)
# proposal_info:[video_id, num_frames, gt_list, proposal_list]
# gt_list member: [label, start_frame, end_frame]
# proposal_list member: [label, best_iou, overlap_self,
# start_frame, end_frame]
for proposal_info in proposal_infos:
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, proposal_info[0])
num_frames = int(proposal_info[1])
# gts:start, end, num_frames, class_label, tIoU=1
gts = []
for x in proposal_info[2]:
if int(x[2]) > int(x[1]) and int(x[1]) < num_frames:
ssn_instance = SSNInstance(
int(x[1]),
int(x[2]),
num_frames,
label=int(x[0]),
best_iou=1.0)
gts.append(ssn_instance)
# proposals:start, end, num_frames, class_label
# tIoU=best_iou, overlap_self
proposals = []
for x in proposal_info[3]:
if int(x[4]) > int(x[3]) and int(x[3]) < num_frames:
ssn_instance = SSNInstance(
int(x[3]),
int(x[4]),
num_frames,
label=int(x[0]),
best_iou=float(x[1]),
overlap_self=float(x[2]))
proposals.append(ssn_instance)
video_infos.append(
dict(
frame_dir=frame_dir,
video_id=proposal_info[0],
total_frames=num_frames,
gts=gts,
proposals=proposals))
return video_infos
def results_to_detections(self, results, top_k=2000, **kwargs):
"""Convert prediction results into detections.
Args:
results (list): Prediction results.
top_k (int): Number of top results. Default: 2000.
Returns:
list: Detection results.
"""
num_classes = results[0]['activity_scores'].shape[1] - 1
detections = [dict() for _ in range(num_classes)]
for idx in range(len(self)):
video_id = self.video_infos[idx]['video_id']
relative_proposals = results[idx]['relative_proposal_list']
if len(relative_proposals[0].shape) == 3:
relative_proposals = np.squeeze(relative_proposals, 0)
activity_scores = results[idx]['activity_scores']
completeness_scores = results[idx]['completeness_scores']
regression_scores = results[idx]['bbox_preds']
if regression_scores is None:
regression_scores = np.zeros(
(len(relative_proposals), num_classes, 2),
dtype=np.float32)
regression_scores = regression_scores.reshape((-1, num_classes, 2))
if top_k <= 0:
combined_scores = (
softmax(activity_scores[:, 1:], dim=1) *
np.exp(completeness_scores))
for i in range(num_classes):
center_scores = regression_scores[:, i, 0][:, None]
duration_scores = regression_scores[:, i, 1][:, None]
detections[i][video_id] = np.concatenate(
(relative_proposals, combined_scores[:, i][:, None],
center_scores, duration_scores),
axis=1)
else:
combined_scores = (
softmax(activity_scores[:, 1:], dim=1) *
np.exp(completeness_scores))
keep_idx = np.argsort(combined_scores.ravel())[-top_k:]
for k in keep_idx:
class_idx = k % num_classes
proposal_idx = k // num_classes
new_item = [
relative_proposals[proposal_idx, 0],
relative_proposals[proposal_idx,
1], combined_scores[proposal_idx,
class_idx],
regression_scores[proposal_idx, class_idx,
0], regression_scores[proposal_idx,
class_idx, 1]
]
if video_id not in detections[class_idx]:
detections[class_idx][video_id] = np.array([new_item])
else:
detections[class_idx][video_id] = np.vstack(
[detections[class_idx][video_id], new_item])
return detections
def evaluate(self,
results,
metrics='mAP',
metric_options=dict(mAP=dict(eval_dataset='thumos14')),
logger=None,
**deprecated_kwargs):
"""Evaluation in SSN proposal dataset.
Args:
results (list[dict]): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'mAP'.
metric_options (dict): Dict for metric options. Options are
``eval_dataset`` for ``mAP``.
Default: ``dict(mAP=dict(eval_dataset='thumos14'))``.
logger (logging.Logger | None): Logger for recording.
Default: None.
deprecated_kwargs (dict): Used for containing deprecated arguments.
See 'https://github.com/open-mmlab/mmaction2/pull/286'.
Returns:
dict: Evaluation results for evaluation metrics.
"""
# Protect ``metric_options`` since it uses mutable value as default
metric_options = copy.deepcopy(metric_options)
if deprecated_kwargs != {}:
warnings.warn(
'Option arguments for metrics have been changed to '
"`metric_options`, See 'https://github.com/open-mmlab/mmaction2/pull/286' " # noqa: E501
'for more details')
metric_options['mAP'] = dict(metric_options['mAP'],
**deprecated_kwargs)
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
f'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
allowed_metrics = ['mAP']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
detections = self.results_to_detections(results, **self.evaluater)
if self.use_regression:
self.logger.info('Performing location regression')
for class_idx, _ in enumerate(detections):
detections[class_idx] = {
k: perform_regression(v)
for k, v in detections[class_idx].items()
}
self.logger.info('Regression finished')
self.logger.info('Performing NMS')
for class_idx, _ in enumerate(detections):
detections[class_idx] = {
k: temporal_nms(v, self.evaluater.nms)
for k, v in detections[class_idx].items()
}
self.logger.info('NMS finished')
# get gts
all_gts = self.get_all_gts()
for class_idx, _ in enumerate(detections):
if class_idx not in all_gts:
all_gts[class_idx] = dict()
# get predictions
plain_detections = {}
for class_idx, _ in enumerate(detections):
detection_list = []
for video, dets in detections[class_idx].items():
detection_list.extend([[video, class_idx] + x[:3]
for x in dets.tolist()])
plain_detections[class_idx] = detection_list
eval_results = {}
for metric in metrics:
if metric == 'mAP':
eval_dataset = metric_options.setdefault('mAP', {}).setdefault(
'eval_dataset', 'thumos14')
if eval_dataset == 'thumos14':
iou_range = np.arange(0.1, 1.0, .1)
ap_values = eval_ap(plain_detections, all_gts, iou_range)
map_ious = ap_values.mean(axis=0)
self.logger.info('Evaluation finished')
for iou, map_iou in zip(iou_range, map_ious):
eval_results[f'mAP@{iou:.02f}'] = map_iou
return eval_results
def construct_proposal_pools(self):
"""Construct positve proposal pool, incomplete proposal pool and
background proposal pool of the entire dataset."""
for video_info in self.video_infos:
positives = self.get_positives(
video_info['gts'], video_info['proposals'],
self.assigner.positive_iou_threshold,
self.sampler.add_gt_as_proposals)
self.positive_pool.extend([(video_info['video_id'], proposal)
for proposal in positives])
incompletes, backgrounds = self.get_negatives(
video_info['proposals'],
self.assigner.incomplete_iou_threshold,
self.assigner.background_iou_threshold,
self.assigner.background_coverage_threshold,
self.assigner.incomplete_overlap_threshold)
self.incomplete_pool.extend([(video_info['video_id'], proposal)
for proposal in incompletes])
self.background_pool.extend([(video_info['video_id'], proposal)
for proposal in backgrounds])
def get_all_gts(self):
"""Fetch groundtruth instances of the entire dataset."""
gts = {}
for video_info in self.video_infos:
video = video_info['video_id']
for gt in video_info['gts']:
class_idx = gt.label - 1
# gt_info: [relative_start, relative_end]
gt_info = [
gt.start_frame / video_info['total_frames'],
gt.end_frame / video_info['total_frames']
]
gts.setdefault(class_idx, {}).setdefault(video,
[]).append(gt_info)
return gts
@staticmethod
def get_positives(gts, proposals, positive_threshold, with_gt=True):
"""Get positive/foreground proposals.
Args:
gts (list): List of groundtruth instances(:obj:`SSNInstance`).
proposals (list): List of proposal instances(:obj:`SSNInstance`).
positive_threshold (float): Minimum threshold of overlap of
positive/foreground proposals and groundtruths.
with_gt (bool): Whether to include groundtruth instances in
positive proposals. Default: True.
Returns:
list[:obj:`SSNInstance`]: (positives), positives is a list
comprised of positive proposal instances.
"""
positives = [
proposal for proposal in proposals
if proposal.best_iou > positive_threshold
]
if with_gt:
positives.extend(gts)
for proposal in positives:
proposal.compute_regression_targets(gts)
return positives
@staticmethod
def get_negatives(proposals,
incomplete_iou_threshold,
background_iou_threshold,
background_coverage_threshold=0.01,
incomplete_overlap_threshold=0.7):
"""Get negative proposals, including incomplete proposals and
background proposals.
Args:
proposals (list): List of proposal instances(:obj:`SSNInstance`).
incomplete_iou_threshold (float): Maximum threshold of overlap
of incomplete proposals and groundtruths.
background_iou_threshold (float): Maximum threshold of overlap
of background proposals and groundtruths.
background_coverage_threshold (float): Minimum coverage
of background proposals in video duration. Default: 0.01.
incomplete_overlap_threshold (float): Minimum percent of incomplete
proposals' own span contained in a groundtruth instance.
Default: 0.7.
Returns:
list[:obj:`SSNInstance`]: (incompletes, backgrounds), incompletes
and backgrounds are lists comprised of incomplete
proposal instances and background proposal instances.
"""
incompletes = []
backgrounds = []
for proposal in proposals:
if (proposal.best_iou < incomplete_iou_threshold
and proposal.overlap_self > incomplete_overlap_threshold):
incompletes.append(proposal)
elif (proposal.best_iou < background_iou_threshold
and proposal.coverage > background_coverage_threshold):
backgrounds.append(proposal)
return incompletes, backgrounds
def _video_centric_sampling(self, record):
"""Sample proposals from the this video instance.
Args:
record (dict): Information of the video instance(video_info[idx]).
key: frame_dir, video_id, total_frames,
gts: List of groundtruth instances(:obj:`SSNInstance`).
proposals: List of proposal instances(:obj:`SSNInstance`).
"""
positives = self.get_positives(record['gts'], record['proposals'],
self.assigner.positive_iou_threshold,
self.sampler.add_gt_as_proposals)
incompletes, backgrounds = self.get_negatives(
record['proposals'], self.assigner.incomplete_iou_threshold,
self.assigner.background_iou_threshold,
self.assigner.background_coverage_threshold,
self.assigner.incomplete_overlap_threshold)
def sample_video_proposals(proposal_type, video_id, video_pool,
num_requested_proposals, dataset_pool):
"""This method will sample proposals from the this video pool. If
the video pool is empty, it will fetch from the dataset pool
(collect proposal of the entire dataset).
Args:
proposal_type (int): Type id of proposal.
Positive/Foreground: 0
Negative:
Incomplete: 1
Background: 2
video_id (str): Name of the video.
video_pool (list): Pool comprised of proposals in this video.
num_requested_proposals (int): Number of proposals
to be sampled.
dataset_pool (list): Proposals of the entire dataset.
Returns:
list[(str, :obj:`SSNInstance`), int]:
video_id (str): Name of the video.
:obj:`SSNInstance`: Instance of class SSNInstance.
proposal_type (int): Type of proposal.
"""
if len(video_pool) == 0:
idx = np.random.choice(
len(dataset_pool), num_requested_proposals, replace=False)
return [(dataset_pool[x], proposal_type) for x in idx]
replicate = len(video_pool) < num_requested_proposals
idx = np.random.choice(
len(video_pool), num_requested_proposals, replace=replicate)
return [((video_id, video_pool[x]), proposal_type) for x in idx]
out_proposals = []
out_proposals.extend(
sample_video_proposals(0, record['video_id'], positives,
self.positive_per_video,
self.positive_pool))
out_proposals.extend(
sample_video_proposals(1, record['video_id'], incompletes,
self.incomplete_per_video,
self.incomplete_pool))
out_proposals.extend(
sample_video_proposals(2, record['video_id'], backgrounds,
self.background_per_video,
self.background_pool))
return out_proposals
def _random_sampling(self):
"""Randomly sample proposals from the entire dataset."""
out_proposals = []
positive_idx = np.random.choice(
len(self.positive_pool),
self.positive_per_video,
replace=len(self.positive_pool) < self.positive_per_video)
out_proposals.extend([(self.positive_pool[x], 0)
for x in positive_idx])
incomplete_idx = np.random.choice(
len(self.incomplete_pool),
self.incomplete_per_video,
replace=len(self.incomplete_pool) < self.incomplete_per_video)
out_proposals.extend([(self.incomplete_pool[x], 1)
for x in incomplete_idx])
background_idx = np.random.choice(
len(self.background_pool),
self.background_per_video,
replace=len(self.background_pool) < self.background_per_video)
out_proposals.extend([(self.background_pool[x], 2)
for x in background_idx])
return out_proposals
def _get_stage(self, proposal, num_frames):
"""Fetch the scale factor of starting and ending stage and get the
stage split.
Args:
proposal (:obj:`SSNInstance`): Proposal instance.
num_frames (int): Total frames of the video.
Returns:
tuple[float, float, list]: (starting_scale_factor,
ending_scale_factor, stage_split), starting_scale_factor is
the ratio of the effective sampling length to augment length
in starting stage, ending_scale_factor is the ratio of the
effective sampling length to augment length in ending stage,
stage_split is ending segment id of starting, course and
ending stage.
"""
# proposal interval: [start_frame, end_frame)
start_frame = proposal.start_frame
end_frame = proposal.end_frame
ori_clip_len = self.clip_len * self.frame_interval
duration = end_frame - start_frame
assert duration != 0
valid_starting = max(0,
start_frame - int(duration * self.aug_ratio[0]))
valid_ending = min(num_frames - ori_clip_len + 1,
end_frame - 1 + int(duration * self.aug_ratio[1]))
valid_starting_length = start_frame - valid_starting - ori_clip_len
valid_ending_length = (valid_ending - end_frame + 1) - ori_clip_len
starting_scale_factor = ((valid_starting_length + ori_clip_len + 1) /
(duration * self.aug_ratio[0]))
ending_scale_factor = (valid_ending_length + ori_clip_len + 1) / (
duration * self.aug_ratio[1])
aug_start, aug_end = self.aug_segments
stage_split = [
aug_start, aug_start + self.body_segments,
aug_start + self.body_segments + aug_end
]
return starting_scale_factor, ending_scale_factor, stage_split
def _compute_reg_normalize_constants(self):
"""Compute regression target normalized constants."""
if self.verbose:
self.logger.info('Compute regression target normalized constants')
targets = []
for video_info in self.video_infos:
positives = self.get_positives(
video_info['gts'], video_info['proposals'],
self.assigner.positive_iou_threshold, False)
for positive in positives:
targets.append(list(positive.regression_targets))
return np.array((np.mean(targets, axis=0), np.std(targets, axis=0)))
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
if self.video_centric:
# yapf: disable
results['out_proposals'] = self._video_centric_sampling(self.video_infos[idx]) # noqa: E501
# yapf: enable
else:
results['out_proposals'] = self._random_sampling()
out_proposal_scale_factor = []
out_proposal_type = []
out_proposal_labels = []
out_proposal_reg_targets = []
for idx, proposal in enumerate(results['out_proposals']):
# proposal: [(video_id, SSNInstance), proposal_type]
num_frames = proposal[0][1].num_video_frames
(starting_scale_factor, ending_scale_factor,
_) = self._get_stage(proposal[0][1], num_frames)
# proposal[1]: Type id of proposal.
# Positive/Foreground: 0
# Negative:
# Incomplete: 1
# Background: 2
# Positive/Foreground proposal
if proposal[1] == 0:
label = proposal[0][1].label
# Incomplete proposal
elif proposal[1] == 1:
label = proposal[0][1].label
# Background proposal
elif proposal[1] == 2:
label = 0
else:
raise ValueError(f'Proposal type should be 0, 1, or 2, '
f'but got {proposal[1]}')
out_proposal_scale_factor.append(
[starting_scale_factor, ending_scale_factor])
if not isinstance(label, int):
raise TypeError(f'proposal_label must be an int, '
f'but got {type(label)}')
out_proposal_labels.append(label)
out_proposal_type.append(proposal[1])
reg_targets = proposal[0][1].regression_targets
if proposal[1] == 0:
# Normalize regression targets of positive proposals.
reg_targets = ((reg_targets[0] - self.reg_norm_consts[0][0]) /
self.reg_norm_consts[1][0],
(reg_targets[1] - self.reg_norm_consts[0][1]) /
self.reg_norm_consts[1][1])
out_proposal_reg_targets.append(reg_targets)
results['reg_targets'] = np.array(
out_proposal_reg_targets, dtype=np.float32)
results['proposal_scale_factor'] = np.array(
out_proposal_scale_factor, dtype=np.float32)
results['proposal_labels'] = np.array(out_proposal_labels)
results['proposal_type'] = np.array(out_proposal_type)
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
proposals = results['proposals']
num_frames = results['total_frames']
ori_clip_len = self.clip_len * self.frame_interval
frame_ticks = np.arange(
0, num_frames - ori_clip_len, self.test_interval, dtype=int) + 1
num_sampled_frames = len(frame_ticks)
if len(proposals) == 0:
proposals.append(SSNInstance(0, num_frames - 1, num_frames))
relative_proposal_list = []
proposal_tick_list = []
scale_factor_list = []
for proposal in proposals:
relative_proposal = (proposal.start_frame / num_frames,
proposal.end_frame / num_frames)
relative_duration = relative_proposal[1] - relative_proposal[0]
relative_starting_duration = relative_duration * self.aug_ratio[0]
relative_ending_duration = relative_duration * self.aug_ratio[1]
relative_starting = (
relative_proposal[0] - relative_starting_duration)
relative_ending = relative_proposal[1] + relative_ending_duration
real_relative_starting = max(0.0, relative_starting)
real_relative_ending = min(1.0, relative_ending)
starting_scale_factor = (
(relative_proposal[0] - real_relative_starting) /
relative_starting_duration)
ending_scale_factor = (
(real_relative_ending - relative_proposal[1]) /
relative_ending_duration)
proposal_ranges = (real_relative_starting, *relative_proposal,
real_relative_ending)
proposal_ticks = (np.array(proposal_ranges) *
num_sampled_frames).astype(np.int32)
relative_proposal_list.append(relative_proposal)
proposal_tick_list.append(proposal_ticks)
scale_factor_list.append(
(starting_scale_factor, ending_scale_factor))
results['relative_proposal_list'] = np.array(
relative_proposal_list, dtype=np.float32)
results['scale_factor_list'] = np.array(
scale_factor_list, dtype=np.float32)
results['proposal_tick_list'] = np.array(
proposal_tick_list, dtype=np.int32)
results['reg_norm_consts'] = self.reg_norm_consts
return self.pipeline(results)
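# Illustrative sketch (not part of the original file): how the starting and
# ending scale factors computed in `prepare_test_frames` behave for a single
# proposal. All numbers below are made-up toy values.
def _example_ssn_scale_factors(start_frame=30, end_frame=90, num_frames=100,
                               aug_ratio=(0.5, 0.5)):
    """Return (starting_scale_factor, ending_scale_factor) for one proposal."""
    relative_proposal = (start_frame / num_frames, end_frame / num_frames)
    relative_duration = relative_proposal[1] - relative_proposal[0]
    # Requested augmentation on each side, as a fraction of the video length.
    starting_aug = relative_duration * aug_ratio[0]
    ending_aug = relative_duration * aug_ratio[1]
    # Clamp the augmented interval to the video boundaries [0, 1].
    real_start = max(0.0, relative_proposal[0] - starting_aug)
    real_end = min(1.0, relative_proposal[1] + ending_aug)
    # Scale factor = fraction of the requested augmentation actually covered.
    starting_scale_factor = (relative_proposal[0] - real_start) / starting_aug
    ending_scale_factor = (real_end - relative_proposal[1]) / ending_aug
    return starting_scale_factor, ending_scale_factor  # (1.0, 0.333...)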
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/ssn_dataset.py |
import copy
import os.path as osp
from collections import defaultdict
import mmcv
import numpy as np
from ..utils import get_root_logger
from .base import BaseDataset
from .registry import DATASETS
@DATASETS.register_module()
class AVADataset(BaseDataset):
"""AVA dataset for spatial temporal detection.
Based on official AVA annotation files, the dataset loads raw frames,
bounding boxes, proposals and applies specified transformations to return
a dict containing the frame tensors and other information.
This datasets can load information from the following files:
.. code-block:: txt
ann_file -> ava_{train, val}_{v2.1, v2.2}.csv
exclude_file -> ava_{train, val}_excluded_timestamps_{v2.1, v2.2}.csv
label_file -> ava_action_list_{v2.1, v2.2}.pbtxt /
ava_action_list_{v2.1, v2.2}_for_activitynet_2019.pbtxt
proposal_file -> ava_dense_proposals_{train, val}.FAIR.recall_93.9.pkl
Particularly, the proposal_file is a pickle file which contains
``img_key`` (in format of ``{video_id},{timestamp}``). Example of a pickle
file:
.. code-block:: JSON
{
...
'0f39OWEqJ24,0902':
array([[0.011 , 0.157 , 0.655 , 0.983 , 0.998163]]),
'0f39OWEqJ24,0912':
array([[0.054 , 0.088 , 0.91 , 0.998 , 0.068273],
[0.016 , 0.161 , 0.519 , 0.974 , 0.984025],
[0.493 , 0.283 , 0.981 , 0.984 , 0.983621]]),
...
}
Args:
ann_file (str): Path to the annotation file like
``ava_{train, val}_{v2.1, v2.2}.csv``.
exclude_file (str): Path to the excluded timestamp file like
``ava_{train, val}_excluded_timestamps_{v2.1, v2.2}.csv``.
pipeline (list[dict | callable]): A sequence of data transforms.
label_file (str): Path to the label file like
``ava_action_list_{v2.1, v2.2}.pbtxt`` or
``ava_action_list_{v2.1, v2.2}_for_activitynet_2019.pbtxt``.
Default: None.
filename_tmpl (str): Template for each filename.
Default: 'img_{:05}.jpg'.
proposal_file (str): Path to the proposal file like
``ava_dense_proposals_{train, val}.FAIR.recall_93.9.pkl``.
Default: None.
data_prefix (str): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
modality (str): Modality of data. Support 'RGB', 'Flow'.
Default: 'RGB'.
num_max_proposals (int): Max proposals number to store. Default: 1000.
timestamp_start (int): The start point of included timestamps. The
default value is referred from the official website. Default: 902.
timestamp_end (int): The end point of included timestamps. The
default value is referred from the official website. Default: 1798.
"""
_FPS = 30
_NUM_CLASSES = 81
def __init__(self,
ann_file,
exclude_file,
pipeline,
label_file=None,
filename_tmpl='img_{:05}.jpg',
proposal_file=None,
data_prefix=None,
test_mode=False,
modality='RGB',
num_max_proposals=1000,
timestamp_start=902,
timestamp_end=1798):
# since it inherits from `BaseDataset`, some arguments
# should be assigned before performing `load_annotations()`
self.exclude_file = exclude_file
self.label_file = label_file
self.proposal_file = proposal_file
self.filename_tmpl = filename_tmpl
self.num_max_proposals = num_max_proposals
self.timestamp_start = timestamp_start
self.timestamp_end = timestamp_end
self.logger = get_root_logger()
super().__init__(
ann_file, pipeline, data_prefix, test_mode, modality=modality)
if self.proposal_file is not None:
self.proposals = mmcv.load(self.proposal_file)
else:
self.proposals = None
if not test_mode:
valid_indexes = self.filter_exclude_file()
self.logger.info(
f'{len(valid_indexes)} out of {len(self.video_infos)} '
f'frames are valid.')
self.video_infos = [
self.video_infos[i] for i in valid_indexes
]
def parse_img_record(self, img_records):
bboxes, labels, entity_ids = [], [], []
while len(img_records) > 0:
img_record = img_records[0]
num_img_records = len(img_records)
selected_records = list(
filter(
lambda x: np.array_equal(x['entity_box'], img_record[
'entity_box']), img_records))
num_selected_records = len(selected_records)
img_records = list(
filter(
lambda x: not np.array_equal(x['entity_box'], img_record[
'entity_box']), img_records))
assert len(img_records) + num_selected_records == num_img_records
bboxes.append(img_record['entity_box'])
valid_labels = np.array([
selected_record['label']
for selected_record in selected_records
])
padded_labels = np.pad(
valid_labels, (0, self._NUM_CLASSES - valid_labels.shape[0]),
'constant',
constant_values=-1)
labels.append(padded_labels)
entity_ids.append(img_record['entity_id'])
bboxes = np.stack(bboxes)
labels = np.stack(labels)
entity_ids = np.stack(entity_ids)
return bboxes, labels, entity_ids
def filter_exclude_file(self):
valid_indexes = []
if self.exclude_file is None:
valid_indexes = list(range(len(self.video_infos)))
else:
exclude_video_infos = [
x.strip().split(',') for x in open(self.exclude_file)
]
for i, video_info in enumerate(self.video_infos):
valid_indexes.append(i)
for video_id, timestamp in exclude_video_infos:
if (video_info['video_id'] == video_id
and video_info['timestamp'] == int(timestamp)):
valid_indexes.pop()
break
return valid_indexes
def load_annotations(self):
video_infos = []
records_dict_by_img = defaultdict(list)
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split(',')
video_id = line_split[0]
timestamp = int(line_split[1])
img_key = f'{video_id},{timestamp:04d}'
entity_box = np.array(list(map(float, line_split[2:6])))
label = int(line_split[6])
entity_id = int(line_split[7])
shot_info = (0, (self.timestamp_end - self.timestamp_start) *
self._FPS)
video_info = dict(
video_id=video_id,
timestamp=timestamp,
entity_box=entity_box,
label=label,
entity_id=entity_id,
shot_info=shot_info)
records_dict_by_img[img_key].append(video_info)
for img_key in records_dict_by_img:
video_id, timestamp = img_key.split(',')
bboxes, labels, entity_ids = self.parse_img_record(
records_dict_by_img[img_key])
ann = dict(
entity_boxes=bboxes, labels=labels, entity_ids=entity_ids)
frame_dir = video_id
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
video_info = dict(
frame_dir=frame_dir,
video_id=video_id,
timestamp=int(timestamp),
img_key=img_key,
shot_info=shot_info,
fps=self._FPS,
ann=ann)
video_infos.append(video_info)
return video_infos
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
img_key = results['img_key']
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
results['timestamp_start'] = self.timestamp_start
results['timestamp_end'] = self.timestamp_end
results['proposals'] = self.proposals[img_key][:self.num_max_proposals]
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
img_key = results['img_key']
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
results['timestamp_start'] = self.timestamp_start
results['timestamp_end'] = self.timestamp_end
results['proposals'] = self.proposals[img_key][:self.num_max_proposals]
return self.pipeline(results)
def evaluate(self, results, metrics, metric_options, logger):
raise NotImplementedError
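# Illustrative sketch (not part of the original file): how `parse_img_record`
# pads the per-box labels to a fixed-length vector of size `_NUM_CLASSES`,
# using -1 for the unused slots. The label ids below are toy values.
def _example_pad_ava_labels(labels=(12, 17, 80), num_classes=81):
    import numpy as np
    valid_labels = np.array(labels)
    # Every bounding box ends up with a label vector of length `num_classes`.
    return np.pad(
        valid_labels, (0, num_classes - valid_labels.shape[0]),
        'constant',
        constant_values=-1)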
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/ava_dataset.py |
import copy
import os.path as osp
import warnings
from abc import ABCMeta, abstractmethod
from collections import defaultdict
import mmcv
import numpy as np
import torch
from mmcv.utils import print_log
from torch.utils.data import Dataset
from ..core import (mean_average_precision, mean_class_accuracy,
mmit_mean_average_precision, top_k_accuracy)
from .pipelines import Compose
class BaseDataset(Dataset, metaclass=ABCMeta):
"""Base class for datasets.
All datasets to process video should subclass it.
All subclasses should overwrite:
- Methods:`load_annotations`, supporting to load information from an
annotation file.
- Methods:`prepare_train_frames`, providing train data.
- Methods:`prepare_test_frames`, providing test data.
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
data_prefix (str | None): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
multi_class (bool): Determines whether the dataset is a multi-class
dataset. Default: False.
num_classes (int | None): Number of classes of the dataset, used in
multi-class datasets. Default: None.
start_index (int): Specify a start index for frames in consideration of
different filename format. However, when taking videos as input,
it should be set to 0, since frames loaded from videos count
from 0. Default: 1.
modality (str): Modality of data. Support 'RGB', 'Flow', 'Audio'.
Default: 'RGB'.
sample_by_class (bool): Sampling by class, should be set `True` when
performing inter-class data balancing. Only compatible with
`multi_class == False`. Only applies for training. Default: False.
power (float | None): We support sampling data with the probability
proportional to the power of its label frequency (freq ^ power)
when sampling data. `power == 1` indicates uniformly sampling all
data; `power == 0` indicates uniformly sampling all classes.
Default: None.
"""
def __init__(self,
ann_file,
pipeline,
data_prefix=None,
test_mode=False,
multi_class=False,
num_classes=None,
start_index=1,
modality='RGB',
sample_by_class=False,
power=None):
super().__init__()
self.ann_file = ann_file
self.data_prefix = osp.realpath(
data_prefix) if data_prefix is not None and osp.isdir(
data_prefix) else data_prefix
self.test_mode = test_mode
self.multi_class = multi_class
self.num_classes = num_classes
self.start_index = start_index
self.modality = modality
self.sample_by_class = sample_by_class
self.power = power
assert not (self.multi_class and self.sample_by_class)
self.pipeline = Compose(pipeline)
self.video_infos = self.load_annotations()
if self.sample_by_class:
self.video_infos_by_class = self.parse_by_class()
@abstractmethod
def load_annotations(self):
"""Load the annotation according to ann_file into video_infos."""
# json annotations already look like video_infos, so for each dataset,
# this func should be the same
def load_json_annotations(self):
"""Load json annotation file to get video information."""
video_infos = mmcv.load(self.ann_file)
num_videos = len(video_infos)
path_key = 'frame_dir' if 'frame_dir' in video_infos[0] else 'filename'
for i in range(num_videos):
path_value = video_infos[i][path_key]
if self.data_prefix is not None:
path_value = osp.join(self.data_prefix, path_value)
video_infos[i][path_key] = path_value
if self.multi_class:
assert self.num_classes is not None
else:
assert len(video_infos[i]['label']) == 1
video_infos[i]['label'] = video_infos[i]['label'][0]
return video_infos
def parse_by_class(self):
video_infos_by_class = defaultdict(list)
for item in self.video_infos:
label = item['label']
video_infos_by_class[label].append(item)
return video_infos_by_class
@staticmethod
def label2array(num, label):
arr = np.zeros(num, dtype=np.float32)
arr[label] = 1.
return arr
def evaluate(self,
results,
metrics='top_k_accuracy',
metric_options=dict(top_k_accuracy=dict(topk=(1, 5))),
logger=None,
**deprecated_kwargs):
"""Perform evaluation for common datasets.
Args:
results (list): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'top_k_accuracy'.
metric_options (dict): Dict for metric options. Options are
``topk`` for ``top_k_accuracy``.
Default: ``dict(top_k_accuracy=dict(topk=(1, 5)))``.
logger (logging.Logger | None): Logger for recording.
Default: None.
deprecated_kwargs (dict): Used for containing deprecated arguments.
See 'https://github.com/open-mmlab/mmaction2/pull/286'.
Returns:
dict: Evaluation results dict.
"""
# Protect ``metric_options`` since it uses mutable value as default
metric_options = copy.deepcopy(metric_options)
if deprecated_kwargs != {}:
warnings.warn(
'Option arguments for metrics have been changed to '
"`metric_options`, See 'https://github.com/open-mmlab/mmaction2/pull/286' " # noqa: E501
'for more details')
metric_options['top_k_accuracy'] = dict(
metric_options['top_k_accuracy'], **deprecated_kwargs)
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
f'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
allowed_metrics = [
'top_k_accuracy', 'mean_class_accuracy', 'mean_average_precision',
'mmit_mean_average_precision'
]
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
eval_results = {}
gt_labels = [ann['label'] for ann in self.video_infos]
for metric in metrics:
msg = f'Evaluating {metric} ...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'top_k_accuracy':
topk = metric_options.setdefault('top_k_accuracy',
{}).setdefault(
'topk', (1, 5))
if not isinstance(topk, (int, tuple)):
raise TypeError('topk must be int or tuple of int, '
f'but got {type(topk)}')
if isinstance(topk, int):
topk = (topk, )
top_k_acc = top_k_accuracy(results, gt_labels, topk)
log_msg = []
for k, acc in zip(topk, top_k_acc):
eval_results[f'top{k}_acc'] = acc
log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric == 'mean_class_accuracy':
mean_acc = mean_class_accuracy(results, gt_labels)
eval_results['mean_class_accuracy'] = mean_acc
log_msg = f'\nmean_acc\t{mean_acc:.4f}'
print_log(log_msg, logger=logger)
continue
if metric in [
'mean_average_precision', 'mmit_mean_average_precision'
]:
gt_labels = [
self.label2array(self.num_classes, label)
for label in gt_labels
]
if metric == 'mean_average_precision':
mAP = mean_average_precision(results, gt_labels)
elif metric == 'mmit_mean_average_precision':
mAP = mmit_mean_average_precision(results, gt_labels)
eval_results['mean_average_precision'] = mAP
log_msg = f'\nmean_average_precision\t{mAP:.4f}'
print_log(log_msg, logger=logger)
continue
return eval_results
@staticmethod
def dump_results(results, out):
"""Dump data to json/yaml/pickle strings or files."""
return mmcv.dump(results, out)
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
if self.sample_by_class:
# Then, the idx is the class index
samples = self.video_infos_by_class[idx]
results = copy.deepcopy(np.random.choice(samples))
else:
results = copy.deepcopy(self.video_infos[idx])
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
# If HVU, type(results['label']) is dict
if self.multi_class and isinstance(results['label'], list):
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
if self.sample_by_class:
# Then, the idx is the class index
samples = self.video_infos_by_class[idx]
results = copy.deepcopy(np.random.choice(samples))
else:
results = copy.deepcopy(self.video_infos[idx])
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
# If HVU, type(results['label']) is dict
if self.multi_class and isinstance(results['label'], list):
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
def __len__(self):
"""Get the size of the dataset."""
return len(self.video_infos)
def __getitem__(self, idx):
"""Get the sample for either training or testing given index."""
if self.test_mode:
return self.prepare_test_frames(idx)
return self.prepare_train_frames(idx)
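# Illustrative sketch (not part of the original file): the intended shape of
# `metric_options` when calling `BaseDataset.evaluate`. Both `dataset` and
# `results` are hypothetical placeholders supplied by the caller.
def _example_evaluate(dataset, results):
    return dataset.evaluate(
        results,
        metrics=['top_k_accuracy', 'mean_class_accuracy'],
        metric_options=dict(top_k_accuracy=dict(topk=(1, 3, 5))))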
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/base.py |
import copy
import os.path as osp
import random
import mmcv
from .base import BaseDataset
from .registry import DATASETS
@DATASETS.register_module()
class RawVideoDataset(BaseDataset):
"""RawVideo dataset for action recognition, used in the Project OmniSource.
The dataset loads clips of raw videos and apply specified transforms to
return a dict containing the frame tensors and other information. Note that
for this dataset, `multi_class` should be False.
The ann_file is a text file with multiple lines, and each line indicates
a sample video with the filepath (without suffix), label, number of clips
and index of positive clips (starting from 0), which are split with a
whitespace. Raw videos should be first trimmed into 10 second clips,
organized in the following format:
.. code-block:: txt
some/path/D32_1gwq35E/part_0.mp4
some/path/D32_1gwq35E/part_1.mp4
......
some/path/D32_1gwq35E/part_n.mp4
Example of an annotation file:
.. code-block:: txt
some/path/D32_1gwq35E 66 10 0 1 2
some/path/-G-5CJ0JkKY 254 5 3 4
some/path/T4h1bvOd9DA 33 1 0
some/path/4uZ27ivBl00 341 2 0 1
some/path/0LfESFkfBSw 186 234 7 9 11
some/path/-YIsNpBEx6c 169 100 9 10 11
The first line indicates that the raw video `some/path/D32_1gwq35E` has
action label `66`, consists of 10 clips (from `part_0.mp4` to
`part_9.mp4`). The 1st, 2nd and 3rd clips are positive clips.
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
sampling_strategy (str): The strategy to sample clips from raw videos.
Choices are 'random' or 'positive'. Default: 'positive'.
clipname_tmpl (str): The template of clip name in the raw video.
Default: 'part_{}.mp4'.
**kwargs: Keyword arguments for ``BaseDataset``.
"""
def __init__(self,
ann_file,
pipeline,
clipname_tmpl='part_{}.mp4',
sampling_strategy='positive',
**kwargs):
super().__init__(ann_file, pipeline, start_index=0, **kwargs)
assert self.multi_class is False
self.sampling_strategy = sampling_strategy
self.clipname_tmpl = clipname_tmpl
# If positive, we should only keep those raw videos with positive
# clips
if self.sampling_strategy == 'positive':
self.video_infos = [
x for x in self.video_infos if len(x['positive_clip_inds'])
]
# do not support multi_class
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_dir = line_split[0]
label = int(line_split[1])
num_clips = int(line_split[2])
positive_clip_inds = [int(ind) for ind in line_split[3:]]
if self.data_prefix is not None:
video_dir = osp.join(self.data_prefix, video_dir)
video_infos.append(
dict(
video_dir=video_dir,
label=label,
num_clips=num_clips,
positive_clip_inds=positive_clip_inds))
return video_infos
# do not support multi_class
def load_json_annotations(self):
"""Load json annotation file to get video information."""
video_infos = mmcv.load(self.ann_file)
num_videos = len(video_infos)
path_key = 'video_dir'
for i in range(num_videos):
if self.data_prefix is not None:
path_value = video_infos[i][path_key]
path_value = osp.join(self.data_prefix, path_value)
video_infos[i][path_key] = path_value
return video_infos
def sample_clip(self, results):
"""Sample a clip from the raw video given the sampling strategy."""
assert self.sampling_strategy in ['positive', 'random']
if self.sampling_strategy == 'positive':
assert results['positive_clip_inds']
ind = random.choice(results['positive_clip_inds'])
else:
ind = random.randint(0, results['num_clips'] - 1)
clipname = self.clipname_tmpl.format(ind)
# if the first char of self.clipname_tmpl is a letter, use osp.join;
# otherwise, directly concat them
if self.clipname_tmpl[0].isalpha():
filename = osp.join(results['video_dir'], clipname)
else:
filename = results['video_dir'] + clipname
results['filename'] = filename
return results
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results = self.sample_clip(results)
results['modality'] = self.modality
results['start_index'] = self.start_index
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results = self.sample_clip(results)
results['modality'] = self.modality
results['start_index'] = self.start_index
return self.pipeline(results)
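# Illustrative sketch (not part of the original file): how `clipname_tmpl`
# controls the final clip path built in `sample_clip`. The paths are made up.
def _example_clip_filename(video_dir='some/path/D32_1gwq35E',
                           clipname_tmpl='part_{}.mp4', ind=3):
    import os.path as osp
    clipname = clipname_tmpl.format(ind)
    # A template starting with a letter names a file inside the directory;
    # otherwise (e.g. '_{}.mp4') it is concatenated to the directory string.
    if clipname_tmpl[0].isalpha():
        return osp.join(video_dir, clipname)  # 'some/path/D32_1gwq35E/part_3.mp4'
    return video_dir + clipname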
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/rawvideo_dataset.py |
import io
import os
import os.path as osp
import shutil
import warnings
import mmcv
import numpy as np
import torch
from mmcv.fileio import FileClient
from torch.nn.modules.utils import _pair
from ...utils import get_random_string, get_shm_dir, get_thread_id
from ..registry import PIPELINES
@PIPELINES.register_module()
class LoadHVULabel:
"""Convert the HVU label from dictionaries to torch tensors.
Required keys are "label", "categories", "category_nums", added or modified
keys are "label", "mask" and "category_mask".
"""
def __init__(self, **kwargs):
self.hvu_initialized = False
self.kwargs = kwargs
def init_hvu_info(self, categories, category_nums):
assert len(categories) == len(category_nums)
self.categories = categories
self.category_nums = category_nums
self.num_categories = len(self.categories)
self.num_tags = sum(self.category_nums)
self.category2num = dict(zip(categories, category_nums))
self.start_idx = [0]
for i in range(self.num_categories - 1):
self.start_idx.append(self.start_idx[-1] + self.category_nums[i])
self.category2startidx = dict(zip(categories, self.start_idx))
self.hvu_initialized = True
def __call__(self, results):
"""Convert the label dictionary to 3 tensors: "label", "mask" and
"category_mask".
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if not self.hvu_initialized:
self.init_hvu_info(results['categories'], results['category_nums'])
onehot = torch.zeros(self.num_tags)
onehot_mask = torch.zeros(self.num_tags)
category_mask = torch.zeros(self.num_categories)
for category, tags in results['label'].items():
category_mask[self.categories.index(category)] = 1.
start_idx = self.category2startidx[category]
category_num = self.category2num[category]
tags = [idx + start_idx for idx in tags]
onehot[tags] = 1.
onehot_mask[start_idx:category_num + start_idx] = 1.
results['label'] = onehot
results['mask'] = onehot_mask
results['category_mask'] = category_mask
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'hvu_initialized={self.hvu_initialized})')
return repr_str
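# Illustrative sketch (not part of the original file): how `LoadHVULabel`
# shifts per-category tag indices into one flat one-hot vector. The category
# names, sizes and tags below are toy values.
def _example_hvu_onehot():
    import torch
    categories = ['action', 'scene']
    category_nums = [3, 4]
    # Cumulative offsets: 'action' tags occupy [0, 3), 'scene' tags [3, 7).
    start_idx = dict(zip(categories, [0, category_nums[0]]))
    label = {'action': [1], 'scene': [0, 2]}
    onehot = torch.zeros(sum(category_nums))
    for category, tags in label.items():
        onehot[[tag + start_idx[category] for tag in tags]] = 1.
    return onehot  # tensor([0., 1., 0., 1., 0., 1., 0.])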
@PIPELINES.register_module()
class SampleFrames:
"""Sample frames from the video.
Required keys are "filename", "total_frames", "start_index" , added or
modified keys are "frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): Frames of each sampled output clip.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
num_clips (int): Number of clips to be sampled. Default: 1.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
twice_sample (bool): Whether to use twice sample when testing.
If set to True, it will sample frames with and without fixed shift,
which is commonly used for testing in TSM model. Default: False.
out_of_bound_opt (str): The way to deal with out of bounds frame
indexes. Available options are 'loop', 'repeat_last'.
Default: 'loop'.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
start_index (None): This argument is deprecated and moved to dataset
class (``BaseDataset``, ``VideoDataset``, ``RawframeDataset``, etc),
see this: https://github.com/open-mmlab/mmaction2/pull/89.
"""
def __init__(self,
clip_len,
frame_interval=1,
num_clips=1,
temporal_jitter=False,
twice_sample=False,
out_of_bound_opt='loop',
test_mode=False,
start_index=None):
self.clip_len = clip_len
self.frame_interval = frame_interval
self.num_clips = num_clips
self.temporal_jitter = temporal_jitter
self.twice_sample = twice_sample
self.out_of_bound_opt = out_of_bound_opt
self.test_mode = test_mode
assert self.out_of_bound_opt in ['loop', 'repeat_last']
if start_index is not None:
warnings.warn('No longer support "start_index" in "SampleFrames", '
'it should be set in dataset class, see this pr: '
'https://github.com/open-mmlab/mmaction2/pull/89')
def _get_train_clips(self, num_frames):
"""Get clip offsets in train mode.
It will calculate the average interval for selected frames,
and randomly shift them within offsets between [0, avg_interval].
If the total number of frames is smaller than the number of clips or the
original clip length, it will return all-zero indices.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips
if avg_interval > 0:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = base_offsets + np.random.randint(
avg_interval, size=self.num_clips)
elif num_frames > max(self.num_clips, ori_clip_len):
clip_offsets = np.sort(
np.random.randint(
num_frames - ori_clip_len + 1, size=self.num_clips))
elif avg_interval == 0:
ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips
clip_offsets = np.around(np.arange(self.num_clips) * ratio)
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
return clip_offsets
def _get_test_clips(self, num_frames):
"""Get clip offsets in test mode.
Calculate the average interval for selected frames, and shift them
fixedly by avg_interval/2. If twice_sample is set to True, it will also
sample frames without the fixed shift. If the total number of frames is
not enough, it will return all-zero indices.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) / float(self.num_clips)
if num_frames > ori_clip_len - 1:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int)
if self.twice_sample:
clip_offsets = np.concatenate([clip_offsets, base_offsets])
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
return clip_offsets
def _sample_clips(self, num_frames):
"""Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices.
"""
if self.test_mode:
clip_offsets = self._get_test_clips(num_frames)
else:
clip_offsets = self._get_train_clips(num_frames)
return clip_offsets
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
clip_offsets = self._sample_clips(total_frames)
frame_inds = clip_offsets[:, None] + np.arange(
self.clip_len)[None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
self.frame_interval, size=len(frame_inds))
frame_inds += perframe_offsets
frame_inds = frame_inds.reshape((-1, self.clip_len))
if self.out_of_bound_opt == 'loop':
frame_inds = np.mod(frame_inds, total_frames)
elif self.out_of_bound_opt == 'repeat_last':
safe_inds = frame_inds < total_frames
unsafe_inds = 1 - safe_inds
last_ind = np.max(safe_inds * frame_inds, axis=1)
new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T)
frame_inds = new_inds
else:
raise ValueError('Illegal out_of_bound option.')
start_index = results['start_index']
frame_inds = np.concatenate(frame_inds) + start_index
results['frame_inds'] = frame_inds.astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = self.num_clips
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval}, '
f'num_clips={self.num_clips}, '
f'temporal_jitter={self.temporal_jitter}, '
f'twice_sample={self.twice_sample}, '
f'out_of_bound_opt={self.out_of_bound_opt}, '
f'test_mode={self.test_mode})')
return repr_str
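# Illustrative sketch (not part of the original file): the frame indices that
# `SampleFrames` produces in train mode when the random per-clip shift happens
# to be 0 and no temporal jitter is used. All numbers are toy values.
def _example_sample_frames(num_frames=40, clip_len=4, frame_interval=2,
                           num_clips=3):
    import numpy as np
    ori_clip_len = clip_len * frame_interval
    avg_interval = (num_frames - ori_clip_len + 1) // num_clips  # 11
    # The real sampler adds np.random.randint(avg_interval) to each offset.
    clip_offsets = np.arange(num_clips) * avg_interval  # [0, 11, 22]
    frame_inds = clip_offsets[:, None] + np.arange(
        clip_len)[None, :] * frame_interval
    return frame_inds  # shape (num_clips, clip_len)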
@PIPELINES.register_module()
class UntrimmedSampleFrames:
"""Sample frames from the untrimmed video.
Required keys are "filename", "total_frames", added or modified keys are
"frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): The length of sampled clips. Default: 1.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 16.
start_index (None): This argument is deprecated and moved to dataset
class (``BaseDataset``, ``VideoDataset``, ``RawframeDataset``, etc),
see this: https://github.com/open-mmlab/mmaction2/pull/89.
"""
def __init__(self, clip_len=1, frame_interval=16, start_index=None):
self.clip_len = clip_len
self.frame_interval = frame_interval
if start_index is not None:
warnings.warn('No longer support "start_index" in "SampleFrames", '
'it should be set in dataset class, see this pr: '
'https://github.com/open-mmlab/mmaction2/pull/89')
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
start_index = results['start_index']
clip_centers = np.arange(self.frame_interval // 2, total_frames,
self.frame_interval)
num_clips = clip_centers.shape[0]
frame_inds = clip_centers[:, None] + np.arange(
-(self.clip_len // 2), self.clip_len -
(self.clip_len // 2))[None, :]
# clip frame_inds to legal range
frame_inds = np.clip(frame_inds, 0, total_frames - 1)
frame_inds = np.concatenate(frame_inds) + start_index
results['frame_inds'] = frame_inds.astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = num_clips
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval})')
return repr_str
@PIPELINES.register_module()
class DenseSampleFrames(SampleFrames):
"""Select frames from the video by dense sample strategy.
Required keys are "filename", added or modified keys are "total_frames",
"frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): Frames of each sampled output clip.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
num_clips (int): Number of clips to be sampled. Default: 1.
sample_range (int): Total sample range for dense sample.
Default: 64.
num_sample_positions (int): Number of sample start positions, which is
only used in test mode. Default: 10. That is to say, by default,
there are at least 10 clips for one input sample in test mode.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
"""
def __init__(self,
clip_len,
frame_interval=1,
num_clips=1,
sample_range=64,
num_sample_positions=10,
temporal_jitter=False,
out_of_bound_opt='loop',
test_mode=False):
super().__init__(
clip_len,
frame_interval,
num_clips,
temporal_jitter,
out_of_bound_opt=out_of_bound_opt,
test_mode=test_mode)
self.sample_range = sample_range
self.num_sample_positions = num_sample_positions
def _get_train_clips(self, num_frames):
"""Get clip offsets by dense sample strategy in train mode.
It will calculate a sample position and sample interval and set
start index 0 when sample_pos == 1 or randomly choose from
[0, sample_pos - 1]. Then it will shift the start index by each
base offset.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_idx = 0 if sample_position == 1 else np.random.randint(
0, sample_position - 1)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = (base_offsets + start_idx) % num_frames
return clip_offsets
def _get_test_clips(self, num_frames):
"""Get clip offsets by dense sample strategy in test mode.
It will calculate a sample position and sample interval and evenly
sample several start indexes as start positions between
[0, sample_position-1]. Then it will shift each start index by the
base offsets.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_list = np.linspace(
0, sample_position - 1, num=self.num_sample_positions, dtype=int)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = list()
for start_idx in start_list:
clip_offsets.extend((base_offsets + start_idx) % num_frames)
clip_offsets = np.array(clip_offsets)
return clip_offsets
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval}, '
f'num_clips={self.num_clips}, '
f'sample_range={self.sample_range}, '
f'num_sample_positions={self.num_sample_positions}, '
f'temporal_jitter={self.temporal_jitter}, '
f'out_of_bound_opt={self.out_of_bound_opt}, '
f'test_mode={self.test_mode})')
return repr_str
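# Illustrative sketch (not part of the original file): the clip start offsets
# produced by `DenseSampleFrames._get_test_clips` for toy numbers.
def _example_dense_test_offsets(num_frames=200, sample_range=64, num_clips=8,
                                num_sample_positions=10):
    import numpy as np
    sample_position = max(1, 1 + num_frames - sample_range)  # 137
    interval = sample_range // num_clips  # 8
    start_list = np.linspace(
        0, sample_position - 1, num=num_sample_positions, dtype=int)
    base_offsets = np.arange(num_clips) * interval
    offsets = [(base_offsets + start) % num_frames for start in start_list]
    return np.concatenate(offsets)  # num_sample_positions * num_clips offsets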
@PIPELINES.register_module()
class SampleAVAFrames(SampleFrames):
def __init__(self, clip_len, frame_interval=2, test_mode=False):
super().__init__(clip_len, frame_interval, test_mode=test_mode)
def _get_clips(self, center_index, skip_offsets, shot_info):
start = center_index - (self.clip_len // 2) * self.frame_interval
end = center_index + ((self.clip_len + 1) // 2) * self.frame_interval
frame_inds = list(range(start, end, self.frame_interval))
frame_inds = frame_inds + skip_offsets
frame_inds = np.clip(frame_inds, shot_info[0], shot_info[1] - 1)
return frame_inds
def __call__(self, results):
fps = results['fps']
timestamp = results['timestamp']
timestamp_start = results['timestamp_start']
shot_info = results['shot_info']
center_index = fps * (timestamp - timestamp_start) + 1
skip_offsets = np.random.randint(
-self.frame_interval // 2, (self.frame_interval + 1) // 2,
size=self.clip_len)
frame_inds = self._get_clips(center_index, skip_offsets, shot_info)
results['frame_inds'] = np.array(frame_inds, dtype=np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval}, '
f'test_mode={self.test_mode})')
return repr_str
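# Illustrative sketch (not part of the original file): the raw frame indices
# picked by `SampleAVAFrames` around one keyframe, before the random skip
# offsets and the clipping to the shot boundaries. Toy numbers only.
def _example_ava_center_clip(fps=30, timestamp=905, timestamp_start=902,
                             clip_len=4, frame_interval=2):
    center_index = fps * (timestamp - timestamp_start) + 1  # 91
    start = center_index - (clip_len // 2) * frame_interval
    end = center_index + ((clip_len + 1) // 2) * frame_interval
    return list(range(start, end, frame_interval))  # [87, 89, 91, 93]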
@PIPELINES.register_module()
class SampleProposalFrames(SampleFrames):
"""Sample frames from proposals in the video.
Required keys are "total_frames" and "out_proposals", added or
modified keys are "frame_inds", "frame_interval", "num_clips",
'clip_len' and 'num_proposals'.
Args:
clip_len (int): Frames of each sampled output clip.
body_segments (int): Number of segments in course period.
aug_segments (list[int]): Number of segments in starting and
ending period.
aug_ratio (int | float | tuple[int | float]): The ratio
of the length of augmentation to that of the proposal.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
test_interval (int): Temporal interval of adjacent sampled frames
in test mode. Default: 6.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
mode (str): Choose 'train', 'val' or 'test' mode.
Default: 'train'.
"""
def __init__(self,
clip_len,
body_segments,
aug_segments,
aug_ratio,
frame_interval=1,
test_interval=6,
temporal_jitter=False,
mode='train'):
super().__init__(
clip_len,
frame_interval=frame_interval,
temporal_jitter=temporal_jitter)
self.body_segments = body_segments
self.aug_segments = aug_segments
self.aug_ratio = _pair(aug_ratio)
if not mmcv.is_tuple_of(self.aug_ratio, (int, float)):
raise TypeError(f'aug_ratio should be int, float '
f'or tuple of int and float, '
f'but got {type(aug_ratio)}')
assert len(self.aug_ratio) == 2
assert mode in ['train', 'val', 'test']
self.mode = mode
self.test_interval = test_interval
@staticmethod
def _get_train_indices(valid_length, num_segments):
"""Get indices of different stages of proposals in train mode.
It will calculate the average interval for each segment,
and randomly shift them within offsets between [0, average_duration].
If the total number of frames is smaller than num segments, it will
return all zero indices.
Args:
valid_length (int): The length of the starting point's
valid interval.
num_segments (int): Total number of segments.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
avg_interval = (valid_length + 1) // num_segments
if avg_interval > 0:
base_offsets = np.arange(num_segments) * avg_interval
offsets = base_offsets + np.random.randint(
avg_interval, size=num_segments)
else:
offsets = np.zeros((num_segments, ), dtype=np.int)
return offsets
@staticmethod
def _get_val_indices(valid_length, num_segments):
"""Get indices of different stages of proposals in validation mode.
It will calculate the average interval for each segment.
If the total number of valid length is smaller than num segments,
it will return all zero indices.
Args:
valid_length (int): The length of the starting point's
valid interval.
num_segments (int): Total number of segments.
Returns:
np.ndarray: Sampled frame indices in validation mode.
"""
if valid_length >= num_segments:
avg_interval = valid_length / float(num_segments)
base_offsets = np.arange(num_segments) * avg_interval
offsets = (base_offsets + avg_interval / 2.0).astype(np.int)
else:
offsets = np.zeros((num_segments, ), dtype=np.int)
return offsets
def _get_proposal_clips(self, proposal, num_frames):
"""Get clip offsets in train mode.
It will calculate sampled frame indices in the proposal's three
stages: starting, course and ending stage.
Args:
proposal (obj): The proposal object.
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
# proposal interval: [start_frame, end_frame)
start_frame = proposal.start_frame
end_frame = proposal.end_frame
ori_clip_len = self.clip_len * self.frame_interval
duration = end_frame - start_frame
assert duration != 0
valid_length = duration - ori_clip_len
valid_starting = max(0,
start_frame - int(duration * self.aug_ratio[0]))
valid_ending = min(num_frames - ori_clip_len + 1,
end_frame - 1 + int(duration * self.aug_ratio[1]))
valid_starting_length = start_frame - valid_starting - ori_clip_len
valid_ending_length = (valid_ending - end_frame + 1) - ori_clip_len
if self.mode == 'train':
starting_offsets = self._get_train_indices(valid_starting_length,
self.aug_segments[0])
course_offsets = self._get_train_indices(valid_length,
self.body_segments)
ending_offsets = self._get_train_indices(valid_ending_length,
self.aug_segments[1])
elif self.mode == 'val':
starting_offsets = self._get_val_indices(valid_starting_length,
self.aug_segments[0])
course_offsets = self._get_val_indices(valid_length,
self.body_segments)
ending_offsets = self._get_val_indices(valid_ending_length,
self.aug_segments[1])
starting_offsets += valid_starting
course_offsets += start_frame
ending_offsets += end_frame
offsets = np.concatenate(
(starting_offsets, course_offsets, ending_offsets))
return offsets
def _get_train_clips(self, num_frames, proposals):
"""Get clip offsets in train mode.
It will calculate sampled frame indices of each proposal, and then
assemble them.
Args:
num_frames (int): Total number of frame in the video.
proposals (list): Proposals fetched.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
clip_offsets = []
for proposal in proposals:
proposal_clip_offsets = self._get_proposal_clips(
proposal[0][1], num_frames)
clip_offsets = np.concatenate(
[clip_offsets, proposal_clip_offsets])
return clip_offsets
def _get_test_clips(self, num_frames):
"""Get clip offsets in test mode.
It will calculate sampled frame indices based on test interval.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
return np.arange(
0, num_frames - ori_clip_len, self.test_interval, dtype=np.int)
def _sample_clips(self, num_frames, proposals):
"""Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frame in the video.
proposals (list | None): Proposals fetched.
It is set to None in test mode.
Returns:
np.ndarray: Sampled frame indices.
"""
if self.mode == 'test':
clip_offsets = self._get_test_clips(num_frames)
else:
assert proposals is not None
clip_offsets = self._get_train_clips(num_frames, proposals)
return clip_offsets
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
out_proposals = results.get('out_proposals', None)
clip_offsets = self._sample_clips(total_frames, out_proposals)
frame_inds = clip_offsets[:, None] + np.arange(
self.clip_len)[None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
self.frame_interval, size=len(frame_inds))
frame_inds += perframe_offsets
start_index = results['start_index']
frame_inds = np.mod(frame_inds, total_frames) + start_index
results['frame_inds'] = np.array(frame_inds).astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = (
self.body_segments + self.aug_segments[0] + self.aug_segments[1])
if self.mode in ['train', 'val']:
results['num_proposals'] = len(results['out_proposals'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'body_segments={self.body_segments}, '
f'aug_segments={self.aug_segments}, '
f'aug_ratio={self.aug_ratio}, '
f'frame_interval={self.frame_interval}, '
f'test_interval={self.test_interval}, '
f'temporal_jitter={self.temporal_jitter}, '
f'mode={self.mode})')
return repr_str
@PIPELINES.register_module()
class PyAVInit:
"""Using pyav to initialize the video.
PyAV: https://github.com/mikeboers/PyAV
Required keys are "filename",
added or modified keys are "video_reader", and "total_frames".
Args:
io_backend (str): io backend where frames are store.
Default: 'disk'.
kwargs (dict): Args for file client.
"""
def __init__(self, io_backend='disk', **kwargs):
self.io_backend = io_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the PyAV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import av
except ImportError:
raise ImportError('Please run "conda install av -c conda-forge" '
'or "pip install av" to install PyAV first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
container = av.open(file_obj)
results['video_reader'] = container
results['total_frames'] = container.streams.video[0].frames
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}(io_backend={self.io_backend})'
return repr_str
@PIPELINES.register_module()
class PyAVDecode:
"""Using pyav to decode the video.
PyAV: https://github.com/mikeboers/PyAV
Required keys are "video_reader" and "frame_inds",
added or modified keys are "imgs", "img_shape" and "original_shape".
Args:
multi_thread (bool): If set to True, it will apply multi
thread processing. Default: False.
"""
def __init__(self, multi_thread=False):
self.multi_thread = multi_thread
def __call__(self, results):
"""Perform the PyAV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if self.multi_thread:
container.streams.video[0].thread_type = 'AUTO'
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
# set max index to make early stop
max_inds = max(results['frame_inds'])
i = 0
for frame in container.decode(video=0):
if i > max_inds + 1:
break
imgs.append(frame.to_rgb().to_ndarray())
i += 1
results['video_reader'] = None
del container
# the number of frames available from pyav may be less than the stream
# length, which may raise an error
results['imgs'] = [imgs[i % len(imgs)] for i in results['frame_inds']]
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(multi_thread={self.multi_thread})'
return repr_str
@PIPELINES.register_module()
class PyAVDecodeMotionVector(PyAVDecode):
"""Using pyav to decode the motion vectors from video.
Reference: https://github.com/PyAV-Org/PyAV/
blob/main/tests/test_decode.py
Required keys are "video_reader" and "frame_inds",
added or modified keys are "motion_vectors", "frame_inds".
Args:
multi_thread (bool): If set to True, it will apply multi
thread processing. Default: False.
"""
@staticmethod
def _parse_vectors(mv, vectors, height, width):
"""Parse the returned vectors."""
(w, h, src_x, src_y, dst_x,
dst_y) = (vectors['w'], vectors['h'], vectors['src_x'],
vectors['src_y'], vectors['dst_x'], vectors['dst_y'])
val_x = dst_x - src_x
val_y = dst_y - src_y
start_x = dst_x - w // 2
start_y = dst_y - h // 2
end_x = start_x + w
end_y = start_y + h
for sx, ex, sy, ey, vx, vy in zip(start_x, end_x, start_y, end_y,
val_x, val_y):
if (sx >= 0 and ex < width and sy >= 0 and ey < height):
mv[sy:ey, sx:ex] = (vx, vy)
return mv
def __call__(self, results):
"""Perform the PyAV motion vector decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if self.multi_thread:
container.streams.video[0].thread_type = 'AUTO'
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
# set max index to make early stop
max_idx = max(results['frame_inds'])
i = 0
stream = container.streams.video[0]
codec_context = stream.codec_context
codec_context.options = {'flags2': '+export_mvs'}
for packet in container.demux(stream):
for frame in packet.decode():
if i > max_idx + 1:
break
i += 1
height = frame.height
width = frame.width
mv = np.zeros((height, width, 2), dtype=np.int8)
vectors = frame.side_data.get('MOTION_VECTORS')
if frame.key_frame:
# Key frames don't have motion vectors
assert vectors is None
if vectors is not None and len(vectors) > 0:
mv = self._parse_vectors(mv, vectors.to_ndarray(), height,
width)
imgs.append(mv)
results['video_reader'] = None
del container
# the number of frames available from pyav may be less than the stream
# length, which may raise an error
results['motion_vectors'] = np.array(
[imgs[i % len(imgs)] for i in results['frame_inds']])
return results
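# Illustrative sketch (not part of the original file): how one decoded
# motion-vector record (a w x h block centred at its destination) is painted
# into the (H, W, 2) displacement map, mirroring `_parse_vectors` for a
# single block with made-up coordinates.
def _example_paint_motion_vector(height=8, width=8):
    import numpy as np
    mv = np.zeros((height, width, 2), dtype=np.int8)
    w, h, src_x, src_y, dst_x, dst_y = 4, 4, 2, 2, 4, 5
    start_x, start_y = dst_x - w // 2, dst_y - h // 2
    end_x, end_y = start_x + w, start_y + h
    if start_x >= 0 and end_x < width and start_y >= 0 and end_y < height:
        mv[start_y:end_y, start_x:end_x] = (dst_x - src_x, dst_y - src_y)
    return mv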
@PIPELINES.register_module()
class DecordInit:
"""Using decord to initialize the video_reader.
Decord: https://github.com/dmlc/decord
Required keys are "filename",
added or modified keys are "video_reader" and "total_frames".
"""
def __init__(self, io_backend='disk', num_threads=1, **kwargs):
self.io_backend = io_backend
self.num_threads = num_threads
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the Decord initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import decord
except ImportError:
raise ImportError(
'Please run "pip install decord" to install Decord first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
container = decord.VideoReader(file_obj, num_threads=self.num_threads)
results['video_reader'] = container
results['total_frames'] = len(container)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend}, '
f'num_threads={self.num_threads})')
return repr_str
@PIPELINES.register_module()
class DecordDecode:
"""Using decord to decode the video.
Decord: https://github.com/dmlc/decord
Required keys are "video_reader", "filename" and "frame_inds",
added or modified keys are "imgs" and "original_shape".
"""
def __call__(self, results):
"""Perform the Decord decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
frame_inds = results['frame_inds']
# Generate frame index mapping in order
frame_dict = {
idx: container[idx].asnumpy()
for idx in np.unique(frame_inds)
}
imgs = [frame_dict[idx] for idx in frame_inds]
results['video_reader'] = None
del container
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
@PIPELINES.register_module()
class OpenCVInit:
"""Using OpenCV to initialize the video_reader.
Required keys are "filename", added or modified keys are "new_path",
"video_reader" and "total_frames".
"""
def __init__(self, io_backend='disk', **kwargs):
self.io_backend = io_backend
self.kwargs = kwargs
self.file_client = None
self.tmp_folder = None
if self.io_backend != 'disk':
random_string = get_random_string()
thread_id = get_thread_id()
self.tmp_folder = osp.join(get_shm_dir(),
f'{random_string}_{thread_id}')
os.mkdir(self.tmp_folder)
def __call__(self, results):
"""Perform the OpenCV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if self.io_backend == 'disk':
new_path = results['filename']
else:
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
thread_id = get_thread_id()
# save the file of same thread at the same place
new_path = osp.join(self.tmp_folder, f'tmp_{thread_id}.mp4')
with open(new_path, 'wb') as f:
f.write(self.file_client.get(results['filename']))
container = mmcv.VideoReader(new_path)
results['new_path'] = new_path
results['video_reader'] = container
results['total_frames'] = len(container)
return results
def __del__(self):
if self.tmp_folder and osp.exists(self.tmp_folder):
shutil.rmtree(self.tmp_folder)
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend})')
return repr_str
@PIPELINES.register_module()
class OpenCVDecode:
"""Using OpenCV to decode the video.
Required keys are "video_reader", "filename" and "frame_inds", added or
modified keys are "imgs", "img_shape" and "original_shape".
"""
def __call__(self, results):
"""Perform the OpenCV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
for frame_ind in results['frame_inds']:
cur_frame = container[frame_ind]
# last frame may be None in OpenCV
            while cur_frame is None:
frame_ind -= 1
cur_frame = container[frame_ind]
imgs.append(cur_frame)
results['video_reader'] = None
del container
imgs = np.array(imgs)
# The default channel order of OpenCV is BGR, thus we change it to RGB
imgs = imgs[:, :, :, ::-1]
results['imgs'] = list(imgs)
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
@PIPELINES.register_module()
class RawFrameDecode:
"""Load and decode frames with given indices.
Required keys are "frame_dir", "filename_tmpl" and "frame_inds",
added or modified keys are "imgs", "img_shape" and "original_shape".
Args:
io_backend (str): IO backend where frames are stored. Default: 'disk'.
decoding_backend (str): Backend used for image decoding.
Default: 'cv2'.
kwargs (dict, optional): Arguments for FileClient.
"""
def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs):
self.io_backend = io_backend
self.decoding_backend = decoding_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the ``RawFrameDecode`` to pick frames given indices.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
directory = results['frame_dir']
filename_tmpl = results['filename_tmpl']
modality = results['modality']
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
offset = results.get('offset', 0)
for frame_idx in results['frame_inds']:
frame_idx += offset
if modality == 'RGB':
filepath = osp.join(directory, filename_tmpl.format(frame_idx))
img_bytes = self.file_client.get(filepath)
# Get frame with channel order RGB directly.
cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(cur_frame)
elif modality == 'Flow':
x_filepath = osp.join(directory,
filename_tmpl.format('x', frame_idx))
y_filepath = osp.join(directory,
filename_tmpl.format('y', frame_idx))
x_img_bytes = self.file_client.get(x_filepath)
x_frame = mmcv.imfrombytes(x_img_bytes, flag='grayscale')
y_img_bytes = self.file_client.get(y_filepath)
y_frame = mmcv.imfrombytes(y_img_bytes, flag='grayscale')
imgs.extend([x_frame, y_frame])
else:
raise NotImplementedError
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend}, '
f'decoding_backend={self.decoding_backend})')
return repr_str
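# Illustrative note (an assumption, not taken from this file): for RGB frames
# the template is formatted with the frame index only, e.g.
# filename_tmpl='img_{:05}.jpg' -> 'img_00001.jpg', while for Flow it is
# formatted with the direction and the index, e.g.
# filename_tmpl='{}_{:05}.jpg' -> 'x_00001.jpg' / 'y_00001.jpg'.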
@PIPELINES.register_module()
class ImageDecode:
"""Load and decode images.
Required key is "filename", added or modified keys are "imgs", "img_shape"
and "original_shape".
Args:
io_backend (str): IO backend where frames are stored. Default: 'disk'.
decoding_backend (str): Backend used for image decoding.
Default: 'cv2'.
kwargs (dict, optional): Arguments for FileClient.
"""
def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs):
self.io_backend = io_backend
self.decoding_backend = decoding_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the ``ImageDecode`` to load image given the file path.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
filename = results['filename']
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(img)
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
@PIPELINES.register_module()
class AudioDecodeInit:
"""Using librosa to initialize the audio reader.
Required keys are "audio_path", added or modified keys are "length",
"sample_rate", "audios".
Args:
        io_backend (str): IO backend where frames are stored.
Default: 'disk'.
sample_rate (int): Audio sampling times per second. Default: 16000.
"""
def __init__(self,
io_backend='disk',
sample_rate=16000,
pad_method='zero',
**kwargs):
self.io_backend = io_backend
self.sample_rate = sample_rate
if pad_method in ['random', 'zero']:
self.pad_method = pad_method
else:
raise NotImplementedError
self.kwargs = kwargs
self.file_client = None
@staticmethod
def _zero_pad(shape):
return np.zeros(shape, dtype=np.float32)
@staticmethod
def _random_pad(shape):
# librosa load raw audio file into a distribution of -1~+1
return np.random.rand(shape).astype(np.float32) * 2 - 1
def __call__(self, results):
"""Perform the librosa initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import librosa
except ImportError:
raise ImportError('Please install librosa first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
if osp.exists(results['audio_path']):
file_obj = io.BytesIO(self.file_client.get(results['audio_path']))
y, sr = librosa.load(file_obj, sr=self.sample_rate)
else:
# Generate a random dummy 10s input
pad_func = getattr(self, f'_{self.pad_method}_pad')
y = pad_func(int(round(10.0 * self.sample_rate)))
sr = self.sample_rate
results['length'] = y.shape[0]
results['sample_rate'] = sr
results['audios'] = y
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend}, '
f'sample_rate={self.sample_rate}, '
f'pad_method={self.pad_method})')
return repr_str
@PIPELINES.register_module()
class LoadAudioFeature:
"""Load offline extracted audio features.
Required keys are "audio_path", added or modified keys are "length",
audios".
"""
def __init__(self, pad_method='zero'):
if pad_method not in ['zero', 'random']:
raise NotImplementedError
self.pad_method = pad_method
@staticmethod
def _zero_pad(shape):
return np.zeros(shape, dtype=np.float32)
@staticmethod
def _random_pad(shape):
# spectrogram is normalized into a distribution of 0~1
return np.random.rand(shape).astype(np.float32)
def __call__(self, results):
"""Perform the numpy loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if osp.exists(results['audio_path']):
feature_map = np.load(results['audio_path'])
else:
# Generate a random dummy 10s input
# Some videos do not have audio stream
pad_func = getattr(self, f'_{self.pad_method}_pad')
feature_map = pad_func((640, 80))
results['length'] = feature_map.shape[0]
results['audios'] = feature_map
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'pad_method={self.pad_method})')
return repr_str
@PIPELINES.register_module()
class AudioDecode:
"""Sample the audio w.r.t. the frames selected.
Args:
fixed_length (int): As the audio clip selected by frames sampled may
not be exactly the same, `fixed_length` will truncate or pad them
into the same size. Default: 32000.
Required keys are "frame_inds", "num_clips", "total_frames", "length",
added or modified keys are "audios", "audios_shape".
"""
def __init__(self, fixed_length=32000):
self.fixed_length = fixed_length
def __call__(self, results):
"""Perform the ``AudioDecode`` to pick audio clips."""
audio = results['audios']
frame_inds = results['frame_inds']
num_clips = results['num_clips']
resampled_clips = list()
frame_inds = frame_inds.reshape(num_clips, -1)
for clip_idx in range(num_clips):
clip_frame_inds = frame_inds[clip_idx]
start_idx = max(
0,
int(
round((clip_frame_inds[0] + 1) / results['total_frames'] *
results['length'])))
end_idx = min(
results['length'],
int(
round((clip_frame_inds[-1] + 1) / results['total_frames'] *
results['length'])))
cropped_audio = audio[start_idx:end_idx]
if cropped_audio.shape[0] >= self.fixed_length:
truncated_audio = cropped_audio[:self.fixed_length]
else:
truncated_audio = np.pad(
cropped_audio,
((0, self.fixed_length - cropped_audio.shape[0])),
mode='constant')
resampled_clips.append(truncated_audio)
results['audios'] = np.array(resampled_clips)
results['audios_shape'] = results['audios'].shape
return results
@PIPELINES.register_module()
class BuildPseudoClip:
"""Build pseudo clips with one single image by repeating it n times.
Required key is "imgs", added or modified key is "imgs", "num_clips",
"clip_len".
Args:
clip_len (int): Frames of the generated pseudo clips.
"""
def __init__(self, clip_len):
self.clip_len = clip_len
def __call__(self, results):
# the input should be one single image
assert len(results['imgs']) == 1
im = results['imgs'][0]
for _ in range(1, self.clip_len):
results['imgs'].append(np.copy(im))
results['clip_len'] = self.clip_len
results['num_clips'] = 1
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
                    f'clip_len={self.clip_len})')
return repr_str
@PIPELINES.register_module()
class FrameSelector(RawFrameDecode):
"""Deprecated class for ``RawFrameDecode``."""
def __init__(self, *args, **kwargs):
warnings.warn('"FrameSelector" is deprecated, please switch to'
'"RawFrameDecode"')
super().__init__(*args, **kwargs)
@PIPELINES.register_module()
class AudioFeatureSelector:
"""Sample the audio feature w.r.t. the frames selected.
Required keys are "audios", "frame_inds", "num_clips", "length",
"total_frames", added or modified keys are "audios", "audios_shape".
Args:
fixed_length (int): As the features selected by frames sampled may
            not be exactly the same, `fixed_length` will truncate or pad them
into the same size. Default: 128.
"""
def __init__(self, fixed_length=128):
self.fixed_length = fixed_length
def __call__(self, results):
"""Perform the ``AudioFeatureSelector`` to pick audio feature clips.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
audio = results['audios']
frame_inds = results['frame_inds']
num_clips = results['num_clips']
resampled_clips = list()
frame_inds = frame_inds.reshape(num_clips, -1)
for clip_idx in range(num_clips):
clip_frame_inds = frame_inds[clip_idx]
start_idx = max(
0,
int(
round((clip_frame_inds[0] + 1) / results['total_frames'] *
results['length'])))
end_idx = min(
results['length'],
int(
round((clip_frame_inds[-1] + 1) / results['total_frames'] *
results['length'])))
cropped_audio = audio[start_idx:end_idx, :]
if cropped_audio.shape[0] >= self.fixed_length:
truncated_audio = cropped_audio[:self.fixed_length, :]
else:
truncated_audio = np.pad(
cropped_audio,
((0, self.fixed_length - cropped_audio.shape[0]), (0, 0)),
mode='constant')
resampled_clips.append(truncated_audio)
results['audios'] = np.array(resampled_clips)
results['audios_shape'] = results['audios'].shape
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
                    f'fixed_length={self.fixed_length})')
return repr_str
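# Worked example (illustrative numbers only): with total_frames=300, length=600
# and a clip whose frame_inds span 30..90, the selected slice is roughly
# audio[62:182]; its 120 rows are fewer than fixed_length=128, so the feature
# map is zero-padded along the time axis to 128 rows.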
@PIPELINES.register_module()
class LoadLocalizationFeature:
"""Load Video features for localizer with given video_name list.
Required keys are "video_name" and "data_prefix", added or modified keys
are "raw_feature".
Args:
raw_feature_ext (str): Raw feature file extension. Default: '.csv'.
"""
def __init__(self, raw_feature_ext='.csv'):
valid_raw_feature_ext = ('.csv', )
if raw_feature_ext not in valid_raw_feature_ext:
raise NotImplementedError
self.raw_feature_ext = raw_feature_ext
def __call__(self, results):
"""Perform the LoadLocalizationFeature loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_name = results['video_name']
data_prefix = results['data_prefix']
data_path = osp.join(data_prefix, video_name + self.raw_feature_ext)
raw_feature = np.loadtxt(
data_path, dtype=np.float32, delimiter=',', skiprows=1)
results['raw_feature'] = np.transpose(raw_feature, (1, 0))
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'raw_feature_ext={self.raw_feature_ext})')
return repr_str
@PIPELINES.register_module()
class GenerateLocalizationLabels:
"""Load video label for localizer with given video_name list.
Required keys are "duration_frame", "duration_second", "feature_frame",
"annotations", added or modified keys are "gt_bbox".
"""
def __call__(self, results):
"""Perform the GenerateLocalizationLabels loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_frame = results['duration_frame']
video_second = results['duration_second']
feature_frame = results['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second
annotations = results['annotations']
gt_bbox = []
for annotation in annotations:
current_start = max(
min(1, annotation['segment'][0] / corrected_second), 0)
current_end = max(
min(1, annotation['segment'][1] / corrected_second), 0)
gt_bbox.append([current_start, current_end])
gt_bbox = np.array(gt_bbox)
results['gt_bbox'] = gt_bbox
return results
@PIPELINES.register_module()
class LoadProposals:
"""Loading proposals with given proposal results.
Required keys are "video_name", added or modified keys are 'bsp_feature',
'tmin', 'tmax', 'tmin_score', 'tmax_score' and 'reference_temporal_iou'.
Args:
top_k (int): The top k proposals to be loaded.
pgm_proposals_dir (str): Directory to load proposals.
pgm_features_dir (str): Directory to load proposal features.
proposal_ext (str): Proposal file extension. Default: '.csv'.
feature_ext (str): Feature file extension. Default: '.npy'.
"""
def __init__(self,
top_k,
pgm_proposals_dir,
pgm_features_dir,
proposal_ext='.csv',
feature_ext='.npy'):
self.top_k = top_k
self.pgm_proposals_dir = pgm_proposals_dir
self.pgm_features_dir = pgm_features_dir
valid_proposal_ext = ('.csv', )
if proposal_ext not in valid_proposal_ext:
raise NotImplementedError
self.proposal_ext = proposal_ext
valid_feature_ext = ('.npy', )
if feature_ext not in valid_feature_ext:
raise NotImplementedError
self.feature_ext = feature_ext
def __call__(self, results):
"""Perform the LoadProposals loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_name = results['video_name']
proposal_path = osp.join(self.pgm_proposals_dir,
video_name + self.proposal_ext)
if self.proposal_ext == '.csv':
pgm_proposals = np.loadtxt(
proposal_path, dtype=np.float32, delimiter=',', skiprows=1)
pgm_proposals = np.array(pgm_proposals[:self.top_k])
tmin = pgm_proposals[:, 0]
tmax = pgm_proposals[:, 1]
tmin_score = pgm_proposals[:, 2]
tmax_score = pgm_proposals[:, 3]
reference_temporal_iou = pgm_proposals[:, 5]
feature_path = osp.join(self.pgm_features_dir,
video_name + self.feature_ext)
if self.feature_ext == '.npy':
bsp_feature = np.load(feature_path).astype(np.float32)
bsp_feature = bsp_feature[:self.top_k, :]
results['bsp_feature'] = bsp_feature
results['tmin'] = tmin
results['tmax'] = tmax
results['tmin_score'] = tmin_score
results['tmax_score'] = tmax_score
results['reference_temporal_iou'] = reference_temporal_iou
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'top_k={self.top_k}, '
f'pgm_proposals_dir={self.pgm_proposals_dir}, '
f'pgm_features_dir={self.pgm_features_dir}, '
f'proposal_ext={self.proposal_ext}, '
f'feature_ext={self.feature_ext})')
return repr_str
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/loading.py |
from collections.abc import Sequence
from mmcv.utils import build_from_cfg
from ..registry import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose a data pipeline with a sequence of transforms.
Args:
transforms (list[dict | callable]):
Either config dicts of transforms or transform objects.
"""
def __init__(self, transforms):
assert isinstance(transforms, Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError(f'transform must be callable or a dict, '
f'but got {type(transform)}')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
data (dict): A result dict contains the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
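# Minimal usage sketch (added for illustration; the transforms listed are
# placeholders): Compose accepts a mix of config dicts and callables and
# applies them in order, returning None as soon as any transform returns None.
#
# from mmaction.datasets.pipelines import Compose
# pipeline = Compose([
#     dict(type='DecordInit'),
#     dict(type='DecordDecode'),
#     lambda results: results,  # plain callables are accepted as well
# ])
# results = pipeline(data)  # `data` must provide the keys each transform needs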
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/compose.py |
from .augmentations import (AudioAmplify, CenterCrop, ColorJitter,
EntityBoxClip, EntityBoxCrop, EntityBoxFlip,
EntityBoxPad, EntityBoxRescale, Flip, Fuse,
MelSpectrogram, MultiGroupCrop, MultiScaleCrop,
Normalize, RandomCrop, RandomRescale,
RandomResizedCrop, RandomScale, Resize, TenCrop,
ThreeCrop)
from .compose import Compose
from .formating import (Collect, FormatAudioShape, FormatShape, ImageToTensor,
ToDataContainer, ToTensor, Transpose)
from .loading import (AudioDecode, AudioDecodeInit, AudioFeatureSelector,
BuildPseudoClip, DecordDecode, DecordInit,
DenseSampleFrames, FrameSelector,
GenerateLocalizationLabels, ImageDecode,
LoadAudioFeature, LoadHVULabel, LoadLocalizationFeature,
LoadProposals, OpenCVDecode, OpenCVInit, PyAVDecode,
PyAVDecodeMotionVector, PyAVInit, RawFrameDecode,
SampleAVAFrames, SampleFrames, SampleProposalFrames,
UntrimmedSampleFrames)
__all__ = [
'SampleFrames', 'PyAVDecode', 'DecordDecode', 'DenseSampleFrames',
'OpenCVDecode', 'FrameSelector', 'MultiGroupCrop', 'MultiScaleCrop',
'RandomResizedCrop', 'RandomCrop', 'Resize', 'Flip', 'Fuse', 'Normalize',
'ThreeCrop', 'CenterCrop', 'TenCrop', 'ImageToTensor', 'Transpose',
'Collect', 'FormatShape', 'Compose', 'ToTensor', 'ToDataContainer',
'GenerateLocalizationLabels', 'LoadLocalizationFeature', 'LoadProposals',
'DecordInit', 'OpenCVInit', 'PyAVInit', 'SampleProposalFrames',
'UntrimmedSampleFrames', 'RawFrameDecode', 'DecordInit', 'OpenCVInit',
'PyAVInit', 'SampleProposalFrames', 'ColorJitter', 'LoadHVULabel',
'SampleAVAFrames', 'AudioAmplify', 'MelSpectrogram', 'AudioDecode',
'FormatAudioShape', 'LoadAudioFeature', 'AudioFeatureSelector',
'AudioDecodeInit', 'EntityBoxPad', 'EntityBoxFlip', 'EntityBoxCrop',
'EntityBoxRescale', 'EntityBoxClip', 'RandomScale', 'ImageDecode',
'BuildPseudoClip', 'RandomRescale', 'PyAVDecodeMotionVector'
]
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/__init__.py |
import random
from collections.abc import Sequence
import mmcv
import numpy as np
from torch.nn.modules.utils import _pair
from ..registry import PIPELINES
def _init_lazy_if_proper(results, lazy):
"""Initialize lazy operation properly.
Make sure that a lazy operation is properly initialized,
and avoid a non-lazy operation accidentally getting mixed in.
Required keys in results are "imgs" if "img_shape" not in results,
otherwise, Required keys in results are "img_shape", add or modified keys
are "img_shape", "lazy".
Add or modified keys in "lazy" are "original_shape", "crop_bbox", "flip",
"flip_direction", "interpolation".
Args:
results (dict): A dict stores data pipeline result.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
if 'img_shape' not in results:
results['img_shape'] = results['imgs'][0].shape[:2]
if lazy:
if 'lazy' not in results:
img_h, img_w = results['img_shape']
lazyop = dict()
lazyop['original_shape'] = results['img_shape']
lazyop['crop_bbox'] = np.array([0, 0, img_w, img_h],
dtype=np.float32)
lazyop['flip'] = False
lazyop['flip_direction'] = None
lazyop['interpolation'] = None
results['lazy'] = lazyop
else:
assert 'lazy' not in results, 'Use Fuse after lazy operations'
@PIPELINES.register_module()
class Fuse:
"""Fuse lazy operations.
Fusion order:
crop -> resize -> flip
Required keys are "imgs", "img_shape" and "lazy", added or modified keys
are "imgs", "lazy".
Required keys in "lazy" are "crop_bbox", "interpolation", "flip_direction".
"""
def __call__(self, results):
if 'lazy' not in results:
raise ValueError('No lazy operation detected')
lazyop = results['lazy']
imgs = results['imgs']
# crop
left, top, right, bottom = lazyop['crop_bbox'].round().astype(int)
imgs = [img[top:bottom, left:right] for img in imgs]
# resize
img_h, img_w = results['img_shape']
if lazyop['interpolation'] is None:
interpolation = 'bilinear'
else:
interpolation = lazyop['interpolation']
imgs = [
mmcv.imresize(img, (img_w, img_h), interpolation=interpolation)
for img in imgs
]
# flip
if lazyop['flip']:
for img in imgs:
mmcv.imflip_(img, lazyop['flip_direction'])
results['imgs'] = imgs
del results['lazy']
return results
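# Illustrative sketch of a lazy pipeline (parameter values are placeholders):
# geometric transforms run with lazy=True only record their parameters, and
# Fuse applies the accumulated crop -> resize -> flip once at the end.
#
# pipeline = [
#     dict(type='RandomResizedCrop', lazy=True),
#     dict(type='Resize', scale=(224, 224), keep_ratio=False, lazy=True),
#     dict(type='Flip', flip_ratio=0.5, lazy=True),
#     dict(type='Fuse'),
# ]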
@PIPELINES.register_module()
class RandomScale:
"""Resize images by a random scale.
Required keys are "imgs", "img_shape", "modality", added or modified
keys are "imgs", "img_shape", "keep_ratio", "scale_factor", "lazy",
"scale", "resize_size". Required keys in "lazy" is None, added or
modified key is "interpolation".
Args:
scales (tuple[int]): Tuple of scales to be chosen for resize.
mode (str): Selection mode for choosing the scale. Options are "range"
and "value". If set to "range", The short edge will be randomly
chosen from the range of minimum and maximum on the shorter one
in all tuples. Otherwise, the longer edge will be randomly chosen
from the range of minimum and maximum on the longer one in all
tuples. Default: 'range'.
"""
def __init__(self, scales, mode='range', **kwargs):
self.mode = mode
if self.mode not in ['range', 'value']:
raise ValueError(f"mode should be 'range' or 'value', "
f'but got {self.mode}')
self.scales = scales
self.kwargs = kwargs
def select_scale(self, scales):
num_scales = len(scales)
if num_scales == 1:
# specify a fixed scale
scale = scales[0]
elif num_scales == 2:
if self.mode == 'range':
scale_long = [max(s) for s in scales]
scale_short = [min(s) for s in scales]
long_edge = np.random.randint(
min(scale_long),
max(scale_long) + 1)
short_edge = np.random.randint(
min(scale_short),
max(scale_short) + 1)
scale = (long_edge, short_edge)
elif self.mode == 'value':
scale = random.choice(scales)
else:
if self.mode != 'value':
raise ValueError("Only 'value' mode supports more than "
'2 image scales')
scale = random.choice(scales)
return scale
def __call__(self, results):
scale = self.select_scale(self.scales)
results['scale'] = scale
resize = Resize(scale, **self.kwargs)
results = resize(results)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'scales={self.scales}, mode={self.mode})')
return repr_str
@PIPELINES.register_module()
class EntityBoxRescale:
"""Rescale the entity box and proposals according to the image shape.
Required keys are "img_shape", "scale_factor", "proposals",
"ann.entity_boxes", added or modified keys are "ann.entity_boxes". If
original "proposals" is not None, "proposals" and "scores" will be added or
modified.
"""
def __call__(self, results):
img_h, img_w = results['img_shape']
scale_factor = results['scale_factor']
scale_factor = np.array([
scale_factor[0], scale_factor[1], scale_factor[0], scale_factor[1]
])
proposals = results['proposals']
entity_boxes = results['ann']['entity_boxes']
img_scale = np.array([img_w, img_h, img_w, img_h])
entity_boxes = (entity_boxes * img_scale).astype(np.float32)
results['ann']['entity_boxes'] = entity_boxes * scale_factor
if proposals is not None:
if proposals.shape[1] not in (4, 5):
raise AssertionError('proposals shape should be in (n, 4) or '
f'(n, 5), but got {proposals.shape}')
if proposals.shape[1] == 5:
scores = proposals[:, 4].astype(np.float32)
proposals = proposals[:, :4]
else:
scores = None
proposals = (proposals * img_scale).astype(np.float32)
results['proposals'] = proposals * scale_factor
results['scores'] = scores
return results
@PIPELINES.register_module()
class EntityBoxCrop:
"""Crop the entity boxes and proposals according to the cropped images.
Required keys are "proposals", "ann.entity_boxes", "crop_bbox", added or
modified keys are "ann.entity_boxes". If original "proposals" is not None,
"proposals" will be modified.
"""
def __call__(self, results):
proposals = results['proposals']
entity_boxes = results['ann']['entity_boxes']
crop_bboxes = results['crop_bbox']
if crop_bboxes is None:
return results
x1, y1, _, _ = crop_bboxes
assert entity_boxes.shape[-1] % 4 == 0
entity_boxes_ = entity_boxes.copy()
entity_boxes_[..., 0::2] = entity_boxes[..., 0::2] - x1
entity_boxes_[..., 1::2] = entity_boxes[..., 1::2] - y1
results['ann']['entity_boxes'] = entity_boxes_
if proposals is not None:
assert proposals.shape[-1] % 4 == 0
proposals_ = proposals.copy()
proposals_[..., 0::2] = proposals[..., 0::2] - x1
proposals_[..., 1::2] = proposals[..., 1::2] - y1
results['proposals'] = proposals_
return results
@PIPELINES.register_module()
class EntityBoxFlip:
"""Flip the entity boxes and proposals with a probability.
Reverse the order of elements in the given bounding boxes and proposals
with a specific direction. The shape of them are preserved, but the
elements are reordered.
Required keys are "proposals", "img_shape", "ann.entity_boxes", added or
modified keys are "flip", "flip_direction", "ann.entity_boxes". If
"proposals" is not, it will also be modified.
Args:
flip_ratio (float): Probability of implementing flip. Default: 0.5.
direction (str): Flip imgs horizontally or vertically. Options are
"horizontal" | "vertical". Default: "horizontal".
"""
_directions = ['horizontal', 'vertical']
def __init__(self, flip_ratio=0.5, direction='horizontal'):
if direction not in self._directions:
raise ValueError(f'Direction {direction} is not supported. '
f'Currently support ones are {self._directions}')
self.flip_ratio = flip_ratio
self.direction = direction
def __call__(self, results):
flip = np.random.rand() < self.flip_ratio
results['flip'] = flip
results['flip_direction'] = self.direction
proposals = results['proposals']
entity_boxes = results['ann']['entity_boxes']
img_h, img_w = results['img_shape']
if flip:
if self.direction == 'horizontal':
assert entity_boxes.shape[-1] % 4 == 0
entity_boxes_ = entity_boxes.copy()
entity_boxes_[..., 0::4] = img_w - entity_boxes[..., 2::4] - 1
entity_boxes_[..., 2::4] = img_w - entity_boxes[..., 0::4] - 1
if proposals is not None:
assert proposals.shape[-1] % 4 == 0
proposals_ = proposals.copy()
proposals_[..., 0::4] = img_w - proposals[..., 2::4] - 1
proposals_[..., 2::4] = img_w - proposals[..., 0::4] - 1
else:
proposals_ = None
else:
assert entity_boxes.shape[-1] % 4 == 0
entity_boxes_ = entity_boxes.copy()
entity_boxes_[..., 1::4] = img_h - entity_boxes[..., 3::4] - 1
entity_boxes_[..., 3::4] = img_h - entity_boxes[..., 1::4] - 1
if proposals is not None:
assert proposals.shape[-1] % 4 == 0
proposals_ = proposals.copy()
proposals_[..., 1::4] = img_h - proposals[..., 3::4] - 1
proposals_[..., 3::4] = img_h - proposals[..., 1::4] - 1
else:
proposals_ = None
results['proposals'] = proposals_
results['ann']['entity_boxes'] = entity_boxes_
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'flip_ratio={self.flip_ratio}, '
f'direction={self.direction})')
return repr_str
@PIPELINES.register_module()
class EntityBoxClip:
"""Clip (limit) the values in the entity boxes and proposals.
Required keys are "img_shape", "proposals" and "ann.entity_boxes", added or
modified keys are "ann.entity_boxes". If "proposals" is None, it will also
be modified.
"""
def __call__(self, results):
proposals = results['proposals']
entity_boxes = results['ann']['entity_boxes']
img_h, img_w = results['img_shape']
entity_boxes[:, 0::2] = np.clip(entity_boxes[:, 0::2], 0, img_w - 1)
entity_boxes[:, 1::2] = np.clip(entity_boxes[:, 1::2], 0, img_h - 1)
if proposals is not None:
proposals[:, 0::2] = np.clip(proposals[:, 0::2], 0, img_w - 1)
proposals[:, 1::2] = np.clip(proposals[:, 1::2], 0, img_h - 1)
results['ann']['entity_boxes'] = entity_boxes
results['proposals'] = proposals
return results
@PIPELINES.register_module()
class EntityBoxPad:
"""Pad entity boxes and proposals with zeros.
Required keys are "proposals" and "ann.entity_boxes", added or modified
keys are "ann.entity_boxes". If "proposals" is not None, it is also
modified.
Args:
max_num_gts (int | None): maximum of ground truth proposals.
Default: None.
"""
def __init__(self, max_num_gts=None):
self.max_num_gts = max_num_gts
def __call__(self, results):
if self.max_num_gts is None:
return results
proposals = results['proposals']
entity_boxes = results['ann']['entity_boxes']
num_gts = entity_boxes.shape[0]
padded_entity_boxes = np.zeros((self.max_num_gts, 4), dtype=np.float32)
padded_entity_boxes[:num_gts, :] = entity_boxes
if proposals is not None:
padded_proposals = np.zeros((self.max_num_gts, 4),
dtype=np.float32)
padded_proposals[:num_gts, :] = proposals
else:
padded_proposals = None
results['proposals'] = padded_proposals
results['ann']['entity_boxes'] = padded_entity_boxes
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}(max_num_gts={self.max_num_gts})'
return repr_str
@PIPELINES.register_module()
class RandomCrop:
"""Vanilla square random crop that specifics the output size.
Required keys in results are "imgs" and "img_shape", added or
modified keys are "imgs", "lazy"; Required keys in "lazy" are "flip",
"crop_bbox", added or modified key is "crop_bbox".
Args:
size (int): The output size of the images.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self, size, lazy=False):
if not isinstance(size, int):
raise TypeError(f'Size must be an int, but got {type(size)}')
self.size = size
self.lazy = lazy
def __call__(self, results):
"""Performs the RandomCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
img_h, img_w = results['img_shape']
assert self.size <= img_h and self.size <= img_w
y_offset = 0
x_offset = 0
if img_h > self.size:
y_offset = int(np.random.randint(0, img_h - self.size))
if img_w > self.size:
x_offset = int(np.random.randint(0, img_w - self.size))
new_h, new_w = self.size, self.size
results['crop_bbox'] = np.array(
[x_offset, y_offset, x_offset + new_w, y_offset + new_h])
results['img_shape'] = (new_h, new_w)
if not self.lazy:
results['imgs'] = [
img[y_offset:y_offset + new_h, x_offset:x_offset + new_w]
for img in results['imgs']
]
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = x_offset * (lazy_right - lazy_left) / img_w
right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
top = y_offset * (lazy_bottom - lazy_top) / img_h
bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}(size={self.size}, '
f'lazy={self.lazy})')
return repr_str
@PIPELINES.register_module()
class RandomResizedCrop:
"""Random crop that specifics the area and height-weight ratio range.
Required keys in results are "imgs", "img_shape", "crop_bbox" and "lazy",
added or modified keys are "imgs", "crop_bbox" and "lazy"; Required keys
in "lazy" are "flip", "crop_bbox", added or modified key is "crop_bbox".
Args:
area_range (Tuple[float]): The candidate area scales range of
output cropped images. Default: (0.08, 1.0).
aspect_ratio_range (Tuple[float]): The candidate aspect ratio range of
output cropped images. Default: (3 / 4, 4 / 3).
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self,
area_range=(0.08, 1.0),
aspect_ratio_range=(3 / 4, 4 / 3),
lazy=False):
self.area_range = area_range
self.aspect_ratio_range = aspect_ratio_range
self.lazy = lazy
if not mmcv.is_tuple_of(self.area_range, float):
raise TypeError(f'Area_range must be a tuple of float, '
f'but got {type(area_range)}')
if not mmcv.is_tuple_of(self.aspect_ratio_range, float):
raise TypeError(f'Aspect_ratio_range must be a tuple of float, '
f'but got {type(aspect_ratio_range)}')
@staticmethod
def get_crop_bbox(img_shape,
area_range,
aspect_ratio_range,
max_attempts=10):
"""Get a crop bbox given the area range and aspect ratio range.
Args:
img_shape (Tuple[int]): Image shape
area_range (Tuple[float]): The candidate area scales range of
output cropped images. Default: (0.08, 1.0).
aspect_ratio_range (Tuple[float]): The candidate aspect
ratio range of output cropped images. Default: (3 / 4, 4 / 3).
            max_attempts (int): Maximum number of attempts to generate a random
                candidate bounding box that satisfies the constraints. If no
                candidate qualifies, a center crop is used as the fallback.
                Default: 10.
Returns:
(list[int]) A random crop bbox within the area range and aspect
ratio range.
"""
assert 0 < area_range[0] <= area_range[1] <= 1
assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1]
img_h, img_w = img_shape
area = img_h * img_w
min_ar, max_ar = aspect_ratio_range
aspect_ratios = np.exp(
np.random.uniform(
np.log(min_ar), np.log(max_ar), size=max_attempts))
target_areas = np.random.uniform(*area_range, size=max_attempts) * area
candidate_crop_w = np.round(np.sqrt(target_areas *
aspect_ratios)).astype(np.int32)
candidate_crop_h = np.round(np.sqrt(target_areas /
aspect_ratios)).astype(np.int32)
for i in range(max_attempts):
crop_w = candidate_crop_w[i]
crop_h = candidate_crop_h[i]
if crop_h <= img_h and crop_w <= img_w:
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
return x_offset, y_offset, x_offset + crop_w, y_offset + crop_h
# Fallback
crop_size = min(img_h, img_w)
x_offset = (img_w - crop_size) // 2
y_offset = (img_h - crop_size) // 2
return x_offset, y_offset, x_offset + crop_size, y_offset + crop_size
def __call__(self, results):
"""Performs the RandomResizeCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
img_h, img_w = results['img_shape']
left, top, right, bottom = self.get_crop_bbox(
(img_h, img_w), self.area_range, self.aspect_ratio_range)
new_h, new_w = bottom - top, right - left
results['crop_bbox'] = np.array([left, top, right, bottom])
results['img_shape'] = (new_h, new_w)
if not self.lazy:
results['imgs'] = [
img[top:bottom, left:right] for img in results['imgs']
]
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = left * (lazy_right - lazy_left) / img_w
right = right * (lazy_right - lazy_left) / img_w
top = top * (lazy_bottom - lazy_top) / img_h
bottom = bottom * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'area_range={self.area_range}, '
f'aspect_ratio_range={self.aspect_ratio_range}, '
f'lazy={self.lazy})')
return repr_str
@PIPELINES.register_module()
class MultiScaleCrop:
"""Crop images with a list of randomly selected scales.
Randomly select the w and h scales from a list of scales. Scale of 1 means
    the base size, which is the minimum of image width and height. The scale
level of w and h is controlled to be smaller than a certain value to
prevent too large or small aspect ratio.
Required keys are "imgs", "img_shape", added or modified keys are "imgs",
"crop_bbox", "img_shape", "lazy" and "scales". Required keys in "lazy" are
"crop_bbox", added or modified key is "crop_bbox".
Args:
input_size (int | tuple[int]): (w, h) of network input.
        scales (tuple[float]): Width and height scales to be selected.
max_wh_scale_gap (int): Maximum gap of w and h scale levels.
Default: 1.
random_crop (bool): If set to True, the cropping bbox will be randomly
            sampled, otherwise it will be sampled from fixed regions.
Default: False.
num_fixed_crops (int): If set to 5, the cropping bbox will keep 5
basic fixed regions: "upper left", "upper right", "lower left",
"lower right", "center". If set to 13, the cropping bbox will
append another 8 fix regions: "center left", "center right",
"lower center", "upper center", "upper left quarter",
"upper right quarter", "lower left quarter", "lower right quarter".
Default: 5.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self,
input_size,
scales=(1, ),
max_wh_scale_gap=1,
random_crop=False,
num_fixed_crops=5,
lazy=False):
self.input_size = _pair(input_size)
if not mmcv.is_tuple_of(self.input_size, int):
raise TypeError(f'Input_size must be int or tuple of int, '
f'but got {type(input_size)}')
if not isinstance(scales, tuple):
raise TypeError(f'Scales must be tuple, but got {type(scales)}')
if num_fixed_crops not in [5, 13]:
raise ValueError(f'Num_fix_crops must be in {[5, 13]}, '
f'but got {num_fixed_crops}')
self.scales = scales
self.max_wh_scale_gap = max_wh_scale_gap
self.random_crop = random_crop
self.num_fixed_crops = num_fixed_crops
self.lazy = lazy
def __call__(self, results):
"""Performs the MultiScaleCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
img_h, img_w = results['img_shape']
base_size = min(img_h, img_w)
crop_sizes = [int(base_size * s) for s in self.scales]
candidate_sizes = []
for i, h in enumerate(crop_sizes):
for j, w in enumerate(crop_sizes):
if abs(i - j) <= self.max_wh_scale_gap:
candidate_sizes.append([w, h])
crop_size = random.choice(candidate_sizes)
for i in range(2):
if abs(crop_size[i] - self.input_size[i]) < 3:
crop_size[i] = self.input_size[i]
crop_w, crop_h = crop_size
if self.random_crop:
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
else:
w_step = (img_w - crop_w) // 4
h_step = (img_h - crop_h) // 4
candidate_offsets = [
(0, 0), # upper left
(4 * w_step, 0), # upper right
(0, 4 * h_step), # lower left
(4 * w_step, 4 * h_step), # lower right
(2 * w_step, 2 * h_step), # center
]
if self.num_fixed_crops == 13:
extra_candidate_offsets = [
(0, 2 * h_step), # center left
(4 * w_step, 2 * h_step), # center right
(2 * w_step, 4 * h_step), # lower center
(2 * w_step, 0 * h_step), # upper center
(1 * w_step, 1 * h_step), # upper left quarter
(3 * w_step, 1 * h_step), # upper right quarter
(1 * w_step, 3 * h_step), # lower left quarter
(3 * w_step, 3 * h_step) # lower right quarter
]
candidate_offsets.extend(extra_candidate_offsets)
x_offset, y_offset = random.choice(candidate_offsets)
new_h, new_w = crop_h, crop_w
results['crop_bbox'] = np.array(
[x_offset, y_offset, x_offset + new_w, y_offset + new_h])
results['img_shape'] = (new_h, new_w)
results['scales'] = self.scales
if not self.lazy:
results['imgs'] = [
img[y_offset:y_offset + new_h, x_offset:x_offset + new_w]
for img in results['imgs']
]
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = x_offset * (lazy_right - lazy_left) / img_w
right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
top = y_offset * (lazy_bottom - lazy_top) / img_h
bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'input_size={self.input_size}, scales={self.scales}, '
f'max_wh_scale_gap={self.max_wh_scale_gap}, '
f'random_crop={self.random_crop}, '
f'num_fixed_crops={self.num_fixed_crops}, '
f'lazy={self.lazy})')
return repr_str
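# Illustrative config sketch (values follow common TSN-style settings and are
# given only as an example, not taken from this repository):
#
# dict(type='MultiScaleCrop',
#      input_size=224,
#      scales=(1, 0.875, 0.75, 0.66),
#      random_crop=False,
#      max_wh_scale_gap=1)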
@PIPELINES.register_module()
class Resize:
"""Resize images to a specific size.
Required keys are "imgs", "img_shape", "modality", added or modified
keys are "imgs", "img_shape", "keep_ratio", "scale_factor", "lazy",
"resize_size". Required keys in "lazy" is None, added or modified key is
"interpolation".
Args:
scale (float | Tuple[int]): If keep_ratio is True, it serves as scaling
factor or maximum size:
If it is a float number, the image will be rescaled by this
factor, else if it is a tuple of 2 integers, the image will
be rescaled as large as possible within the scale.
Otherwise, it serves as (w, h) of output size.
keep_ratio (bool): If set to True, Images will be resized without
changing the aspect ratio. Otherwise, it will resize images to a
given size. Default: True.
interpolation (str): Algorithm used for interpolation:
"nearest" | "bilinear". Default: "bilinear".
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self,
scale,
keep_ratio=True,
interpolation='bilinear',
lazy=False):
if isinstance(scale, float):
if scale <= 0:
raise ValueError(f'Invalid scale {scale}, must be positive.')
elif isinstance(scale, tuple):
max_long_edge = max(scale)
max_short_edge = min(scale)
if max_short_edge == -1:
# assign np.inf to long edge for rescaling short edge later.
scale = (np.inf, max_long_edge)
else:
raise TypeError(
f'Scale must be float or tuple of int, but got {type(scale)}')
self.scale = scale
self.keep_ratio = keep_ratio
self.interpolation = interpolation
self.lazy = lazy
def __call__(self, results):
"""Performs the Resize augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'scale_factor' not in results:
results['scale_factor'] = np.array([1, 1], dtype=np.float32)
img_h, img_w = results['img_shape']
if self.keep_ratio:
new_w, new_h = mmcv.rescale_size((img_w, img_h), self.scale)
else:
new_w, new_h = self.scale
self.scale_factor = np.array([new_w / img_w, new_h / img_h],
dtype=np.float32)
results['img_shape'] = (new_h, new_w)
results['keep_ratio'] = self.keep_ratio
results['scale_factor'] = results['scale_factor'] * self.scale_factor
if not self.lazy:
results['imgs'] = [
mmcv.imresize(
img, (new_w, new_h), interpolation=self.interpolation)
for img in results['imgs']
]
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
lazyop['interpolation'] = self.interpolation
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'scale={self.scale}, keep_ratio={self.keep_ratio}, '
f'interpolation={self.interpolation}, '
f'lazy={self.lazy})')
return repr_str
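# Illustrative note (values are placeholders): with keep_ratio=True a tuple
# scale acts as an upper bound on the rescaled size, and scale=(-1, 256) is the
# usual way to resize the short edge to 256 while preserving the aspect ratio
# (the -1 entry is mapped to np.inf above); with keep_ratio=False the tuple is
# taken literally as the output (w, h), e.g. scale=(224, 224).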
@PIPELINES.register_module()
class RandomRescale:
"""Randomly resize images so that the short_edge is resized to a specific
    size in a given range. The aspect ratio is unchanged after resizing.
Required keys are "imgs", "img_shape", "modality", added or modified
keys are "imgs", "img_shape", "keep_ratio", "scale_factor", "resize_size",
"short_edge".
Args:
scale_range (tuple[int]): The range of short edge length. A closed
interval.
interpolation (str): Algorithm used for interpolation:
"nearest" | "bilinear". Default: "bilinear".
"""
def __init__(self, scale_range, interpolation='bilinear'):
self.scale_range = scale_range
# make sure scale_range is legal, first make sure the type is OK
assert mmcv.is_tuple_of(scale_range, int)
assert len(scale_range) == 2
assert scale_range[0] < scale_range[1]
assert np.all([x > 0 for x in scale_range])
self.keep_ratio = True
self.interpolation = interpolation
def __call__(self, results):
"""Performs the Resize augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
short_edge = np.random.randint(self.scale_range[0],
self.scale_range[1] + 1)
resize = Resize((-1, short_edge),
keep_ratio=True,
interpolation=self.interpolation,
lazy=False)
results = resize(results)
results['short_edge'] = short_edge
return results
def __repr__(self):
scale_range = self.scale_range
repr_str = (f'{self.__class__.__name__}('
f'scale_range=({scale_range[0]}, {scale_range[1]}), '
f'interpolation={self.interpolation})')
return repr_str
@PIPELINES.register_module()
class Flip:
"""Flip the input images with a probability.
Reverse the order of elements in the given imgs with a specific direction.
The shape of the imgs is preserved, but the elements are reordered.
Required keys are "imgs", "img_shape", "modality", added or modified
keys are "imgs", "lazy" and "flip_direction". Required keys in "lazy" is
None, added or modified key are "flip" and "flip_direction".
Args:
flip_ratio (float): Probability of implementing flip. Default: 0.5.
direction (str): Flip imgs horizontally or vertically. Options are
"horizontal" | "vertical". Default: "horizontal".
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
_directions = ['horizontal', 'vertical']
def __init__(self, flip_ratio=0.5, direction='horizontal', lazy=False):
if direction not in self._directions:
raise ValueError(f'Direction {direction} is not supported. '
f'Currently support ones are {self._directions}')
self.flip_ratio = flip_ratio
self.direction = direction
self.lazy = lazy
def __call__(self, results):
"""Performs the Flip augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
modality = results['modality']
if modality == 'Flow':
assert self.direction == 'horizontal'
flip = np.random.rand() < self.flip_ratio
results['flip'] = flip
results['flip_direction'] = self.direction
if not self.lazy:
if flip:
for i, img in enumerate(results['imgs']):
mmcv.imflip_(img, self.direction)
lt = len(results['imgs'])
for i in range(0, lt, 2):
# flow with even indexes are x_flow, which need to be
# inverted when doing horizontal flip
if modality == 'Flow':
results['imgs'][i] = mmcv.iminvert(results['imgs'][i])
else:
results['imgs'] = list(results['imgs'])
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Use one Flip please')
lazyop['flip'] = flip
lazyop['flip_direction'] = self.direction
return results
def __repr__(self):
repr_str = (
f'{self.__class__.__name__}('
f'flip_ratio={self.flip_ratio}, direction={self.direction}, '
f'lazy={self.lazy})')
return repr_str
@PIPELINES.register_module()
class Normalize:
"""Normalize images with the given mean and std value.
Required keys are "imgs", "img_shape", "modality", added or modified
keys are "imgs" and "img_norm_cfg". If modality is 'Flow', additional
keys "scale_factor" is required
Args:
mean (Sequence[float]): Mean values of different channels.
std (Sequence[float]): Std values of different channels.
to_bgr (bool): Whether to convert channels from RGB to BGR.
Default: False.
adjust_magnitude (bool): Indicate whether to adjust the flow magnitude
on 'scale_factor' when modality is 'Flow'. Default: False.
"""
def __init__(self, mean, std, to_bgr=False, adjust_magnitude=False):
if not isinstance(mean, Sequence):
raise TypeError(
f'Mean must be list, tuple or np.ndarray, but got {type(mean)}'
)
if not isinstance(std, Sequence):
raise TypeError(
f'Std must be list, tuple or np.ndarray, but got {type(std)}')
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_bgr = to_bgr
self.adjust_magnitude = adjust_magnitude
def __call__(self, results):
modality = results['modality']
if modality == 'RGB':
n = len(results['imgs'])
h, w, c = results['imgs'][0].shape
imgs = np.empty((n, h, w, c), dtype=np.float32)
for i, img in enumerate(results['imgs']):
imgs[i] = img
for img in imgs:
mmcv.imnormalize_(img, self.mean, self.std, self.to_bgr)
results['imgs'] = imgs
results['img_norm_cfg'] = dict(
mean=self.mean, std=self.std, to_bgr=self.to_bgr)
return results
if modality == 'Flow':
num_imgs = len(results['imgs'])
assert num_imgs % 2 == 0
assert self.mean.shape[0] == 2
assert self.std.shape[0] == 2
n = num_imgs // 2
h, w = results['imgs'][0].shape
x_flow = np.empty((n, h, w), dtype=np.float32)
y_flow = np.empty((n, h, w), dtype=np.float32)
for i in range(n):
x_flow[i] = results['imgs'][2 * i]
y_flow[i] = results['imgs'][2 * i + 1]
x_flow = (x_flow - self.mean[0]) / self.std[0]
y_flow = (y_flow - self.mean[1]) / self.std[1]
if self.adjust_magnitude:
x_flow = x_flow * results['scale_factor'][0]
y_flow = y_flow * results['scale_factor'][1]
imgs = np.stack([x_flow, y_flow], axis=-1)
results['imgs'] = imgs
args = dict(
mean=self.mean,
std=self.std,
to_bgr=self.to_bgr,
adjust_magnitude=self.adjust_magnitude)
results['img_norm_cfg'] = args
return results
raise NotImplementedError
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'mean={self.mean}, '
f'std={self.std}, '
f'to_bgr={self.to_bgr}, '
f'adjust_magnitude={self.adjust_magnitude})')
return repr_str
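# Illustrative config sketch (the mean/std values are the ImageNet statistics
# commonly used in mmaction configs; they are given here only as an example):
#
# img_norm_cfg = dict(
#     mean=[123.675, 116.28, 103.53],
#     std=[58.395, 57.12, 57.375],
#     to_bgr=False)
# dict(type='Normalize', **img_norm_cfg)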
@PIPELINES.register_module()
class ColorJitter:
"""Randomly distort the brightness, contrast, saturation and hue of images,
and add PCA based noise into images.
Note: The input images should be in RGB channel order.
Code Reference:
https://gluon-cv.mxnet.io/_modules/gluoncv/data/transforms/experimental/image.html
https://mxnet.apache.org/api/python/docs/_modules/mxnet/image/image.html#LightingAug
If specified to apply color space augmentation, it will distort the image
color space by changing brightness, contrast and saturation. Then, it will
add some random distort to the images in different color channels.
Note that the input images should be in original range [0, 255] and in RGB
channel sequence.
Required keys are "imgs", added or modified keys are "imgs", "eig_val",
"eig_vec", "alpha_std" and "color_space_aug".
Args:
color_space_aug (bool): Whether to apply color space augmentations. If
specified, it will change the brightness, contrast, saturation and
hue of images, then add PCA based noise to images. Otherwise, it
will directly add PCA based noise to images. Default: False.
alpha_std (float): Std in the normal Gaussian distribution of alpha.
eig_val (np.ndarray | None): Eigenvalues of [1 x 3] size for RGB
channel jitter. If set to None, it will use the default
eigenvalues. Default: None.
eig_vec (np.ndarray | None): Eigenvectors of [3 x 3] size for RGB
channel jitter. If set to None, it will use the default
eigenvectors. Default: None.
"""
def __init__(self,
color_space_aug=False,
alpha_std=0.1,
eig_val=None,
eig_vec=None):
if eig_val is None:
# note that the data range should be [0, 255]
self.eig_val = np.array([55.46, 4.794, 1.148], dtype=np.float32)
else:
self.eig_val = eig_val
if eig_vec is None:
self.eig_vec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]],
dtype=np.float32)
else:
self.eig_vec = eig_vec
self.alpha_std = alpha_std
self.color_space_aug = color_space_aug
@staticmethod
def brightness(img, delta):
"""Brightness distortion.
Args:
img (np.ndarray): An input image.
delta (float): Delta value to distort brightness.
It ranges from [-32, 32).
Returns:
np.ndarray: A brightness distorted image.
"""
if np.random.rand() > 0.5:
img = img + np.float32(delta)
return img
@staticmethod
def contrast(img, alpha):
"""Contrast distortion.
Args:
img (np.ndarray): An input image.
alpha (float): Alpha value to distort contrast.
It ranges from [0.6, 1.4).
Returns:
np.ndarray: A contrast distorted image.
"""
if np.random.rand() > 0.5:
img = img * np.float32(alpha)
return img
@staticmethod
def saturation(img, alpha):
"""Saturation distortion.
Args:
img (np.ndarray): An input image.
alpha (float): Alpha value to distort the saturation.
It ranges from [0.6, 1.4).
Returns:
np.ndarray: A saturation distorted image.
"""
if np.random.rand() > 0.5:
gray = img * np.array([0.299, 0.587, 0.114], dtype=np.float32)
gray = np.sum(gray, 2, keepdims=True)
gray *= (1.0 - alpha)
img = img * alpha
img = img + gray
return img
@staticmethod
def hue(img, alpha):
"""Hue distortion.
Args:
img (np.ndarray): An input image.
alpha (float): Alpha value to control the degree of rotation
for hue. It ranges from [-18, 18).
Returns:
np.ndarray: A hue distorted image.
"""
if np.random.rand() > 0.5:
u = np.cos(alpha * np.pi)
w = np.sin(alpha * np.pi)
bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]],
dtype=np.float32)
tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],
[0.211, -0.523, 0.311]],
dtype=np.float32)
ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],
[1.0, -1.107, 1.705]],
dtype=np.float32)
t = np.dot(np.dot(ityiq, bt), tyiq).T
t = np.array(t, dtype=np.float32)
img = np.dot(img, t)
return img
def __call__(self, results):
imgs = results['imgs']
out = []
if self.color_space_aug:
bright_delta = np.random.uniform(-32, 32)
contrast_alpha = np.random.uniform(0.6, 1.4)
saturation_alpha = np.random.uniform(0.6, 1.4)
hue_alpha = np.random.uniform(-18, 18)
jitter_coin = np.random.rand()
for img in imgs:
img = self.brightness(img, delta=bright_delta)
if jitter_coin > 0.5:
img = self.contrast(img, alpha=contrast_alpha)
img = self.saturation(img, alpha=saturation_alpha)
img = self.hue(img, alpha=hue_alpha)
else:
img = self.saturation(img, alpha=saturation_alpha)
img = self.hue(img, alpha=hue_alpha)
img = self.contrast(img, alpha=contrast_alpha)
out.append(img)
else:
out = imgs
# Add PCA based noise
alpha = np.random.normal(0, self.alpha_std, size=(3, ))
rgb = np.array(
np.dot(self.eig_vec * alpha, self.eig_val), dtype=np.float32)
rgb = rgb[None, None, ...]
results['imgs'] = [img + rgb for img in out]
results['eig_val'] = self.eig_val
results['eig_vec'] = self.eig_vec
results['alpha_std'] = self.alpha_std
results['color_space_aug'] = self.color_space_aug
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'color_space_aug={self.color_space_aug}, '
f'alpha_std={self.alpha_std}, '
f'eig_val={self.eig_val}, '
f'eig_vec={self.eig_vec})')
return repr_str
@PIPELINES.register_module()
class CenterCrop:
"""Crop the center area from images.
Required keys are "imgs", "img_shape", added or modified keys are "imgs",
"crop_bbox", "lazy" and "img_shape". Required keys in "lazy" is
"crop_bbox", added or modified key is "crop_bbox".
Args:
crop_size (int | tuple[int]): (w, h) of crop size.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self, crop_size, lazy=False):
self.crop_size = _pair(crop_size)
self.lazy = lazy
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError(f'Crop_size must be int or tuple of int, '
f'but got {type(crop_size)}')
def __call__(self, results):
"""Performs the CenterCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
img_h, img_w = results['img_shape']
crop_w, crop_h = self.crop_size
left = (img_w - crop_w) // 2
top = (img_h - crop_h) // 2
right = left + crop_w
bottom = top + crop_h
new_h, new_w = bottom - top, right - left
results['crop_bbox'] = np.array([left, top, right, bottom])
results['img_shape'] = (new_h, new_w)
if not self.lazy:
results['imgs'] = [
img[top:bottom, left:right] for img in results['imgs']
]
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = left * (lazy_right - lazy_left) / img_w
right = right * (lazy_right - lazy_left) / img_w
top = top * (lazy_bottom - lazy_top) / img_h
bottom = bottom * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}(crop_size={self.crop_size}, '
f'lazy={self.lazy})')
return repr_str
@PIPELINES.register_module()
class ThreeCrop:
"""Crop images into three crops.
Crop the images equally into three crops with equal intervals along the
shorter side.
Required keys are "imgs", "img_shape", added or modified keys are "imgs",
"crop_bbox" and "img_shape".
Args:
crop_size(int | tuple[int]): (w, h) of crop size.
"""
def __init__(self, crop_size):
self.crop_size = _pair(crop_size)
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError(f'Crop_size must be int or tuple of int, '
f'but got {type(crop_size)}')
def __call__(self, results):
"""Performs the ThreeCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, False)
imgs = results['imgs']
img_h, img_w = results['imgs'][0].shape[:2]
crop_w, crop_h = self.crop_size
assert crop_h == img_h or crop_w == img_w
if crop_h == img_h:
w_step = (img_w - crop_w) // 2
offsets = [
(0, 0), # left
(2 * w_step, 0), # right
(w_step, 0), # middle
]
elif crop_w == img_w:
h_step = (img_h - crop_h) // 2
offsets = [
(0, 0), # top
(0, 2 * h_step), # down
(0, h_step), # middle
]
cropped = []
crop_bboxes = []
for x_offset, y_offset in offsets:
bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]
crop = [
img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]
for img in imgs
]
cropped.extend(crop)
crop_bboxes.extend([bbox for _ in range(len(imgs))])
crop_bboxes = np.array(crop_bboxes)
results['imgs'] = cropped
results['crop_bbox'] = crop_bboxes
results['img_shape'] = results['imgs'][0].shape[:2]
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}(crop_size={self.crop_size})'
return repr_str
@PIPELINES.register_module()
class TenCrop:
"""Crop the images into 10 crops (corner + center + flip).
Crop the four corners and the center part of the image with the same
given crop_size, and flip it horizontally.
Required keys are "imgs", "img_shape", added or modified keys are "imgs",
"crop_bbox" and "img_shape".
Args:
crop_size(int | tuple[int]): (w, h) of crop size.
"""
def __init__(self, crop_size):
self.crop_size = _pair(crop_size)
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError(f'Crop_size must be int or tuple of int, '
f'but got {type(crop_size)}')
def __call__(self, results):
"""Performs the TenCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, False)
imgs = results['imgs']
img_h, img_w = results['imgs'][0].shape[:2]
crop_w, crop_h = self.crop_size
w_step = (img_w - crop_w) // 4
h_step = (img_h - crop_h) // 4
offsets = [
(0, 0), # upper left
(4 * w_step, 0), # upper right
(0, 4 * h_step), # lower left
(4 * w_step, 4 * h_step), # lower right
(2 * w_step, 2 * h_step), # center
]
img_crops = list()
crop_bboxes = list()
for x_offset, y_offsets in offsets:
crop = [
img[y_offsets:y_offsets + crop_h, x_offset:x_offset + crop_w]
for img in imgs
]
flip_crop = [np.flip(c, axis=1).copy() for c in crop]
bbox = [x_offset, y_offsets, x_offset + crop_w, y_offsets + crop_h]
img_crops.extend(crop)
img_crops.extend(flip_crop)
crop_bboxes.extend([bbox for _ in range(len(imgs) * 2)])
crop_bboxes = np.array(crop_bboxes)
results['imgs'] = img_crops
results['crop_bbox'] = crop_bboxes
results['img_shape'] = results['imgs'][0].shape[:2]
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}(crop_size={self.crop_size})'
return repr_str
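# Illustrative sketch: TenCrop multiplies the number of frames by ten
# (4 corners + center, each with a horizontal flip). The frame and crop sizes
# below are arbitrary assumptions.
def _example_ten_crop():
    fake_results = dict(
        imgs=[np.zeros((256, 340, 3), dtype=np.uint8) for _ in range(4)],
        img_shape=(256, 340))
    out = TenCrop(crop_size=224)(fake_results)
    # 4 input frames -> 40 crops of 224 x 224 (5 positions x 2 flips each)
    assert len(out['imgs']) == 40 and out['img_shape'] == (224, 224)
    return out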
@PIPELINES.register_module()
class MultiGroupCrop:
"""Randomly crop the images into several groups.
Crop the random region with the same given crop_size and bounding box
into several groups.
Required keys are "imgs", added or modified keys are "imgs", "crop_bbox"
and "img_shape".
Args:
crop_size(int | tuple[int]): (w, h) of crop size.
groups(int): Number of groups.
"""
def __init__(self, crop_size, groups):
self.crop_size = _pair(crop_size)
self.groups = groups
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError('Crop size must be int or tuple of int, '
f'but got {type(crop_size)}')
if not isinstance(groups, int):
raise TypeError(f'Groups must be int, but got {type(groups)}.')
if groups <= 0:
raise ValueError('Groups must be positive.')
def __call__(self, results):
"""Performs the MultiGroupCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
imgs = results['imgs']
img_h, img_w = imgs[0].shape[:2]
crop_w, crop_h = self.crop_size
img_crops = []
crop_bboxes = []
for _ in range(self.groups):
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]
crop = [
img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]
for img in imgs
]
img_crops.extend(crop)
crop_bboxes.extend([bbox for _ in range(len(imgs))])
crop_bboxes = np.array(crop_bboxes)
results['imgs'] = img_crops
results['crop_bbox'] = crop_bboxes
results['img_shape'] = results['imgs'][0].shape[:2]
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}'
f'(crop_size={self.crop_size}, '
f'groups={self.groups})')
return repr_str
@PIPELINES.register_module()
class AudioAmplify:
"""Amplify the waveform.
Required keys are "audios", added or modified keys are "audios",
"amplify_ratio".
Args:
ratio (float): The ratio used to amplify the audio waveform.
"""
def __init__(self, ratio):
if isinstance(ratio, float):
self.ratio = ratio
else:
raise TypeError('Amplification ratio should be float.')
def __call__(self, results):
"""Perfrom the audio amplification.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
assert 'audios' in results
results['audios'] *= self.ratio
results['amplify_ratio'] = self.ratio
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}(ratio={self.ratio})'
return repr_str
@PIPELINES.register_module()
class MelSpectrogram:
"""MelSpectrogram. Transfer an audio wave into a melspectogram figure.
Required keys are "audios", "sample_rate", "num_clips", added or modified
keys are "audios".
Args:
window_size (int): The window size in milisecond. Default: 32.
step_size (int): The step size in milisecond. Default: 16.
n_mels (int): Number of mels. Default: 80.
fixed_length (int): The sample length of melspectrogram maybe not
exactly as wished due to different fps, fix the length for batch
collation by truncating or padding. Default: 128.
"""
def __init__(self,
window_size=32,
step_size=16,
n_mels=80,
fixed_length=128):
if all(
isinstance(x, int)
for x in [window_size, step_size, n_mels, fixed_length]):
self.window_size = window_size
self.step_size = step_size
self.n_mels = n_mels
self.fixed_length = fixed_length
else:
raise TypeError('All arguments should be int.')
def __call__(self, results):
"""Perform MelSpectrogram transformation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import librosa
except ImportError:
raise ImportError('Install librosa first.')
signals = results['audios']
sample_rate = results['sample_rate']
n_fft = int(round(sample_rate * self.window_size / 1000))
hop_length = int(round(sample_rate * self.step_size / 1000))
melspectrograms = list()
for clip_idx in range(results['num_clips']):
clip_signal = signals[clip_idx]
mel = librosa.feature.melspectrogram(
y=clip_signal,
sr=sample_rate,
n_fft=n_fft,
hop_length=hop_length,
                n_mels=self.n_mels).T
            # mel has shape (num_frames, n_mels); fix the time axis to
            # `fixed_length` by truncating or edge-padding.
            if mel.shape[0] >= self.fixed_length:
                mel = mel[:self.fixed_length, :]
            else:
                mel = np.pad(
                    mel, ((0, self.fixed_length - mel.shape[0]), (0, 0)),
                    mode='edge')
melspectrograms.append(mel)
results['audios'] = np.array(melspectrograms)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}'
f'(window_size={self.window_size}), '
f'step_size={self.step_size}, '
f'n_mels={self.n_mels}, '
f'fixed_length={self.fixed_length})')
return repr_str
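# Illustrative sketch (requires librosa): two fake 2-second clips sampled at
# 16 kHz are turned into fixed-length mel spectrograms. The signal length and
# sample rate are arbitrary assumptions; the exact number of frames before
# padding depends on librosa's defaults.
def _example_mel_spectrogram():
    fake_results = dict(
        audios=np.random.randn(2, 32000).astype(np.float32),
        sample_rate=16000,
        num_clips=2)
    out = MelSpectrogram(fixed_length=128)(fake_results)
    # each clip becomes a (fixed_length, n_mels) = (128, 80) matrix
    return out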
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/augmentations.py |
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..registry import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
if isinstance(data, np.ndarray):
return torch.from_numpy(data)
if isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
if isinstance(data, int):
return torch.LongTensor([data])
if isinstance(data, float):
return torch.FloatTensor([data])
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor:
"""Convert some values in results dict to `torch.Tensor` type in data
loader pipeline.
Args:
keys (Sequence[str]): Required keys to be converted.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Performs the ToTensor formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class ToDataContainer:
"""Convert the data to DataContainer.
Args:
fields (Sequence[dict]): Required fields to be converted
with keys and attributes. E.g.
fields=(dict(key='gt_bbox', stack=False),).
"""
def __init__(self, fields):
self.fields = fields
def __call__(self, results):
"""Performs the ToDataContainer formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for field in self.fields:
_field = field.copy()
key = _field.pop('key')
results[key] = DC(results[key], **_field)
return results
def __repr__(self):
return self.__class__.__name__ + f'(fields={self.fields})'
@PIPELINES.register_module()
class ImageToTensor:
"""Convert image type to `torch.Tensor` type.
Args:
keys (Sequence[str]): Required keys to be converted.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Performs the ImageToTensor formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = to_tensor(results[key].transpose(2, 0, 1))
return results
def __repr__(self):
return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class Transpose:
"""Transpose image channels to a given order.
Args:
keys (Sequence[str]): Required keys to be converted.
order (Sequence[int]): Image channel order.
"""
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
"""Performs the Transpose formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return (f'{self.__class__.__name__}('
f'keys={self.keys}, order={self.order})')
@PIPELINES.register_module()
class Collect:
"""Collect data from the loader relevant to the specific task.
    This keeps the items in ``keys`` as they are, and collects items in
    ``meta_keys`` into a meta item called ``meta_name``. This is usually
the last stage of the data loader pipeline.
For example, when keys='imgs', meta_keys=('filename', 'label',
'original_shape'), meta_name='img_meta', the results will be a dict with
keys 'imgs' and 'img_meta', where 'img_meta' is a DataContainer of another
dict with keys 'filename', 'label', 'original_shape'.
Args:
keys (Sequence[str]): Required keys to be collected.
        meta_name (str): The name of the key that contains meta information.
            This key is always populated. Default: "img_meta".
        meta_keys (Sequence[str]): Keys that are collected under meta_name.
            The contents of the ``meta_name`` dictionary depend on
            ``meta_keys``.
By default this includes:
- "filename": path to the image file
- "label": label of the image file
- "original_shape": original shape of the image as a tuple
(h, w, c)
- "img_shape": shape of the image input to the network as a tuple
(h, w, c). Note that images may be zero padded on the
bottom/right, if the batch tensor is larger than this shape.
- "pad_shape": image shape after padding
- "flip_direction": a str in ("horiziontal", "vertival") to
indicate if the image is fliped horizontally or vertically.
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
"""
def __init__(self,
keys,
meta_keys=('filename', 'label', 'original_shape', 'img_shape',
'pad_shape', 'flip_direction', 'img_norm_cfg'),
meta_name='img_meta'):
self.keys = keys
self.meta_keys = meta_keys
self.meta_name = meta_name
def __call__(self, results):
"""Performs the Collect formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
data = {}
for key in self.keys:
data[key] = results[key]
if len(self.meta_keys) != 0:
meta = {}
for key in self.meta_keys:
meta[key] = results[key]
data[self.meta_name] = DC(meta, cpu_only=True)
return data
def __repr__(self):
return (f'{self.__class__.__name__}('
f'keys={self.keys}, meta_keys={self.meta_keys})')
@PIPELINES.register_module()
class FormatShape:
"""Format final imgs shape to the given input_format.
Required keys are "imgs", "num_clips" and "clip_len", added or modified
keys are "imgs" and "input_shape".
Args:
input_format (str): Define the final imgs format.
"""
def __init__(self, input_format):
self.input_format = input_format
if self.input_format not in ['NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW']:
raise ValueError(
f'The input format {self.input_format} is invalid.')
def __call__(self, results):
"""Performs the FormatShape formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
imgs = results['imgs']
# [M x H x W x C]
# M = 1 * N_crops * N_clips * L
if self.input_format == 'NCTHW':
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x L x H x W x C
imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4))
# N_crops x N_clips x C x L x H x W
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
# M' x C x L x H x W
# M' = N_crops x N_clips
elif self.input_format == 'NCHW':
imgs = np.transpose(imgs, (0, 3, 1, 2))
# M x C x H x W
elif self.input_format == 'NCHW_Flow':
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x L x H x W x C
imgs = np.transpose(imgs, (0, 1, 2, 5, 3, 4))
# N_crops x N_clips x L x C x H x W
imgs = imgs.reshape((-1, imgs.shape[2] * imgs.shape[3]) +
imgs.shape[4:])
# M' x C' x H x W
# M' = N_crops x N_clips
# C' = L x C
elif self.input_format == 'NPTCHW':
num_proposals = results['num_proposals']
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((num_proposals, num_clips * clip_len) +
imgs.shape[1:])
# P x M x H x W x C
# M = N_clips x L
imgs = np.transpose(imgs, (0, 1, 4, 2, 3))
# P x M x C x H x W
results['imgs'] = imgs
results['input_shape'] = imgs.shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"(input_format='{self.input_format}')"
return repr_str
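# Illustrative sketch: how the 'NCTHW' format regroups a flat stack of frames.
# The fake batch below assumes 1 crop, 2 clips and a clip length of 4; the
# spatial size is arbitrary.
def _example_format_shape_ncthw():
    fake_results = dict(
        imgs=np.random.rand(8, 224, 224, 3).astype(np.float32),
        num_clips=2,
        clip_len=4)
    out = FormatShape('NCTHW')(fake_results)
    # 8 frames -> (num_crops * num_clips, C, clip_len, H, W) = (2, 3, 4, 224, 224)
    assert out['input_shape'] == (2, 3, 4, 224, 224)
    return out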
@PIPELINES.register_module()
class FormatAudioShape:
"""Format final audio shape to the given input_format.
Required keys are "imgs", "num_clips" and "clip_len", added or modified
keys are "imgs" and "input_shape".
Args:
input_format (str): Define the final imgs format.
"""
def __init__(self, input_format):
self.input_format = input_format
if self.input_format not in ['NCTF']:
raise ValueError(
f'The input format {self.input_format} is invalid.')
def __call__(self, results):
"""Performs the FormatShape formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
audios = results['audios']
# clip x sample x freq -> clip x channel x sample x freq
clip, sample, freq = audios.shape
audios = audios.reshape(clip, 1, sample, freq)
results['audios'] = audios
results['input_shape'] = audios.shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"(input_format='{self.input_format}')"
return repr_str
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/formating.py |
from .distributed_sampler import DistributedPowerSampler, DistributedSampler
__all__ = ['DistributedSampler', 'DistributedPowerSampler']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/samplers/__init__.py |
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
"""DistributedSampler inheriting from
``torch.utils.data.DistributedSampler``.
    In lower versions of PyTorch, ``DistributedSampler`` has no ``shuffle``
    argument. This child class adds one.
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
class DistributedPowerSampler(_DistributedSampler):
"""DistributedPowerSampler inheriting from
``torch.utils.data.DistributedSampler``.
    Samples are drawn with probability proportional to the power of the label
    frequency (freq ** power). The sampler only applies to single-label
    recognition datasets.
The default value of power is 1, which is equivalent to bootstrap sampling
from the entire dataset.
"""
def __init__(self, dataset, num_replicas=None, rank=None, power=1):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.power = power
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
video_infos_by_class = self.dataset.video_infos_by_class
num_classes = self.dataset.num_classes
# For simplicity, discontinuous labels are not permitted
assert set(video_infos_by_class) == set(range(num_classes))
counts = [len(video_infos_by_class[i]) for i in range(num_classes)]
counts = [cnt**self.power for cnt in counts]
indices = torch.multinomial(
torch.Tensor(counts),
self.total_size,
replacement=True,
generator=g)
indices = indices.data.numpy().tolist()
assert len(indices) == self.total_size
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
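# Illustrative sketch: drawing with the power-law sampler on a tiny stand-in
# dataset. ``rank`` and ``num_replicas`` are passed explicitly so no distributed
# process group is required; the class counts below are arbitrary assumptions.
def _example_power_sampler():
    class _FakeDataset:
        num_classes = 2
        video_infos_by_class = {0: list(range(6)), 1: list(range(2))}

        def __len__(self):
            return 8

    sampler = DistributedPowerSampler(
        _FakeDataset(), num_replicas=2, rank=0, power=0.5)
    # 4 draws for this replica; class 0 is drawn more often, but less so than
    # with frequency-proportional sampling because power < 1 flattens the counts.
    return list(sampler)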
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/datasets/samplers/distributed_sampler.py |
import ctypes
import random
import string
def get_random_string(length=15):
"""Get random string with letters and digits.
Args:
length (int): Length of random string. Default: 15.
"""
return ''.join(
random.choice(string.ascii_letters + string.digits)
for _ in range(length))
def get_thread_id():
"""Get current thread id."""
        # use ctypes to issue the gettid syscall (number 186 on x86-64 Linux)
thread_id = ctypes.CDLL('libc.so.6').syscall(186)
return thread_id
def get_shm_dir():
"""Get shm dir for temporary usage."""
return '/dev/shm'
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/utils/misc.py |
from mmcv.utils import collect_env as collect_basic_env
from mmcv.utils import get_git_hash
import mmaction
def collect_env():
env_info = collect_basic_env()
env_info['MMAction2'] = (
mmaction.__version__ + '+' + get_git_hash(digits=7))
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/utils/collect_env.py |
from .collect_env import collect_env
from .gradcam_utils import GradCAM
from .logger import get_root_logger
from .misc import get_random_string, get_shm_dir, get_thread_id
__all__ = [
'get_root_logger', 'collect_env', 'get_random_string', 'get_thread_id',
'get_shm_dir', 'GradCAM'
]
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/utils/__init__.py |
import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Use ``get_logger`` method in mmcv to get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If ``log_file`` is specified, a FileHandler
will also be added. The name of the root logger is the top-level package
name, e.g., "mmaction".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
:obj:`logging.Logger`: The root logger.
"""
return get_logger(__name__.split('.')[0], log_file, log_level)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/utils/logger.py |
import torch
import torch.nn.functional as F
class GradCAM:
"""GradCAM class helps create visualization results.
Visualization results are blended by heatmaps and input images.
This class is modified from
https://github.com/facebookresearch/SlowFast/blob/master/slowfast/visualization/gradcam_utils.py # noqa
For more information about GradCAM, please visit:
https://arxiv.org/pdf/1610.02391.pdf
"""
def __init__(self, model, target_layer_name, colormap='viridis'):
"""Create GradCAM class with recognizer, target layername & colormap.
Args:
model (nn.Module): the recognizer model to be used.
target_layer_name (str): name of convolutional layer to
be used to get gradients and feature maps from for creating
localization maps.
colormap (Optional[str]): matplotlib colormap used to create
heatmap. Default: 'viridis'. For more information, please visit
https://matplotlib.org/3.3.0/tutorials/colors/colormaps.html
"""
from ..models.recognizers import Recognizer2D, Recognizer3D
if isinstance(model, Recognizer2D):
self.is_recognizer2d = True
elif isinstance(model, Recognizer3D):
self.is_recognizer2d = False
else:
raise ValueError(
'GradCAM utils only support Recognizer2D & Recognizer3D.')
self.model = model
self.model.eval()
self.target_gradients = None
self.target_activations = None
import matplotlib.pyplot as plt
self.colormap = plt.get_cmap(colormap)
self.data_mean = torch.tensor(model.cfg.img_norm_cfg['mean'])
self.data_std = torch.tensor(model.cfg.img_norm_cfg['std'])
self._register_hooks(target_layer_name)
def _register_hooks(self, layer_name):
"""Register forward and backward hook to a layer, given layer_name, to
obtain gradients and activations.
Args:
layer_name (str): name of the layer.
"""
def get_gradients(module, grad_input, grad_output):
self.target_gradients = grad_output[0].detach()
def get_activations(module, input, output):
self.target_activations = output.clone().detach()
layer_ls = layer_name.split('/')
prev_module = self.model
for layer in layer_ls:
prev_module = prev_module._modules[layer]
target_layer = prev_module
target_layer.register_forward_hook(get_activations)
target_layer.register_backward_hook(get_gradients)
def _calculate_localization_map(self, inputs, use_labels, delta=1e-20):
"""Calculate localization map for all inputs with Grad-CAM.
Args:
inputs (dict): model inputs, generated by test pipeline,
at least including two keys, ``imgs`` and ``label``.
use_labels (bool): Whether to use given labels to generate
localization map. Labels are in ``inputs['label']``.
delta (float): used in localization map normalization,
must be small enough. Please make sure
`localization_map_max - localization_map_min >> delta`
Returns:
tuple[torch.Tensor, torch.Tensor]: (localization_map, preds)
localization_map (torch.Tensor): the localization map for
input imgs.
preds (torch.Tensor): Model predictions for `inputs` with
shape (batch_size, num_classes).
"""
inputs['imgs'] = inputs['imgs'].clone()
# model forward & backward
preds = self.model(gradcam=True, **inputs)
if use_labels:
labels = inputs['label']
if labels.ndim == 1:
labels = labels.unsqueeze(-1)
score = torch.gather(preds, dim=1, index=labels)
else:
score = torch.max(preds, dim=-1)[0]
self.model.zero_grad()
score = torch.sum(score)
score.backward()
if self.is_recognizer2d:
# [batch_size, num_segments, 3, H, W]
b, t, _, h, w = inputs['imgs'].size()
else:
# [batch_size, num_crops*num_clips, 3, clip_len, H, W]
b1, b2, _, t, h, w = inputs['imgs'].size()
b = b1 * b2
gradients = self.target_gradients
activations = self.target_activations
if self.is_recognizer2d:
# [B*Tg, C', H', W']
b_tg, c, _, _ = gradients.size()
tg = b_tg // b
else:
# source shape: [B, C', Tg, H', W']
_, c, tg, _, _ = gradients.size()
# target shape: [B, Tg, C', H', W']
gradients = gradients.permute(0, 2, 1, 3, 4)
activations = activations.permute(0, 2, 1, 3, 4)
# calculate & resize to [B, 1, T, H, W]
weights = torch.mean(gradients.view(b, tg, c, -1), dim=3)
weights = weights.view(b, tg, c, 1, 1)
activations = activations.view([b, tg, c] +
list(activations.size()[-2:]))
localization_map = torch.sum(
weights * activations, dim=2, keepdim=True)
localization_map = F.relu(localization_map)
localization_map = localization_map.permute(0, 2, 1, 3, 4)
localization_map = F.interpolate(
localization_map,
size=(t, h, w),
mode='trilinear',
align_corners=False)
# Normalize the localization map.
localization_map_min, localization_map_max = (
torch.min(localization_map.view(b, -1), dim=-1, keepdim=True)[0],
torch.max(localization_map.view(b, -1), dim=-1, keepdim=True)[0])
localization_map_min = torch.reshape(
localization_map_min, shape=(b, 1, 1, 1, 1))
localization_map_max = torch.reshape(
localization_map_max, shape=(b, 1, 1, 1, 1))
localization_map = (localization_map - localization_map_min) / (
localization_map_max - localization_map_min + delta)
localization_map = localization_map.data
return localization_map.squeeze(dim=1), preds
def _alpha_blending(self, localization_map, input_imgs, alpha):
"""Blend heatmaps and model input images and get visulization results.
Args:
localization_map (torch.Tensor): localization map for all inputs,
generated with Grad-CAM
input_imgs (torch.Tensor): model inputs, normed images.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
torch.Tensor: blending results for localization map and input
images, with shape [B, T, H, W, 3] and pixel values in
RGB order within range [0, 1].
"""
# localization_map shape [B, T, H, W]
localization_map = localization_map.cpu()
# heatmap shape [B, T, H, W, 3] in RGB order
heatmap = self.colormap(localization_map.detach().numpy())
heatmap = heatmap[:, :, :, :, :3]
heatmap = torch.from_numpy(heatmap)
# Permute input imgs to [B, T, H, W, 3], like heatmap
if self.is_recognizer2d:
# Recognizer2D input (B, T, C, H, W)
curr_inp = input_imgs.permute(0, 1, 3, 4, 2)
else:
# Recognizer3D input (B', num_clips*num_crops, C, T, H, W)
# B = B' * num_clips * num_crops
curr_inp = input_imgs.view([-1] + list(input_imgs.size()[2:]))
curr_inp = curr_inp.permute(0, 2, 3, 4, 1)
# renormalize input imgs to [0, 1]
curr_inp = curr_inp.cpu()
curr_inp *= self.data_std
curr_inp += self.data_mean
curr_inp /= 255.
# alpha blending
blended_imgs = alpha * heatmap + (1 - alpha) * curr_inp
return blended_imgs
def __call__(self, inputs, use_labels=False, alpha=0.5):
"""Visualize the localization maps on their corresponding inputs as
heatmap, using Grad-CAM.
Generate visualization results for **ALL CROPS**.
For example, for I3D model, if `clip_len=32, num_clips=10` and
use `ThreeCrop` in test pipeline, then for every model inputs,
there are 960(32*10*3) images generated.
Args:
inputs (dict): model inputs, generated by test pipeline,
at least including two keys, ``imgs`` and ``label``.
use_labels (bool): Whether to use given labels to generate
localization map. Labels are in ``inputs['label']``.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
blended_imgs (torch.Tensor): Visualization results, blended by
localization maps and model inputs.
preds (torch.Tensor): Model predictions for inputs.
"""
# localization_map shape [B, T, H, W]
# preds shape [batch_size, num_classes]
localization_map, preds = self._calculate_localization_map(
inputs, use_labels=use_labels)
# blended_imgs shape [B, T, H, W, 3]
blended_imgs = self._alpha_blending(localization_map, inputs['imgs'],
alpha)
# blended_imgs shape [B, T, H, W, 3]
# preds shape [batch_size, num_classes]
# Recognizer2D: B = batch_size, T = num_segments
# Recognizer3D: B = batch_size * num_crops * num_clips, T = clip_len
return blended_imgs, preds
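# Illustrative usage sketch. ``model`` is assumed to be an already built
# Recognizer2D/3D whose ``model.cfg`` carries ``img_norm_cfg``; ``inputs`` is a
# dict produced by the test pipeline with at least 'imgs' and 'label'. The
# target layer name is only an example and depends on the backbone used.
def _example_gradcam(model, inputs):
    gradcam = GradCAM(model, target_layer_name='backbone/layer4/1/relu')
    blended_imgs, preds = gradcam(inputs, use_labels=True, alpha=0.5)
    # blended_imgs: [B, T, H, W, 3] heatmap overlays in [0, 1]; preds: class scores
    return blended_imgs, preds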
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/utils/gradcam_utils.py |
from mmcv.utils import Registry
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
HEADS = Registry('head')
RECOGNIZERS = Registry('recognizer')
LOSSES = Registry('loss')
LOCALIZERS = Registry('localizer')
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/registry.py |
from .backbones import (C3D, X3D, ResNet, ResNet2Plus1d, ResNet3d, ResNet3dCSN,
ResNet3dSlowFast, ResNet3dSlowOnly, ResNetAudio,
ResNetTIN, ResNetTSM)
from .builder import (build_backbone, build_head, build_localizer, build_loss,
build_model, build_neck, build_recognizer)
from .common import Conv2plus1d, ConvAudio
from .heads import (AudioTSNHead, BaseHead, I3DHead, SlowFastHead, TPNHead,
TSMHead, TSNHead, X3DHead)
from .localizers import BMN, PEM, TEM
from .losses import (BCELossWithLogits, BinaryLogisticRegressionLoss, BMNLoss,
CrossEntropyLoss, HVULoss, NLLLoss, OHEMHingeLoss,
SSNLoss)
from .necks import TPN
from .recognizers import (AudioRecognizer, BaseRecognizer, recognizer2d,
recognizer3d)
from .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, RECOGNIZERS
__all__ = [
'BACKBONES', 'HEADS', 'RECOGNIZERS', 'build_recognizer', 'build_head',
'build_backbone', 'recognizer2d', 'recognizer3d', 'C3D', 'ResNet',
'ResNet3d', 'ResNet2Plus1d', 'I3DHead', 'TSNHead', 'TSMHead', 'BaseHead',
'BaseRecognizer', 'LOSSES', 'CrossEntropyLoss', 'NLLLoss', 'HVULoss',
'ResNetTSM', 'ResNet3dSlowFast', 'SlowFastHead', 'Conv2plus1d',
'ResNet3dSlowOnly', 'BCELossWithLogits', 'LOCALIZERS', 'build_localizer',
'PEM', 'TEM', 'BinaryLogisticRegressionLoss', 'BMN', 'BMNLoss',
'build_model', 'OHEMHingeLoss', 'SSNLoss', 'ResNet3dCSN', 'ResNetTIN',
'TPN', 'TPNHead', 'build_loss', 'build_neck', 'AudioRecognizer',
'AudioTSNHead', 'X3D', 'X3DHead', 'ResNetAudio', 'ConvAudio'
]
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/__init__.py |
import torch.nn as nn
from mmcv.utils import build_from_cfg
from .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, NECKS, RECOGNIZERS
def build(cfg, registry, default_args=None):
"""Build a module.
Args:
cfg (dict, list[dict]): The config of modules, it is either a dict
or a list of configs.
registry (:obj:`Registry`): A registry the module belongs to.
default_args (dict, optional): Default arguments to build the module.
Defaults to None.
Returns:
nn.Module: A built nn module.
"""
if isinstance(cfg, list):
modules = [
build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
]
return nn.Sequential(*modules)
return build_from_cfg(cfg, registry, default_args)
def build_backbone(cfg):
"""Build backbone."""
return build(cfg, BACKBONES)
def build_head(cfg):
"""Build head."""
return build(cfg, HEADS)
def build_recognizer(cfg, train_cfg=None, test_cfg=None):
"""Build recognizer."""
return build(cfg, RECOGNIZERS,
dict(train_cfg=train_cfg, test_cfg=test_cfg))
def build_loss(cfg):
"""Build loss."""
return build(cfg, LOSSES)
def build_localizer(cfg):
"""Build localizer."""
return build(cfg, LOCALIZERS)
def build_model(cfg, train_cfg=None, test_cfg=None):
"""Build model."""
args = cfg.copy()
obj_type = args.pop('type')
if obj_type in LOCALIZERS:
return build_localizer(cfg)
if obj_type in RECOGNIZERS:
return build_recognizer(cfg, train_cfg, test_cfg)
raise ValueError(f'{obj_type} is not registered in '
'LOCALIZERS or RECOGNIZERS')
def build_neck(cfg):
"""Build neck."""
return build(cfg, NECKS)
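# Illustrative sketch: building a 2D recognizer from a plain config dict. The
# backbone / head arguments are minimal assumptions and may need adjusting to
# match a real config file in this repo.
def _example_build_recognizer():
    cfg = dict(
        type='Recognizer2D',
        backbone=dict(type='ResNet', depth=50, pretrained=None),
        cls_head=dict(type='TSNHead', num_classes=101, in_channels=2048))
    return build_model(cfg, train_cfg=None, test_cfg=dict(average_clips='prob'))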
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/builder.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
from ...core import top_k_accuracy
@LOSSES.register_module()
class BayesianNNLoss(nn.Module):
"""Bayesian NN Loss."""
def forward(self, cls_score, labels, output_dict, beta=1.0, **kwargs):
"""Forward function.
Args:
            cls_score (torch.Tensor): The class score.
            labels (torch.Tensor): The ground truth labels.
            output_dict (dict): Output of the Bayesian head, containing the
                log prior ('log_prior') and the log variational posterior
                ('log_posterior').
            beta (float): Weight of the complexity term. Default: 1.0.
            kwargs: Any keyword argument to be used to calculate
                Bayesian NN loss.
Returns:
torch.Tensor: The returned Bayesian NN loss.
"""
losses = dict()
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
# negative log-likelihood (BCE loss)
loss_cls = F.cross_entropy(cls_score, labels, **kwargs)
# parse the output
log_prior = output_dict['log_prior']
log_posterior = output_dict['log_posterior']
# complexity regularizer
loss_complexity = beta * (log_posterior - log_prior)
# total loss
loss = loss_cls + loss_complexity
# accuracy metrics
top_k_acc = top_k_accuracy(cls_score.detach().cpu().numpy(),
labels.detach().cpu().numpy(), (1, 5))
losses = {'loss_cls': loss_cls, 'loss_complexity': loss_complexity, # items to be backwarded
'LOSS_total': loss, # items for monitoring
'log_posterior': beta * log_posterior,
'log_prior': beta * log_prior,
'top1_acc': torch.tensor(top_k_acc[0], device=cls_score.device),
'top5_acc': torch.tensor(top_k_acc[1], device=cls_score.device)
}
if 'aleatoric' in output_dict: losses.update({'aleatoric': output_dict['aleatoric']})
if 'epistemic' in output_dict: losses.update({'epistemic': output_dict['epistemic']})
return losses
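# Illustrative sketch: the logits, labels and prior/posterior values below are
# random placeholders that only show the expected input structure of the loss
# (in training they come from the Bayesian classification head).
def _example_bayesian_nn_loss():
    loss_fn = BayesianNNLoss()
    cls_score = torch.randn(4, 10)
    labels = torch.randint(0, 10, (4,))
    output_dict = dict(
        log_prior=torch.tensor(-1.0),
        log_posterior=torch.tensor(-0.5))
    losses = loss_fn(cls_score, labels, output_dict, beta=1e-3)
    # 'loss_cls' and 'loss_complexity' are the backpropagated terms; the rest
    # of the returned dict entries are monitoring metrics.
    return losses['loss_cls'] + losses['loss_complexity']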
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/bnn_loss.py |
import torch
import torch.nn.functional as F
from ..registry import LOSSES
from .base import BaseWeightedLoss
def relu_evidence(y):
return F.relu(y)
def exp_evidence(y):
return torch.exp(torch.clamp(y, -10, 10))
def softplus_evidence(y):
return F.softplus(y)
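# Illustrative sketch: how the evidence helpers above translate raw logits into
# Dirichlet parameters and a per-sample uncertainty, mirroring the math used in
# `EvidenceLoss._forward` below. The batch size and class count are arbitrary.
def _example_logits_to_uncertainty(num_classes=101):
    logits = torch.randn(4, num_classes)
    evidence = exp_evidence(logits)        # e >= 0
    alpha = evidence + 1                   # Dirichlet parameters, alpha >= 1
    strength = alpha.sum(dim=1, keepdim=True)
    probs = alpha / strength               # expected class probabilities
    uncertainty = num_classes / strength   # u = K / S, in (0, 1]
    return probs, uncertainty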
@LOSSES.register_module()
class EvidenceLoss(BaseWeightedLoss):
"""Evidential MSE Loss."""
def __init__(self, num_classes,
evidence='relu',
loss_type='mse',
with_kldiv=True,
with_avuloss=False,
disentangle=False,
annealing_method='step',
annealing_start=0.01,
annealing_step=10):
super().__init__()
self.num_classes = num_classes
self.evidence = evidence
self.loss_type = loss_type
self.with_kldiv = with_kldiv
self.with_avuloss = with_avuloss
self.disentangle = disentangle
self.annealing_method = annealing_method
self.annealing_start = annealing_start
self.annealing_step = annealing_step
self.eps = 1e-10
def kl_divergence(self, alpha):
beta = torch.ones([1, self.num_classes], dtype=torch.float32).to(alpha.device)
S_alpha = torch.sum(alpha, dim=1, keepdim=True)
S_beta = torch.sum(beta, dim=1, keepdim=True)
lnB = torch.lgamma(S_alpha) - \
torch.sum(torch.lgamma(alpha), dim=1, keepdim=True)
lnB_uni = torch.sum(torch.lgamma(beta), dim=1,
keepdim=True) - torch.lgamma(S_beta)
dg0 = torch.digamma(S_alpha)
dg1 = torch.digamma(alpha)
kl = torch.sum((alpha - beta) * (dg1 - dg0), dim=1,
keepdim=True) + lnB + lnB_uni
return kl
def loglikelihood_loss(self, y, alpha):
S = torch.sum(alpha, dim=1, keepdim=True)
loglikelihood_err = torch.sum(
(y - (alpha / S)) ** 2, dim=1, keepdim=True)
loglikelihood_var = torch.sum(
alpha * (S - alpha) / (S * S * (S + 1)), dim=1, keepdim=True)
return loglikelihood_err, loglikelihood_var
def mse_loss(self, y, alpha, annealing_coef):
"""Used only for loss_type == 'mse'
y: the one-hot labels (batchsize, num_classes)
alpha: the predictions (batchsize, num_classes)
epoch_num: the current training epoch
"""
losses = {}
loglikelihood_err, loglikelihood_var = self.loglikelihood_loss(y, alpha)
losses.update({'loss_cls': loglikelihood_err, 'loss_var': loglikelihood_var})
losses.update({'lambda': annealing_coef})
if self.with_kldiv:
kl_alpha = (alpha - 1) * (1 - y) + 1
kl_div = annealing_coef * \
self.kl_divergence(kl_alpha)
losses.update({'loss_kl': kl_div})
if self.with_avuloss:
S = torch.sum(alpha, dim=1, keepdim=True) # Dirichlet strength
pred_score = alpha / S
uncertainty = self.num_classes / S
            # the accuracy-vs-uncertainty (AvU) term is left unimplemented for
            # the MSE variant; see `edl_loss` below for the implemented version.
return losses
def ce_loss(self, target, y, alpha, annealing_coef):
"""Used only for loss_type == 'ce'
target: the scalar labels (batchsize,)
alpha: the predictions (batchsize, num_classes), alpha >= 1
epoch_num: the current training epoch
"""
losses = {}
# (1) the classification loss term
S = torch.sum(alpha, dim=1, keepdim=True)
pred_score = alpha / S
loss_cls = F.nll_loss(torch.log(pred_score), target, reduction='none')
losses.update({'loss_cls': loss_cls})
# (2) the likelihood variance term
loglikelihood_var = torch.sum(
alpha * (S - alpha) / (S * S * (S + 1)), dim=1, keepdim=True)
losses.update({'loss_var': loglikelihood_var})
# (3) the KL divergence term
kl_alpha = (alpha - 1) * (1 - y) + 1
kl_div = annealing_coef * \
self.kl_divergence(kl_alpha)
losses.update({'loss_kl': kl_div, 'lambda': annealing_coef})
return losses
def edl_loss(self, func, y, alpha, annealing_coef, target):
"""Used for both loss_type == 'log' and loss_type == 'digamma'
func: function handler (torch.log, or torch.digamma)
y: the one-hot labels (batchsize, num_classes)
alpha: the predictions (batchsize, num_classes)
epoch_num: the current training epoch
"""
losses = {}
S = torch.sum(alpha, dim=1, keepdim=True)
A = torch.sum(y * (func(S) - func(alpha)), dim=1, keepdim=True)
losses.update({'loss_cls': A})
losses.update({'lambda': annealing_coef})
if self.with_kldiv:
kl_alpha = (alpha - 1) * (1 - y) + 1
kl_div = annealing_coef * \
self.kl_divergence(kl_alpha)
losses.update({'loss_kl': kl_div})
if self.with_avuloss:
pred_scores, pred_cls = torch.max(alpha / S, 1, keepdim=True)
uncertainty = self.num_classes / S
acc_match = torch.reshape(torch.eq(pred_cls, target.unsqueeze(1)).float(), (-1, 1))
if self.disentangle:
acc_uncertain = - torch.log(pred_scores * (1 - uncertainty) + self.eps)
inacc_certain = - torch.log((1 - pred_scores) * uncertainty + self.eps)
else:
acc_uncertain = - pred_scores * torch.log(1 - uncertainty + self.eps)
inacc_certain = - (1 - pred_scores) * torch.log(uncertainty + self.eps)
avu_loss = annealing_coef * acc_match * acc_uncertain + (1 - annealing_coef) * (1 - acc_match) * inacc_certain
losses.update({'loss_avu': avu_loss})
return losses
def compute_annealing_coef(self, **kwargs):
assert 'epoch' in kwargs, "epoch number is missing!"
assert 'total_epoch' in kwargs, "total epoch number is missing!"
epoch_num, total_epoch = kwargs['epoch'], kwargs['total_epoch']
# annealing coefficient
if self.annealing_method == 'step':
annealing_coef = torch.min(torch.tensor(
1.0, dtype=torch.float32), torch.tensor(epoch_num / self.annealing_step, dtype=torch.float32))
elif self.annealing_method == 'exp':
annealing_start = torch.tensor(self.annealing_start, dtype=torch.float32)
annealing_coef = annealing_start * torch.exp(-torch.log(annealing_start) / total_epoch * epoch_num)
else:
raise NotImplementedError
return annealing_coef
def _forward(self, output, target, **kwargs):
"""Forward function.
Args:
output (torch.Tensor): The class score (before softmax).
target (torch.Tensor): The ground truth label.
epoch_num: The number of epochs during training.
Returns:
torch.Tensor: The returned EvidenceLoss loss.
"""
# get evidence
if self.evidence == 'relu':
evidence = relu_evidence(output)
elif self.evidence == 'exp':
evidence = exp_evidence(output)
elif self.evidence == 'softplus':
evidence = softplus_evidence(output)
else:
raise NotImplementedError
alpha = evidence + 1
# one-hot embedding for the target
y = torch.eye(self.num_classes).to(output.device)
y = y[target]
# compute annealing coefficient
annealing_coef = self.compute_annealing_coef(**kwargs)
# compute the EDL loss
if self.loss_type == 'mse':
results = self.mse_loss(y, alpha, annealing_coef)
elif self.loss_type == 'log':
results = self.edl_loss(torch.log, y, alpha, annealing_coef, target)
elif self.loss_type == 'digamma':
results = self.edl_loss(torch.digamma, y, alpha, annealing_coef, target)
elif self.loss_type == 'cross_entropy':
results = self.ce_loss(target, y, alpha, annealing_coef)
else:
raise NotImplementedError
# compute uncertainty and evidence
_, preds = torch.max(output, 1)
match = torch.reshape(torch.eq(preds, target).float(), (-1, 1))
uncertainty = self.num_classes / torch.sum(alpha, dim=1, keepdim=True)
total_evidence = torch.sum(evidence, 1, keepdim=True)
evidence_succ = torch.sum(total_evidence * match) / torch.sum(match + 1e-20)
evidence_fail = torch.sum(total_evidence * (1 - match)) / (torch.sum(torch.abs(1 - match)) + 1e-20)
results.update({'uncertainty': uncertainty,
'evidence_succ': evidence_succ,
'evidence_fail': evidence_fail})
return results | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/edl_loss.py |
import torch
import torch.nn.functional as F
import numpy as np
from ..registry import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class RebiasLoss(BaseWeightedLoss):
"""Rebias Loss."""
def __init__(self, lambda_g=1.0, criteria='hsic'):
super().__init__()
self.lambda_g = lambda_g
self.criteria = criteria
def _kernel(self, X, sigma):
X = X.view(len(X), -1)
XX = X @ X.t()
X_sqnorms = torch.diag(XX)
X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0)
gamma = 1 / (2 * sigma ** 2)
kernel_XX = torch.exp(-gamma * X_L2)
return kernel_XX
def hsic_loss(self, input1, input2, unbiased=False):
N = len(input1)
if N < 4:
return torch.tensor(0.0).to(input1.device)
        # we simply use the square root of the feature dimension as the sigma for the RBF kernel
sigma_x = np.sqrt(input1.size()[1])
sigma_y = np.sqrt(input2.size()[1])
# compute the kernels
kernel_XX = self._kernel(input1, sigma_x)
kernel_YY = self._kernel(input2, sigma_y)
if unbiased:
"""Unbiased estimator of Hilbert-Schmidt Independence Criterion
Song, Le, et al. "Feature selection via dependence maximization." 2012.
"""
tK = kernel_XX - torch.diag(kernel_XX)
tL = kernel_YY - torch.diag(kernel_YY)
hsic = (
torch.trace(tK @ tL)
+ (torch.sum(tK) * torch.sum(tL) / (N - 1) / (N - 2))
- (2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2))
)
loss = hsic / (N * (N - 3))
else:
"""Biased estimator of Hilbert-Schmidt Independence Criterion
Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
"""
KH = kernel_XX - kernel_XX.mean(0, keepdim=True)
LH = kernel_YY - kernel_YY.mean(0, keepdim=True)
loss = torch.trace(KH @ LH / (N - 1) ** 2)
return loss
def cosine_loss(self, input1, input2):
# normalize the inputs with L2-norm
norm1 = F.normalize(input1, dim=1, p=2)
norm2 = F.normalize(input2, dim=1, p=2)
# cosine distance
cos_batch = torch.bmm(norm1.unsqueeze(1), norm2.unsqueeze(2)).squeeze(-1).squeeze(-1)
loss = torch.mean(torch.abs(cos_batch))
return loss
def _forward(self, x, xs, y, ys, label, **kwargs):
"""Forward function.
Returns:
torch.Tensor: The returned Rebias loss.
"""
# L(f)
loss_f = F.cross_entropy(y, label, **kwargs)
# L(g)
loss_g = self.lambda_g * F.cross_entropy(ys, label, **kwargs)
losses = {'loss_f': loss_f, 'loss_g': loss_g}
# all losses
if self.criteria == 'hsic':
# negative HSIC loss
loss_hsic = - self.hsic_loss(x, xs) # small returned value means high dependency
losses.update({'loss_hsic': loss_hsic})
elif self.criteria == 'cosine':
loss_cos = self.cosine_loss(x, xs) # large returned value means high dependency
losses.update({'loss_cos': loss_cos})
else:
raise NotImplementedError
return losses | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/rebias_loss.py |
from .base import BaseWeightedLoss
from .binary_logistic_regression_loss import BinaryLogisticRegressionLoss
from .bmn_loss import BMNLoss
from .cross_entropy_loss import BCELossWithLogits, CrossEntropyLoss
from .bnn_loss import BayesianNNLoss
from .edl_loss import EvidenceLoss
from .hvu_loss import HVULoss
from .nll_loss import NLLLoss
from .ohem_hinge_loss import OHEMHingeLoss
from .ssn_loss import SSNLoss
from .rebias_loss import RebiasLoss
from .rpl_loss import RPLoss
from .gcp_loss import GCPLoss
__all__ = [
'BaseWeightedLoss', 'CrossEntropyLoss', 'NLLLoss', 'BCELossWithLogits',
'BinaryLogisticRegressionLoss', 'BMNLoss', 'OHEMHingeLoss', 'SSNLoss',
'HVULoss', "BayesianNNLoss", "EvidenceLoss", "RebiasLoss", 'RPLoss', 'GCPLoss'
]
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/__init__.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
from .ohem_hinge_loss import OHEMHingeLoss
@LOSSES.register_module()
class SSNLoss(nn.Module):
@staticmethod
def activity_loss(activity_score, labels, activity_indexer):
"""Activity Loss.
It will calculate activity loss given activity_score and label.
Args:
activity_score (torch.Tensor): Predicted activity score.
labels (torch.Tensor): Groundtruth class label.
activity_indexer (torch.Tensor): Index slices of proposals.
Returns:
torch.Tensor: Returned cross entropy loss.
"""
pred = activity_score[activity_indexer, :]
gt = labels[activity_indexer]
return F.cross_entropy(pred, gt)
@staticmethod
def completeness_loss(completeness_score,
labels,
completeness_indexer,
positive_per_video,
incomplete_per_video,
ohem_ratio=0.17):
"""Completeness Loss.
It will calculate completeness loss given completeness_score and label.
Args:
completeness_score (torch.Tensor): Predicted completeness score.
labels (torch.Tensor): Groundtruth class label.
completeness_indexer (torch.Tensor): Index slices of positive and
incomplete proposals.
positive_per_video (int): Number of positive proposals sampled
per video.
incomplete_per_video (int): Number of incomplete proposals sampled
                per video.
ohem_ratio (float): Ratio of online hard example mining.
Default: 0.17.
Returns:
torch.Tensor: Returned class-wise completeness loss.
"""
pred = completeness_score[completeness_indexer, :]
gt = labels[completeness_indexer]
pred_dim = pred.size(1)
pred = pred.view(-1, positive_per_video + incomplete_per_video,
pred_dim)
gt = gt.view(-1, positive_per_video + incomplete_per_video)
# yapf:disable
positive_pred = pred[:, :positive_per_video, :].contiguous().view(-1, pred_dim) # noqa:E501
incomplete_pred = pred[:, positive_per_video:, :].contiguous().view(-1, pred_dim) # noqa:E501
# yapf:enable
positive_loss = OHEMHingeLoss.apply(
positive_pred, gt[:, :positive_per_video].contiguous().view(-1), 1,
1.0, positive_per_video)
incomplete_loss = OHEMHingeLoss.apply(
incomplete_pred, gt[:, positive_per_video:].contiguous().view(-1),
-1, ohem_ratio, incomplete_per_video)
num_positives = positive_pred.size(0)
num_incompletes = int(incomplete_pred.size(0) * ohem_ratio)
return ((positive_loss + incomplete_loss) /
float(num_positives + num_incompletes))
@staticmethod
def classwise_regression_loss(bbox_pred, labels, bbox_targets,
regression_indexer):
"""Classwise Regression Loss.
It will calculate classwise_regression loss given
class_reg_pred and targets.
Args:
bbox_pred (torch.Tensor): Predicted interval center and span
of positive proposals.
labels (torch.Tensor): Groundtruth class label.
bbox_targets (torch.Tensor): Groundtruth center and span
of positive proposals.
regression_indexer (torch.Tensor): Index slices of
positive proposals.
Returns:
torch.Tensor: Returned class-wise regression loss.
"""
pred = bbox_pred[regression_indexer, :, :]
gt = labels[regression_indexer]
reg_target = bbox_targets[regression_indexer, :]
class_idx = gt.data - 1
classwise_pred = pred[:, class_idx, :]
classwise_reg_pred = torch.cat(
(torch.diag(classwise_pred[:, :, 0]).view(
-1, 1), torch.diag(classwise_pred[:, :, 1]).view(-1, 1)),
dim=1)
loss = F.smooth_l1_loss(
classwise_reg_pred.view(-1), reg_target.view(-1)) * 2
return loss
def forward(self, activity_score, completeness_score, bbox_pred,
proposal_type, labels, bbox_targets, train_cfg):
"""Calculate Boundary Matching Network Loss.
Args:
activity_score (torch.Tensor): Predicted activity score.
completeness_score (torch.Tensor): Predicted completeness score.
bbox_pred (torch.Tensor): Predicted interval center and span
of positive proposals.
proposal_type (torch.Tensor): Type index slices of proposals.
labels (torch.Tensor): Groundtruth class label.
bbox_targets (torch.Tensor): Groundtruth center and span
of positive proposals.
train_cfg (dict): Config for training.
Returns:
dict([torch.Tensor, torch.Tensor, torch.Tensor]):
(loss_activity, loss_completeness, loss_reg).
Loss_activity is the activity loss, loss_completeness is
the class-wise completeness loss,
loss_reg is the class-wise regression loss.
"""
self.sampler = train_cfg.ssn.sampler
self.loss_weight = train_cfg.ssn.loss_weight
losses = dict()
proposal_type = proposal_type.view(-1)
labels = labels.view(-1)
activity_indexer = ((proposal_type == 0) +
(proposal_type == 2)).nonzero().squeeze(1)
completeness_indexer = ((proposal_type == 0) +
(proposal_type == 1)).nonzero().squeeze(1)
total_ratio = (
self.sampler.positive_ratio + self.sampler.background_ratio +
self.sampler.incomplete_ratio)
positive_per_video = int(self.sampler.num_per_video *
(self.sampler.positive_ratio / total_ratio))
background_per_video = int(
self.sampler.num_per_video *
(self.sampler.background_ratio / total_ratio))
incomplete_per_video = (
self.sampler.num_per_video - positive_per_video -
background_per_video)
losses['loss_activity'] = self.activity_loss(activity_score, labels,
activity_indexer)
losses['loss_completeness'] = self.completeness_loss(
completeness_score,
labels,
completeness_indexer,
positive_per_video,
incomplete_per_video,
ohem_ratio=positive_per_video / incomplete_per_video)
losses['loss_completeness'] *= self.loss_weight.comp_loss_weight
if bbox_pred is not None:
regression_indexer = (proposal_type == 0).nonzero().squeeze(1)
bbox_targets = bbox_targets.view(-1, 2)
losses['loss_reg'] = self.classwise_regression_loss(
bbox_pred, labels, bbox_targets, regression_indexer)
losses['loss_reg'] *= self.loss_weight.reg_loss_weight
return losses
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/ssn_loss.py |
import torch
import torch.nn.functional as F
from ..registry import LOSSES
from .base import BaseWeightedLoss
from ...core import top_k_accuracy
@LOSSES.register_module()
class GCPLoss(BaseWeightedLoss):
"""Reciprocal Point Learning Loss."""
def __init__(self, temperature=1, weight_pl=0.1, radius_init=1):
super().__init__()
self.temperature = temperature
self.weight_pl = weight_pl
def _forward(self, head_outs, labels, **kwargs):
"""Forward function.
Args:
head_outs (torch.Tensor): outputs of the RPL head
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
CrossEntropy loss.
Returns:
torch.Tensor: The returned CrossEntropy loss.
"""
dist, feature, centers = head_outs['dist'], head_outs['feature'], head_outs['centers']
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
# compute losses
logits = F.softmax(dist, dim=1)
loss_closed = F.cross_entropy(dist / self.temperature, labels, **kwargs)
center_batch = centers[labels, :]
loss_r = F.mse_loss(feature, center_batch) / 2
# gather losses
losses = {'loss_cls': loss_closed, 'loss_open': self.weight_pl * loss_r}
# compute top-K accuracy using CPU numpy
top_k_acc = top_k_accuracy(logits.detach().cpu().numpy(),
labels.detach().cpu().numpy(), (1, 5))
losses.update({'top1_acc': torch.tensor(top_k_acc[0], device=dist.device)})
losses.update({'top5_acc': torch.tensor(top_k_acc[1], device=dist.device)})
return losses | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/gcp_loss.py |
import torch
import torch.nn as nn
from ..registry import LOSSES
def binary_logistic_regression_loss(reg_score,
label,
threshold=0.5,
ratio_range=(1.05, 21),
eps=1e-5):
"""Binary Logistic Regression Loss."""
label = label.view(-1).to(reg_score.device)
reg_score = reg_score.contiguous().view(-1)
pmask = (label > threshold).float().to(reg_score.device)
num_positive = max(torch.sum(pmask), 1)
num_entries = len(label)
ratio = num_entries / num_positive
# clip ratio value between ratio_range
ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (
1.0 - pmask) * torch.log(1.0 - reg_score + eps)
loss = -torch.mean(loss)
return loss
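# Illustrative sketch: calling the functional form on random scores. The scores
# are assumed to already lie in (0, 1) (e.g. after a sigmoid), since the loss
# takes log(reg_score) directly.
def _example_binary_logistic_regression_loss():
    reg_score = torch.sigmoid(torch.randn(16))
    label = (torch.rand(16) > 0.5).float()
    return binary_logistic_regression_loss(reg_score, label)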
@LOSSES.register_module()
class BinaryLogisticRegressionLoss(nn.Module):
"""Binary Logistic Regression Loss.
It will calculate binary logistic regression loss given reg_score and
label.
"""
def forward(self,
reg_score,
label,
threshold=0.5,
ratio_range=(1.05, 21),
eps=1e-5):
"""Calculate Binary Logistic Regression Loss.
Args:
reg_score (torch.Tensor): Predicted score by model.
label (torch.Tensor): Groundtruth labels.
threshold (float): Threshold for positive instances.
Default: 0.5.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
return binary_logistic_regression_loss(reg_score, label, threshold,
ratio_range, eps)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/binary_logistic_regression_loss.py |
import torch.nn.functional as F
from ..registry import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class CrossEntropyLoss(BaseWeightedLoss):
"""Cross Entropy Loss."""
def _forward(self, cls_score, label, **kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
CrossEntropy loss.
Returns:
torch.Tensor: The returned CrossEntropy loss.
"""
loss_cls = F.cross_entropy(cls_score, label, **kwargs)
return loss_cls
@LOSSES.register_module()
class BCELossWithLogits(BaseWeightedLoss):
"""Binary Cross Entropy Loss with logits."""
def _forward(self, cls_score, label, **kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
bce loss with logits.
Returns:
torch.Tensor: The returned bce loss with logits.
"""
loss_cls = F.binary_cross_entropy_with_logits(cls_score, label,
**kwargs)
return loss_cls
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/cross_entropy_loss.py |
import torch
class OHEMHingeLoss(torch.autograd.Function):
"""This class is the core implementation for the completeness loss in
paper.
It compute class-wise hinge loss and performs online hard example mining
(OHEM).
"""
@staticmethod
def forward(ctx, pred, labels, is_positive, ohem_ratio, group_size):
"""Calculate OHEM hinge loss.
Args:
pred (torch.Tensor): Predicted completeness score.
labels (torch.Tensor): Groundtruth class label.
is_positive (int): Set to 1 when proposals are positive and
set to -1 when proposals are incomplete.
ohem_ratio (float): Ratio of hard examples.
group_size (int): Number of proposals sampled per video.
Returns:
torch.Tensor: Returned class-wise hinge loss.
"""
num_samples = pred.size(0)
if num_samples != len(labels):
raise ValueError(f'Number of samples should be equal to that '
f'of labels, but got {num_samples} samples and '
f'{len(labels)} labels.')
losses = torch.zeros(num_samples, device=pred.device)
slopes = torch.zeros(num_samples, device=pred.device)
for i in range(num_samples):
losses[i] = max(0, 1 - is_positive * pred[i, labels[i] - 1])
slopes[i] = -is_positive if losses[i] != 0 else 0
losses = losses.view(-1, group_size).contiguous()
sorted_losses, indices = torch.sort(losses, dim=1, descending=True)
keep_length = int(group_size * ohem_ratio)
loss = torch.zeros(1, device=pred.device)
for i in range(losses.size(0)):
loss += sorted_losses[i, :keep_length].sum()
ctx.loss_index = indices[:, :keep_length]
ctx.labels = labels
ctx.slopes = slopes
ctx.shape = pred.size()
ctx.group_size = group_size
ctx.num_groups = losses.size(0)
return loss
@staticmethod
def backward(ctx, grad_output):
labels = ctx.labels
slopes = ctx.slopes
grad_in = torch.zeros(ctx.shape, device=ctx.slopes.device)
for group in range(ctx.num_groups):
for idx in ctx.loss_index[group]:
loc = idx + group * ctx.group_size
grad_in[loc, labels[loc] - 1] = (
slopes[loc] * grad_output.data[0])
return torch.autograd.Variable(grad_in), None, None, None, None
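# Illustrative sketch: applying the custom autograd function on random
# predictions. Labels are 1-indexed (the forward pass indexes `labels[i] - 1`),
# and `group_size` must divide the number of proposals; the numbers below are
# arbitrary assumptions.
def _example_ohem_hinge_loss():
    pred = torch.randn(8, 5, requires_grad=True)   # 8 proposals, 5 classes
    labels = torch.randint(1, 6, (8,))             # 1-indexed class labels
    # is_positive=1 (positive proposals); keep the hardest 50% of each group of 4
    loss = OHEMHingeLoss.apply(pred, labels, 1, 0.5, 4)
    loss.backward()
    return loss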
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/ohem_hinge_loss.py |
import torch.nn.functional as F
from ..registry import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class NLLLoss(BaseWeightedLoss):
"""NLL Loss.
It will calculate NLL loss given cls_score and label.
"""
def _forward(self, cls_score, label, **kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate nll loss.
Returns:
torch.Tensor: The returned nll loss.
"""
loss_cls = F.nll_loss(cls_score, label, **kwargs)
return loss_cls
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/nll_loss.py |
import torch
import torch.nn.functional as F
from ..registry import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class HVULoss(BaseWeightedLoss):
"""Calculate the BCELoss for HVU.
Args:
categories (tuple[str]): Names of tag categories, tags are organized in
this order. Default: ['action', 'attribute', 'concept', 'event',
'object', 'scene'].
category_nums (tuple[int]): Number of tags for each category. Default:
(739, 117, 291, 69, 1678, 248).
        category_loss_weights (tuple[float]): Loss weights of categories; they
            apply only if `loss_type == 'individual'`. The loss weights will
            be normalized so that they sum to 1, so you can give any
            positive numbers as loss weights. Default: (1, 1, 1, 1, 1, 1).
loss_type (str): The loss type we calculate, we can either calculate
the BCELoss for all tags, or calculate the BCELoss for tags in each
category. Choices are 'individual' or 'all'. Default: 'all'.
        with_mask (bool): Some tag categories are missing for some video
            clips. If `with_mask == True`, we will not calculate loss for
            these missing categories. Otherwise, these missing categories are
            treated as negative samples.
reduction (str): Reduction way. Choices are 'mean' or 'sum'. Default:
'mean'.
loss_weight (float): The loss weight. Default: 1.0.
"""
def __init__(self,
categories=('action', 'attribute', 'concept', 'event',
'object', 'scene'),
category_nums=(739, 117, 291, 69, 1678, 248),
category_loss_weights=(1, 1, 1, 1, 1, 1),
loss_type='all',
with_mask=False,
reduction='mean',
loss_weight=1.0):
super().__init__(loss_weight)
self.categories = categories
self.category_nums = category_nums
self.category_loss_weights = category_loss_weights
assert len(self.category_nums) == len(self.category_loss_weights)
for category_loss_weight in self.category_loss_weights:
assert category_loss_weight >= 0
self.loss_type = loss_type
self.with_mask = with_mask
self.reduction = reduction
self.category_startidx = [0]
for i in range(len(self.category_nums) - 1):
self.category_startidx.append(self.category_startidx[-1] +
self.category_nums[i])
assert self.loss_type in ['individual', 'all']
assert self.reduction in ['mean', 'sum']
def _forward(self, cls_score, label, mask, category_mask):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
mask (torch.Tensor): The mask of tags. 0 indicates that the
category of this tag is missing in the label of the video.
            category_mask (torch.Tensor): The category mask. For each sample,
                it's a tensor with length `len(self.categories)`, denoting
                whether each category is labeled for this video.
Returns:
torch.Tensor: The returned CrossEntropy loss.
"""
if self.loss_type == 'all':
loss_cls = F.binary_cross_entropy_with_logits(
cls_score, label, reduction='none')
if self.with_mask:
w_loss_cls = mask * loss_cls
w_loss_cls = torch.sum(w_loss_cls, dim=1)
if self.reduction == 'mean':
w_loss_cls = w_loss_cls / torch.sum(mask, dim=1)
w_loss_cls = torch.mean(w_loss_cls)
return dict(loss_cls=w_loss_cls)
if self.reduction == 'sum':
loss_cls = torch.sum(loss_cls, dim=-1)
return dict(loss_cls=torch.mean(loss_cls))
if self.loss_type == 'individual':
losses = {}
loss_weights = {}
for name, num, start_idx in zip(self.categories,
self.category_nums,
self.category_startidx):
category_score = cls_score[:, start_idx:start_idx + num]
category_label = label[:, start_idx:start_idx + num]
category_loss = F.binary_cross_entropy_with_logits(
category_score, category_label, reduction='none')
if self.reduction == 'mean':
category_loss = torch.mean(category_loss, dim=1)
elif self.reduction == 'sum':
category_loss = torch.sum(category_loss, dim=1)
idx = self.categories.index(name)
if self.with_mask:
category_mask_i = category_mask[:, idx].reshape(-1)
                    # there should be at least one sample which contains tags
                    # in this category
if torch.sum(category_mask_i) < 0.5:
losses[f'{name}_LOSS'] = torch.tensor(.0).cuda()
loss_weights[f'{name}_LOSS'] = .0
continue
category_loss = torch.sum(category_loss * category_mask_i)
category_loss = category_loss / torch.sum(category_mask_i)
else:
category_loss = torch.mean(category_loss)
                # We name the loss of each category as '<NAME>_LOSS', since we
                # only want to monitor them, not backpropagate them. The loss
                # actually used for backpropagation is provided as 'loss_cls'.
losses[f'{name}_LOSS'] = category_loss
loss_weights[f'{name}_LOSS'] = self.category_loss_weights[idx]
loss_weight_sum = sum(loss_weights.values())
loss_weights = {
k: v / loss_weight_sum
for k, v in loss_weights.items()
}
loss_cls = sum([losses[k] * loss_weights[k] for k in losses])
losses['loss_cls'] = loss_cls
# We also trace the loss weights
losses.update({
k + '_weight': torch.tensor(v).to(losses[k].device)
for k, v in loss_weights.items()
})
# Note that the loss weights are just for reference.
return losses
else:
raise ValueError("loss_type should be 'all' or 'individual', "
f'but got {self.loss_type}')
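# A hedged config sketch (the tag categories and weights below are
# illustrative, not taken from a shipped config): with
# ``loss_type='individual'`` and ``with_mask=True`` this loss reports one
# ``<category>_LOSS`` entry per tag category and combines them into
# ``loss_cls`` using the normalized ``category_loss_weights``.
#
#     loss_cls = dict(
#         type='HVULoss',
#         categories=('action', 'scene'),
#         category_nums=(739, 248),
#         category_loss_weights=(2, 1),
#         loss_type='individual',
#         with_mask=True)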
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/hvu_loss.py |
from abc import ABCMeta, abstractmethod
import torch.nn as nn
class BaseWeightedLoss(nn.Module, metaclass=ABCMeta):
"""Base class for loss.
    All subclasses should overwrite the ``_forward()`` method, which returns
    the normal loss without loss weights.
Args:
loss_weight (float): Factor scalar multiplied on the loss.
Default: 1.0.
"""
def __init__(self, loss_weight=1.0):
super().__init__()
self.loss_weight = loss_weight
@abstractmethod
def _forward(self, *args, **kwargs):
pass
def forward(self, *args, **kwargs):
"""Defines the computation performed at every call.
Args:
*args: The positional arguments for the corresponding
loss.
**kwargs: The keyword arguments for the corresponding
loss.
Returns:
torch.Tensor: The calculated loss.
"""
ret = self._forward(*args, **kwargs)
if isinstance(ret, dict):
for k in ret:
if 'loss' in k:
ret[k] *= self.loss_weight
else:
ret *= self.loss_weight
return ret
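if __name__ == '__main__':
    # Hedged sketch, not part of mmaction: a throwaway subclass showing how
    # ``loss_weight`` scales whatever ``_forward`` returns, whether that is a
    # plain tensor or a dict whose keys contain 'loss'.
    import torch
    import torch.nn.functional as F

    class _ToyLoss(BaseWeightedLoss):

        def _forward(self, cls_score, label):
            return F.cross_entropy(cls_score, label)

    toy_loss = _ToyLoss(loss_weight=0.5)
    print(toy_loss(torch.randn(2, 4), torch.tensor([0, 3])))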
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/base.py |
import torch
import torch.nn.functional as F
from ..registry import LOSSES
from .base import BaseWeightedLoss
from ...core import top_k_accuracy
@LOSSES.register_module()
class RPLoss(BaseWeightedLoss):
"""Reciprocal Point Learning Loss."""
def __init__(self, temperature=1, weight_pl=0.1, radius_init=1):
super().__init__()
self.temperature = temperature
self.weight_pl = weight_pl
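        # Note: ``radius_init`` is used here as the *size* of the learnable
        # radius tensor rather than its initial value; the parameter is
        # zero-filled on the next line.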
self.radius = torch.nn.Parameter(torch.Tensor(radius_init))
self.radius.data.fill_(0)
def _forward(self, head_outs, labels, **kwargs):
"""Forward function.
Args:
head_outs (torch.Tensor): outputs of the RPL head
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
CrossEntropy loss.
Returns:
            dict: A dict containing the closed-set classification loss
                ('loss_cls'), the open-set regularization loss ('loss_open'),
                and the top-1/top-5 accuracies for monitoring.
"""
dist, feature, centers = head_outs['dist'], head_outs['feature'], head_outs['centers']
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
# compute losses
logits = F.softmax(dist, dim=1)
loss_closed = F.cross_entropy(dist / self.temperature, labels, **kwargs)
center_batch = centers[labels, :]
_dis = (feature - center_batch).pow(2).mean(1)
loss_r = F.mse_loss(feature, self.radius) / 2
# gather losses
losses = {'loss_cls': loss_closed, 'loss_open': self.weight_pl * loss_r}
# compute top-K accuracy using CPU numpy
top_k_acc = top_k_accuracy(logits.detach().cpu().numpy(),
labels.detach().cpu().numpy(), (1, 5))
losses.update({'top1_acc': torch.tensor(top_k_acc[0], device=dist.device)})
losses.update({'top5_acc': torch.tensor(top_k_acc[1], device=dist.device)})
return losses | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/rpl_loss.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
from .binary_logistic_regression_loss import binary_logistic_regression_loss
@LOSSES.register_module()
class BMNLoss(nn.Module):
"""BMN Loss.
From paper https://arxiv.org/abs/1907.09702,
code https://github.com/JJBOY/BMN-Boundary-Matching-Network.
It will calculate loss for BMN Model. This loss is a weighted sum of
1) temporal evaluation loss based on confidence score of start and
end positions.
2) proposal evaluation regression loss based on confidence scores of
candidate proposals.
3) proposal evaluation classification loss based on classification
results of candidate proposals.
"""
@staticmethod
def tem_loss(pred_start, pred_end, gt_start, gt_end):
"""Calculate Temporal Evaluation Module Loss.
This function calculate the binary_logistic_regression_loss for start
and end respectively and returns the sum of their losses.
Args:
pred_start (torch.Tensor): Predicted start score by BMN model.
pred_end (torch.Tensor): Predicted end score by BMN model.
gt_start (torch.Tensor): Groundtruth confidence score for start.
gt_end (torch.Tensor): Groundtruth confidence score for end.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
loss_start = binary_logistic_regression_loss(pred_start, gt_start)
loss_end = binary_logistic_regression_loss(pred_end, gt_end)
loss = loss_start + loss_end
return loss
@staticmethod
def pem_reg_loss(pred_score,
gt_iou_map,
mask,
high_temporal_iou_threshold=0.7,
low_temporal_iou_threshold=0.3):
"""Calculate Proposal Evaluation Module Regression Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
high_temporal_iou_threshold (float): Higher threshold of
temporal_iou. Default: 0.7.
            low_temporal_iou_threshold (float): Lower threshold of
                temporal_iou. Default: 0.3.
Returns:
            torch.Tensor: Proposal evaluation regression loss.
"""
u_hmask = (gt_iou_map > high_temporal_iou_threshold).float()
u_mmask = ((gt_iou_map <= high_temporal_iou_threshold) &
(gt_iou_map > low_temporal_iou_threshold)).float()
u_lmask = ((gt_iou_map <= low_temporal_iou_threshold) &
(gt_iou_map > 0.)).float()
u_lmask = u_lmask * mask
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
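        # Randomly subsample the medium- and low-IoU negatives so that each
        # group contributes roughly as many terms as the ``num_h`` positives,
        # which balances the regression targets across IoU ranges.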
r_m = num_h / num_m
u_smmask = torch.rand_like(gt_iou_map)
u_smmask = u_mmask * u_smmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = num_h / num_l
u_slmask = torch.rand_like(gt_iou_map)
u_slmask = u_lmask * u_slmask
u_slmask = (u_slmask > (1. - r_l)).float()
weights = u_hmask + u_smmask + u_slmask
loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
loss = 0.5 * torch.sum(
loss * torch.ones_like(weights)) / torch.sum(weights)
return loss
@staticmethod
def pem_cls_loss(pred_score,
gt_iou_map,
mask,
threshold=0.9,
ratio_range=(1.05, 21),
eps=1e-5):
"""Calculate Proposal Evaluation Module Classification Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
threshold (float): Threshold of temporal_iou for positive
instances. Default: 0.9.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5
Returns:
            torch.Tensor: Proposal evaluation classification loss.
"""
pmask = (gt_iou_map > threshold).float()
nmask = (gt_iou_map <= threshold).float()
nmask = nmask * mask
num_positive = max(torch.sum(pmask), 1)
num_entries = num_positive + torch.sum(nmask)
ratio = num_entries / num_positive
ratio = torch.clamp(ratio, ratio_range[0], ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss_pos = coef_1 * torch.log(pred_score + eps) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + eps) * nmask
loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
return loss
def forward(self,
pred_bm,
pred_start,
pred_end,
gt_iou_map,
gt_start,
gt_end,
bm_mask,
weight_tem=1.0,
weight_pem_reg=10.0,
weight_pem_cls=1.0):
"""Calculate Boundary Matching Network Loss.
Args:
pred_bm (torch.Tensor): Predicted confidence score for boundary
matching map.
pred_start (torch.Tensor): Predicted confidence score for start.
pred_end (torch.Tensor): Predicted confidence score for end.
gt_iou_map (torch.Tensor): Groundtruth score for boundary matching
map.
gt_start (torch.Tensor): Groundtruth temporal_iou score for start.
gt_end (torch.Tensor): Groundtruth temporal_iou score for end.
bm_mask (torch.Tensor): Boundary-Matching mask.
weight_tem (float): Weight for tem loss. Default: 1.0.
weight_pem_reg (float): Weight for pem regression loss.
Default: 10.0.
weight_pem_cls (float): Weight for pem classification loss.
Default: 1.0.
Returns:
tuple([torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]):
(loss, tem_loss, pem_reg_loss, pem_cls_loss). Loss is the bmn
loss, tem_loss is the temporal evaluation loss, pem_reg_loss is
the proposal evaluation regression loss, pem_cls_loss is the
proposal evaluation classification loss.
"""
pred_bm_reg = pred_bm[:, 0].contiguous()
pred_bm_cls = pred_bm[:, 1].contiguous()
gt_iou_map = gt_iou_map * bm_mask
pem_reg_loss = self.pem_reg_loss(pred_bm_reg, gt_iou_map, bm_mask)
pem_cls_loss = self.pem_cls_loss(pred_bm_cls, gt_iou_map, bm_mask)
tem_loss = self.tem_loss(pred_start, pred_end, gt_start, gt_end)
loss = (
weight_tem * tem_loss + weight_pem_reg * pem_reg_loss +
weight_pem_cls * pem_cls_loss)
return loss, tem_loss, pem_reg_loss, pem_cls_loss
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/losses/bmn_loss.py |
from .tpn import TPN
__all__ = ['TPN']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/necks/__init__.py |
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, normal_init, xavier_init
from ..heads import AuxHead, RebiasHead
from ..registry import NECKS
class Identity(nn.Module):
"""Identity mapping."""
def forward(self, x):
return x
class DownSample(nn.Module):
"""DownSample modules.
It uses convolution and maxpooling to downsample the input feature,
and specifies downsample position to determine `pool-conv` or `conv-pool`.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output feature.
kernel_size (int | tuple[int]): Same as :class:`ConvModule`.
Default: (3, 1, 1).
stride (int | tuple[int]): Same as :class:`ConvModule`.
Default: (1, 1, 1).
padding (int | tuple[int]): Same as :class:`ConvModule`.
Default: (1, 0, 0).
groups (int): Same as :class:`ConvModule`. Default: 1.
bias (bool | str): Same as :class:`ConvModule`. Default: False.
conv_cfg (dict | None): Same as :class:`ConvModule`.
Default: dict(type='Conv3d').
norm_cfg (dict | None): Same as :class:`ConvModule`. Default: None.
act_cfg (dict | None): Same as :class:`ConvModule`. Default: None.
downsample_position (str): Type of downsample position. Options are
'before' and 'after'. Default: 'after'.
downsample_scale (int | tuple[int]): downsample scale for maxpooling.
It will be used for kernel size and stride of maxpooling.
Default: (1, 2, 2).
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=(3, 1, 1),
stride=(1, 1, 1),
padding=(1, 0, 0),
groups=1,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=None,
act_cfg=None,
downsample_position='after',
downsample_scale=(1, 2, 2)):
super().__init__()
self.conv = ConvModule(
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=groups,
bias=bias,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
assert downsample_position in ['before', 'after']
self.downsample_position = downsample_position
self.pool = nn.MaxPool3d(
downsample_scale, downsample_scale, (0, 0, 0), ceil_mode=True)
def forward(self, x):
if self.downsample_position == 'before':
x = self.pool(x)
x = self.conv(x)
else:
x = self.conv(x)
x = self.pool(x)
return x
class LevelFusion(nn.Module):
"""Level Fusion module.
    This module is used to aggregate hierarchical features that are dynamic in
    visual tempos and consistent in spatial semantics. The top/bottom features
for top-down/bottom-up flow would be combined to achieve two additional
options, namely 'Cascade Flow' or 'Parallel Flow'. While applying a
bottom-up flow after a top-down flow will lead to the cascade flow,
applying them simultaneously will result in the parallel flow.
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
mid_channels (tuple[int]): Channel numbers of middle features tuple.
out_channels (int): Channel numbers of output features.
downsample_scales (tuple[int | tuple[int]]): downsample scales for
each :class:`DownSample` module. Default: ((1, 1, 1), (1, 1, 1)).
"""
def __init__(self,
in_channels,
mid_channels,
out_channels,
downsample_scales=((1, 1, 1), (1, 1, 1))):
super().__init__()
num_stages = len(in_channels)
self.downsamples = nn.ModuleList()
for i in range(num_stages):
downsample = DownSample(
in_channels[i],
mid_channels[i],
kernel_size=(1, 1, 1),
stride=(1, 1, 1),
bias=False,
padding=(0, 0, 0),
groups=32,
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
downsample_position='before',
downsample_scale=downsample_scales[i])
self.downsamples.append(downsample)
self.fusion_conv = ConvModule(
sum(mid_channels),
out_channels,
1,
stride=1,
padding=0,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True))
def forward(self, x):
out = [self.downsamples[i](feature) for i, feature in enumerate(x)]
out = torch.cat(out, 1)
out = self.fusion_conv(out)
return out
class SpatialModulation(nn.Module):
"""Spatial Semantic Modulation.
This module is used to align spatial semantics of features in the
    multi-depth pyramid. For each feature except the top-level one, a stack
    of convolutions with level-specific stride is applied to it, matching
    its spatial shape and receptive field with the top one.
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
        out_channels (int): Channel number of output features.
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.spatial_modulation = nn.ModuleList()
for channel in in_channels:
downsample_scale = out_channels // channel
downsample_factor = int(np.log2(downsample_scale))
op = nn.ModuleList()
if downsample_factor < 1:
op = Identity()
else:
for factor in range(downsample_factor):
in_factor = 2**factor
out_factor = 2**(factor + 1)
op.append(
ConvModule(
channel * in_factor,
channel * out_factor, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True)))
self.spatial_modulation.append(op)
def forward(self, x):
out = []
for i, _ in enumerate(x):
if isinstance(self.spatial_modulation[i], nn.ModuleList):
out_ = x[i]
for op in self.spatial_modulation[i]:
out_ = op(out_)
out.append(out_)
else:
out.append(self.spatial_modulation[i](x[i]))
return out
class TemporalModulation(nn.Module):
"""Temporal Rate Modulation.
The module is used to equip TPN with a similar flexibility for temporal
tempo modulation as in the input-level frame pyramid.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output features.
downsample_scale (int): Downsample scale for maxpooling. Default: 8.
"""
def __init__(self, in_channels, out_channels, downsample_scale=8):
super().__init__()
self.conv = ConvModule(
in_channels,
out_channels, (3, 1, 1),
stride=(1, 1, 1),
padding=(1, 0, 0),
bias=False,
groups=32,
conv_cfg=dict(type='Conv3d'),
act_cfg=None)
self.pool = nn.MaxPool3d((downsample_scale, 1, 1),
(downsample_scale, 1, 1), (0, 0, 0),
ceil_mode=True)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
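# A hedged sketch of how this neck is typically wired up in a recognizer
# config (values are illustrative, loosely based on the SlowOnly + TPN
# configs; check the shipped config files before relying on them):
#
#     neck=dict(
#         type='TPN',
#         in_channels=(1024, 2048),
#         out_channels=1024,
#         spatial_modulation_cfg=dict(
#             in_channels=(1024, 2048), out_channels=2048),
#         temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
#         upsample_cfg=dict(scale_factor=(1, 1, 1)),
#         downsample_cfg=dict(downsample_scale=(1, 1, 1)),
#         level_fusion_cfg=dict(
#             in_channels=(1024, 1024),
#             mid_channels=(1024, 1024),
#             out_channels=2048,
#             downsample_scales=((1, 1, 1), (1, 1, 1))),
#         aux_head_cfg=dict(out_channels=400, loss_weight=0.5))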
@NECKS.register_module()
class TPN(nn.Module):
"""TPN neck.
This module is proposed in `Temporal Pyramid Network for Action Recognition
<https://arxiv.org/pdf/2004.03548.pdf>`_
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
out_channels (int): Channel number of output feature.
spatial_modulation_cfg (dict | None): Config for spatial modulation
layers. Required keys are `in_channels` and `out_channels`.
Default: None.
temporal_modulation_cfg (dict | None): Config for temporal modulation
layers. Default: None.
        upsample_cfg (dict | None): Config for upsample layers. The keys are
            the same as those in :class:`nn.Upsample`. Default: None.
downsample_cfg (dict | None): Config for downsample layers.
Default: None.
level_fusion_cfg (dict | None): Config for level fusion layers.
Required keys are 'in_channels', 'mid_channels', 'out_channels'.
Default: None.
aux_head_cfg (dict | None): Config for aux head layers.
Required keys are 'out_channels'. Default: None.
flow_type (str): Flow type to combine the features. Options are
'cascade' and 'parallel'. Default: 'cascade'.
"""
def __init__(self,
in_channels,
out_channels,
spatial_modulation_cfg=None,
temporal_modulation_cfg=None,
upsample_cfg=None,
downsample_cfg=None,
level_fusion_cfg=None,
aux_head_cfg=None,
rebias_head_cfg=None,
flow_type='cascade'):
super().__init__()
assert isinstance(in_channels, tuple)
assert isinstance(out_channels, int)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_tpn_stages = len(in_channels)
assert spatial_modulation_cfg is None or isinstance(
spatial_modulation_cfg, dict)
assert temporal_modulation_cfg is None or isinstance(
temporal_modulation_cfg, dict)
assert upsample_cfg is None or isinstance(upsample_cfg, dict)
assert downsample_cfg is None or isinstance(downsample_cfg, dict)
assert aux_head_cfg is None or isinstance(aux_head_cfg, dict)
assert rebias_head_cfg is None or isinstance(rebias_head_cfg, dict)
assert level_fusion_cfg is None or isinstance(level_fusion_cfg, dict)
if flow_type not in ['cascade', 'parallel']:
raise ValueError(
f"flow type in TPN should be 'cascade' or 'parallel', "
f'but got {flow_type} instead.')
self.flow_type = flow_type
self.temporal_modulation_ops = nn.ModuleList()
self.upsample_ops = nn.ModuleList()
self.downsample_ops = nn.ModuleList()
self.level_fusion_1 = LevelFusion(**level_fusion_cfg)
self.spatial_modulation = SpatialModulation(**spatial_modulation_cfg)
for i in range(self.num_tpn_stages):
if temporal_modulation_cfg is not None:
downsample_scale = temporal_modulation_cfg[
'downsample_scales'][i]
temporal_modulation = TemporalModulation(
in_channels[-1], out_channels, downsample_scale)
self.temporal_modulation_ops.append(temporal_modulation)
if i < self.num_tpn_stages - 1:
if upsample_cfg is not None:
upsample = nn.Upsample(**upsample_cfg)
self.upsample_ops.append(upsample)
if downsample_cfg is not None:
downsample = DownSample(out_channels, out_channels,
**downsample_cfg)
self.downsample_ops.append(downsample)
out_dims = level_fusion_cfg['out_channels']
# two pyramids
self.level_fusion_2 = LevelFusion(**level_fusion_cfg)
self.pyramid_fusion = ConvModule(
out_dims * 2,
2048,
1,
stride=1,
padding=0,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
if aux_head_cfg is not None:
self.aux_head = AuxHead(self.in_channels[-2], **aux_head_cfg)
else:
self.aux_head = None
if rebias_head_cfg is not None:
self.rebias_head = RebiasHead(self.in_channels[-2], **rebias_head_cfg)
else:
self.rebias_head = None
self.init_weights()
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
if self.aux_head is not None:
self.aux_head.init_weights()
if self.rebias_head is not None:
self.rebias_head.init_weights()
def forward(self, x, target=None):
loss_aux = dict()
# Auxiliary loss
if self.aux_head is not None:
loss_aux = self.aux_head(x[-2], target)
if self.rebias_head is not None:
loss_rebias = self.rebias_head(x[-2], target)
loss_aux.update(loss_rebias)
# Spatial Modulation
spatial_modulation_outs = self.spatial_modulation(x)
# Temporal Modulation
temporal_modulation_outs = []
for i, temporal_modulation in enumerate(self.temporal_modulation_ops):
temporal_modulation_outs.append(
temporal_modulation(spatial_modulation_outs[i]))
outs = [out.clone() for out in temporal_modulation_outs]
if len(self.upsample_ops) != 0:
for i in range(self.num_tpn_stages - 1, 0, -1):
outs[i - 1] = outs[i - 1] + self.upsample_ops[i - 1](outs[i])
# Get top-down outs
top_down_outs = self.level_fusion_1(outs)
# Build bottom-up flow using downsample operation
if self.flow_type == 'parallel':
outs = [out.clone() for out in temporal_modulation_outs]
if len(self.downsample_ops) != 0:
for i in range(self.num_tpn_stages - 1):
outs[i + 1] = outs[i + 1] + self.downsample_ops[i](outs[i])
# Get bottom-up outs
        bottom_up_outs = self.level_fusion_2(outs)
# fuse two pyramid outs
outs = self.pyramid_fusion(
            torch.cat([top_down_outs, bottom_up_outs], 1))
return outs, loss_aux
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/necks/tpn.py |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .base import BaseHead
@HEADS.register_module()
class SlowFastHead(BaseHead):
"""The classification head for SlowFast.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.8.
init_std (float): Std value for Initiation. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.8,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(in_channels, num_classes)
if self.spatial_type == 'avg':
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# ([N, channel_fast, T, H, W], [(N, channel_slow, T, H, W)])
x_fast, x_slow = x
# ([N, channel_fast, 1, 1, 1], [N, channel_slow, 1, 1, 1])
x_fast = self.avg_pool(x_fast)
x_slow = self.avg_pool(x_slow)
# [N, channel_fast + channel_slow, 1, 1, 1]
x = torch.cat((x_slow, x_fast), dim=1)
if self.dropout is not None:
x = self.dropout(x)
# [N x C]
x = x.view(x.size(0), -1)
# [N x num_classes]
cls_score = self.fc_cls(x)
return cls_score
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_head.py |
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .base import BaseHead
@HEADS.register_module()
class X3DHead(BaseHead):
"""Classification head for I3D.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.5.
init_std (float): Std value for Initiation. Default: 0.01.
        fc1_bias (bool): Whether the first fc layer has bias. Default: False.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.5,
init_std=0.01,
fc1_bias=False):
super().__init__(num_classes, in_channels, loss_cls)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.in_channels = in_channels
self.mid_channels = 2048
self.num_classes = num_classes
self.fc1_bias = fc1_bias
self.fc1 = nn.Linear(
self.in_channels, self.mid_channels, bias=self.fc1_bias)
self.fc2 = nn.Linear(self.mid_channels, self.num_classes)
self.relu = nn.ReLU()
self.pool = None
if self.spatial_type == 'avg':
self.pool = nn.AdaptiveAvgPool3d((1, 1, 1))
elif self.spatial_type == 'max':
self.pool = nn.AdaptiveMaxPool3d((1, 1, 1))
else:
raise NotImplementedError
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc1, std=self.init_std)
normal_init(self.fc2, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N, in_channels, T, H, W]
assert self.pool is not None
x = self.pool(x)
        # [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
x = self.fc1(x)
# [N, 2048]
x = self.relu(x)
if self.dropout is not None:
x = self.dropout(x)
cls_score = self.fc2(x)
# [N, num_classes]
return cls_score
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/x3d_head.py |
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, normal_init, xavier_init
from ..builder import build_loss
class AuxHead(nn.Module):
"""Auxiliary Head.
This auxiliary head is appended to receive stronger supervision,
leading to enhanced semantics.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output features.
loss_weight (float): weight of loss for the auxiliary head.
Default: 0.5.
        loss_cls (dict): Config for building loss.
            Default: ``dict(type='CrossEntropyLoss')``.
"""
def __init__(self,
in_channels,
out_channels,
loss_weight=0.5,
loss_cls=dict(type='CrossEntropyLoss')):
super().__init__()
self.conv = ConvModule(
in_channels,
in_channels * 2, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.loss_weight = loss_weight
self.dropout = nn.Dropout(p=0.5)
self.fc = nn.Linear(in_channels * 2, out_channels)
self.loss_cls = build_loss(loss_cls)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
normal_init(m, std=0.01)
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
def forward(self, x, target=None):
losses = dict()
if target is None:
return losses
x = self.conv(x)
x = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1)
x = self.dropout(x)
x = self.fc(x)
if target.shape == torch.Size([]):
target = target.unsqueeze(0)
losses['loss_aux'] = self.loss_weight * self.loss_cls(x, target)
return losses | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/aux_head.py |
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .base import AvgConsensus, BaseHead
@HEADS.register_module()
class TSNHead(BaseHead):
"""Class head for TSN.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
dropout_ratio (float): Probability of dropout layer. Default: 0.4.
init_std (float): Std value for Initiation. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
dropout_ratio=0.4,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'AvgConsensus':
self.consensus = AvgConsensus(**consensus_)
else:
self.consensus = None
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels.
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
else:
self.avg_pool = None
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x, num_segs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Number of segments into which a video
is divided.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsn_head.py |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .base import BaseHead
@HEADS.register_module()
class SlowFastRPLHead(BaseHead):
"""The classification head for SlowFast.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
            Default: dict(type='RPLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.8.
init_std (float): Std value for Initiation. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='RPLoss'),
spatial_type='avg',
dropout_ratio=0.8,
init_std=0.01,
num_centers=1,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
self.num_centers = num_centers
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_centers = nn.Linear(self.in_channels, self.num_classes * self.num_centers, bias=False)
if self.spatial_type == 'avg':
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_centers, std=self.init_std)
def compute_dist(self, features, center=None, metric='fc'):
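        # 'l2' expands ||f - c||^2 = ||f||^2 - 2 f.c + ||c||^2 against the
        # reciprocal points (``self.fc_centers.weight`` or an explicit
        # ``center``) and normalizes by the feature dimension; 'fc' is a plain
        # dot product. The resulting [N, num_classes * num_centers] scores are
        # then averaged over the centers of each class.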
if metric == 'l2':
f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True)
if center is None:
c_2 = torch.sum(torch.pow(self.fc_centers.weight, 2), dim=1, keepdim=True)
dist = f_2 - 2 * self.fc_centers(features) + torch.transpose(c_2, 1, 0)
else:
c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True)
dist = f_2 - 2*torch.matmul(features, torch.transpose(center, 1, 0)) + torch.transpose(c_2, 1, 0)
dist = dist / float(features.shape[1])
else:
if center is None:
dist = self.fc_centers(features)
else:
dist = features.matmul(center.t())
dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers])
dist = torch.mean(dist, dim=2)
return dist
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# ([N, channel_fast, T, H, W], [(N, channel_slow, T, H, W)])
x_fast, x_slow = x
# ([N, channel_fast, 1, 1, 1], [N, channel_slow, 1, 1, 1])
x_fast = self.avg_pool(x_fast)
x_slow = self.avg_pool(x_slow)
# [N, channel_fast + channel_slow, 1, 1, 1]
x = torch.cat((x_slow, x_fast), dim=1)
if self.dropout is not None:
x = self.dropout(x)
# [N x C]
x = x.view(x.size(0), -1)
# [N x num_classes]
dist = self.compute_dist(x)
# [N, num_classes]
if self.loss_cls.__class__.__name__ == 'GCPLoss':
dist = -dist
outputs = {'dist': dist, 'feature': x, 'centers': self.fc_centers.weight}
return outputs
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_rpl_head.py |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .base import BaseHead
@HEADS.register_module()
class I3DRPLHead(BaseHead):
"""Classification head for I3D.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
            Default: dict(type='RPLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.5.
init_std (float): Std value for Initiation. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='RPLoss'),
spatial_type='avg',
dropout_ratio=0.5,
init_std=0.01,
num_centers=1,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
self.num_centers = num_centers
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_centers = nn.Linear(self.in_channels, self.num_classes * self.num_centers, bias=False)
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels.
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_centers, std=self.init_std)
def compute_dist(self, features, center=None, metric='fc'):
if metric == 'l2':
f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True)
if center is None:
c_2 = torch.sum(torch.pow(self.fc_centers.weight, 2), dim=1, keepdim=True)
dist = f_2 - 2 * self.fc_centers(features) + torch.transpose(c_2, 1, 0)
else:
c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True)
dist = f_2 - 2*torch.matmul(features, torch.transpose(center, 1, 0)) + torch.transpose(c_2, 1, 0)
dist = dist / float(features.shape[1])
else:
if center is None:
dist = self.fc_centers(features)
else:
dist = features.matmul(center.t())
dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers])
dist = torch.mean(dist, dim=2)
return dist
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N, in_channels, 4, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N, in_channels, 1, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
dist = self.compute_dist(x)
# [N, num_classes]
if self.loss_cls.__class__.__name__ == 'GCPLoss':
dist = -dist
outputs = {'dist': dist, 'feature': x, 'centers': self.fc_centers.weight}
return outputs
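# Hedged usage sketch (tensor sizes are illustrative, not from the repo): the
# dict returned by ``forward`` is consumed directly by the RPL-style losses
# registered in ``mmaction.models.losses``, e.g.
#
#     head = I3DRPLHead(num_classes=101, in_channels=2048)
#     head.init_weights()
#     outputs = head(torch.randn(2, 2048, 4, 7, 7))
#     losses = head.loss_cls(outputs, torch.tensor([3, 7]))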
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_rpl_head.py |
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, normal_init, xavier_init
from ..builder import build_loss
class RebiasHead(nn.Module):
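    """Rebias Head.

    The head runs two branches on the backbone feature: ``f`` on the original
    feature and ``g`` on a temporally shuffled, detached copy, and feeds both
    features and their predictions to ``loss_rebias`` for comparison.

    Args:
        in_channels (int): Channel number of input features.
        out_channels (int): Output dimension of the two linear classifiers
            (typically the number of classes).
        loss_weight (float): Weight applied to the returned rebias losses.
            Default: 0.5.
        loss_rebias (dict): Config for building the rebias loss.
            Default: ``dict(type='RebiasLoss')``.
    """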
def __init__(self,
in_channels,
out_channels,
loss_weight=0.5,
loss_rebias=dict(type='RebiasLoss')):
super().__init__()
self.conv_f = ConvModule(
in_channels,
in_channels * 2, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
self.conv_g = ConvModule(
in_channels,
in_channels * 2, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.loss_weight = loss_weight
self.dropout = nn.Dropout(p=0.5)
self.fc_f = nn.Linear(in_channels * 2, out_channels)
self.fc_g = nn.Linear(in_channels * 2, out_channels)
self.loss_rebias = build_loss(loss_rebias)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
normal_init(m, std=0.01)
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
def forward(self, x, target=None):
# x: (B, 1024, 8, 14, 14)
        xs = x.detach().clone()  # we do not want the backbone to be updated by g(xs)
xs = xs[:, :, torch.randperm(xs.size()[2])] # temporally shuffle the feature
losses = dict()
if target is None:
return losses
# f(x)
x = self.conv_f(x) # (B, 2048, 8, 7, 7)
x = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1)
# phi(f(x))
y = self.dropout(x)
y = self.fc_f(y)
# g(xs)
xs = self.conv_g(xs) # (B, 2048, 8, 7, 7)
xs = self.avg_pool(xs).squeeze(-1).squeeze(-1).squeeze(-1)
# phi(g(xs))
ys = self.dropout(xs)
ys = self.fc_g(ys)
if target.shape == torch.Size([]):
target = target.unsqueeze(0)
# compute the rebias losses
rebias_loss = self.loss_rebias(x, xs, y, ys, target)
if isinstance(rebias_loss, dict):
for k, v in rebias_loss.items():
losses.update({k: self.loss_weight * v})
else:
losses = {'loss_rebias': rebias_loss}
return losses | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/rebias_head.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class Gaussian(object):
def __init__(self, mu, rho):
super().__init__()
self.mu = mu
self.rho = rho
self.normal = torch.distributions.Normal(0,1)
@property
def sigma(self):
return torch.log1p(torch.exp(self.rho))
def sample(self):
epsilon = self.normal.sample(self.rho.size()).to(self.mu.device)
return self.mu + self.sigma * epsilon
def log_prob(self, input):
return (-math.log(math.sqrt(2 * math.pi))
- torch.log(self.sigma)
- ((input - self.mu) ** 2) / (2 * self.sigma ** 2)).sum()
class ScaleMixtureGaussian(object):
def __init__(self, pi, sigma1, sigma2):
super().__init__()
self.pi = pi
self.sigma1 = sigma1
self.sigma2 = sigma2
def log_prob(self, input):
gaussian1 = torch.distributions.Normal(0, self.sigma1.to(input.device))
gaussian2 = torch.distributions.Normal(0, self.sigma2.to(input.device))
prob1 = torch.exp(gaussian1.log_prob(input))
prob2 = torch.exp(gaussian2.log_prob(input))
return (torch.log(self.pi * prob1 + (1-self.pi) * prob2)).sum()
class BayesianLinear(nn.Module):
def __init__(self, in_features, out_features, pi=0.5, sigma_1=None, sigma_2=None):
super().__init__()
self.in_features = in_features
self.out_features = out_features
if sigma_1 is None or sigma_2 is None:
sigma_1 = torch.FloatTensor([math.exp(-0)])
sigma_2 = torch.FloatTensor([math.exp(-6)])
# Weight parameters
self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-0.2, 0.2))
self.weight_rho = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-5,-4))
self.weight = Gaussian(self.weight_mu, self.weight_rho)
# Bias parameters
self.bias_mu = nn.Parameter(torch.Tensor(out_features).uniform_(-0.2, 0.2))
self.bias_rho = nn.Parameter(torch.Tensor(out_features).uniform_(-5,-4))
self.bias = Gaussian(self.bias_mu, self.bias_rho)
# Prior distributions
self.weight_prior = ScaleMixtureGaussian(pi, sigma_1, sigma_2)
self.bias_prior = ScaleMixtureGaussian(pi, sigma_1, sigma_2)
self.log_prior = 0
self.log_variational_posterior = 0
def forward(self, input, sample=False, calculate_log_probs=False):
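        # In training mode (or when ``sample=True``) the weights and biases
        # are drawn from the variational posterior via the reparameterization
        # in ``Gaussian.sample``; otherwise the posterior means are used. The
        # log prior / log variational posterior terms are accumulated for the
        # KL part of a Bayes-by-Backprop style objective.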
if self.training or sample:
weight = self.weight.sample()
bias = self.bias.sample()
else:
weight = self.weight.mu
bias = self.bias.mu
if self.training or calculate_log_probs:
self.log_prior = self.weight_prior.log_prob(weight) + self.bias_prior.log_prob(bias)
self.log_variational_posterior = self.weight.log_prob(weight) + self.bias.log_prob(bias)
else:
self.log_prior, self.log_variational_posterior = 0, 0
return F.linear(input, weight, bias)
class BayesianPredictor(nn.Module):
def __init__(self, input_dim, output_dim, act=torch.relu, pi=0.5, sigma_1=None, sigma_2=None):
super(BayesianPredictor, self).__init__()
self.output_dim = output_dim
self.act = act
self.bnn_layer = BayesianLinear(input_dim, output_dim, pi=pi, sigma_1=sigma_1, sigma_2=sigma_2)
def log_prior(self):
return self.bnn_layer.log_prior
def log_variational_posterior(self):
return self.bnn_layer.log_variational_posterior
def forward(self, input, npass=2, testing=False):
npass_size = npass + 1 if testing else npass
outputs = torch.zeros(npass_size, input.size(0), self.output_dim).to(input.device)
log_priors = torch.zeros(npass_size).to(input.device)
log_variational_posteriors = torch.zeros(npass_size).to(input.device)
for i in range(npass):
outputs[i] = self.bnn_layer(input, sample=True)
log_priors[i] = self.log_prior()
log_variational_posteriors[i] = self.log_variational_posterior()
if testing:
outputs[npass] = self.bnn_layer(input, sample=False)
return outputs, log_priors, log_variational_posteriors
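# Hedged usage sketch (dimensions are illustrative): several stochastic
# forward passes are drawn and ``get_uncertainty`` below turns them into
# per-sample aleatoric and epistemic uncertainty estimates.
#
#     predictor = BayesianPredictor(2048, 10)
#     feat = torch.randn(4, 2048)
#     outputs, log_priors, log_posteriors = predictor(feat, npass=5,
#                                                     testing=True)
#     alea, epis = get_uncertainty(outputs)   # both of shape (4,)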
def get_uncertainty(outputs):
# predict the aleatoric and epistemic uncertainties
p = F.softmax(outputs, dim=-1) # N x B x C
# compute aleatoric uncertainty
p_diag = torch.diag_embed(p, offset=0, dim1=-2, dim2=-1) # N x B x C x C
p_cov = torch.matmul(p.unsqueeze(-1), p.unsqueeze(-1).permute(0, 1, 3, 2)) # N x B x C x C
uncertain_alea = torch.mean(p_diag - p_cov, dim=0) # B x C x C
# compute epistemic uncertainty
p_bar= torch.mean(p, dim=0) # B x C
p_diff_var = torch.matmul((p-p_bar).unsqueeze(-1), (p-p_bar).unsqueeze(-1).permute(0, 1, 3, 2)) # N x B x C x C
uncertain_epis = torch.mean(p_diff_var, dim=0) # B x C x C
    # reduce the class-wise covariance matrices to scalar uncertainties by taking the matrix trace
uncertain_alea = torch.diagonal(uncertain_alea, dim1=-2, dim2=-1).sum(-1)
uncertain_epis = torch.diagonal(uncertain_epis, dim1=-2, dim2=-1).sum(-1)
return uncertain_alea, uncertain_epis | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/bnn.py |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
def parse_stage_config(stage_cfg):
"""Parse config of STPP for three stages.
Args:
stage_cfg (int | tuple[int]):
Config of structured temporal pyramid pooling.
Returns:
tuple[tuple[int], int]:
Config of structured temporal pyramid pooling and
total number of parts(number of multipliers).
"""
if isinstance(stage_cfg, int):
return (stage_cfg, ), stage_cfg
if isinstance(stage_cfg, tuple):
return stage_cfg, sum(stage_cfg)
raise ValueError(f'Incorrect STPP config {stage_cfg}')
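# For example, parse_stage_config(1) returns ((1,), 1) and
# parse_stage_config((1, 2)) returns ((1, 2), 3): the tuple lists the part
# counts of each pyramid level and the integer is the total number of parts.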
class STPPTrain(nn.Module):
"""Structured temporal pyramid pooling for SSN at training.
Args:
stpp_stage (tuple): Config of structured temporal pyramid pooling.
Default: (1, (1, 2), 1).
num_segments_list (tuple): Number of segments to be sampled
in three stages. Default: (2, 5, 2).
"""
def __init__(self, stpp_stage=(1, (1, 2), 1), num_segments_list=(2, 5, 2)):
super().__init__()
starting_part, starting_multiplier = parse_stage_config(stpp_stage[0])
course_part, course_multiplier = parse_stage_config(stpp_stage[1])
ending_part, ending_multiplier = parse_stage_config(stpp_stage[2])
self.num_multipliers = (
starting_multiplier + course_multiplier + ending_multiplier)
self.stpp_stages = (starting_part, course_part, ending_part)
self.multiplier_list = (starting_multiplier, course_multiplier,
ending_multiplier)
self.num_segments_list = num_segments_list
@staticmethod
def _extract_stage_feature(stage_feat, stage_parts, num_multipliers,
scale_factors, num_samples):
"""Extract stage feature based on structured temporal pyramid pooling.
Args:
stage_feat (torch.Tensor): Stage features to be STPP.
stage_parts (tuple): Config of STPP.
num_multipliers (int): Total number of parts in the stage.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
num_samples (int): Number of samples.
Returns:
torch.Tensor: Features of the stage.
"""
stage_stpp_feat = []
stage_len = stage_feat.size(1)
for stage_part in stage_parts:
ticks = torch.arange(0, stage_len + 1e-5,
stage_len / stage_part).int()
for i in range(stage_part):
part_feat = stage_feat[:, ticks[i]:ticks[i + 1], :].mean(
dim=1) / num_multipliers
if scale_factors is not None:
part_feat = (
part_feat * scale_factors.view(num_samples, 1))
stage_stpp_feat.append(part_feat)
return stage_stpp_feat
def forward(self, x, scale_factors):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
Returns:
tuple[torch.Tensor, torch.Tensor]:
Features for predicting activity scores and
completeness scores.
"""
x0 = self.num_segments_list[0]
x1 = x0 + self.num_segments_list[1]
num_segments = x1 + self.num_segments_list[2]
feat_dim = x.size(1)
x = x.view(-1, num_segments, feat_dim)
num_samples = x.size(0)
scale_factors = scale_factors.view(-1, 2)
stage_stpp_feats = []
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, :x0, :], self.stpp_stages[0],
self.multiplier_list[0],
scale_factors[:, 0], num_samples))
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, x0:x1, :], self.stpp_stages[1],
self.multiplier_list[1], None,
num_samples))
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, x1:, :], self.stpp_stages[2],
self.multiplier_list[2],
scale_factors[:, 1], num_samples))
stpp_feat = torch.cat(stage_stpp_feats, dim=1)
course_feat = x[:, x0:x1, :].mean(dim=1)
return course_feat, stpp_feat
class STPPTest(nn.Module):
"""Structured temporal pyramid pooling for SSN at testing.
Args:
num_classes (int): Number of classes to be classified.
use_regression (bool): Whether to perform regression or not.
Default: True.
stpp_stage (tuple): Config of structured temporal pyramid pooling.
Default: (1, (1, 2), 1).
"""
def __init__(self,
num_classes,
use_regression=True,
stpp_stage=(1, (1, 2), 1)):
super().__init__()
self.activity_score_len = num_classes + 1
self.complete_score_len = num_classes
self.reg_score_len = num_classes * 2
self.use_regression = use_regression
starting_parts, starting_multiplier = parse_stage_config(stpp_stage[0])
course_parts, course_multiplier = parse_stage_config(stpp_stage[1])
ending_parts, ending_multiplier = parse_stage_config(stpp_stage[2])
self.num_multipliers = (
starting_multiplier + course_multiplier + ending_multiplier)
if self.use_regression:
self.feat_dim = (
self.activity_score_len + self.num_multipliers *
(self.complete_score_len + self.reg_score_len))
else:
self.feat_dim = (
self.activity_score_len +
self.num_multipliers * self.complete_score_len)
self.stpp_stage = (starting_parts, course_parts, ending_parts)
self.activity_slice = slice(0, self.activity_score_len)
self.complete_slice = slice(
self.activity_slice.stop, self.activity_slice.stop +
self.complete_score_len * self.num_multipliers)
self.reg_slice = slice(
self.complete_slice.stop, self.complete_slice.stop +
self.reg_score_len * self.num_multipliers)
@staticmethod
def _pyramids_pooling(out_scores, index, raw_scores, ticks, scale_factors,
score_len, stpp_stage):
"""Perform pyramids pooling.
Args:
out_scores (torch.Tensor): Scores to be returned.
index (int): Index of output scores.
raw_scores (torch.Tensor): Raw scores before STPP.
ticks (list): Ticks of raw scores.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
score_len (int): Length of the score.
stpp_stage (tuple): Config of STPP.
"""
offset = 0
for stage_idx, stage_cfg in enumerate(stpp_stage):
if stage_idx == 0:
scale_factor = scale_factors[0]
elif stage_idx == len(stpp_stage) - 1:
scale_factor = scale_factors[1]
else:
scale_factor = 1.0
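            # Only the augmented starting and ending stages (the first and
            # last entries of ``stpp_stage``) are rescaled by the ratio of
            # effective to augmented length chosen above; the course stage
            # keeps a factor of 1.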
sum_parts = sum(stage_cfg)
tick_left = ticks[stage_idx]
tick_right = float(max(ticks[stage_idx] + 1, ticks[stage_idx + 1]))
if tick_right <= 0 or tick_left >= raw_scores.size(0):
offset += sum_parts
continue
for num_parts in stage_cfg:
part_ticks = torch.arange(tick_left, tick_right + 1e-5,
(tick_right - tick_left) /
num_parts).int()
for i in range(num_parts):
part_tick_left = part_ticks[i]
part_tick_right = part_ticks[i + 1]
if part_tick_right - part_tick_left >= 1:
raw_score = raw_scores[part_tick_left:part_tick_right,
offset *
score_len:(offset + 1) *
score_len]
raw_scale_score = raw_score.mean(dim=0) * scale_factor
out_scores[index, :] += raw_scale_score.detach().cpu()
offset += 1
return out_scores
def forward(self, x, proposal_ticks, scale_factors):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
proposal_ticks (list): Ticks of proposals to be STPP.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
Returns:
tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
out_activity_scores (torch.Tensor): Activity scores
out_complete_scores (torch.Tensor): Completeness scores.
out_reg_scores (torch.Tensor): Regression scores.
"""
assert x.size(1) == self.feat_dim
num_ticks = proposal_ticks.size(0)
out_activity_scores = torch.zeros((num_ticks, self.activity_score_len),
dtype=x.dtype)
raw_activity_scores = x[:, self.activity_slice]
out_complete_scores = torch.zeros((num_ticks, self.complete_score_len),
dtype=x.dtype)
raw_complete_scores = x[:, self.complete_slice]
if self.use_regression:
out_reg_scores = torch.zeros((num_ticks, self.reg_score_len),
dtype=x.dtype)
raw_reg_scores = x[:, self.reg_slice]
else:
out_reg_scores = None
raw_reg_scores = None
for i in range(num_ticks):
ticks = proposal_ticks[i]
out_activity_scores[i, :] = raw_activity_scores[
ticks[1]:max(ticks[1] + 1, ticks[2]), :].mean(dim=0)
out_complete_scores = self._pyramids_pooling(
out_complete_scores, i, raw_complete_scores, ticks,
scale_factors[i], self.complete_score_len, self.stpp_stage)
if self.use_regression:
out_reg_scores = self._pyramids_pooling(
out_reg_scores, i, raw_reg_scores, ticks, scale_factors[i],
self.reg_score_len, self.stpp_stage)
return out_activity_scores, out_complete_scores, out_reg_scores
@HEADS.register_module()
class SSNHead(nn.Module):
"""The classification head for SSN.
Args:
dropout_ratio (float): Probability of dropout layer. Default: 0.8.
in_channels (int): Number of channels for input data. Default: 1024.
num_classes (int): Number of classes to be classified. Default: 20.
consensus (dict): Config of segmental consensus.
use_regression (bool): Whether to perform regression or not.
Default: True.
init_std (float): Std value for Initiation. Default: 0.001.
"""
def __init__(self,
dropout_ratio=0.8,
in_channels=1024,
num_classes=20,
consensus=dict(
type='STPPTrain',
standalong_classifier=True,
stpp_cfg=(1, 1, 1),
num_seg=(2, 5, 2)),
use_regression=True,
init_std=0.001):
super().__init__()
self.dropout_ratio = dropout_ratio
self.num_classes = num_classes
self.use_regression = use_regression
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
# Based on this copy, the model will utilize different
# structured temporal pyramid pooling at training and testing.
# Warning: this copy cannot be removed.
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'STPPTrain':
self.consensus = STPPTrain(**consensus_)
elif consensus_type == 'STPPTest':
consensus_['num_classes'] = self.num_classes
self.consensus = STPPTest(**consensus_)
self.in_channels_activity = in_channels
self.in_channels_complete = (
self.consensus.num_multipliers * in_channels)
self.activity_fc = nn.Linear(in_channels, num_classes + 1)
self.completeness_fc = nn.Linear(self.in_channels_complete,
num_classes)
if self.use_regression:
self.regressor_fc = nn.Linear(self.in_channels_complete,
num_classes * 2)
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.activity_fc, std=self.init_std)
normal_init(self.completeness_fc, std=self.init_std)
if self.use_regression:
normal_init(self.regressor_fc, std=self.init_std)
def prepare_test_fc(self, stpp_feat_multiplier):
"""Reorganize the shape of fully connected layer at testing, in order
to improve testing efficiency.
Args:
stpp_feat_multiplier (int): Total number of parts.
Returns:
bool: Whether the shape transformation is ready for testing.
"""
in_features = self.activity_fc.in_features
out_features = (
self.activity_fc.out_features +
self.completeness_fc.out_features * stpp_feat_multiplier)
if self.use_regression:
out_features += (
self.regressor_fc.out_features * stpp_feat_multiplier)
self.test_fc = nn.Linear(in_features, out_features)
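        # The activity, completeness and (optional) regression weights are
        # stacked into this single fc so that one matmul yields all raw
        # scores at test time. Completeness/regression weights are replicated
        # per STPP part and their biases divided by `stpp_feat_multiplier`,
        # presumably so the bias is not counted multiple times when the part
        # scores are later combined.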
# Fetch weight and bias of the reorganized fc.
complete_weight = self.completeness_fc.weight.data.view(
self.completeness_fc.out_features, stpp_feat_multiplier,
in_features).transpose(0, 1).contiguous().view(-1, in_features)
complete_bias = self.completeness_fc.bias.data.view(1, -1).expand(
stpp_feat_multiplier, self.completeness_fc.out_features
).contiguous().view(-1) / stpp_feat_multiplier
weight = torch.cat((self.activity_fc.weight.data, complete_weight))
bias = torch.cat((self.activity_fc.bias.data, complete_bias))
if self.use_regression:
reg_weight = self.regressor_fc.weight.data.view(
self.regressor_fc.out_features, stpp_feat_multiplier,
in_features).transpose(0,
1).contiguous().view(-1, in_features)
reg_bias = self.regressor_fc.bias.data.view(1, -1).expand(
stpp_feat_multiplier, self.regressor_fc.out_features
).contiguous().view(-1) / stpp_feat_multiplier
weight = torch.cat((weight, reg_weight))
bias = torch.cat((bias, reg_bias))
self.test_fc.weight.data = weight
self.test_fc.bias.data = bias
return True
def forward(self, x, test_mode=False):
"""Defines the computation performed at every call."""
if not test_mode:
x, proposal_scale_factor = x
activity_feat, completeness_feat = self.consensus(
x, proposal_scale_factor)
if self.dropout is not None:
activity_feat = self.dropout(activity_feat)
completeness_feat = self.dropout(completeness_feat)
activity_scores = self.activity_fc(activity_feat)
complete_scores = self.completeness_fc(completeness_feat)
if self.use_regression:
bbox_preds = self.regressor_fc(completeness_feat)
bbox_preds = bbox_preds.view(-1,
self.completeness_fc.out_features,
2)
else:
bbox_preds = None
return activity_scores, complete_scores, bbox_preds
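        # Test mode: a single pass through the merged `test_fc` (built by
        # `prepare_test_fc`) produces the combined raw scores, which the
        # test-time consensus splits into per-proposal activity,
        # completeness and regression scores.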
x, proposal_tick_list, scale_factor_list = x
test_scores = self.test_fc(x)
(activity_scores, completeness_scores,
bbox_preds) = self.consensus(test_scores, proposal_tick_list,
scale_factor_list)
return (test_scores, activity_scores, completeness_scores, bbox_preds)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/ssn_head.py |
from .audio_tsn_head import AudioTSNHead
from .base import BaseHead
from .i3d_head import I3DHead
from .i3d_bnn_head import I3DBNNHead
from .i3d_rpl_head import I3DRPLHead
from .slowfast_head import SlowFastHead
from .slowfast_rpl_head import SlowFastRPLHead
from .slowfast_bnn_head import SlowFastBNNHead
from .ssn_head import SSNHead
from .tpn_head import TPNHead
from .tpn_rpl_head import TPNRPLHead
from .tpn_bnn_head import TPNBNNHead
from .tsm_head import TSMHead
from .tsm_bnn_head import TSMBNNHead
from .tsm_rpl_head import TSMRPLHead
from .tsn_head import TSNHead
from .x3d_head import X3DHead
from .aux_head import AuxHead
from .rebias_head import RebiasHead
from .debias_head import DebiasHead
from .base_cls_head import BaseClsHead
__all__ = [
'TSNHead', 'I3DHead', 'I3DBNNHead', 'I3DRPLHead', 'BaseHead', 'TSMHead', 'TSMBNNHead', 'TSMRPLHead', 'SlowFastHead', 'SlowFastBNNHead', 'SlowFastRPLHead', 'SSNHead',
'TPNHead', 'TPNBNNHead', 'TPNRPLHead', 'AudioTSNHead', 'X3DHead', 'AuxHead', 'RebiasHead', 'DebiasHead', 'BaseClsHead'
]
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/__init__.py |
import torch.nn as nn
from ..registry import HEADS
from .tsn_head import TSNHead
from ..builder import build_loss
from .bnn import BayesianPredictor, get_uncertainty
@HEADS.register_module()
class TPNBNNHead(TSNHead):
"""Class head for TPN.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
dropout_ratio (float): Probability of dropout layer. Default: 0.4.
        init_std (float): Std value for initialization. Default: 0.01.
multi_class (bool): Determines whether it is a multi-class
recognition task. Default: False.
label_smooth_eps (float): Epsilon used in label smooth.
Reference: https://arxiv.org/abs/1906.02629. Default: 0.
"""
def __init__(self, compute_uncertainty=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.compute_uncertainty = compute_uncertainty
# use bnn classification head
self.bnn_cls = BayesianPredictor(self.in_channels, self.num_classes)
self.bnn_loss = self.loss_cls
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels.
self.avg_pool3d = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool3d = None
self.avg_pool2d = None
def forward(self, x, num_segs=None, npass=2, testing=False):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int | None): Number of segments into which a video
is divided. Default: None.
Returns:
torch.Tensor: The classification scores for input samples.
"""
if self.avg_pool2d is None:
kernel_size = (1, x.shape[-2], x.shape[-1])
self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0)
if num_segs is None:
# [N, in_channels, 3, 7, 7]
x = self.avg_pool3d(x)
else:
# [N * num_segs, in_channels, 7, 7]
x = self.avg_pool2d(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
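        # Run `npass` stochastic forward passes through the Bayesian
        # classifier; the mean over passes is the prediction, while the
        # averaged log prior / variational posterior terms are consumed by
        # the Bayesian loss.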
outputs, log_priors, log_variational_posteriors = self.bnn_cls(x, npass=npass, testing=testing)
# gather output dictionary
output = outputs.mean(0)
log_prior = log_priors.mean()
log_variational_posterior = log_variational_posteriors.mean()
output_dict = {'pred_mean': output,
'log_prior': log_prior,
'log_posterior': log_variational_posterior}
if self.compute_uncertainty:
uncertain_alea, uncertain_epis = get_uncertainty(outputs)
output_dict.update({'aleatoric': uncertain_alea,
'epistemic': uncertain_epis})
return output_dict
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_bnn_head.py |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .base import AvgConsensus, BaseHead
@HEADS.register_module()
class TSMRPLHead(BaseHead):
"""Class head for TSM.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
num_segments (int): Number of frame segments. Default: 8.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
        dropout_ratio (float): Probability of dropout layer. Default: 0.8.
        init_std (float): Std value for initialization. Default: 0.001.
is_shift (bool): Indicating whether the feature is shifted.
Default: True.
temporal_pool (bool): Indicating whether feature is temporal pooled.
Default: False.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
num_segments=8,
loss_cls=dict(type='RPLoss'),
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
dropout_ratio=0.8,
init_std=0.001,
is_shift=True,
temporal_pool=False,
num_centers=1,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.num_segments = num_segments
self.init_std = init_std
self.is_shift = is_shift
self.temporal_pool = temporal_pool
self.num_centers = num_centers
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'AvgConsensus':
self.consensus = AvgConsensus(**consensus_)
else:
self.consensus = None
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_centers = nn.Linear(self.in_channels, self.num_classes * self.num_centers, bias=False)
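        # The bias-free fc weight rows act as learned class centers
        # (`num_centers` per class); `compute_dist` scores samples by their
        # distance or similarity to these centers, as used by the
        # reciprocal-point-style losses (RPLoss / GCPLoss).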
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels.
self.avg_pool = nn.AdaptiveAvgPool2d(1)
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_centers, std=self.init_std)
def compute_dist(self, features, center=None, metric='fc'):
if metric == 'l2':
f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True)
if center is None:
c_2 = torch.sum(torch.pow(self.fc_centers.weight, 2), dim=1, keepdim=True)
dist = f_2 - 2 * self.fc_centers(features) + torch.transpose(c_2, 1, 0)
else:
c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True)
dist = f_2 - 2*torch.matmul(features, torch.transpose(center, 1, 0)) + torch.transpose(c_2, 1, 0)
dist = dist / float(features.shape[1])
else:
if center is None:
dist = self.fc_centers(features)
else:
dist = features.matmul(center.t())
dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers])
dist = torch.mean(dist, dim=2)
return dist
def aggregate(self, cls_score):
if self.is_shift and self.temporal_pool:
# [2 * N, num_segs // 2, num_classes]
cls_score = cls_score.view((-1, self.num_segments // 2) +
cls_score.size()[1:])
else:
# [N, num_segs, num_classes]
cls_score = cls_score.view((-1, self.num_segments) +
cls_score.size()[1:])
# [N, 1, num_classes]
cls_score = self.consensus(cls_score)
# [N, num_classes]
return cls_score.squeeze(1)
def forward(self, x, num_segs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Useless in TSMHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TSM models. The `self.num_segments` we need is a
hyper parameter to build TSM models.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = torch.flatten(x, 1)
# [N * num_segs, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N * num_segs, num_classes]
dist = self.compute_dist(x)
# [N, num_classes]
dist = self.aggregate(dist)
feature = self.aggregate(x)
if self.loss_cls.__class__.__name__ == 'GCPLoss':
dist = -dist
outputs = {'dist': dist, 'feature': feature, 'centers': self.fc_centers.weight}
return outputs
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_rpl_head.py |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .base import AvgConsensus, BaseHead
@HEADS.register_module()
class TSMHead(BaseHead):
"""Class head for TSM.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
num_segments (int): Number of frame segments. Default: 8.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
        dropout_ratio (float): Probability of dropout layer. Default: 0.8.
        init_std (float): Std value for initialization. Default: 0.001.
is_shift (bool): Indicating whether the feature is shifted.
Default: True.
temporal_pool (bool): Indicating whether feature is temporal pooled.
Default: False.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
num_segments=8,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
dropout_ratio=0.8,
init_std=0.001,
is_shift=True,
temporal_pool=False,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.num_segments = num_segments
self.init_std = init_std
self.is_shift = is_shift
self.temporal_pool = temporal_pool
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'AvgConsensus':
self.consensus = AvgConsensus(**consensus_)
else:
self.consensus = None
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels.
self.avg_pool = nn.AdaptiveAvgPool2d(1)
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x, num_segs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Useless in TSMHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TSM models. The `self.num_segments` we need is a
hyper parameter to build TSM models.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = torch.flatten(x, 1)
# [N * num_segs, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N * num_segs, num_classes]
cls_score = self.fc_cls(x)
if self.is_shift and self.temporal_pool:
# [2 * N, num_segs // 2, num_classes]
cls_score = cls_score.view((-1, self.num_segments // 2) +
cls_score.size()[1:])
else:
# [N, num_segs, num_classes]
cls_score = cls_score.view((-1, self.num_segments) +
cls_score.size()[1:])
# [N, 1, num_classes]
cls_score = self.consensus(cls_score)
# [N, num_classes]
return cls_score.squeeze(1)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_head.py |
import torch
import torch.nn as nn
from ..registry import HEADS
from .base import BaseHead
from ..builder import build_loss
from .bnn import BayesianPredictor, get_uncertainty
@HEADS.register_module()
class I3DBNNHead(BaseHead):
"""Classification head for I3D.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.
        init_std (float): Std value for initialization. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='BayesianNNLoss'),
spatial_type='avg',
dropout_ratio=0,
init_std=0.01,
compute_uncertainty=False,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.bnn_loss = self.loss_cls
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
self.compute_uncertainty = compute_uncertainty
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
# self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
self.bnn_cls = BayesianPredictor(self.in_channels, self.num_classes)
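        # BayesianPredictor replaces the deterministic fc: its weights are
        # treated as distributions and sampled at every forward pass, and it
        # additionally returns the log prior / log variational posterior
        # terms required by BayesianNNLoss.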
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels.
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
# normal_init(self.bnn_cls, std=self.init_std)
        pass  # BNN does not need to be explicitly initialized
def forward(self, x, npass=2, testing=False):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N, in_channels, 4, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N, in_channels, 1, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
outputs, log_priors, log_variational_posteriors = self.bnn_cls(x, npass=npass, testing=testing)
# gather output dictionary
output = outputs.mean(0)
log_prior = log_priors.mean()
log_variational_posterior = log_variational_posteriors.mean()
output_dict = {'pred_mean': output,
'log_prior': log_prior,
'log_posterior': log_variational_posterior}
if self.compute_uncertainty:
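            # Aleatoric / epistemic uncertainty are estimated from the
            # spread of the `npass` sampled predictions.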
uncertain_alea, uncertain_epis = get_uncertainty(outputs)
output_dict.update({'aleatoric': uncertain_alea,
'epistemic': uncertain_epis})
return output_dict
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_bnn_head.py |
import torch.nn as nn
from ..registry import HEADS
from .tsn_head import TSNHead
@HEADS.register_module()
class TPNHead(TSNHead):
"""Class head for TPN.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
dropout_ratio (float): Probability of dropout layer. Default: 0.4.
        init_std (float): Std value for initialization. Default: 0.01.
multi_class (bool): Determines whether it is a multi-class
recognition task. Default: False.
label_smooth_eps (float): Epsilon used in label smooth.
Reference: https://arxiv.org/abs/1906.02629. Default: 0.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels.
self.avg_pool3d = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool3d = None
self.avg_pool2d = None
def forward(self, x, num_segs=None):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int | None): Number of segments into which a video
is divided. Default: None.
Returns:
torch.Tensor: The classification scores for input samples.
"""
if self.avg_pool2d is None:
kernel_size = (1, x.shape[-2], x.shape[-1])
self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0)
if num_segs is None:
# [N, in_channels, 3, 7, 7]
x = self.avg_pool3d(x)
else:
# [N * num_segs, in_channels, 7, 7]
x = self.avg_pool2d(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_head.py |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import BaseHead
@HEADS.register_module()
class BaseClsHead(BaseHead):
"""The classification head for SlowFast.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
        dropout_ratio (float): Probability of dropout layer. Default: 0.5.
        init_std (float): Std value for initialization. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
dropout_ratio=0.5,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.init_std = init_std
self.dropout_ratio = dropout_ratio
self.fc_cls = nn.Linear(in_channels, num_classes)
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
if self.dropout is not None:
x = self.dropout(x)
# [N x num_classes]
cls_score = self.fc_cls(x)
return cls_score
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/base_cls_head.py |
import torch
import torch.nn as nn
class Dist(nn.Module):
def __init__(self, num_classes=10, num_centers=1, feat_dim=2, init='random'):
super(Dist, self).__init__()
self.feat_dim = feat_dim
self.num_classes = num_classes
self.num_centers = num_centers
if init == 'random':
self.centers = nn.Parameter(0.1 * torch.randn(num_classes * num_centers, self.feat_dim))
else:
self.centers = nn.Parameter(torch.Tensor(num_classes * num_centers, self.feat_dim))
self.centers.data.fill_(0)
def forward(self, features, center=None, metric='l2'):
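        # The 'l2' metric expands the squared Euclidean distance as
        # ||f||^2 - 2 f.c + ||c||^2 (normalized by the feature dimension);
        # any other metric falls back to a dot-product similarity.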
if metric == 'l2':
f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True)
if center is None:
c_2 = torch.sum(torch.pow(self.centers, 2), dim=1, keepdim=True)
dist = f_2 - 2*torch.matmul(features, torch.transpose(self.centers, 1, 0)) + torch.transpose(c_2, 1, 0)
else:
c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True)
dist = f_2 - 2*torch.matmul(features, torch.transpose(center, 1, 0)) + torch.transpose(c_2, 1, 0)
dist = dist / float(features.shape[1])
else:
if center is None:
center = self.centers
else:
center = center
dist = features.matmul(center.t())
dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers])
dist = torch.mean(dist, dim=2)
return dist
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/rpl_dist.py |
import torch
import torch.nn as nn
from ..registry import HEADS
from .base import BaseHead
from ..builder import build_loss
from .bnn import BayesianPredictor, get_uncertainty
@HEADS.register_module()
class SlowFastBNNHead(BaseHead):
"""The classification head for SlowFast.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.8.
        init_std (float): Std value for initialization. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='BayesianNNLoss'),
spatial_type='avg',
dropout_ratio=0.8,
init_std=0.01,
compute_uncertainty=False,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.bnn_loss = self.loss_cls
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
self.compute_uncertainty = compute_uncertainty
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
# self.fc_cls = nn.Linear(in_channels, num_classes)
self.bnn_cls = BayesianPredictor(self.in_channels, self.num_classes)
if self.spatial_type == 'avg':
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
        pass  # BNN does not need to be explicitly initialized
def forward(self, x, npass=2, testing=False):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# ([N, channel_fast, T, H, W], [(N, channel_slow, T, H, W)])
x_fast, x_slow = x
# ([N, channel_fast, 1, 1, 1], [N, channel_slow, 1, 1, 1])
x_fast = self.avg_pool(x_fast)
x_slow = self.avg_pool(x_slow)
# [N, channel_fast + channel_slow, 1, 1, 1]
x = torch.cat((x_slow, x_fast), dim=1)
if self.dropout is not None:
x = self.dropout(x)
# [N x C]
x = x.view(x.size(0), -1)
# [N x num_classes]
outputs, log_priors, log_variational_posteriors = self.bnn_cls(x, npass=npass, testing=testing)
# gather output dictionary
output = outputs.mean(0)
log_prior = log_priors.mean()
log_variational_posterior = log_variational_posteriors.mean()
output_dict = {'pred_mean': output,
'log_prior': log_prior,
'log_posterior': log_variational_posterior}
if self.compute_uncertainty:
uncertain_alea, uncertain_epis = get_uncertainty(outputs)
output_dict.update({'aleatoric': uncertain_alea,
'epistemic': uncertain_epis})
return output_dict
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_bnn_head.py |
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .base import BaseHead
@HEADS.register_module()
class I3DHead(BaseHead):
"""Classification head for I3D.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.5.
        init_std (float): Std value for initialization. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.5,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels.
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N, in_channels, 4, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N, in_channels, 1, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_head.py |
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .base import BaseHead
@HEADS.register_module()
class AudioTSNHead(BaseHead):
"""Classification head for TSN on audio.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.4.
        init_std (float): Std value for initialization. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.4,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels.
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
else:
self.avg_pool = None
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, h, w]
x = self.avg_pool(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/audio_tsn_head.py |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from .tsn_head import TSNHead
@HEADS.register_module()
class TPNRPLHead(TSNHead):
"""Class head for TPN.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
dropout_ratio (float): Probability of dropout layer. Default: 0.4.
        init_std (float): Std value for initialization. Default: 0.01.
multi_class (bool): Determines whether it is a multi-class
recognition task. Default: False.
label_smooth_eps (float): Epsilon used in label smooth.
Reference: https://arxiv.org/abs/1906.02629. Default: 0.
"""
def __init__(self, num_centers=1, *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_centers = num_centers
self.fc_centers = nn.Linear(self.in_channels, self.num_classes * self.num_centers, bias=False)
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels.
self.avg_pool3d = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool3d = None
self.avg_pool2d = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_centers, std=self.init_std)
def compute_dist(self, features, center=None, metric='fc'):
if metric == 'l2':
f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True)
if center is None:
c_2 = torch.sum(torch.pow(self.fc_centers.weight, 2), dim=1, keepdim=True)
dist = f_2 - 2 * self.fc_centers(features) + torch.transpose(c_2, 1, 0)
else:
c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True)
dist = f_2 - 2*torch.matmul(features, torch.transpose(center, 1, 0)) + torch.transpose(c_2, 1, 0)
dist = dist / float(features.shape[1])
else:
if center is None:
dist = self.fc_centers(features)
else:
dist = features.matmul(center.t())
dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers])
dist = torch.mean(dist, dim=2)
return dist
def forward(self, x, num_segs=None):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int | None): Number of segments into which a video
is divided. Default: None.
Returns:
torch.Tensor: The classification scores for input samples.
"""
if self.avg_pool2d is None:
kernel_size = (1, x.shape[-2], x.shape[-1])
self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0)
if num_segs is None:
# [N, in_channels, 3, 7, 7]
x = self.avg_pool3d(x)
else:
# [N * num_segs, in_channels, 7, 7]
x = self.avg_pool2d(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
dist = self.compute_dist(x)
# [N, num_classes]
if self.loss_cls.__class__.__name__ == 'GCPLoss':
dist = -dist
outputs = {'dist': dist, 'feature': x, 'centers': self.fc_centers.weight}
return outputs
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_rpl_head.py |
import torch
import torch.nn as nn
from ..registry import HEADS
from .base import AvgConsensus, BaseHead
from ..builder import build_loss
from .bnn import BayesianPredictor, get_uncertainty
@HEADS.register_module()
class TSMBNNHead(BaseHead):
"""Class head for TSM.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
num_segments (int): Number of frame segments. Default: 8.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
        dropout_ratio (float): Probability of dropout layer. Default: 0.
        init_std (float): Std value for initialization. Default: 0.001.
is_shift (bool): Indicating whether the feature is shifted.
Default: True.
temporal_pool (bool): Indicating whether feature is temporal pooled.
Default: False.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
num_segments=8,
loss_cls=dict(type='BayesianNNLoss'),
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
dropout_ratio=0,
init_std=0.001,
compute_uncertainty=False,
is_shift=True,
temporal_pool=False,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.bnn_loss = self.loss_cls
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.num_segments = num_segments
self.init_std = init_std
self.compute_uncertainty = compute_uncertainty
self.is_shift = is_shift
self.temporal_pool = temporal_pool
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'AvgConsensus':
self.consensus = AvgConsensus(**consensus_)
else:
self.consensus = None
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
# self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
self.bnn_cls = BayesianPredictor(self.in_channels, self.num_classes)
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels.
self.avg_pool = nn.AdaptiveAvgPool2d(1)
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
        pass  # BNN does not need to be explicitly initialized
def forward(self, x, num_segs, npass=2, testing=False):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Useless in TSMHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TSM models. The `self.num_segments` we need is a
hyper parameter to build TSM models.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = torch.flatten(x, 1)
# [N * num_segs, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N * num_segs, num_classes]
outputs, log_priors, log_variational_posteriors = self.bnn_cls(x, npass=npass, testing=testing)
# gather output dictionary
log_prior = log_priors.mean()
log_variational_posterior = log_variational_posteriors.mean()
output_dict = {'log_prior': log_prior,
'log_posterior': log_variational_posterior}
if self.compute_uncertainty:
uncertain_alea, uncertain_epis = get_uncertainty(outputs)
output_dict.update({'aleatoric': uncertain_alea,
'epistemic': uncertain_epis})
cls_score = outputs.mean(0)
if self.is_shift and self.temporal_pool:
# [2 * N, num_segs // 2, num_classes]
cls_score = cls_score.view((-1, self.num_segments // 2) +
cls_score.size()[1:])
else:
# [N, num_segs, num_classes]
cls_score = cls_score.view((-1, self.num_segments) +
cls_score.size()[1:])
# [N, 1, num_classes]
cls_score = self.consensus(cls_score)
output_dict.update({'pred_mean': cls_score.squeeze(1)})
return output_dict
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_bnn_head.py |
import torch.nn as nn
import torch
from mmcv.cnn import ConvModule, constant_init, normal_init, xavier_init
import numpy as np
from ..registry import HEADS
from .base import BaseHead
@HEADS.register_module()
class DebiasHead(BaseHead):
"""Debias head.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='EvidenceLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.5.
        init_std (float): Std value for initialization. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='EvidenceLoss'),
loss_factor=0.1,
hsic_factor=0.5, # useful when alternative=True
alternative=False,
bias_input=True,
bias_network=True,
dropout_ratio=0.5,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.bias_input = bias_input
self.bias_network = bias_network
assert bias_input or bias_network, "At least one of the choices (bias_input, bias_network) should be True!"
self.loss_factor = loss_factor
self.hsic_factor = hsic_factor
self.alternative = alternative
self.f1_conv3d = ConvModule(
in_channels,
in_channels * 2, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
if bias_input:
self.f2_conv3d = ConvModule(
in_channels,
in_channels * 2, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
if bias_network:
self.f3_conv2d = ConvModule(
in_channels,
in_channels * 2, (3, 3),
stride=(2, 2),
padding=(1, 1),
bias=False,
conv_cfg=dict(type='Conv2d'),
norm_cfg=dict(type='BN', requires_grad=True))
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.f1_fc = nn.Linear(self.in_channels * 2, self.num_classes)
self.f2_fc = nn.Linear(self.in_channels * 2, self.num_classes)
self.f3_fc = nn.Linear(self.in_channels * 2, self.num_classes)
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
def init_weights(self):
"""Initiate the parameters from scratch."""
for m in self.modules():
if isinstance(m, nn.Linear):
normal_init(m, std=self.init_std)
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
def exp_evidence(self, y):
return torch.exp(torch.clamp(y, -10, 10))
def edl_loss(self, func, alpha, y):
S = torch.sum(alpha, dim=1, keepdim=True)
loss = torch.sum(y * (func(S) - func(alpha)), dim=1, keepdim=True)
return loss
def kl_divergence(self, alpha, beta):
        # compute the KL divergence KL(Dir(alpha) || Dir(beta)) between two Dirichlet distributions
S_alpha = torch.sum(alpha, dim=1, keepdim=True)
S_beta = torch.sum(beta, dim=1, keepdim=True)
lnA = torch.lgamma(S_alpha) - torch.sum(torch.lgamma(alpha), dim=1, keepdim=True)
lnB = torch.lgamma(S_beta) - torch.sum(torch.lgamma(beta), dim=1, keepdim=True)
# compute the digamma term
dg_term = torch.digamma(alpha) - torch.digamma(S_alpha)
# final KL divergence
kl = lnA - lnB + torch.sum((alpha - beta) * dg_term, dim=1, keepdim=True)
return kl
def _kernel(self, X, sigma):
X = X.view(len(X), -1)
XX = X @ X.t()
X_sqnorms = torch.diag(XX)
X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0)
gamma = 1 / (2 * sigma ** 2)
kernel_XX = torch.exp(-gamma * X_L2)
return kernel_XX
def hsic_loss(self, input1, input2, unbiased=False):
N = len(input1)
if N < 4:
return torch.tensor(0.0).to(input1.device)
        # we simply use the square root of the feature dimension as the sigma of the RBF kernel
sigma_x = np.sqrt(input1.size()[1])
sigma_y = np.sqrt(input2.size()[1])
# compute the kernels
kernel_XX = self._kernel(input1, sigma_x)
kernel_YY = self._kernel(input2, sigma_y)
if unbiased:
"""Unbiased estimator of Hilbert-Schmidt Independence Criterion
Song, Le, et al. "Feature selection via dependence maximization." 2012.
"""
tK = kernel_XX - torch.diag(torch.diag(kernel_XX))
tL = kernel_YY - torch.diag(torch.diag(kernel_YY))
hsic = (
torch.trace(tK @ tL)
+ (torch.sum(tK) * torch.sum(tL) / (N - 1) / (N - 2))
- (2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2))
)
loss = hsic if self.alternative else hsic / (N * (N - 3))
else:
"""Biased estimator of Hilbert-Schmidt Independence Criterion
Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
"""
KH = kernel_XX - kernel_XX.mean(0, keepdim=True)
LH = kernel_YY - kernel_YY.mean(0, keepdim=True)
loss = torch.trace(KH @ LH / (N - 1) ** 2)
return loss
def forward(self, x, num_segs=None, target=None, **kwargs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data. (B, 1024, 8, 14, 14)
Returns:
torch.Tensor: The classification scores for input samples.
"""
feat = x.clone() if isinstance(x, torch.Tensor) else x[-2].clone()
if len(feat.size()) == 4: # for 2D recognizer
assert num_segs is not None
feat = feat.view((-1, num_segs) + feat.size()[1:]).transpose(1, 2).contiguous()
# one-hot embedding for the target
y = torch.eye(self.num_classes).to(feat.device)
y = y[target]
losses = dict()
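        # Three evidence branches: f1 uses the raw (unbiased) 3D feature,
        # f2 a temporally shuffled copy of it (input-level bias), and f3 a
        # per-frame 2D path (network-level bias). The HSIC terms below
        # control the statistical dependence between the unbiased and the
        # biased evidence.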
# f1_Conv3D(x)
x = self.f1_conv3d(feat) # (B, 2048, 8, 7, 7)
feat_unbias = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1)
x = self.dropout(feat_unbias)
x = self.f1_fc(x)
alpha_unbias = self.exp_evidence(x) + 1
# minimize the edl losses
loss_cls1 = self.edl_loss(torch.log, alpha_unbias, y)
losses.update({'loss_unbias_cls': loss_cls1})
loss_hsic_f, loss_hsic_g = torch.zeros_like(loss_cls1), torch.zeros_like(loss_cls1)
if self.bias_input:
# f2_Conv3D(x)
feat_shuffle = feat[:, :, torch.randperm(feat.size()[2])]
x = self.f2_conv3d(feat_shuffle) # (B, 2048, 8, 7, 7)
feat_bias1 = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1)
x = self.dropout(feat_bias1)
x = self.f2_fc(x)
alpha_bias1 = self.exp_evidence(x) + 1
# minimize the edl losses
loss_cls2 = self.edl_loss(torch.log, alpha_bias1, y)
losses.update({'loss_bias1_cls': loss_cls2})
if self.alternative:
# minimize HSIC w.r.t. feat_unbias, and maximize HSIC w.r.t. feat_bias1
loss_hsic_f += self.hsic_factor * self.hsic_loss(feat_unbias, feat_bias1.detach(), unbiased=True)
loss_hsic_g += - self.hsic_factor * self.hsic_loss(feat_unbias.detach(), feat_bias1, unbiased=True)
else:
# maximize HSIC
loss_hsic1 = -1.0 * self.hsic_loss(alpha_unbias, alpha_bias1)
losses.update({"loss_bias1_hsic": loss_hsic1})
if self.bias_network:
# f3_Conv2D(x)
B, C, T, H, W = feat.size()
feat_reshape = feat.permute(0, 2, 1, 3, 4).contiguous().view(-1, C, H, W) # (B*T, C, H, W)
x = self.f3_conv2d(feat_reshape) # (64, 2048, 7, 7)
x = x.view(B, T, x.size(-3), x.size(-2), x.size(-1)).permute(0, 2, 1, 3, 4) # (B, 2048, 8, 7, 7)
feat_bias2 = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1)
x = self.dropout(feat_bias2)
x = self.f3_fc(x)
alpha_bias2 = self.exp_evidence(x) + 1
# minimize the edl losses
loss_cls3 = self.edl_loss(torch.log, alpha_bias2, y)
losses.update({'loss_bias2_cls': loss_cls3})
if self.alternative:
# minimize HSIC w.r.t. feat_unbias, and maximize HSIC w.r.t. feat_bias2
loss_hsic_f += self.hsic_factor * self.hsic_loss(feat_unbias, feat_bias2.detach(), unbiased=True)
loss_hsic_g += - self.hsic_factor * self.hsic_loss(feat_unbias.detach(), feat_bias2, unbiased=True)
else:
# maximize HSIC
loss_hsic2 = -1.0 * self.hsic_loss(alpha_unbias, alpha_bias2)
losses.update({"loss_bias2_hsic": loss_hsic2})
if self.alternative:
# Here, we use odd iterations for minimizing hsic_f, and use even iterations for maximizing hsic_g
assert 'iter' in kwargs, "iter number is missing!"
loss_mask = kwargs['iter'] % 2
loss_hsic = loss_mask * loss_hsic_f + (1 - loss_mask) * loss_hsic_g
losses.update({'loss_hsic': loss_hsic})
for k, v in losses.items():
losses.update({k: v * self.loss_factor})
return losses
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/debias_head.py |
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from ...core import top_k_accuracy
from ..builder import build_loss
class AvgConsensus(nn.Module):
"""Average consensus module.
Args:
dim (int): Decide which dim consensus function to apply.
Default: 1.
"""
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, x):
"""Defines the computation performed at every call."""
return x.mean(dim=self.dim, keepdim=True)
class BaseHead(nn.Module, metaclass=ABCMeta):
"""Base class for head.
All Head should subclass it.
All subclass should overwrite:
- Methods:``init_weights``, initializing weights in some modules.
- Methods:``forward``, supporting to forward both for training and testing.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
multi_class (bool): Determines whether it is a multi-class
recognition task. Default: False.
label_smooth_eps (float): Epsilon used in label smooth.
Reference: arxiv.org/abs/1906.02629. Default: 0.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss', loss_factor=1.0),
multi_class=False,
label_smooth_eps=0.0):
super().__init__()
self.num_classes = num_classes
self.in_channels = in_channels
self.loss_cls = build_loss(loss_cls)
self.multi_class = multi_class
self.label_smooth_eps = label_smooth_eps
@abstractmethod
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
@abstractmethod
def forward(self, x):
"""Defines the computation performed at every call."""
def loss(self, cls_score, labels, **kwargs):
"""Calculate the loss given output ``cls_score``, target ``labels``.
Args:
cls_score (torch.Tensor): The output of the model.
labels (torch.Tensor): The target output of the model.
Returns:
dict: A dict containing field 'loss_cls'(mandatory)
and 'top1_acc', 'top5_acc'(optional).
"""
losses = dict()
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
if not self.multi_class:
top_k_acc = top_k_accuracy(cls_score.detach().cpu().numpy(),
labels.detach().cpu().numpy(), (1, 5))
losses['top1_acc'] = torch.tensor(
top_k_acc[0], device=cls_score.device)
losses['top5_acc'] = torch.tensor(
top_k_acc[1], device=cls_score.device)
elif self.label_smooth_eps != 0:
labels = ((1 - self.label_smooth_eps) * labels +
self.label_smooth_eps / self.num_classes)
loss_cls = self.loss_cls(cls_score, labels, **kwargs)
# loss_cls may be dictionary or single tensor
if isinstance(loss_cls, dict):
losses.update(loss_cls)
else:
losses['loss_cls'] = loss_cls
return losses
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/heads/base.py |
import torch.nn as nn
from mmcv.cnn import CONV_LAYERS, build_norm_layer, constant_init, kaiming_init
from torch.nn.modules.utils import _triple
@CONV_LAYERS.register_module()
class Conv2plus1d(nn.Module):
"""(2+1)d Conv module for R(2+1)d backbone.
https://arxiv.org/pdf/1711.11248.pdf.
Args:
in_channels (int): Same as nn.Conv3d.
out_channels (int): Same as nn.Conv3d.
kernel_size (int | tuple[int]): Same as nn.Conv3d.
stride (int | tuple[int]): Same as nn.Conv3d.
padding (int | tuple[int]): Same as nn.Conv3d.
dilation (int | tuple[int]): Same as nn.Conv3d.
groups (int): Same as nn.Conv3d.
bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
norm_cfg=dict(type='BN3d')):
super().__init__()
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
assert len(kernel_size) == len(stride) == len(padding) == 3
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.norm_cfg = norm_cfg
self.output_padding = (0, 0, 0)
self.transposed = False
        # The number of middle planes is calculated according to:
        #   M_i = \lfloor \frac{t d^2 N_{i-1} N_i}{d^2 N_{i-1} + t N_i} \rfloor
        # where d and t are the spatial and temporal kernel sizes, and
        # N_{i-1}, N_i are the input and output planes.
        # See https://arxiv.org/pdf/1711.11248.pdf
mid_channels = 3 * (
in_channels * out_channels * kernel_size[1] * kernel_size[2])
mid_channels /= (
in_channels * kernel_size[1] * kernel_size[2] + 3 * out_channels)
mid_channels = int(mid_channels)
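        # e.g. with in_channels = out_channels = 64 and a 3x3x3 kernel:
        # 3 * (64 * 64 * 3 * 3) / (64 * 3 * 3 + 3 * 64)
        # = 110592 / 768 = 144 middle planes.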
self.conv_s = nn.Conv3d(
in_channels,
mid_channels,
kernel_size=(1, kernel_size[1], kernel_size[2]),
stride=(1, stride[1], stride[2]),
padding=(0, padding[1], padding[2]),
bias=bias)
_, self.bn_s = build_norm_layer(self.norm_cfg, mid_channels)
self.relu = nn.ReLU(inplace=True)
self.conv_t = nn.Conv3d(
mid_channels,
out_channels,
kernel_size=(kernel_size[0], 1, 1),
stride=(stride[0], 1, 1),
padding=(padding[0], 0, 0),
bias=bias)
self.init_weights()
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = self.conv_s(x)
x = self.bn_s(x)
x = self.relu(x)
x = self.conv_t(x)
return x
def init_weights(self):
"""Initiate the parameters from scratch."""
kaiming_init(self.conv_s)
kaiming_init(self.conv_t)
constant_init(self.bn_s, 1, bias=0)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/common/conv2plus1d.py |
from .conv2plus1d import Conv2plus1d
from .conv_audio import ConvAudio
__all__ = ['Conv2plus1d', 'ConvAudio']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/common/__init__.py |
import torch
import torch.nn as nn
from mmcv.cnn import CONV_LAYERS, ConvModule, constant_init, kaiming_init
from torch.nn.modules.utils import _pair
@CONV_LAYERS.register_module()
class ConvAudio(nn.Module):
"""Conv2d module for AudioResNet backbone.
<https://arxiv.org/abs/2001.08740>`_.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int | tuple[int]): Same as nn.Conv2d.
op (string): Operation to merge the output of freq
and time feature map. Choices are 'sum' and 'concat'.
Default: 'concat'.
stride (int | tuple[int]): Same as nn.Conv2d.
padding (int | tuple[int]): Same as nn.Conv2d.
dilation (int | tuple[int]): Same as nn.Conv2d.
groups (int): Same as nn.Conv2d.
bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
op='concat',
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False):
super().__init__()
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
assert op in ['concat', 'sum']
self.op = op
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.output_padding = (0, 0)
self.transposed = False
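        # Decompose the 2D conv into two rectangular branches, one along
        # each spectrogram axis (frequency and time); their outputs are
        # summed or concatenated depending on `op`.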
self.conv_1 = ConvModule(
in_channels,
out_channels,
kernel_size=(kernel_size[0], 1),
stride=stride,
padding=(kernel_size[0] // 2, 0),
bias=bias,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'))
self.conv_2 = ConvModule(
in_channels,
out_channels,
kernel_size=(1, kernel_size[1]),
stride=stride,
padding=(0, kernel_size[1] // 2),
bias=bias,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'))
self.init_weights()
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x_1 = self.conv_1(x)
x_2 = self.conv_2(x)
if self.op == 'concat':
out = torch.cat([x_1, x_2], 1)
else:
out = x_1 + x_2
return out
def init_weights(self):
"""Initiate the parameters from scratch."""
kaiming_init(self.conv_1.conv)
kaiming_init(self.conv_2.conv)
constant_init(self.conv_1.bn, 1, bias=0)
constant_init(self.conv_2.bn, 1, bias=0)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/common/conv_audio.py |
from ..registry import RECOGNIZERS
from .base import BaseRecognizer
@RECOGNIZERS.register_module()
class Recognizer3D(BaseRecognizer):
"""3D recognizer model framework."""
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
losses = dict()
x = self.extract_feat(imgs)
if hasattr(self, 'debias_head'):
loss_debias = self.debias_head(x, target=labels.squeeze(), **kwargs)
losses.update(loss_debias)
if hasattr(self, 'neck'):
x, loss_aux = self.neck(x, labels.squeeze())
losses.update(loss_aux)
cls_score = self.cls_head(x)
gt_labels = labels.squeeze()
loss_cls = self.cls_head.loss(cls_score, gt_labels, **kwargs)
losses.update(loss_cls)
return losses
def _do_test(self, imgs):
"""Defines the computation performed at every call when evaluation,
testing and gradcam."""
num_segs = imgs.shape[1]
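        # Fold the clip dimension into the batch so every clip is scored
        # independently; `num_segs` is kept so `average_clip` can fuse the
        # per-clip scores back into one prediction per video.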
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
x = self.extract_feat(imgs)
if hasattr(self, 'neck'):
x, _ = self.neck(x)
cls_score = self.cls_head(x)
cls_score = self.average_clip(cls_score, num_segs)
return cls_score
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
return self._do_test(imgs).cpu().numpy()
def forward_dummy(self, imgs):
"""Used for computing network FLOPs.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
x = self.extract_feat(imgs)
outs = (self.cls_head(x), )
return outs
def forward_gradcam(self, imgs):
"""Defines the computation performed at every call when using gradcam
utils."""
return self._do_test(imgs)
def get_feat(self, imgs, return_score=False):
"""Defines the computation performed at every call when using get_feat
utils."""
num_segs = imgs.shape[1]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
x = self.extract_feat(imgs)
if hasattr(self, 'neck'):
x, _ = self.neck(x) # (num_clips * num_crops, 2048, 1, 8, 8)
if return_score:
cls_score = self.cls_head(x)
cls_score = self.average_clip(cls_score, num_segs)
return x, cls_score
return x | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d.py |
from ..registry import RECOGNIZERS
from .base import BaseRecognizer
@RECOGNIZERS.register_module()
class Recognizer2D(BaseRecognizer):
"""2D recognizer model framework."""
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
losses = dict()
x = self.extract_feat(imgs)
if hasattr(self, 'debias_head'):
loss_debias = self.debias_head(x, num_segs=num_segs, target=labels.squeeze(), **kwargs)
losses.update(loss_debias)
if hasattr(self, 'neck'):
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, _ = self.neck(x, labels.squeeze())
x = x.squeeze(2)
num_segs = 1
cls_score = self.cls_head(x, num_segs)
gt_labels = labels.squeeze()
loss_cls = self.cls_head.loss(cls_score, gt_labels, **kwargs)
losses.update(loss_cls)
return losses
def _do_test(self, imgs):
"""Defines the computation performed at every call when evaluation,
testing and gradcam."""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
losses = dict()
x = self.extract_feat(imgs)
if hasattr(self, 'neck'):
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, loss_aux = self.neck(x)
x = x.squeeze(2)
losses.update(loss_aux)
num_segs = 1
# When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes]
# When using `TSMHead`, shape is [batch_size * num_crops, num_classes]
# `num_crops` is calculated by:
# 1) `twice_sample` in `SampleFrames`
# 2) `num_sample_positions` in `DenseSampleFrames`
# 3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline`
# 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
cls_score = self.cls_head(x, num_segs)
assert cls_score.size()[0] % batches == 0
# calculate num_crops automatically
cls_score = self.average_clip(cls_score,
cls_score.size()[0] // batches)
return cls_score
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
return self._do_test(imgs).cpu().numpy()
def forward_dummy(self, imgs):
"""Used for computing network FLOPs.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
x = self.extract_feat(imgs)
outs = (self.cls_head(x, num_segs), )
return outs
def forward_gradcam(self, imgs):
"""Defines the computation performed at every call when using gradcam
utils."""
return self._do_test(imgs)
def get_feat(self, imgs, return_score=False):
"""Defines the computation performed at every call when using get_feat
utils."""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
x = self.extract_feat(imgs)
if hasattr(self, 'neck'):
x, _ = self.neck(x)
if return_score:
cls_score = self.cls_head(x, num_segs)
assert cls_score.size()[0] % batches == 0
cls_score = self.average_clip(cls_score,
cls_score.size()[0] // batches)
return x, cls_score
return x | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d.py |
from ..registry import RECOGNIZERS
from .recognizer3d import Recognizer3D
@RECOGNIZERS.register_module()
class Recognizer3DRPL(Recognizer3D):
"""3D recognizer model framework."""
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
losses = dict()
x = self.extract_feat(imgs)
if hasattr(self, 'neck'):
x, loss_aux = self.neck(x, labels.squeeze())
losses.update(loss_aux)
outputs = self.cls_head(x)
gt_labels = labels.squeeze()
loss_dict = self.cls_head.loss_cls(outputs, gt_labels, **kwargs)
losses.update(loss_dict)
return losses
def _do_test(self, imgs):
"""Defines the computation performed at every call when evaluation,
testing and gradcam."""
num_segs = imgs.shape[1]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
x = self.extract_feat(imgs)
if hasattr(self, 'neck'):
x, _ = self.neck(x)
outputs = self.cls_head(x)
cls_score = outputs['dist'] # the negative distance is equivalent to the cls_score before softmax
cls_score = self.average_clip(cls_score, num_segs)
return cls_score
def forward_dummy(self, imgs):
"""Used for computing network FLOPs.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
x = self.extract_feat(imgs)
outputs = self.cls_head(x)
outs = (outputs['dist'], )
return outs
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d_rpl.py |
from .audio_recognizer import AudioRecognizer
from .base import BaseRecognizer
from .recognizer2d import Recognizer2D
from .recognizer3d import Recognizer3D
from .recognizer2d_bnn import Recognizer2DBNN
from .recognizer3d_bnn import Recognizer3DBNN
from .recognizer2d_rpl import Recognizer2DRPL
from .recognizer3d_rpl import Recognizer3DRPL
__all__ = ['BaseRecognizer', 'Recognizer2D', 'Recognizer3D', 'Recognizer2DBNN', 'Recognizer3DBNN', 'Recognizer2DRPL', 'Recognizer3DRPL', 'AudioRecognizer']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/__init__.py |
from ..registry import RECOGNIZERS
from .recognizer2d import Recognizer2D
@RECOGNIZERS.register_module()
class Recognizer2DRPL(Recognizer2D):
"""2D recognizer model framework."""
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
losses = dict()
x = self.extract_feat(imgs)
if hasattr(self, 'neck'):
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, _ = self.neck(x, labels.squeeze())
x = x.squeeze(2)
num_segs = 1
outputs = self.cls_head(x, num_segs)
gt_labels = labels.squeeze()
loss_dict = self.cls_head.loss_cls(outputs, gt_labels, **kwargs)
losses.update(loss_dict)
return losses
def _do_test(self, imgs):
"""Defines the computation performed at every call when evaluation,
testing and gradcam."""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
losses = dict()
x = self.extract_feat(imgs)
if hasattr(self, 'neck'):
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, loss_aux = self.neck(x)
x = x.squeeze(2)
losses.update(loss_aux)
num_segs = 1
# When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes]
# When using `TSMHead`, shape is [batch_size * num_crops, num_classes]
# `num_crops` is calculated by:
# 1) `twice_sample` in `SampleFrames`
# 2) `num_sample_positions` in `DenseSampleFrames`
# 3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline`
# 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
outputs = self.cls_head(x, num_segs)
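        # as in the 3D RPL recognizer, the negative distance returned by the
        # RPL head serves as the classification score before softmax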
cls_score = outputs['dist']
assert cls_score.size()[0] % batches == 0
# calculate num_crops automatically
cls_score = self.average_clip(cls_score,
cls_score.size()[0] // batches)
return cls_score
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
return self._do_test(imgs).cpu().numpy()
def forward_dummy(self, imgs):
"""Used for computing network FLOPs.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
x = self.extract_feat(imgs)
outputs = self.cls_head(x, num_segs)
outs = (outputs['dist'], )
return outs
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d_rpl.py |
from ..registry import RECOGNIZERS
from .recognizer2d import Recognizer2D
@RECOGNIZERS.register_module()
class Recognizer2DBNN(Recognizer2D):
"""2D recognizer model framework."""
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
losses = dict()
x = self.extract_feat(imgs)
if hasattr(self, 'debias_head'):
loss_debias = self.debias_head(x, num_segs=num_segs, target=labels.squeeze(), **kwargs)
losses.update(loss_debias)
if hasattr(self, 'neck'):
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, _ = self.neck(x, labels.squeeze())
x = x.squeeze(2)
num_segs = 1
outputs = self.cls_head(x, num_segs, npass=self.train_cfg['npass'], testing=False)
# parse the outputs
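        # 'pred_mean' is the prediction averaged over the npass stochastic
        # forward passes drawn by the Bayesian (BNN) classification head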
cls_score = outputs['pred_mean']
gt_labels = labels.squeeze()
loss_dict = self.cls_head.bnn_loss(cls_score, gt_labels, outputs, beta=self.train_cfg['loss_weight'], **kwargs)
losses.update(loss_dict)
return losses
def _do_test(self, imgs):
"""Defines the computation performed at every call when evaluation,
testing and gradcam."""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
losses = dict()
x = self.extract_feat(imgs)
if hasattr(self, 'neck'):
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, loss_aux = self.neck(x)
x = x.squeeze(2)
losses.update(loss_aux)
num_segs = 1
# When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes]
# When using `TSMHead`, shape is [batch_size * num_crops, num_classes]
# `num_crops` is calculated by:
# 1) `twice_sample` in `SampleFrames`
# 2) `num_sample_positions` in `DenseSampleFrames`
# 3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline`
# 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
outputs = self.cls_head(x, num_segs, npass=self.test_cfg['npass'], testing=True)
cls_score = outputs['pred_mean']
assert cls_score.size()[0] % batches == 0
# calculate num_crops automatically
cls_score = self.average_clip(cls_score,
cls_score.size()[0] // batches)
return cls_score
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
return self._do_test(imgs).cpu().numpy()
def forward_dummy(self, imgs):
"""Used for computing network FLOPs.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
x = self.extract_feat(imgs)
        outputs = self.cls_head(x, num_segs, npass=self.test_cfg['npass'], testing=True)
outs = (outputs['pred_mean'], )
return outs
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d_bnn.py |
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import auto_fp16
from .. import builder
class BaseRecognizer(nn.Module, metaclass=ABCMeta):
"""Base class for recognizers.
All recognizers should subclass it.
All subclass should overwrite:
- Methods:``forward_train``, supporting to forward when training.
- Methods:``forward_test``, supporting to forward when testing.
Args:
backbone (dict): Backbone modules to extract feature.
cls_head (dict): Classification head to process feature.
train_cfg (dict | None): Config for training. Default: None.
test_cfg (dict | None): Config for testing. Default: None.
"""
def __init__(self,
backbone,
cls_head,
neck=None,
debias_head=None,
train_cfg=None,
test_cfg=None):
super().__init__()
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self.cls_head = builder.build_head(cls_head)
if debias_head is not None:
self.debias_head = builder.build_head(debias_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
# aux_info is the list of tensor names beyond 'imgs' and 'label' which
# will be used in train_step and val_step, data_batch should contain
# these tensors
self.aux_info = []
if train_cfg is not None and 'aux_info' in train_cfg:
self.aux_info = train_cfg['aux_info']
self.init_weights()
self.fp16_enabled = False
def init_weights(self):
"""Initialize the model network weights."""
self.backbone.init_weights()
self.cls_head.init_weights()
if hasattr(self, 'neck'):
self.neck.init_weights()
if hasattr(self, 'debias_head'):
self.debias_head.init_weights()
@auto_fp16()
def extract_feat(self, imgs):
"""Extract features through a backbone.
Args:
imgs (torch.Tensor): The input images.
Returns:
torch.tensor: The extracted features.
"""
x = self.backbone(imgs)
return x
def evidence_to_prob(self, output, evidence_type):
if evidence_type == 'relu':
from ..losses.edl_loss import relu_evidence as evidence
elif evidence_type == 'exp':
from ..losses.edl_loss import exp_evidence as evidence
elif evidence_type == 'softplus':
from ..losses.edl_loss import softplus_evidence as evidence
alpha = evidence(output) + 1
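        # S is the Dirichlet strength; under evidential deep learning the
        # expected probability of class k is alpha_k / S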
S = torch.sum(alpha, dim=-1, keepdim=True)
prob = alpha / S
return prob
def average_clip(self, cls_score, num_segs=1):
"""Averaging class score over multiple clips.
        Use different averaging types ('score', 'prob', 'evidence' or None,
        as defined in test_cfg) to compute the final averaged
        class score. Only called in test mode.
Args:
cls_score (torch.Tensor): Class score to be averaged.
num_segs (int): Number of clips for each input sample.
Returns:
torch.Tensor: Averaged class score.
"""
if 'average_clips' not in self.test_cfg.keys():
            raise KeyError('"average_clips" must be defined in test_cfg\'s keys')
average_clips = self.test_cfg['average_clips']
if average_clips not in ['score', 'prob', 'evidence', None]:
raise ValueError(f'{average_clips} is not supported. '
f'Currently supported ones are '
f'["score", "prob", "evidence", None]')
if average_clips is None:
return cls_score
batch_size = cls_score.shape[0]
cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)
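        # shape is now (num_videos, num_segs, num_classes); the clip/crop
        # dimension is averaged out below according to `average_clips`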
if average_clips == 'prob':
cls_score = F.softmax(cls_score, dim=2).mean(dim=1)
elif average_clips == 'score':
cls_score = cls_score.mean(dim=1)
elif average_clips == 'evidence':
assert 'evidence_type' in self.test_cfg.keys()
cls_score = self.evidence_to_prob(cls_score, self.test_cfg['evidence_type'])
cls_score = cls_score.mean(dim=1)
return cls_score
@abstractmethod
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
@abstractmethod
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
@abstractmethod
def forward_gradcam(self, imgs):
"""Defines the computation performed at every all when using gradcam
utils."""
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
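        # only entries whose key contains 'loss' are summed into the total;
        # other logged variables (e.g. accuracies) are not back-propagated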
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone().to(log_vars['loss'].device)
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def forward(self, imgs, label=None, return_loss=True, **kwargs):
"""Define the computation performed at every call."""
if kwargs.get('gradcam', False):
del kwargs['gradcam']
return self.forward_gradcam(imgs, **kwargs)
if kwargs.get('get_feat', False):
del kwargs['get_feat']
return self.get_feat(imgs, **kwargs)
if return_loss:
if label is None:
raise ValueError('Label should not be None.')
return self.forward_train(imgs, label, **kwargs)
return self.forward_test(imgs, **kwargs)
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
imgs = data_batch['imgs']
label = data_batch['label']
aux_info = {}
for item in self.aux_info:
assert item in data_batch
aux_info[item] = data_batch[item]
aux_info.update(kwargs)
losses = self(imgs, label, return_loss=True, **aux_info)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
imgs = data_batch['imgs']
label = data_batch['label']
aux_info = {}
for item in self.aux_info:
aux_info[item] = data_batch[item]
losses = self(imgs, label, return_loss=True, **aux_info)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/base.py |
from ..registry import RECOGNIZERS
from .recognizer3d import Recognizer3D
@RECOGNIZERS.register_module()
class Recognizer3DBNN(Recognizer3D):
"""3D recognizer model framework."""
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
losses = dict()
x = self.extract_feat(imgs)
if hasattr(self, 'neck'):
x, loss_aux = self.neck(x, labels.squeeze())
losses.update(loss_aux)
outputs = self.cls_head(x, npass=self.train_cfg['npass'], testing=False)
# parse the outputs
cls_score = outputs['pred_mean']
gt_labels = labels.squeeze()
loss_dict = self.cls_head.bnn_loss(cls_score, gt_labels, outputs, beta=self.train_cfg['loss_weight'], **kwargs)
losses.update(loss_dict)
return losses
def _do_test(self, imgs):
"""Defines the computation performed at every call when evaluation,
testing and gradcam."""
num_segs = imgs.shape[1]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
x = self.extract_feat(imgs)
if hasattr(self, 'neck'):
x, _ = self.neck(x)
outputs = self.cls_head(x, npass=self.test_cfg['npass'], testing=True)
cls_score = outputs['pred_mean']
cls_score = self.average_clip(cls_score, num_segs)
return cls_score
def forward_dummy(self, imgs):
"""Used for computing network FLOPs.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
x = self.extract_feat(imgs)
outputs = self.cls_head(x, npass=self.test_cfg['npass'], testing=True)
outs = (outputs['pred_mean'], )
return outs
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d_bnn.py |
from ..registry import RECOGNIZERS
from .base import BaseRecognizer
@RECOGNIZERS.register_module()
class AudioRecognizer(BaseRecognizer):
"""Audio recognizer model framework."""
def forward(self, audios, label=None, return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
if label is None:
raise ValueError('Label should not be None.')
return self.forward_train(audios, label)
return self.forward_test(audios)
def forward_train(self, audios, labels):
"""Defines the computation performed at every call when training."""
audios = audios.reshape((-1, ) + audios.shape[2:])
x = self.extract_feat(audios)
cls_score = self.cls_head(x)
gt_labels = labels.squeeze()
loss = self.cls_head.loss(cls_score, gt_labels)
return loss
def forward_test(self, audios):
"""Defines the computation performed at every call when evaluation and
testing."""
num_segs = audios.shape[1]
audios = audios.reshape((-1, ) + audios.shape[2:])
x = self.extract_feat(audios)
cls_score = self.cls_head(x)
cls_score = self.average_clip(cls_score, num_segs)
return cls_score.cpu().numpy()
def forward_gradcam(self, audios):
raise NotImplementedError
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
audios = data_batch['audios']
label = data_batch['label']
losses = self(audios, label)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
audios = data_batch['audios']
label = data_batch['label']
losses = self(audios, label)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/audio_recognizer.py |
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.utils import _ntuple
from mmaction.models.registry import BACKBONES
from mmaction.utils import get_root_logger
class Bottleneck2dAudio(nn.Module):
"""Bottleneck2D block for ResNet2D.
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
        stride (int | tuple[int]): Stride in the conv layer. Default: 2.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module): Downsample layer. Default: None.
factorize (bool): Whether to factorize kernel. Default: True.
norm_cfg (dict):
Config for norm layers. required keys are `type` and
`requires_grad`. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
stride=2,
dilation=1,
downsample=None,
factorize=True,
norm_cfg=None,
with_cp=False):
super().__init__()
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.factorize = factorize
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.conv1_stride = 1
self.conv2_stride = stride
conv1_kernel_size = (1, 1)
conv1_padding = 0
conv2_kernel_size = (3, 3)
conv2_padding = (dilation, dilation)
self.conv1 = ConvModule(
inplanes,
planes,
kernel_size=conv1_kernel_size,
padding=conv1_padding,
dilation=dilation,
norm_cfg=self.norm_cfg,
bias=False)
self.conv2 = ConvModule(
planes,
planes,
kernel_size=conv2_kernel_size,
stride=stride,
padding=conv2_padding,
dilation=dilation,
bias=False,
conv_cfg=dict(type='ConvAudio') if factorize else dict(
type='Conv'),
norm_cfg=None,
act_cfg=None)
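        # when factorized, ConvAudio splits the 3x3 kernel into separate
        # frequency- and time-wise convolutions and concatenates the results,
        # which is why conv3 below takes 2 * planes input channels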
self.conv3 = ConvModule(
2 * planes if factorize else planes,
planes * self.expansion,
kernel_size=1,
bias=False,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
@BACKBONES.register_module()
class ResNetAudio(nn.Module):
"""ResNet 2d audio backbone. Reference:
<https://arxiv.org/abs/2001.08740>`_.
Args:
depth (int): Depth of resnet, from {50, 101, 152}.
pretrained (str | None): Name of pretrained model.
in_channels (int): Channel num of input features. Default: 1.
base_channels (int): Channel num of stem output features. Default: 32.
num_stages (int): Resnet stages. Default: 4.
strides (Sequence[int]): Strides of residual blocks of each stage.
Default: (1, 2, 2, 2).
dilations (Sequence[int]): Dilation of each stage.
Default: (1, 1, 1, 1).
conv1_kernel (int): Kernel size of the first conv layer. Default: 9.
conv1_stride (int | tuple[int]): Stride of the first conv layer.
Default: 1.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
factorize (Sequence[int]): factorize Dims of each block for audio.
Default: (1, 1, 0, 0).
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
conv_cfg (dict): Config for norm layers. Default: dict(type='Conv').
norm_cfg (dict):
Config for norm layers. required keys are `type` and
`requires_grad`. Default: dict(type='BN2d', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
zero_init_residual (bool):
Whether to use zero initialization for residual block,
Default: True.
"""
arch_settings = {
# 18: (BasicBlock2dAudio, (2, 2, 2, 2)),
# 34: (BasicBlock2dAudio, (3, 4, 6, 3)),
50: (Bottleneck2dAudio, (3, 4, 6, 3)),
101: (Bottleneck2dAudio, (3, 4, 23, 3)),
152: (Bottleneck2dAudio, (3, 8, 36, 3))
}
def __init__(self,
depth,
pretrained,
in_channels=1,
num_stages=4,
base_channels=32,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
conv1_kernel=9,
conv1_stride=1,
frozen_stages=-1,
factorize=(1, 1, 0, 0),
norm_eval=False,
with_cp=False,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN2d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
zero_init_residual=True):
super().__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.pretrained = pretrained
self.in_channels = in_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.dilations = dilations
self.conv1_kernel = conv1_kernel
self.conv1_stride = conv1_stride
self.frozen_stages = frozen_stages
self.stage_factorization = _ntuple(num_stages)(factorize)
self.norm_eval = norm_eval
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = self.base_channels
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = self.base_channels * 2**i
res_layer = self.make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
factorize=self.stage_factorization[i],
norm_cfg=self.norm_cfg,
with_cp=with_cp)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * self.base_channels * 2**(
len(self.stage_blocks) - 1)
def make_res_layer(self,
block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
factorize=1,
norm_cfg=None,
with_cp=False):
"""Build residual layer for ResNetAudio.
Args:
block (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input feature
in each block.
planes (int): Number of channels for the output feature
in each block.
blocks (int): Number of residual blocks.
            stride (int): Stride of the first residual block in this layer.
                Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
factorize (int | Sequence[int]): Determine whether to factorize
for each block. Default: 1.
norm_cfg (dict):
Config for norm layers. required keys are `type` and
`requires_grad`. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed.
Default: False.
Returns:
A residual layer for the given config.
"""
factorize = factorize if not isinstance(
factorize, int) else (factorize, ) * blocks
assert len(factorize) == blocks
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = ConvModule(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
norm_cfg=norm_cfg,
act_cfg=None)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
factorize=(factorize[0] == 1),
norm_cfg=norm_cfg,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
dilation,
factorize=(factorize[i] == 1),
norm_cfg=norm_cfg,
with_cp=with_cp))
return nn.Sequential(*layers)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1 = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=self.conv1_kernel,
stride=self.conv1_stride,
bias=False,
conv_cfg=dict(type='ConvAudio', op='sum'),
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
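        # the stem uses ConvAudio with op='sum', so the factorized frequency
        # and time responses are summed and the output keeps base_channels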
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.bn.eval()
for m in [self.conv1.conv, self.conv1.bn]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck2dAudio):
constant_init(m.conv3.bn, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input samples extracted
by the backbone.
"""
x = self.conv1(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
x = res_layer(x)
return x
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_audio.py |
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (ConvModule, NonLocal3d, build_activation_layer,
constant_init, kaiming_init)
from mmcv.runner import _load_checkpoint, load_checkpoint
from mmcv.utils import _BatchNorm
from torch.nn.modules.utils import _ntuple, _triple
from ...utils import get_root_logger
from ..registry import BACKBONES
class BasicBlock3d(nn.Module):
"""BasicBlock 3d block for ResNet3D.
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
spatial_stride (int): Spatial stride in the conv3d layer. Default: 1.
temporal_stride (int): Temporal stride in the conv3d layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: 'pytorch'.
inflate (bool): Whether to inflate kernel. Default: True.
non_local (bool): Determine whether to apply non-local module in this
block. Default: False.
non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
conv_cfg (dict): Config dict for convolution layer.
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type``,
Default: ``dict(type='BN3d')``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU')``.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 1
def __init__(self,
inplanes,
planes,
spatial_stride=1,
temporal_stride=1,
dilation=1,
downsample=None,
style='pytorch',
inflate=True,
non_local=False,
non_local_cfg=dict(),
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d'),
act_cfg=dict(type='ReLU'),
with_cp=False,
**kwargs):
super().__init__()
assert style in ['pytorch', 'caffe']
# make sure that only ``inflate_style`` is passed into kwargs
assert set(kwargs.keys()).issubset(['inflate_style'])
self.inplanes = inplanes
self.planes = planes
self.spatial_stride = spatial_stride
self.temporal_stride = temporal_stride
self.dilation = dilation
self.style = style
self.inflate = inflate
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.with_cp = with_cp
self.non_local = non_local
self.non_local_cfg = non_local_cfg
self.conv1_stride_s = spatial_stride
self.conv2_stride_s = 1
self.conv1_stride_t = temporal_stride
self.conv2_stride_t = 1
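        # inflate=True uses full 3x3x3 kernels; otherwise the temporal kernel
        # size is 1 and both convs reduce to frame-wise 2D convolutions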
if self.inflate:
conv1_kernel_size = (3, 3, 3)
conv1_padding = (1, dilation, dilation)
conv2_kernel_size = (3, 3, 3)
conv2_padding = (1, 1, 1)
else:
conv1_kernel_size = (1, 3, 3)
conv1_padding = (0, dilation, dilation)
conv2_kernel_size = (1, 3, 3)
conv2_padding = (0, 1, 1)
self.conv1 = ConvModule(
inplanes,
planes,
conv1_kernel_size,
stride=(self.conv1_stride_t, self.conv1_stride_s,
self.conv1_stride_s),
padding=conv1_padding,
dilation=(1, dilation, dilation),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv2 = ConvModule(
planes,
planes * self.expansion,
conv2_kernel_size,
stride=(self.conv2_stride_t, self.conv2_stride_s,
self.conv2_stride_s),
padding=conv2_padding,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.downsample = downsample
self.relu = build_activation_layer(self.act_cfg)
if self.non_local:
self.non_local_block = NonLocal3d(self.conv2.norm.num_features,
**self.non_local_cfg)
def forward(self, x):
"""Defines the computation performed at every call."""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
if self.non_local:
out = self.non_local_block(out)
return out
class Bottleneck3d(nn.Module):
"""Bottleneck 3d block for ResNet3D.
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
spatial_stride (int): Spatial stride in the conv3d layer. Default: 1.
temporal_stride (int): Temporal stride in the conv3d layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: 'pytorch'.
inflate (bool): Whether to inflate kernel. Default: True.
inflate_style (str): ``3x1x1`` or ``3x3x3``. which determines the
kernel sizes and padding strides for conv1 and conv2 in each block.
Default: '3x1x1'.
non_local (bool): Determine whether to apply non-local module in this
block. Default: False.
non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
conv_cfg (dict): Config dict for convolution layer.
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type``,
Default: ``dict(type='BN3d')``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU')``.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
spatial_stride=1,
temporal_stride=1,
dilation=1,
downsample=None,
style='pytorch',
inflate=True,
inflate_style='3x1x1',
non_local=False,
non_local_cfg=dict(),
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d'),
act_cfg=dict(type='ReLU'),
with_cp=False):
super().__init__()
assert style in ['pytorch', 'caffe']
assert inflate_style in ['3x1x1', '3x3x3']
self.inplanes = inplanes
self.planes = planes
self.spatial_stride = spatial_stride
self.temporal_stride = temporal_stride
self.dilation = dilation
self.style = style
self.inflate = inflate
self.inflate_style = inflate_style
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
self.act_cfg = act_cfg
self.with_cp = with_cp
self.non_local = non_local
self.non_local_cfg = non_local_cfg
if self.style == 'pytorch':
self.conv1_stride_s = 1
self.conv2_stride_s = spatial_stride
self.conv1_stride_t = 1
self.conv2_stride_t = temporal_stride
else:
self.conv1_stride_s = spatial_stride
self.conv2_stride_s = 1
self.conv1_stride_t = temporal_stride
self.conv2_stride_t = 1
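        # '3x1x1' inflates conv1 (the 1x1 conv) along time, '3x3x3' inflates
        # conv2 (the 3x3 conv); with inflate=False both convs stay 2D (1xkxk)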
if self.inflate:
if inflate_style == '3x1x1':
conv1_kernel_size = (3, 1, 1)
conv1_padding = (1, 0, 0)
conv2_kernel_size = (1, 3, 3)
conv2_padding = (0, dilation, dilation)
else:
conv1_kernel_size = (1, 1, 1)
conv1_padding = (0, 0, 0)
conv2_kernel_size = (3, 3, 3)
conv2_padding = (1, dilation, dilation)
else:
conv1_kernel_size = (1, 1, 1)
conv1_padding = (0, 0, 0)
conv2_kernel_size = (1, 3, 3)
conv2_padding = (0, dilation, dilation)
self.conv1 = ConvModule(
inplanes,
planes,
conv1_kernel_size,
stride=(self.conv1_stride_t, self.conv1_stride_s,
self.conv1_stride_s),
padding=conv1_padding,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv2 = ConvModule(
planes,
planes,
conv2_kernel_size,
stride=(self.conv2_stride_t, self.conv2_stride_s,
self.conv2_stride_s),
padding=conv2_padding,
dilation=(1, dilation, dilation),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv3 = ConvModule(
planes,
planes * self.expansion,
1,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
# No activation in the third ConvModule for bottleneck
act_cfg=None)
self.downsample = downsample
self.relu = build_activation_layer(self.act_cfg)
if self.non_local:
self.non_local_block = NonLocal3d(self.conv3.norm.num_features,
**self.non_local_cfg)
def forward(self, x):
"""Defines the computation performed at every call."""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
if self.non_local:
out = self.non_local_block(out)
return out
@BACKBONES.register_module()
class ResNet3d(nn.Module):
"""ResNet 3d backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
pretrained (str | None): Name of pretrained model.
pretrained2d (bool): Whether to load pretrained 2D model.
Default: True.
in_channels (int): Channel num of input features. Default: 3.
base_channels (int): Channel num of stem output features. Default: 64.
out_indices (Sequence[int]): Indices of output feature. Default: (3, ).
num_stages (int): Resnet stages. Default: 4.
spatial_strides (Sequence[int]):
Spatial strides of residual blocks of each stage.
Default: ``(1, 2, 2, 2)``.
temporal_strides (Sequence[int]):
Temporal strides of residual blocks of each stage.
Default: ``(1, 1, 1, 1)``.
dilations (Sequence[int]): Dilation of each stage.
Default: ``(1, 1, 1, 1)``.
conv1_kernel (Sequence[int]): Kernel size of the first conv layer.
Default: ``(5, 7, 7)``.
conv1_stride_t (int): Temporal stride of the first conv layer.
Default: 2.
pool1_stride_t (int): Temporal stride of the first pooling layer.
Default: 2.
with_pool2 (bool): Whether to use pool2. Default: True.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters. Default: -1.
inflate (Sequence[int]): Inflate Dims of each block.
Default: (1, 1, 1, 1).
        inflate_style (str): ``3x1x1`` or ``3x3x3``, which determines the
kernel sizes and padding strides for conv1 and conv2 in each block.
Default: '3x1x1'.
conv_cfg (dict): Config for conv layers. required keys are ``type``
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type`` and
``requires_grad``.
Default: ``dict(type='BN3d', requires_grad=True)``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU', inplace=True)``.
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
non_local (Sequence[int]): Determine whether to apply non-local module
in the corresponding block of each stages. Default: (0, 0, 0, 0).
non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
zero_init_residual (bool):
Whether to use zero initialization for residual block,
Default: True.
kwargs (dict, optional): Key arguments for "make_res_layer".
"""
arch_settings = {
18: (BasicBlock3d, (2, 2, 2, 2)),
34: (BasicBlock3d, (3, 4, 6, 3)),
50: (Bottleneck3d, (3, 4, 6, 3)),
101: (Bottleneck3d, (3, 4, 23, 3)),
152: (Bottleneck3d, (3, 8, 36, 3))
}
def __init__(self,
depth,
pretrained,
pretrained2d=True,
in_channels=3,
num_stages=4,
base_channels=64,
out_indices=(3, ),
spatial_strides=(1, 2, 2, 2),
temporal_strides=(1, 1, 1, 1),
dilations=(1, 1, 1, 1),
conv1_kernel=(5, 7, 7),
conv1_stride_t=2,
pool1_stride_t=2,
with_pool2=True,
style='pytorch',
frozen_stages=-1,
inflate=(1, 1, 1, 1),
inflate_style='3x1x1',
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
with_cp=False,
non_local=(0, 0, 0, 0),
non_local_cfg=dict(),
zero_init_residual=True,
**kwargs):
super().__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.pretrained = pretrained
self.pretrained2d = pretrained2d
self.in_channels = in_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.spatial_strides = spatial_strides
self.temporal_strides = temporal_strides
self.dilations = dilations
assert len(spatial_strides) == len(temporal_strides) == len(
dilations) == num_stages
self.conv1_kernel = conv1_kernel
self.conv1_stride_t = conv1_stride_t
self.pool1_stride_t = pool1_stride_t
self.with_pool2 = with_pool2
self.style = style
self.frozen_stages = frozen_stages
self.stage_inflations = _ntuple(num_stages)(inflate)
self.non_local_stages = _ntuple(num_stages)(non_local)
self.inflate_style = inflate_style
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = self.base_channels
self.non_local_cfg = non_local_cfg
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
spatial_stride = spatial_strides[i]
temporal_stride = temporal_strides[i]
dilation = dilations[i]
planes = self.base_channels * 2**i
res_layer = self.make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
dilation=dilation,
style=self.style,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
act_cfg=self.act_cfg,
non_local=self.non_local_stages[i],
non_local_cfg=self.non_local_cfg,
inflate=self.stage_inflations[i],
inflate_style=self.inflate_style,
with_cp=with_cp,
**kwargs)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * self.base_channels * 2**(
len(self.stage_blocks) - 1)
@staticmethod
def make_res_layer(block,
inplanes,
planes,
blocks,
spatial_stride=1,
temporal_stride=1,
dilation=1,
style='pytorch',
inflate=1,
inflate_style='3x1x1',
non_local=0,
non_local_cfg=dict(),
norm_cfg=None,
act_cfg=None,
conv_cfg=None,
with_cp=False,
**kwargs):
"""Build residual layer for ResNet3D.
Args:
block (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input feature
in each block.
planes (int): Number of channels for the output feature
in each block.
blocks (int): Number of residual blocks.
spatial_stride (int | Sequence[int]): Spatial strides in
residual and conv layers. Default: 1.
temporal_stride (int | Sequence[int]): Temporal strides in
residual and conv layers. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``,
the stride-two layer is the 3x3 conv layer, otherwise
the stride-two layer is the first 1x1 conv layer.
Default: ``pytorch``.
inflate (int | Sequence[int]): Determine whether to inflate
for each block. Default: 1.
            inflate_style (str): ``3x1x1`` or ``3x3x3``, which determines
the kernel sizes and padding strides for conv1 and conv2
in each block. Default: '3x1x1'.
non_local (int | Sequence[int]): Determine whether to apply
non-local module in the corresponding block of each stages.
Default: 0.
non_local_cfg (dict): Config for non-local module.
Default: ``dict()``.
conv_cfg (dict | None): Config for norm layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool | None): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
inflate = inflate if not isinstance(inflate,
int) else (inflate, ) * blocks
non_local = non_local if not isinstance(
non_local, int) else (non_local, ) * blocks
assert len(inflate) == blocks and len(non_local) == blocks
downsample = None
if spatial_stride != 1 or inplanes != planes * block.expansion:
downsample = ConvModule(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride),
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
layers = []
layers.append(
block(
inplanes,
planes,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
dilation=dilation,
downsample=downsample,
style=style,
inflate=(inflate[0] == 1),
inflate_style=inflate_style,
non_local=(non_local[0] == 1),
non_local_cfg=non_local_cfg,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
spatial_stride=1,
temporal_stride=1,
dilation=dilation,
style=style,
inflate=(inflate[i] == 1),
inflate_style=inflate_style,
non_local=(non_local[i] == 1),
non_local_cfg=non_local_cfg,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
return nn.Sequential(*layers)
@staticmethod
def _inflate_conv_params(conv3d, state_dict_2d, module_name_2d,
inflated_param_names):
"""Inflate a conv module from 2d to 3d.
Args:
conv3d (nn.Module): The destination conv3d module.
state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
module_name_2d (str): The name of corresponding conv module in the
2d model.
inflated_param_names (list[str]): List of parameters that have been
inflated.
"""
weight_2d_name = module_name_2d + '.weight'
conv2d_weight = state_dict_2d[weight_2d_name]
kernel_t = conv3d.weight.data.shape[2]
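        # replicate the 2D kernel along the new temporal axis and divide by its
        # length, so the inflated conv initially gives the same response as the
        # 2D conv on temporally constant input (I3D-style inflation)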
new_weight = conv2d_weight.data.unsqueeze(2).expand_as(
conv3d.weight) / kernel_t
conv3d.weight.data.copy_(new_weight)
inflated_param_names.append(weight_2d_name)
if getattr(conv3d, 'bias') is not None:
bias_2d_name = module_name_2d + '.bias'
conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])
inflated_param_names.append(bias_2d_name)
@staticmethod
def _inflate_bn_params(bn3d, state_dict_2d, module_name_2d,
inflated_param_names):
"""Inflate a norm module from 2d to 3d.
Args:
bn3d (nn.Module): The destination bn3d module.
state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
module_name_2d (str): The name of corresponding bn module in the
2d model.
inflated_param_names (list[str]): List of parameters that have been
inflated.
"""
for param_name, param in bn3d.named_parameters():
param_2d_name = f'{module_name_2d}.{param_name}'
param_2d = state_dict_2d[param_2d_name]
param.data.copy_(param_2d)
inflated_param_names.append(param_2d_name)
for param_name, param in bn3d.named_buffers():
param_2d_name = f'{module_name_2d}.{param_name}'
# some buffers like num_batches_tracked may not exist in old
# checkpoints
if param_2d_name in state_dict_2d:
param_2d = state_dict_2d[param_2d_name]
param.data.copy_(param_2d)
inflated_param_names.append(param_2d_name)
def inflate_weights(self, logger):
"""Inflate the resnet2d parameters to resnet3d.
The differences between resnet3d and resnet2d mainly lie in an extra
axis of conv kernel. To utilize the pretrained parameters in 2d model,
the weight of conv2d models should be inflated to fit in the shapes of
the 3d counterpart.
Args:
logger (logging.Logger): The logger used to print
                debugging information.
"""
state_dict_r2d = _load_checkpoint(self.pretrained)
if 'state_dict' in state_dict_r2d:
state_dict_r2d = state_dict_r2d['state_dict']
inflated_param_names = []
for name, module in self.named_modules():
if isinstance(module, ConvModule):
# we use a ConvModule to wrap conv+bn+relu layers, thus the
# name mapping is needed
if 'downsample' in name:
# layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
original_conv_name = name + '.0'
# layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
original_bn_name = name + '.1'
else:
# layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
original_conv_name = name
# layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
original_bn_name = name.replace('conv', 'bn')
if original_conv_name + '.weight' not in state_dict_r2d:
logger.warning(f'Module not exist in the state_dict_r2d'
f': {original_conv_name}')
else:
shape_2d = state_dict_r2d[original_conv_name +
'.weight'].shape
shape_3d = module.conv.weight.data.shape
if shape_2d != shape_3d[:2] + shape_3d[3:]:
logger.warning(f'Weight shape mismatch for '
f': {original_conv_name} : '
f'3d weight shape: {shape_3d}; '
f'2d weight shape: {shape_2d}. ')
else:
self._inflate_conv_params(module.conv, state_dict_r2d,
original_conv_name,
inflated_param_names)
if original_bn_name + '.weight' not in state_dict_r2d:
logger.warning(f'Module not exist in the state_dict_r2d'
f': {original_bn_name}')
else:
self._inflate_bn_params(module.bn, state_dict_r2d,
original_bn_name,
inflated_param_names)
# check if any parameters in the 2d checkpoint are not loaded
remaining_names = set(
state_dict_r2d.keys()) - set(inflated_param_names)
if remaining_names:
logger.info(f'These parameters in the 2d checkpoint are not loaded'
f': {remaining_names}')
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1 = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=self.conv1_kernel,
stride=(self.conv1_stride_t, 2, 2),
padding=tuple([(k - 1) // 2 for k in _triple(self.conv1_kernel)]),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.maxpool = nn.MaxPool3d(
kernel_size=(1, 3, 3),
stride=(self.pool1_stride_t, 2, 2),
padding=(0, 1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1))
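        # pool2 halves the temporal length after the first residual stage and
        # is only applied when with_pool2=True (see forward())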
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
if self.pretrained2d:
# Inflate 2D model into 3D model.
self.inflate_weights(logger)
else:
# Directly load 3D model.
load_checkpoint(
self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck3d):
constant_init(m.conv3.bn, 0)
elif isinstance(m, BasicBlock3d):
constant_init(m.conv2.bn, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i == 0 and self.with_pool2:
x = self.pool2(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d.py |
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from mmcv.runner import load_checkpoint,_load_checkpoint,load_state_dict
from ..builder import BACKBONES
from ...utils import get_root_logger
import pdb
from torch.autograd import Variable
try:
from mmdet.models import BACKBONES as MMDET_BACKBONES
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 400, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
# x = self.drop(x)
        # the first dropout is commented out to follow the original BERT implementation
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
# Basic transformer block shared by the encoder and the decoder
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        if init_values is not None and init_values > 0:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x):
if self.gamma_1 is None:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
# Split the input video into patch (tubelet) embeddings
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, num_frames=16, tubelet_size=2):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.tubelet_size = int(tubelet_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) * (num_frames // self.tubelet_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv3d(in_channels=in_chans, out_channels=embed_dim,
kernel_size = (self.tubelet_size, patch_size[0],patch_size[1]),
stride=(self.tubelet_size, patch_size[0], patch_size[1]))
def forward(self, x, **kwargs):
B, C, T, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
return x
# sin-cos position encoding
# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31
def get_sinusoid_encoding_table(n_position, d_hid):
''' Sinusoid position encoding table '''
# TODO: make it with torch instead of numpy
def get_position_angle_vec(position):
return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
return torch.FloatTensor(sinusoid_table).unsqueeze(0)
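# Hedged usage sketch (not part of the original file): for a 224x224 clip with
# patch_size=16, tubelet_size=2 and 16 input frames, PatchEmbed above produces
# (224 // 16) ** 2 * (16 // 2) = 1568 tokens, so
#     pos_table = get_sinusoid_encoding_table(1568, 768)
# is a fixed (1, 1568, 768) tensor (sin in even feature dims, cos in odd ones)
# that requires no gradient and is later assigned to ``self.pos_embed``.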
@BACKBONES.register_module()
class VisionTransformer3D(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=partial(nn.LayerNorm, eps=1e-6),#nn.LayerNorm,
init_values=0.,
use_learnable_pos_emb=False,
init_scale=0.,
all_frames=32,
tubelet_size=2,
use_mean_pooling=True,
feature_reshape = False,
pretrained=None):
super().__init__()
self.pretrained = pretrained
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.tubelet_size = tubelet_size
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, num_frames=all_frames, tubelet_size=self.tubelet_size)
num_patches = self.patch_embed.num_patches
if use_learnable_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
else:
            # use fixed sine-cosine positional embeddings
self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values)
for i in range(depth)])
self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
self.feature_reshape = feature_reshape
if use_learnable_pos_emb:
trunc_normal_(self.pos_embed, std=.02)
self.init_weights()
    def init_weights(self, pretrained=None):
        """Initiate the parameters either from an existing checkpoint or from
        scratch."""
        if pretrained:
            self.pretrained = pretrained
        if isinstance(self.pretrained, str):
self.apply(self._init_weights)
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
            state_dict = _load_checkpoint(self.pretrained)
            if 'module' in state_dict:
                state_dict = state_dict['module']
load_state_dict(self, state_dict, strict=False, logger=logger)
elif self.pretrained is None:
self.apply(self._init_weights)
else:
raise TypeError('pretrained must be a str or None')
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
    def reset_classifier(self, num_classes):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
B, _, _ = x.size()
if self.pos_embed is not None:
x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach()
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
        if self.feature_reshape:
            # NOTE: the hard coded 8 x 14 x 14 token grid assumes a 16-frame
            # 224x224 input with tubelet_size=2 and patch_size=16
            x = x.reshape(-1, 8, 14, 14, self.embed_dim)
            x = x.permute(0, 4, 1, 2, 3)
return x
if self.fc_norm is not None:
return self.fc_norm(x.mean(1))
else:
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
return x
@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer3D(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer3D(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer3D(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer3D(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer3D(
img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def vit_large_patch16_512(pretrained=False, **kwargs):
model = VisionTransformer3D(
img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
if mmdet_imported:
MMDET_BACKBONES.register_module()(VisionTransformer3D)
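# --- Hedged usage sketch (not part of the original file) ---------------------
# A minimal smoke test for the backbone above. The 16-frame 224x224 clip and
# the small depth/width are illustrative choices, not values required by the
# repo; with use_mean_pooling=True the output is one pooled feature per clip.
if __name__ == '__main__':
    model = VisionTransformer3D(
        img_size=224, patch_size=16, embed_dim=384, depth=2, num_heads=6,
        all_frames=16, tubelet_size=2, init_values=0., use_mean_pooling=True)
    clip = torch.randn(2, 3, 16, 224, 224)  # (N, C, T, H, W)
    feat = model(clip)
    print(feat.shape)  # expected: torch.Size([2, 384])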
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/ViT3D.py |
import math
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (ConvModule, Swish, build_activation_layer, constant_init,
kaiming_init)
from mmcv.runner import load_checkpoint
from mmcv.utils import _BatchNorm
from ...utils import get_root_logger
from ..registry import BACKBONES
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.bottleneck = self._round_width(channels, reduction)
self.fc1 = nn.Conv3d(
channels, self.bottleneck, kernel_size=1, padding=0)
self.relu = nn.ReLU()
self.fc2 = nn.Conv3d(
self.bottleneck, channels, kernel_size=1, padding=0)
self.sigmoid = nn.Sigmoid()
@staticmethod
def _round_width(width, multiplier, min_width=8, divisor=8):
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width,
int(width + divisor / 2) // divisor * divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
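# Hedged usage sketch (not part of the original file): the squeeze-and-excite
# gate keeps the input shape and only rescales channels. For example,
# SEModule(channels=48, reduction=1 / 16) builds a 48 -> 8 -> 48 gate
# (the bottleneck width is _round_width(48, 1 / 16) = 8) and maps any
# (N, 48, T, H, W) tensor to an output of exactly the same shape.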
class BlockX3D(nn.Module):
"""BlockX3D 3d building block for X3D.
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
outplanes (int): Number of channels produced by final the conv3d layer.
spatial_stride (int): Spatial stride in the conv3d layer. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
se_ratio (float | None): The reduction ratio of squeeze and excitation
unit. If set as None, it means not using SE unit. Default: None.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
conv_cfg (dict): Config dict for convolution layer.
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type``,
Default: ``dict(type='BN3d')``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU')``.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
def __init__(self,
inplanes,
planes,
outplanes,
spatial_stride=1,
downsample=None,
se_ratio=None,
use_swish=True,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d'),
act_cfg=dict(type='ReLU'),
with_cp=False):
super().__init__()
self.inplanes = inplanes
self.planes = planes
self.outplanes = outplanes
self.spatial_stride = spatial_stride
self.downsample = downsample
self.se_ratio = se_ratio
self.use_swish = use_swish
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.act_cfg_swish = dict(type='Swish')
self.with_cp = with_cp
self.conv1 = ConvModule(
in_channels=inplanes,
out_channels=planes,
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
# Here we use the channel-wise conv
self.conv2 = ConvModule(
in_channels=planes,
out_channels=planes,
kernel_size=3,
stride=(1, self.spatial_stride, self.spatial_stride),
padding=1,
groups=planes,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.swish = Swish()
self.conv3 = ConvModule(
in_channels=planes,
out_channels=outplanes,
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
if self.se_ratio is not None:
self.se_module = SEModule(planes, self.se_ratio)
self.relu = build_activation_layer(self.act_cfg)
def forward(self, x):
"""Defines the computation performed at every call."""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.se_ratio is not None:
out = self.se_module(out)
out = self.swish(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
# We do not support initialize with 2D pretrain weight for X3D
@BACKBONES.register_module()
class X3D(nn.Module):
"""X3D backbone. https://arxiv.org/pdf/2004.04730.pdf.
Args:
gamma_w (float): Global channel width expansion factor. Default: 1.
gamma_b (float): Bottleneck channel width expansion factor. Default: 1.
gamma_d (float): Network depth expansion factor. Default: 1.
pretrained (str | None): Name of pretrained model. Default: None.
in_channels (int): Channel num of input features. Default: 3.
num_stages (int): Resnet stages. Default: 4.
spatial_strides (Sequence[int]):
Spatial strides of residual blocks of each stage.
            Default: ``(2, 2, 2, 2)``.
frozen_stages (int): Stages to be frozen (all param fixed). If set to
-1, it means not freezing any parameters. Default: -1.
se_style (str): The style of inserting SE modules into BlockX3D, 'half'
denotes insert into half of the blocks, while 'all' denotes insert
into all blocks. Default: 'half'.
se_ratio (float | None): The reduction ratio of squeeze and excitation
unit. If set as None, it means not using SE unit. Default: 1 / 16.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
conv_cfg (dict): Config for conv layers. required keys are ``type``
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type`` and
``requires_grad``.
Default: ``dict(type='BN3d', requires_grad=True)``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU', inplace=True)``.
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool):
Whether to use zero initialization for residual block,
Default: True.
kwargs (dict, optional): Key arguments for "make_res_layer".
"""
def __init__(self,
gamma_w=1.0,
gamma_b=1.0,
gamma_d=1.0,
pretrained=None,
in_channels=3,
num_stages=4,
spatial_strides=(2, 2, 2, 2),
frozen_stages=-1,
se_style='half',
se_ratio=1 / 16,
use_swish=True,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
with_cp=False,
zero_init_residual=True,
**kwargs):
super().__init__()
self.gamma_w = gamma_w
self.gamma_b = gamma_b
self.gamma_d = gamma_d
self.pretrained = pretrained
self.in_channels = in_channels
# Hard coded, can be changed by gamma_w
self.base_channels = 24
self.stage_blocks = [1, 2, 5, 3]
# apply parameters gamma_w and gamma_d
self.base_channels = self._round_width(self.base_channels,
self.gamma_w)
self.stage_blocks = [
self._round_repeats(x, self.gamma_d) for x in self.stage_blocks
]
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.spatial_strides = spatial_strides
assert len(spatial_strides) == num_stages
self.frozen_stages = frozen_stages
self.se_style = se_style
assert self.se_style in ['all', 'half']
self.se_ratio = se_ratio
assert (self.se_ratio is None) or (self.se_ratio > 0)
self.use_swish = use_swish
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
self.block = BlockX3D
self.stage_blocks = self.stage_blocks[:num_stages]
self.layer_inplanes = self.base_channels
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
spatial_stride = spatial_strides[i]
inplanes = self.base_channels * 2**i
planes = int(inplanes * self.gamma_b)
res_layer = self.make_res_layer(
self.block,
self.layer_inplanes,
inplanes,
planes,
num_blocks,
spatial_stride=spatial_stride,
se_style=self.se_style,
se_ratio=self.se_ratio,
use_swish=self.use_swish,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
act_cfg=self.act_cfg,
with_cp=with_cp,
**kwargs)
self.layer_inplanes = inplanes
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.base_channels * 2**(len(self.stage_blocks) - 1)
self.conv5 = ConvModule(
self.feat_dim,
int(self.feat_dim * self.gamma_b),
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.feat_dim = int(self.feat_dim * self.gamma_b)
@staticmethod
def _round_width(width, multiplier, min_depth=8, divisor=8):
"""Round width of filters based on width multiplier."""
if not multiplier:
return width
width *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth,
int(width + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * width:
new_filters += divisor
return int(new_filters)
@staticmethod
def _round_repeats(repeats, multiplier):
"""Round number of layers based on depth multiplier."""
if not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
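    # Worked example (illustrative, not from the original file): with the hard
    # coded base_channels=24 and stage_blocks=[1, 2, 5, 3], gamma_w=2.0 widens
    # the stem to _round_width(24, 2.0) = 48 channels, and gamma_d=2.2 deepens
    # the stages to [_round_repeats(b, 2.2) for b in (1, 2, 5, 3)] = [3, 5, 11, 7].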
# the module is parameterized with gamma_b
# no temporal_stride
def make_res_layer(self,
block,
layer_inplanes,
inplanes,
planes,
blocks,
spatial_stride=1,
se_style='half',
se_ratio=None,
use_swish=True,
norm_cfg=None,
act_cfg=None,
conv_cfg=None,
with_cp=False,
**kwargs):
"""Build residual layer for ResNet3D.
Args:
block (nn.Module): Residual module to be built.
layer_inplanes (int): Number of channels for the input feature
of the res layer.
inplanes (int): Number of channels for the input feature in each
block, which equals to base_channels * gamma_w.
planes (int): Number of channels for the output feature in each
block, which equals to base_channel * gamma_w * gamma_b.
blocks (int): Number of residual blocks.
spatial_stride (int): Spatial strides in residual and conv layers.
Default: 1.
se_style (str): The style of inserting SE modules into BlockX3D,
'half' denotes insert into half of the blocks, while 'all'
denotes insert into all blocks. Default: 'half'.
se_ratio (float | None): The reduction ratio of squeeze and
excitation unit. If set as None, it means not using SE unit.
Default: None.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
            conv_cfg (dict | None): Config for conv layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool | None): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
downsample = None
if spatial_stride != 1 or layer_inplanes != inplanes:
downsample = ConvModule(
layer_inplanes,
inplanes,
kernel_size=1,
stride=(1, spatial_stride, spatial_stride),
padding=0,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
use_se = [False] * blocks
if self.se_style == 'all':
use_se = [True] * blocks
elif self.se_style == 'half':
use_se = [i % 2 == 0 for i in range(blocks)]
else:
raise NotImplementedError
layers = []
layers.append(
block(
layer_inplanes,
planes,
inplanes,
spatial_stride=spatial_stride,
downsample=downsample,
se_ratio=se_ratio if use_se[0] else None,
use_swish=use_swish,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
inplanes,
spatial_stride=1,
se_ratio=se_ratio if use_se[i] else None,
use_swish=use_swish,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
return nn.Sequential(*layers)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1_s = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=(1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None)
self.conv1_t = ConvModule(
self.base_channels,
self.base_channels,
kernel_size=(5, 1, 1),
stride=(1, 1, 1),
padding=(2, 0, 0),
groups=self.base_channels,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1_s.eval()
self.conv1_t.eval()
for param in self.conv1_s.parameters():
param.requires_grad = False
for param in self.conv1_t.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, BlockX3D):
constant_init(m.conv3.bn, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1_s(x)
x = self.conv1_t(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
x = res_layer(x)
x = self.conv5(x)
return x
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
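# --- Hedged usage sketch (not part of the original file) ---------------------
# A small forward-pass check for the backbone above; the expansion factors and
# the 13-frame 160x160 clip are illustrative choices (roughly X3D-M-like), not
# values mandated by this file.
if __name__ == '__main__':
    import torch  # torch itself is not imported at module level in this file
    backbone = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2)
    backbone.init_weights()
    clip = torch.randn(1, 3, 13, 160, 160)  # (N, C, T, H, W)
    feat = backbone(clip)
    # The stem and the four stages each halve the spatial size (160 / 2**5 = 5)
    # while the temporal length is preserved.
    print(feat.shape)  # expected: torch.Size([1, 432, 13, 5, 5])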
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/x3d.py |
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.utils import _BatchNorm
from ..registry import BACKBONES
from .resnet3d import Bottleneck3d, ResNet3d
class CSNBottleneck3d(Bottleneck3d):
"""Channel-Separated Bottleneck Block.
This module is proposed in
"Video Classification with Channel-Separated Convolutional Networks"
    Link: https://arxiv.org/abs/1904.02811
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
bottleneck_mode (str): Determine which ways to factorize a 3D
bottleneck block using channel-separated convolutional networks.
If set to 'ip', it will replace the 3x3x3 conv2 layer with a
1x1x1 traditional convolution and a 3x3x3 depthwise
convolution, i.e., Interaction-preserved channel-separated
bottleneck block.
If set to 'ir', it will replace the 3x3x3 conv2 layer with a
3x3x3 depthwise convolution, which is derived from preserved
bottleneck block by removing the extra 1x1x1 convolution,
i.e., Interaction-reduced channel-separated bottleneck block.
Default: 'ir'.
args (position arguments): Position arguments for Bottleneck.
kwargs (dict, optional): Keyword arguments for Bottleneck.
"""
def __init__(self,
inplanes,
planes,
*args,
bottleneck_mode='ir',
**kwargs):
super(CSNBottleneck3d, self).__init__(inplanes, planes, *args,
**kwargs)
self.bottleneck_mode = bottleneck_mode
conv2 = []
if self.bottleneck_mode == 'ip':
conv2.append(
nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False))
conv2_kernel_size = self.conv2.conv.kernel_size
conv2_stride = self.conv2.conv.stride
conv2_padding = self.conv2.conv.padding
conv2_dilation = self.conv2.conv.dilation
conv2_bias = bool(self.conv2.conv.bias)
self.conv2 = ConvModule(
planes,
planes,
conv2_kernel_size,
stride=conv2_stride,
padding=conv2_padding,
dilation=conv2_dilation,
bias=conv2_bias,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
groups=planes)
conv2.append(self.conv2)
self.conv2 = nn.Sequential(*conv2)
@BACKBONES.register_module()
class ResNet3dCSN(ResNet3d):
"""ResNet backbone for CSN.
Args:
depth (int): Depth of ResNetCSN, from {18, 34, 50, 101, 152}.
pretrained (str | None): Name of pretrained model.
temporal_strides (tuple[int]):
Temporal strides of residual blocks of each stage.
Default: (1, 2, 2, 2).
conv1_kernel (tuple[int]): Kernel size of the first conv layer.
Default: (3, 7, 7).
conv1_stride_t (int): Temporal stride of the first conv layer.
Default: 1.
pool1_stride_t (int): Temporal stride of the first pooling layer.
Default: 1.
norm_cfg (dict): Config for norm layers. required keys are `type` and
`requires_grad`.
Default: dict(type='BN3d', requires_grad=True, eps=1e-3).
        inflate_style (str): `3x1x1` or `3x3x3`, which determines the kernel
            sizes and padding strides for conv1 and conv2 in each block.
            Default: '3x3x3'.
bottleneck_mode (str): Determine which ways to factorize a 3D
bottleneck block using channel-separated convolutional networks.
If set to 'ip', it will replace the 3x3x3 conv2 layer with a
1x1x1 traditional convolution and a 3x3x3 depthwise
convolution, i.e., Interaction-preserved channel-separated
bottleneck block.
If set to 'ir', it will replace the 3x3x3 conv2 layer with a
3x3x3 depthwise convolution, which is derived from preserved
bottleneck block by removing the extra 1x1x1 convolution,
i.e., Interaction-reduced channel-separated bottleneck block.
            Default: 'ir'.
kwargs (dict, optional): Key arguments for "make_res_layer".
"""
def __init__(self,
depth,
pretrained,
temporal_strides=(1, 2, 2, 2),
conv1_kernel=(3, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
norm_cfg=dict(type='BN3d', requires_grad=True, eps=1e-3),
inflate_style='3x3x3',
bottleneck_mode='ir',
bn_frozen=False,
**kwargs):
self.arch_settings = {
# 18: (BasicBlock3d, (2, 2, 2, 2)),
# 34: (BasicBlock3d, (3, 4, 6, 3)),
50: (CSNBottleneck3d, (3, 4, 6, 3)),
101: (CSNBottleneck3d, (3, 4, 23, 3)),
152: (CSNBottleneck3d, (3, 8, 36, 3))
}
self.bn_frozen = bn_frozen
if bottleneck_mode not in ['ip', 'ir']:
raise ValueError(f'Bottleneck mode must be "ip" or "ir",'
f'but got {bottleneck_mode}.')
super(ResNet3dCSN, self).__init__(
depth,
pretrained,
temporal_strides=temporal_strides,
conv1_kernel=conv1_kernel,
conv1_stride_t=conv1_stride_t,
pool1_stride_t=pool1_stride_t,
norm_cfg=norm_cfg,
inflate_style=inflate_style,
bottleneck_mode=bottleneck_mode,
**kwargs)
def train(self, mode=True):
super(ResNet3d, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
if self.bn_frozen:
for param in m.parameters():
param.requires_grad = False
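# Hedged usage sketch (not part of the original file), assuming the standard
# mmaction-style ResNet3d constructor that this class inherits from:
#     backbone = ResNet3dCSN(depth=152, pretrained=None, bottleneck_mode='ir')
#     backbone.init_weights()
# replaces every 3x3x3 conv2 with a depthwise (groups=planes) convolution, so a
# (N, 3, T, H, W) clip should map to a (N, 2048, T // 8, H // 32, W // 32)
# feature map under the default temporal_strides=(1, 2, 2, 2).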
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_csn.py |
from .c3d import C3D
from .resnet import ResNet
from .resnet2plus1d import ResNet2Plus1d
from .resnet3d import ResNet3d
from .resnet3d_csn import ResNet3dCSN
from .resnet3d_slowfast import ResNet3dSlowFast
from .resnet3d_slowonly import ResNet3dSlowOnly
from .resnet_audio import ResNetAudio
from .resnet_tin import ResNetTIN
from .resnet_tsm import ResNetTSM
from .x3d import X3D
from .ViT3D import VisionTransformer3D
__all__ = [
'C3D', 'ResNet', 'ResNet3d', 'ResNetTSM', 'ResNet2Plus1d',
'ResNet3dSlowFast', 'ResNet3dSlowOnly', 'ResNet3dCSN', 'ResNetTIN', 'X3D',
'ResNetAudio', 'VisionTransformer3D'
]
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/__init__.py |
import warnings
import torch
import torch.nn as nn
from ..registry import BACKBONES
from .resnet_tsm import ResNetTSM
try:
from mmcv.ops import tin_shift
except (ImportError, ModuleNotFoundError):
warnings.warn('Please install mmcv-full to support "tin_shift"')
def linear_sampler(data, offset):
"""Differentiable Temporal-wise Frame Sampling, which is essentially a
linear interpolation process.
It gets the feature map which has been split into several groups
and shift them by different offsets according to their groups.
Then compute the weighted sum along with the temporal dimension.
Args:
data (torch.Tensor): Split data for certain group in shape
[N, num_segments, C, H, W].
offset (torch.Tensor): Data offsets for this group data in shape
[N, num_segments].
"""
# [N, num_segments, C, H, W]
n, t, c, h, w = data.shape
# offset0, offset1: [N, num_segments]
offset0 = torch.floor(offset).int()
offset1 = offset0 + 1
# data, data0, data1: [N, num_segments, C, H * W]
data = data.view(n, t, c, h * w).contiguous()
data0 = tin_shift(data, offset0)
data1 = tin_shift(data, offset1)
# weight0, weight1: [N, num_segments]
weight0 = 1 - (offset - offset0.float())
weight1 = 1 - weight0
# weight0, weight1:
# [N, num_segments] -> [N, num_segments, C // num_segments] -> [N, C]
group_size = offset.shape[1]
weight0 = weight0[:, :, None].repeat(1, 1, c // group_size)
weight0 = weight0.view(weight0.size(0), -1)
weight1 = weight1[:, :, None].repeat(1, 1, c // group_size)
weight1 = weight1.view(weight1.size(0), -1)
# weight0, weight1: [N, C] -> [N, 1, C, 1]
weight0 = weight0[:, None, :, None]
weight1 = weight1[:, None, :, None]
# output: [N, num_segments, C, H * W] -> [N, num_segments, C, H, W]
output = weight0 * data0 + weight1 * data1
output = output.view(n, t, c, h, w)
return output
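# Worked example (illustrative, not from the original file): for a fractional
# offset of 0.3 the two shifted copies use floor(0.3) = 0 and 1, blended with
# weight0 = 1 - (0.3 - 0) = 0.7 and weight1 = 0.3, i.e. plain linear
# interpolation along time; an integer offset degenerates to a hard shift
# because one of the two weights becomes exactly 1.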
class CombineNet(nn.Module):
"""Combine Net.
It combines Temporal interlace module with some part of ResNet layer.
Args:
net1 (nn.module): Temporal interlace module.
net2 (nn.module): Some part of ResNet layer.
"""
def __init__(self, net1, net2):
super().__init__()
self.net1 = net1
self.net2 = net2
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# input shape: [num_batches * num_segments, C, H, W]
# output x shape: [num_batches * num_segments, C, H, W]
x = self.net1(x)
# [num_batches * num_segments, C, H, W]
x = self.net2(x)
return x
class WeightNet(nn.Module):
"""WeightNet in Temporal interlace module.
The WeightNet consists of two parts: one convolution layer
and a sigmoid function. Following the convolution layer, the sigmoid
function and rescale module can scale our output to the range (0, 2).
Here we set the initial bias of the convolution layer to 0, and the
final initial output will be 1.0.
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
"""
def __init__(self, in_channels, groups):
super().__init__()
self.sigmoid = nn.Sigmoid()
self.groups = groups
self.conv = nn.Conv1d(in_channels, groups, 3, padding=1)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
# we set the initial bias of the convolution
# layer to 0, and the final initial output will be 1.0
self.conv.bias.data[...] = 0
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# calculate weight
# [N, C, T]
n, _, t = x.shape
# [N, groups, T]
x = self.conv(x)
x = x.view(n, self.groups, t)
# [N, T, groups]
x = x.permute(0, 2, 1)
# scale the output to range (0, 2)
x = 2 * self.sigmoid(x)
# [N, T, groups]
return x
class OffsetNet(nn.Module):
"""OffsetNet in Temporal interlace module.
The OffsetNet consists of one convolution layer and two fc layers
with a relu activation following with a sigmoid function. Following
the convolution layer, two fc layers and relu are applied to the output.
Then, apply the sigmoid function with a multiply factor and a minus 0.5
to transform the output to (-4, 4).
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
num_segments (int): Number of frame segments.
"""
def __init__(self, in_channels, groups, num_segments):
super().__init__()
self.sigmoid = nn.Sigmoid()
# hard code ``kernel_size`` and ``padding`` according to original repo.
kernel_size = 3
padding = 1
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc1 = nn.Linear(num_segments, num_segments)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_segments, groups)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
        # The bias of the last fc layer is initialized to 0.5108 so that the
        # scaled sigmoid in ``forward`` produces an initial offset of about 0.5
self.fc2.bias.data[...] = 0.5108
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# calculate offset
# [N, C, T]
n, _, t = x.shape
# [N, 1, T]
x = self.conv(x)
# [N, T]
x = x.view(n, t)
# [N, T]
x = self.relu(self.fc1(x))
# [N, groups]
x = self.fc2(x)
# [N, 1, groups]
x = x.view(n, 1, -1)
# to make sure the output is in (-t/2, t/2)
# where t = num_segments = 8
x = 4 * (self.sigmoid(x) - 0.5)
# [N, 1, groups]
return x
class TemporalInterlace(nn.Module):
"""Temporal interlace module.
This module is proposed in `Temporal Interlacing Network
<https://arxiv.org/abs/2001.06499>`_
Args:
in_channels (int): Channel num of input features.
num_segments (int): Number of frame segments. Default: 3.
shift_div (int): Number of division parts for shift. Default: 1.
"""
def __init__(self, in_channels, num_segments=3, shift_div=1):
super().__init__()
self.num_segments = num_segments
self.shift_div = shift_div
self.in_channels = in_channels
# hard code ``deform_groups`` according to original repo.
self.deform_groups = 2
self.offset_net = OffsetNet(in_channels // shift_div,
self.deform_groups, num_segments)
self.weight_net = WeightNet(in_channels // shift_div,
self.deform_groups)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# x: [N, C, H, W],
# where N = num_batches x num_segments, C = shift_div * num_folds
n, c, h, w = x.size()
num_batches = n // self.num_segments
num_folds = c // self.shift_div
# x_out: [num_batches x num_segments, C, H, W]
x_out = torch.zeros((n, c, h, w), device=x.device)
# x_descriptor: [num_batches, num_segments, num_folds, H, W]
x_descriptor = x[:, :num_folds, :, :].view(num_batches,
self.num_segments,
num_folds, h, w)
# x should only obtain information on temporal and channel dimensions
# x_pooled: [num_batches, num_segments, num_folds, W]
x_pooled = torch.mean(x_descriptor, 3)
# x_pooled: [num_batches, num_segments, num_folds]
x_pooled = torch.mean(x_pooled, 3)
# x_pooled: [num_batches, num_folds, num_segments]
x_pooled = x_pooled.permute(0, 2, 1).contiguous()
# Calculate weight and bias, here groups = 2
# x_offset: [num_batches, groups]
x_offset = self.offset_net(x_pooled).view(num_batches, -1)
# x_weight: [num_batches, num_segments, groups]
x_weight = self.weight_net(x_pooled)
# x_offset: [num_batches, 2 * groups]
x_offset = torch.cat([x_offset, -x_offset], 1)
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = linear_sampler(x_descriptor, x_offset)
# x_weight: [num_batches, num_segments, groups, 1]
x_weight = x_weight[:, :, :, None]
# x_weight:
# [num_batches, num_segments, groups * 2, c // self.shift_div // 4]
x_weight = x_weight.repeat(1, 1, 2, num_folds // 2 // 2)
# x_weight:
# [num_batches, num_segments, c // self.shift_div = num_folds]
x_weight = x_weight.view(x_weight.size(0), x_weight.size(1), -1)
# x_weight: [num_batches, num_segments, num_folds, 1, 1]
x_weight = x_weight[:, :, :, None, None]
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = x_shift * x_weight
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = x_shift.contiguous().view(n, num_folds, h, w)
# x_out: [num_batches x num_segments, C, H, W]
x_out[:, :num_folds, :] = x_shift
x_out[:, num_folds:, :] = x[:, num_folds:, :]
return x_out
@BACKBONES.register_module()
class ResNetTIN(ResNetTSM):
"""ResNet backbone for TIN.
Args:
depth (int): Depth of ResNet, from {18, 34, 50, 101, 152}.
num_segments (int): Number of frame segments. Default: 8.
is_tin (bool): Whether to apply temporal interlace. Default: True.
shift_div (int): Number of division parts for shift. Default: 4.
kwargs (dict, optional): Arguments for ResNet.
"""
def __init__(self,
depth,
num_segments=8,
is_tin=True,
shift_div=4,
**kwargs):
super().__init__(depth, **kwargs)
self.num_segments = num_segments
self.is_tin = is_tin
self.shift_div = shift_div
def make_temporal_interlace(self):
"""Make temporal interlace for some layers."""
num_segment_list = [self.num_segments] * 4
assert num_segment_list[-1] > 0
n_round = 1
if len(list(self.layer3.children())) >= 23:
print(f'=> Using n_round {n_round} to insert temporal shift.')
def make_block_interlace(stage, num_segments, shift_div):
"""Apply Deformable shift for a ResNet layer module.
Args:
stage (nn.module): A ResNet layer to be deformed.
num_segments (int): Number of frame segments.
shift_div (int): Number of division parts for shift.
Returns:
nn.Sequential: A Sequential container consisted of
deformed Interlace blocks.
"""
blocks = list(stage.children())
for i, b in enumerate(blocks):
if i % n_round == 0:
tds = TemporalInterlace(
b.conv1.in_channels,
num_segments=num_segments,
shift_div=shift_div)
blocks[i].conv1.conv = CombineNet(tds,
blocks[i].conv1.conv)
return nn.Sequential(*blocks)
self.layer1 = make_block_interlace(self.layer1, num_segment_list[0],
self.shift_div)
self.layer2 = make_block_interlace(self.layer2, num_segment_list[1],
self.shift_div)
self.layer3 = make_block_interlace(self.layer3, num_segment_list[2],
self.shift_div)
self.layer4 = make_block_interlace(self.layer4, num_segment_list[3],
self.shift_div)
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
super(ResNetTSM, self).init_weights()
if self.is_tin:
self.make_temporal_interlace()
if len(self.non_local_cfg) != 0:
self.make_non_local()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_tin.py |
import torch
import torch.nn as nn
from mmcv.cnn import NonLocal3d
from torch.nn.modules.utils import _ntuple
from ..registry import BACKBONES
from .resnet import ResNet
class NL3DWrapper(nn.Module):
"""3D Non-local wrapper for ResNet50.
Wrap ResNet layers with 3D NonLocal modules.
Args:
block (nn.Module): Residual blocks to be built.
num_segments (int): Number of frame segments.
non_local_cfg (dict): Config for non-local layers. Default: ``dict()``.
"""
def __init__(self, block, num_segments, non_local_cfg=dict()):
super(NL3DWrapper, self).__init__()
self.block = block
self.non_local_cfg = non_local_cfg
self.non_local_block = NonLocal3d(self.block.conv3.norm.num_features,
**self.non_local_cfg)
self.num_segments = num_segments
def forward(self, x):
x = self.block(x)
n, c, h, w = x.size()
x = x.view(n // self.num_segments, self.num_segments, c, h,
w).transpose(1, 2).contiguous()
x = self.non_local_block(x)
x = x.transpose(1, 2).contiguous().view(n, c, h, w)
return x
class TemporalShift(nn.Module):
"""Temporal shift module.
This module is proposed in
`TSM: Temporal Shift Module for Efficient Video Understanding
<https://arxiv.org/abs/1811.08383>`_
Args:
net (nn.module): Module to make temporal shift.
num_segments (int): Number of frame segments. Default: 3.
shift_div (int): Number of divisions for shift. Default: 8.
"""
def __init__(self, net, num_segments=3, shift_div=8):
super().__init__()
self.net = net
self.num_segments = num_segments
self.shift_div = shift_div
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = self.shift(x, self.num_segments, shift_div=self.shift_div)
return self.net(x)
@staticmethod
def shift(x, num_segments, shift_div=3):
"""Perform temporal shift operation on the feature.
Args:
x (torch.Tensor): The input feature to be shifted.
num_segments (int): Number of frame segments.
shift_div (int): Number of divisions for shift. Default: 3.
Returns:
torch.Tensor: The shifted feature.
"""
# [N, C, H, W]
n, c, h, w = x.size()
# [N // num_segments, num_segments, C, H*W]
# can't use 5 dimensional array on PPL2D backend for caffe
x = x.view(-1, num_segments, c, h * w)
# get shift fold
fold = c // shift_div
# split c channel into three parts:
# left_split, mid_split, right_split
left_split = x[:, :, :fold, :]
mid_split = x[:, :, fold:2 * fold, :]
right_split = x[:, :, 2 * fold:, :]
        # can't use torch.zeros(*A.shape) or torch.zeros_like(A) because
        # arrays used in caffe inference must be produced by computation
        # shift left along the temporal dimension for `left_split`
zeros = left_split - left_split
blank = zeros[:, :1, :, :]
left_split = left_split[:, 1:, :, :]
left_split = torch.cat((left_split, blank), 1)
# shift right on num_segments channel in `mid_split`
zeros = mid_split - mid_split
blank = zeros[:, :1, :, :]
mid_split = mid_split[:, :-1, :, :]
mid_split = torch.cat((blank, mid_split), 1)
# right_split: no shift
# concatenate
out = torch.cat((left_split, mid_split, right_split), 2)
# [N, C, H, W]
# restore the original dimension
return out.view(n, c, h, w)
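    # Worked example (illustrative, not from the original file): with
    # num_segments=3 and shift_div=3 every frame keeps roughly a third of its
    # channels in place, the first third is shifted left (frame t reads frame
    # t + 1, zero padded at the end), the second third is shifted right
    # (frame t reads frame t - 1, zero padded at the start).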
@BACKBONES.register_module()
class ResNetTSM(ResNet):
"""ResNet backbone for TSM.
    Args:
        depth (int): Depth of ResNet, from {18, 34, 50, 101, 152}.
        num_segments (int): Number of frame segments. Default: 8.
is_shift (bool): Whether to make temporal shift in reset layers.
Default: True.
non_local (Sequence[int]): Determine whether to apply non-local module
in the corresponding block of each stages. Default: (0, 0, 0, 0).
non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
shift_div (int): Number of div for shift. Default: 8.
shift_place (str): Places in resnet layers for shift, which is chosen
from ['block', 'blockres'].
If set to 'block', it will apply temporal shift to all child blocks
in each resnet layer.
If set to 'blockres', it will apply temporal shift to each `conv1`
layer of all child blocks in each resnet layer.
Default: 'blockres'.
temporal_pool (bool): Whether to add temporal pooling. Default: False.
**kwargs (keyword arguments, optional): Arguments for ResNet.
"""
def __init__(self,
depth,
num_segments=8,
is_shift=True,
non_local=(0, 0, 0, 0),
non_local_cfg=dict(),
shift_div=8,
shift_place='blockres',
temporal_pool=False,
**kwargs):
super().__init__(depth, **kwargs)
self.num_segments = num_segments
self.is_shift = is_shift
self.shift_div = shift_div
self.shift_place = shift_place
self.temporal_pool = temporal_pool
self.non_local = non_local
self.non_local_stages = _ntuple(self.num_stages)(non_local)
self.non_local_cfg = non_local_cfg
def make_temporal_shift(self):
"""Make temporal shift for some layers."""
if self.temporal_pool:
num_segment_list = [
self.num_segments, self.num_segments // 2,
self.num_segments // 2, self.num_segments // 2
]
else:
num_segment_list = [self.num_segments] * 4
if num_segment_list[-1] <= 0:
raise ValueError('num_segment_list[-1] must be positive')
if self.shift_place == 'block':
def make_block_temporal(stage, num_segments):
"""Make temporal shift on some blocks.
Args:
stage (nn.Module): Model layers to be shifted.
num_segments (int): Number of frame segments.
Returns:
nn.Module: The shifted blocks.
"""
blocks = list(stage.children())
for i, b in enumerate(blocks):
blocks[i] = TemporalShift(
b, num_segments=num_segments, shift_div=self.shift_div)
return nn.Sequential(*blocks)
self.layer1 = make_block_temporal(self.layer1, num_segment_list[0])
self.layer2 = make_block_temporal(self.layer2, num_segment_list[1])
self.layer3 = make_block_temporal(self.layer3, num_segment_list[2])
self.layer4 = make_block_temporal(self.layer4, num_segment_list[3])
elif 'blockres' in self.shift_place:
n_round = 1
if len(list(self.layer3.children())) >= 23:
n_round = 2
def make_block_temporal(stage, num_segments):
"""Make temporal shift on some blocks.
Args:
stage (nn.Module): Model layers to be shifted.
num_segments (int): Number of frame segments.
Returns:
nn.Module: The shifted blocks.
"""
blocks = list(stage.children())
for i, b in enumerate(blocks):
if i % n_round == 0:
blocks[i].conv1.conv = TemporalShift(
b.conv1.conv,
num_segments=num_segments,
shift_div=self.shift_div)
return nn.Sequential(*blocks)
self.layer1 = make_block_temporal(self.layer1, num_segment_list[0])
self.layer2 = make_block_temporal(self.layer2, num_segment_list[1])
self.layer3 = make_block_temporal(self.layer3, num_segment_list[2])
self.layer4 = make_block_temporal(self.layer4, num_segment_list[3])
else:
raise NotImplementedError
def make_temporal_pool(self):
"""Make temporal pooling between layer1 and layer2, using a 3D max
pooling layer."""
class TemporalPool(nn.Module):
"""Temporal pool module.
Wrap layer2 in ResNet50 with a 3D max pooling layer.
Args:
net (nn.Module): Module to make temporal pool.
num_segments (int): Number of frame segments.
"""
def __init__(self, net, num_segments):
super().__init__()
self.net = net
self.num_segments = num_segments
self.max_pool3d = nn.MaxPool3d(
kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))
def forward(self, x):
# [N, C, H, W]
n, c, h, w = x.size()
# [N // num_segments, C, num_segments, H, W]
x = x.view(n // self.num_segments, self.num_segments, c, h,
w).transpose(1, 2)
                # [N // num_segments, C, num_segments // 2, H, W]
x = self.max_pool3d(x)
# [N // 2, C, H, W]
x = x.transpose(1, 2).contiguous().view(n // 2, c, h, w)
return self.net(x)
self.layer2 = TemporalPool(self.layer2, self.num_segments)
def make_non_local(self):
# This part is for ResNet50
for i in range(self.num_stages):
non_local_stage = self.non_local_stages[i]
if sum(non_local_stage) == 0:
continue
layer_name = f'layer{i + 1}'
res_layer = getattr(self, layer_name)
for idx, non_local in enumerate(non_local_stage):
if non_local:
res_layer[idx] = NL3DWrapper(res_layer[idx],
self.num_segments,
self.non_local_cfg)
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
super().init_weights()
if self.is_shift:
self.make_temporal_shift()
if len(self.non_local_cfg) != 0:
self.make_non_local()
if self.temporal_pool:
self.make_temporal_pool()
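# --- Hedged usage sketch (not part of the original file) ---------------------
# A CPU smoke test for the backbone above. The input is the usual TSM layout of
# num_batches * num_segments stacked frames; the 224x224 size and the single
# clip are illustrative choices.
if __name__ == '__main__':
    backbone = ResNetTSM(depth=50, num_segments=8, shift_div=8, pretrained=None)
    backbone.init_weights()  # inserts the temporal shift into the residual blocks
    frames = torch.randn(1 * 8, 3, 224, 224)  # (N * num_segments, C, H, W)
    feat = backbone(frames)
    print(feat.shape)  # expected: torch.Size([8, 2048, 7, 7])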
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_tsm.py |
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import _load_checkpoint, load_checkpoint
from mmcv.utils import _BatchNorm
from torch.utils import checkpoint as cp
from ...utils import get_root_logger
from ..registry import BACKBONES
class BasicBlock(nn.Module):
"""Basic block for ResNet.
Args:
inplanes (int): Number of channels for the input in first conv2d layer.
planes (int): Number of channels produced by some norm/conv2d layers.
stride (int): Stride in the conv layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv').
norm_cfg (dict):
Config for norm layers. required keys are `type` and
`requires_grad`. Default: dict(type='BN2d', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
with_cp=False):
super().__init__()
assert style in ['pytorch', 'caffe']
self.conv1 = ConvModule(
inplanes,
planes,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv2 = ConvModule(
planes,
planes,
kernel_size=3,
stride=1,
padding=1,
dilation=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.style = style
self.stride = stride
self.dilation = dilation
self.norm_cfg = norm_cfg
assert not with_cp
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""Bottleneck block for ResNet.
Args:
inplanes (int):
Number of channels for the input feature in first conv layer.
planes (int):
            Number of channels produced by some norm layers and conv layers.
stride (int): Spatial stride in the conv layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv').
norm_cfg (dict):
Config for norm layers. required keys are `type` and
`requires_grad`. Default: dict(type='BN2d', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
with_cp=False):
super().__init__()
assert style in ['pytorch', 'caffe']
self.inplanes = inplanes
self.planes = planes
if style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.conv1 = ConvModule(
inplanes,
planes,
kernel_size=1,
stride=self.conv1_stride,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv2 = ConvModule(
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv3 = ConvModule(
planes,
planes * self.expansion,
kernel_size=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.norm_cfg = norm_cfg
self.with_cp = with_cp
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
def make_res_layer(block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
style='pytorch',
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
with_cp=False):
"""Build residual layer for ResNet.
Args:
block: (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input feature in each block.
planes (int): Number of channels for the output feature in each block.
blocks (int): Number of residual blocks.
stride (int): Stride in the conv layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
        conv_cfg (dict | None): Config for conv layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = ConvModule(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
style=style,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
dilation,
style=style,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
return nn.Sequential(*layers)
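# Hedged usage sketch (not part of the original file): stacking four
# Bottleneck blocks with a stride-2 first block, e.g.
#     layer2 = make_res_layer(Bottleneck, inplanes=256, planes=128, blocks=4,
#                             stride=2, norm_cfg=dict(type='BN', requires_grad=True))
# maps a (N, 256, 56, 56) feature map to (N, 512, 28, 28), which mirrors how
# the second stage of a ResNet-50 is assembled in the class below.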
@BACKBONES.register_module()
class ResNet(nn.Module):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
pretrained (str | None): Name of pretrained model. Default: None.
in_channels (int): Channel num of input features. Default: 3.
num_stages (int): Resnet stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
out_indices (Sequence[int]): Indices of output feature. Default: (3, ).
dilations (Sequence[int]): Dilation of each stage.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: ``pytorch``.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters. Default: -1.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv').
norm_cfg (dict):
Config for norm layers. required keys are `type` and
`requires_grad`. Default: dict(type='BN2d', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
partial_bn (bool): Whether to use partial bn. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
pretrained=None,
torchvision_pretrain=True,
in_channels=3,
num_stages=4,
out_indices=(3, ),
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
style='pytorch',
frozen_stages=-1,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN2d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
partial_bn=False,
with_cp=False):
super().__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.in_channels = in_channels
self.pretrained = pretrained
self.torchvision_pretrain = torchvision_pretrain
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.style = style
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.partial_bn = partial_bn
self.with_cp = with_cp
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = 64
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = 64 * 2**i
res_layer = make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * 64 * 2**(
len(self.stage_blocks) - 1)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1 = ConvModule(
self.in_channels,
64,
kernel_size=7,
stride=2,
padding=3,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
@staticmethod
def _load_conv_params(conv, state_dict_tv, module_name_tv,
loaded_param_names):
"""Load the conv parameters of resnet from torchvision.
Args:
conv (nn.Module): The destination conv module.
state_dict_tv (OrderedDict): The state dict of pretrained
torchvision model.
module_name_tv (str): The name of corresponding conv module in the
torchvision model.
loaded_param_names (list[str]): List of parameters that have been
loaded.
"""
weight_tv_name = module_name_tv + '.weight'
conv.weight.data.copy_(state_dict_tv[weight_tv_name])
loaded_param_names.append(weight_tv_name)
if getattr(conv, 'bias') is not None:
bias_tv_name = module_name_tv + '.bias'
conv.bias.data.copy_(state_dict_tv[bias_tv_name])
loaded_param_names.append(bias_tv_name)
@staticmethod
def _load_bn_params(bn, state_dict_tv, module_name_tv, loaded_param_names):
"""Load the bn parameters of resnet from torchvision.
Args:
bn (nn.Module): The destination bn module.
state_dict_tv (OrderedDict): The state dict of pretrained
torchvision model.
module_name_tv (str): The name of corresponding bn module in the
torchvision model.
loaded_param_names (list[str]): List of parameters that have been
loaded.
"""
for param_name, param in bn.named_parameters():
param_tv_name = f'{module_name_tv}.{param_name}'
param_tv = state_dict_tv[param_tv_name]
param.data.copy_(param_tv)
loaded_param_names.append(param_tv_name)
for param_name, param in bn.named_buffers():
param_tv_name = f'{module_name_tv}.{param_name}'
# some buffers like num_batches_tracked may not exist
if param_tv_name in state_dict_tv:
param_tv = state_dict_tv[param_tv_name]
param.data.copy_(param_tv)
loaded_param_names.append(param_tv_name)
def _load_torchvision_checkpoint(self, logger=None):
"""Initiate the parameters from torchvision pretrained checkpoint."""
state_dict_torchvision = _load_checkpoint(self.pretrained)
if 'state_dict' in state_dict_torchvision:
state_dict_torchvision = state_dict_torchvision['state_dict']
loaded_param_names = []
for name, module in self.named_modules():
if isinstance(module, ConvModule):
# we use a ConvModule to wrap conv+bn+relu layers, thus the
# name mapping is needed
if 'downsample' in name:
# layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
original_conv_name = name + '.0'
# layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
original_bn_name = name + '.1'
else:
# layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
original_conv_name = name
# layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
original_bn_name = name.replace('conv', 'bn')
self._load_conv_params(module.conv, state_dict_torchvision,
original_conv_name, loaded_param_names)
self._load_bn_params(module.bn, state_dict_torchvision,
original_bn_name, loaded_param_names)
# check if any parameters in the 2d checkpoint are not loaded
remaining_names = set(
state_dict_torchvision.keys()) - set(loaded_param_names)
if remaining_names:
logger.info(
                f'These parameters in the pretrained checkpoint are not loaded'
f': {remaining_names}')
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
if self.torchvision_pretrain:
# torchvision's
self._load_torchvision_checkpoint(logger)
else:
# ours
load_checkpoint(
self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input samples extracted
by the backbone.
"""
x = self.conv1(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.bn.eval()
for m in self.conv1.modules():
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def _partial_bn(self):
logger = get_root_logger()
logger.info('Freezing BatchNorm2D except the first one.')
count_bn = 0
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
count_bn += 1
if count_bn >= 2:
m.eval()
# shutdown update in frozen mode
m.weight.requires_grad = False
m.bias.requires_grad = False
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
if mode and self.partial_bn:
self._partial_bn()
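# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). It builds the
# registered 2D ResNet backbone above and runs a dummy batch; the 224x224
# input and the quoted output shape are assumptions based on the standard
# ResNet-50 layout, not values prescribed by this file.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover - usage sketch only
    import torch
    model = ResNet(depth=50, pretrained=None, torchvision_pretrain=False)
    model.init_weights()
    model.eval()
    dummy = torch.randn(2, 3, 224, 224)  # (batch, channels, height, width)
    with torch.no_grad():
        feat = model(dummy)
    # With out_indices=(3, ) a single tensor is returned; for depth=50 this
    # should be roughly (2, 2048, 7, 7).
    print(feat.shape)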
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet.py |
from ..registry import BACKBONES
from .resnet3d import ResNet3d
@BACKBONES.register_module()
class ResNet2Plus1d(ResNet3d):
"""ResNet (2+1)d backbone.
This model is proposed in `A Closer Look at Spatiotemporal Convolutions for
Action Recognition <https://arxiv.org/abs/1711.11248>`_
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.pretrained2d is False
assert self.conv_cfg['type'] == 'Conv2plus1d'
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1(x)
x = self.maxpool(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
# no pool2 in R(2+1)d
x = res_layer(x)
return x
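# ---------------------------------------------------------------------------
# Illustrative construction sketch (not part of the original file). The asserts
# in ``__init__`` require ``pretrained2d=False`` and a ``Conv2plus1d`` conv
# config, so those are shown explicitly; the remaining keywords belong to
# ``ResNet3d`` (defined elsewhere) and mirror a common r2plus1d setup, so treat
# them as assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover - usage sketch only
    import torch
    model = ResNet2Plus1d(
        depth=34,
        pretrained=None,
        pretrained2d=False,
        conv_cfg=dict(type='Conv2plus1d'),
        norm_cfg=dict(type='BN3d', requires_grad=True),
        conv1_kernel=(3, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1)
    model.init_weights()
    clip = torch.randn(1, 3, 8, 112, 112)  # (batch, channels, frames, h, w)
    with torch.no_grad():
        feat = model(clip)
    print(feat.shape)  # a single 5D feature map; note there is no pool2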
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet2plus1d.py |
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, kaiming_init
from mmcv.runner import _load_checkpoint, load_checkpoint
from mmcv.utils import print_log
from ...utils import get_root_logger
from ..registry import BACKBONES
from .resnet3d import ResNet3d
class ResNet3dPathway(ResNet3d):
"""A pathway of Slowfast based on ResNet3d.
Args:
*args (arguments): Arguments same as :class:``ResNet3d``.
lateral (bool): Determines whether to enable the lateral connection
from another pathway. Default: False.
speed_ratio (int): Speed ratio indicating the ratio between time
dimension of the fast and slow pathway, corresponding to the
``alpha`` in the paper. Default: 8.
channel_ratio (int): Reduce the channel number of fast pathway
by ``channel_ratio``, corresponding to ``beta`` in the paper.
Default: 8.
fusion_kernel (int): The kernel size of lateral fusion.
Default: 5.
**kwargs (keyword arguments): Keywords arguments for ResNet3d.
"""
def __init__(self,
*args,
lateral=False,
speed_ratio=8,
channel_ratio=8,
fusion_kernel=5,
**kwargs):
self.lateral = lateral
self.speed_ratio = speed_ratio
self.channel_ratio = channel_ratio
self.fusion_kernel = fusion_kernel
super().__init__(*args, **kwargs)
self.inplanes = self.base_channels
if self.lateral:
self.conv1_lateral = ConvModule(
self.inplanes // self.channel_ratio,
# https://arxiv.org/abs/1812.03982, the
# third type of lateral connection has out_channel:
# 2 * \beta * C
self.inplanes * 2 // self.channel_ratio,
kernel_size=(fusion_kernel, 1, 1),
stride=(self.speed_ratio, 1, 1),
padding=((fusion_kernel - 1) // 2, 0, 0),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None)
self.lateral_connections = []
for i in range(len(self.stage_blocks)):
planes = self.base_channels * 2**i
self.inplanes = planes * self.block.expansion
if lateral and i != self.num_stages - 1:
# no lateral connection needed in final stage
lateral_name = f'layer{(i + 1)}_lateral'
setattr(
self, lateral_name,
ConvModule(
self.inplanes // self.channel_ratio,
self.inplanes * 2 // self.channel_ratio,
kernel_size=(fusion_kernel, 1, 1),
stride=(self.speed_ratio, 1, 1),
padding=((fusion_kernel - 1) // 2, 0, 0),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None))
self.lateral_connections.append(lateral_name)
def make_res_layer(self,
block,
inplanes,
planes,
blocks,
spatial_stride=1,
temporal_stride=1,
dilation=1,
style='pytorch',
inflate=1,
inflate_style='3x1x1',
non_local=0,
non_local_cfg=dict(),
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
with_cp=False):
"""Build residual layer for Slowfast.
Args:
block (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input
feature in each block.
planes (int): Number of channels for the output
feature in each block.
blocks (int): Number of residual blocks.
spatial_stride (int | Sequence[int]): Spatial strides
in residual and conv layers. Default: 1.
temporal_stride (int | Sequence[int]): Temporal strides in
residual and conv layers. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``,
the stride-two layer is the 3x3 conv layer,
otherwise the stride-two layer is the first 1x1 conv layer.
Default: ``pytorch``.
inflate (int | Sequence[int]): Determine whether to inflate
for each block. Default: 1.
inflate_style (str): ``3x1x1`` or ``1x1x1``. which determines
the kernel sizes and padding strides for conv1 and
conv2 in each block. Default: ``3x1x1``.
non_local (int | Sequence[int]): Determine whether to apply
non-local module in the corresponding block of each stages.
Default: 0.
non_local_cfg (dict): Config for non-local module.
Default: ``dict()``.
conv_cfg (dict | None): Config for conv layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed.
Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
inflate = inflate if not isinstance(inflate,
int) else (inflate, ) * blocks
non_local = non_local if not isinstance(
non_local, int) else (non_local, ) * blocks
assert len(inflate) == blocks and len(non_local) == blocks
if self.lateral:
lateral_inplanes = inplanes * 2 // self.channel_ratio
else:
lateral_inplanes = 0
if (spatial_stride != 1
or (inplanes + lateral_inplanes) != planes * block.expansion):
downsample = ConvModule(
inplanes + lateral_inplanes,
planes * block.expansion,
kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride),
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
else:
downsample = None
layers = []
layers.append(
block(
inplanes + lateral_inplanes,
planes,
spatial_stride,
temporal_stride,
dilation,
downsample,
style=style,
inflate=(inflate[0] == 1),
inflate_style=inflate_style,
non_local=(non_local[0] == 1),
non_local_cfg=non_local_cfg,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
1,
dilation,
style=style,
inflate=(inflate[i] == 1),
inflate_style=inflate_style,
non_local=(non_local[i] == 1),
non_local_cfg=non_local_cfg,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
return nn.Sequential(*layers)
def inflate_weights(self, logger):
"""Inflate the resnet2d parameters to resnet3d pathway.
        The differences between resnet3d and resnet2d mainly lie in an extra
        axis of the conv kernel. To utilize the pretrained parameters of the 2d
        model, the weights of the conv2d model should be inflated to fit the
        shapes of the 3d counterpart. For a pathway, the ``lateral_connection``
        part should not be inflated from 2d weights.
Args:
            logger (logging.Logger): The logger used to print
                debugging information.
"""
state_dict_r2d = _load_checkpoint(self.pretrained)
if 'state_dict' in state_dict_r2d:
state_dict_r2d = state_dict_r2d['state_dict']
inflated_param_names = []
for name, module in self.named_modules():
if 'lateral' in name:
continue
if isinstance(module, ConvModule):
# we use a ConvModule to wrap conv+bn+relu layers, thus the
# name mapping is needed
if 'downsample' in name:
# layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
original_conv_name = name + '.0'
# layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
original_bn_name = name + '.1'
else:
# layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
original_conv_name = name
# layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
original_bn_name = name.replace('conv', 'bn')
self._inflate_conv_params(module.conv, state_dict_r2d,
original_conv_name,
inflated_param_names)
self._inflate_bn_params(module.bn, state_dict_r2d,
original_bn_name, inflated_param_names)
# check if any parameters in the 2d checkpoint are not loaded
remaining_names = set(
state_dict_r2d.keys()) - set(inflated_param_names)
if remaining_names:
logger.info(f'These parameters in the 2d checkpoint are not loaded'
f': {remaining_names}')
def _inflate_conv_params(self, conv3d, state_dict_2d, module_name_2d,
inflated_param_names):
"""Inflate a conv module from 2d to 3d.
        The differences between the 2d and 3d conv modules in a pathway
        mainly lie in the inplanes due to lateral connections. To fit the
        shapes of the lateral connection counterpart, the conv2d parameters
        are expanded by concatenating extra zero-padded channels.
Args:
conv3d (nn.Module): The destination conv3d module.
state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
module_name_2d (str): The name of corresponding conv module in the
2d model.
inflated_param_names (list[str]): List of parameters that have been
inflated.
"""
weight_2d_name = module_name_2d + '.weight'
conv2d_weight = state_dict_2d[weight_2d_name]
old_shape = conv2d_weight.shape
new_shape = conv3d.weight.data.shape
kernel_t = new_shape[2]
if new_shape[1] != old_shape[1]:
# Inplanes may be different due to lateral connections
new_channels = new_shape[1] - old_shape[1]
pad_shape = old_shape
pad_shape = pad_shape[:1] + (new_channels, ) + pad_shape[2:]
# Expand parameters by concat extra channels
conv2d_weight = torch.cat(
(conv2d_weight,
torch.zeros(pad_shape).type_as(conv2d_weight).to(
conv2d_weight.device)),
dim=1)
new_weight = conv2d_weight.data.unsqueeze(2).expand_as(
conv3d.weight) / kernel_t
conv3d.weight.data.copy_(new_weight)
inflated_param_names.append(weight_2d_name)
if getattr(conv3d, 'bias') is not None:
bias_2d_name = module_name_2d + '.bias'
conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])
inflated_param_names.append(bias_2d_name)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
`self.frozen_stages`."""
if self.frozen_stages >= 0:
self.conv1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
if i != len(self.res_layers) and self.lateral:
# No fusion needed in the final stage
lateral_name = self.lateral_connections[i - 1]
conv_lateral = getattr(self, lateral_name)
conv_lateral.eval()
for param in conv_lateral.parameters():
param.requires_grad = False
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
# Override the init_weights of i3d
super().init_weights()
for module_name in self.lateral_connections:
layer = getattr(self, module_name)
for m in layer.modules():
if isinstance(m, (nn.Conv3d, nn.Conv2d)):
kaiming_init(m)
pathway_cfg = {
'resnet3d': ResNet3dPathway,
# TODO: BNInceptionPathway
}
def build_pathway(cfg, *args, **kwargs):
"""Build pathway.
Args:
cfg (None or dict): cfg should contain:
- type (str): identify conv layer type.
Returns:
nn.Module: Created pathway.
"""
if not (isinstance(cfg, dict) and 'type' in cfg):
raise TypeError('cfg must be a dict containing the key "type"')
cfg_ = cfg.copy()
pathway_type = cfg_.pop('type')
if pathway_type not in pathway_cfg:
raise KeyError(f'Unrecognized pathway type {pathway_type}')
pathway_cls = pathway_cfg[pathway_type]
pathway = pathway_cls(*args, **kwargs, **cfg_)
return pathway
@BACKBONES.register_module()
class ResNet3dSlowFast(nn.Module):
"""Slowfast backbone.
This module is proposed in `SlowFast Networks for Video Recognition
<https://arxiv.org/abs/1812.03982>`_
Args:
pretrained (str): The file path to a pretrained model.
        resample_rate (int): A large temporal stride ``resample_rate``
            on input frames, corresponding to the :math:`\\tau` in the paper,
            i.e., it processes only one out of ``resample_rate`` frames.
            Default: 8.
speed_ratio (int): Speed ratio indicating the ratio between time
dimension of the fast and slow pathway, corresponding to the
:math:`\\alpha` in the paper. Default: 8.
channel_ratio (int): Reduce the channel number of fast pathway
by ``channel_ratio``, corresponding to :math:`\\beta` in the paper.
Default: 8.
slow_pathway (dict): Configuration of slow branch, should contain
necessary arguments for building the specific type of pathway
and:
            type (str): Type of backbone the pathway is based on.
            lateral (bool): Determine whether to build a lateral connection
                for the pathway. Default:
.. code-block:: Python
            dict(type='resnet3d',
lateral=True, depth=50, pretrained=None,
conv1_kernel=(1, 7, 7), dilations=(1, 1, 1, 1),
conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1))
fast_pathway (dict): Configuration of fast branch, similar to
`slow_pathway`. Default:
.. code-block:: Python
            dict(type='resnet3d',
lateral=False, depth=50, pretrained=None, base_channels=8,
conv1_kernel=(5, 7, 7), conv1_stride_t=1, pool1_stride_t=1)
"""
def __init__(self,
pretrained,
resample_rate=8,
speed_ratio=8,
channel_ratio=8,
slow_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
lateral=True,
conv1_kernel=(1, 7, 7),
dilations=(1, 1, 1, 1),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1)),
fast_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
lateral=False,
base_channels=8,
conv1_kernel=(5, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1)):
super().__init__()
self.pretrained = pretrained
self.resample_rate = resample_rate
self.speed_ratio = speed_ratio
self.channel_ratio = channel_ratio
if slow_pathway['lateral']:
slow_pathway['speed_ratio'] = speed_ratio
slow_pathway['channel_ratio'] = channel_ratio
self.slow_path = build_pathway(slow_pathway)
self.fast_path = build_pathway(fast_pathway)
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
msg = f'load model from: {self.pretrained}'
print_log(msg, logger=logger)
# Directly load 3D model.
load_checkpoint(self, self.pretrained, strict=True, logger=logger)
elif self.pretrained is None:
            # Init the two branches separately.
self.fast_path.init_weights()
self.slow_path.init_weights()
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
tuple[torch.Tensor]: The feature of the input samples extracted
by the backbone.
"""
x_slow = nn.functional.interpolate(
x,
mode='nearest',
scale_factor=(1.0 / self.resample_rate, 1.0, 1.0))
x_slow = self.slow_path.conv1(x_slow)
x_slow = self.slow_path.maxpool(x_slow)
x_fast = nn.functional.interpolate(
x,
mode='nearest',
scale_factor=(1.0 / (self.resample_rate // self.speed_ratio), 1.0,
1.0))
x_fast = self.fast_path.conv1(x_fast)
x_fast = self.fast_path.maxpool(x_fast)
if self.slow_path.lateral:
x_fast_lateral = self.slow_path.conv1_lateral(x_fast)
x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)
for i, layer_name in enumerate(self.slow_path.res_layers):
res_layer = getattr(self.slow_path, layer_name)
x_slow = res_layer(x_slow)
res_layer_fast = getattr(self.fast_path, layer_name)
x_fast = res_layer_fast(x_fast)
if (i != len(self.slow_path.res_layers) - 1
and self.slow_path.lateral):
# No fusion needed in the final stage
lateral_name = self.slow_path.lateral_connections[i]
conv_lateral = getattr(self.slow_path, lateral_name)
x_fast_lateral = conv_lateral(x_fast)
x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)
out = (x_slow, x_fast)
return out
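# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). It builds the
# SlowFast backbone with the default slow/fast pathway configs above and runs
# a dummy clip; the 32-frame 224x224 input and the shape comments are
# assumptions, not values prescribed by this file.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover - usage sketch only
    model = ResNet3dSlowFast(pretrained=None)
    model.init_weights()
    clip = torch.randn(1, 3, 32, 224, 224)  # (batch, channels, frames, h, w)
    with torch.no_grad():
        x_slow, x_fast = model(clip)
    # With resample_rate=8 the slow path sees 32 / 8 = 4 frames while the fast
    # path keeps all 32; their channel widths differ by about channel_ratio=8.
    print(x_slow.shape, x_fast.shape)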
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_slowfast.py |
from ..registry import BACKBONES
from .resnet3d_slowfast import ResNet3dPathway
@BACKBONES.register_module()
class ResNet3dSlowOnly(ResNet3dPathway):
"""SlowOnly backbone based on ResNet3dPathway.
Args:
*args (arguments): Arguments same as :class:`ResNet3dPathway`.
conv1_kernel (Sequence[int]): Kernel size of the first conv layer.
Default: (1, 7, 7).
conv1_stride_t (int): Temporal stride of the first conv layer.
Default: 1.
pool1_stride_t (int): Temporal stride of the first pooling layer.
Default: 1.
inflate (Sequence[int]): Inflate Dims of each block.
Default: (0, 0, 1, 1).
**kwargs (keyword arguments): Keywords arguments for
:class:`ResNet3dPathway`.
"""
def __init__(self,
*args,
lateral=False,
conv1_kernel=(1, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1),
with_pool2=False,
**kwargs):
super().__init__(
*args,
lateral=lateral,
conv1_kernel=conv1_kernel,
conv1_stride_t=conv1_stride_t,
pool1_stride_t=pool1_stride_t,
inflate=inflate,
with_pool2=with_pool2,
**kwargs)
assert not self.lateral
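# ---------------------------------------------------------------------------
# Illustrative construction sketch (not part of the original file). ``depth``
# and ``pretrained`` are forwarded to ``ResNet3dPathway``/``ResNet3d`` (defined
# elsewhere), so treating them as the only required arguments, and the clip
# shape below, are assumptions for illustration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover - usage sketch only
    import torch
    model = ResNet3dSlowOnly(depth=50, pretrained=None)
    model.init_weights()
    clip = torch.randn(1, 3, 8, 224, 224)  # (batch, channels, frames, h, w)
    with torch.no_grad():
        feat = model(clip)
    # conv1_kernel=(1, 7, 7) with unit temporal strides should keep all frames.
    print(feat.shape)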
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_slowonly.py |
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init, normal_init
from mmcv.runner import load_checkpoint
from mmcv.utils import _BatchNorm
from ...utils import get_root_logger
from ..registry import BACKBONES
@BACKBONES.register_module()
class C3D(nn.Module):
"""C3D backbone.
Args:
pretrained (str | None): Name of pretrained model.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: 'pytorch'.
conv_cfg (dict | None): Config dict for convolution layer.
If set to None, it uses ``dict(type='Conv3d')`` to construct
layers. Default: None.
norm_cfg (dict | None): Config for norm layers. required keys are
``type``, Default: None.
act_cfg (dict | None): Config dict for activation layer. If set to
None, it uses ``dict(type='ReLU')`` to construct layers.
Default: None.
dropout_ratio (float): Probability of dropout layer. Default: 0.5.
        init_std (float): Std value for initializing the fc layers.
            Default: 0.005.
"""
def __init__(self,
pretrained=None,
style='pytorch',
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
dropout_ratio=0.5,
init_std=0.005):
super().__init__()
if conv_cfg is None:
conv_cfg = dict(type='Conv3d')
if act_cfg is None:
act_cfg = dict(type='ReLU')
self.pretrained = pretrained
self.style = style
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.dropout_ratio = dropout_ratio
self.init_std = init_std
c3d_conv_param = dict(
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv1a = ConvModule(3, 64, **c3d_conv_param)
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv2a = ConvModule(64, 128, **c3d_conv_param)
self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv3a = ConvModule(128, 256, **c3d_conv_param)
self.conv3b = ConvModule(256, 256, **c3d_conv_param)
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv4a = ConvModule(256, 512, **c3d_conv_param)
self.conv4b = ConvModule(512, 512, **c3d_conv_param)
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv5a = ConvModule(512, 512, **c3d_conv_param)
self.conv5b = ConvModule(512, 512, **c3d_conv_param)
self.pool5 = nn.MaxPool3d(
kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1))
self.fc6 = nn.Linear(8192, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=self.dropout_ratio)
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, nn.Linear):
normal_init(m, std=self.init_std)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
the size of x is (num_batches, 3, 16, 112, 112).
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1a(x)
x = self.pool1(x)
x = self.conv2a(x)
x = self.pool2(x)
x = self.conv3a(x)
x = self.conv3b(x)
x = self.pool3(x)
x = self.conv4a(x)
x = self.conv4b(x)
x = self.pool4(x)
x = self.conv5a(x)
x = self.conv5b(x)
x = self.pool5(x)
x = x.flatten(start_dim=1)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
return x
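# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). As stated in the
# forward() docstring, C3D expects clips of shape (num_batches, 3, 16, 112,
# 112); after pool5 the flattened 8192-dim feature is mapped by fc6/fc7 to a
# 4096-dim clip representation.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover - usage sketch only
    import torch
    model = C3D(pretrained=None)
    model.init_weights()
    clip = torch.randn(2, 3, 16, 112, 112)  # (batch, channels, frames, h, w)
    with torch.no_grad():
        feat = model(clip)
    print(feat.shape)  # torch.Size([2, 4096])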
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/c3d.py |
import torch
import torch.nn as nn
from .. import builder
from ..registry import LOCALIZERS
from .base import BaseLocalizer
@LOCALIZERS.register_module()
class SSN(BaseLocalizer):
"""Temporal Action Detection with Structured Segment Networks.
Args:
backbone (dict): Config for building backbone.
cls_head (dict): Config for building classification head.
in_channels (int): Number of channels for input data.
Default: 3.
spatial_type (str): Type of spatial pooling.
Default: 'avg'.
dropout_ratio (float): Ratio of dropout.
Default: 0.5.
loss_cls (dict): Config for building loss.
Default: ``dict(type='SSNLoss')``.
train_cfg (dict | None): Config for training. Default: None.
test_cfg (dict | None): Config for testing. Default: None.
"""
def __init__(self,
backbone,
cls_head,
in_channels=3,
spatial_type='avg',
dropout_ratio=0.5,
loss_cls=dict(type='SSNLoss'),
train_cfg=None,
test_cfg=None):
super().__init__(backbone, cls_head, train_cfg, test_cfg)
self.is_test_prepared = False
self.in_channels = in_channels
self.spatial_type = spatial_type
if self.spatial_type == 'avg':
self.pool = nn.AvgPool2d((7, 7), stride=1, padding=0)
elif self.spatial_type == 'max':
self.pool = nn.MaxPool2d((7, 7), stride=1, padding=0)
else:
self.pool = None
self.dropout_ratio = dropout_ratio
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.loss_cls = builder.build_loss(loss_cls)
def forward_train(self, imgs, proposal_scale_factor, proposal_type,
proposal_labels, reg_targets, **kwargs):
"""Define the computation performed at every call when training."""
imgs = imgs.reshape((-1, self.in_channels) + imgs.shape[4:])
x = self.extract_feat(imgs)
if self.pool:
x = self.pool(x)
if self.dropout is not None:
x = self.dropout(x)
activity_scores, completeness_scores, bbox_preds = self.cls_head(
(x, proposal_scale_factor))
loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds,
proposal_type, proposal_labels, reg_targets,
self.train_cfg)
loss_dict = dict(**loss)
return loss_dict
def forward_test(self, imgs, relative_proposal_list, scale_factor_list,
proposal_tick_list, reg_norm_consts, **kwargs):
"""Define the computation performed at every call when testing."""
num_crops = imgs.shape[0]
imgs = imgs.reshape((num_crops, -1, self.in_channels) + imgs.shape[3:])
num_ticks = imgs.shape[1]
output = []
minibatch_size = self.test_cfg.ssn.sampler.batch_size
for idx in range(0, num_ticks, minibatch_size):
chunk = imgs[:, idx:idx +
minibatch_size, :, :, :].view((-1, ) + imgs.shape[2:])
x = self.extract_feat(chunk)
if self.pool:
x = self.pool(x)
            # Merge crops to save memory.
x = x.reshape((num_crops, x.size(0) // num_crops, -1)).mean(dim=0)
output.append(x)
output = torch.cat(output, dim=0)
relative_proposal_list = relative_proposal_list.squeeze(0)
proposal_tick_list = proposal_tick_list.squeeze(0)
scale_factor_list = scale_factor_list.squeeze(0)
reg_norm_consts = reg_norm_consts.squeeze(0)
if not self.is_test_prepared:
self.is_test_prepared = self.cls_head.prepare_test_fc(
self.cls_head.consensus.num_multipliers)
(output, activity_scores, completeness_scores,
bbox_preds) = self.cls_head(
(output, proposal_tick_list, scale_factor_list), test_mode=True)
relative_proposal_list = relative_proposal_list.cpu().numpy()
activity_scores = activity_scores.cpu().numpy()
completeness_scores = completeness_scores.cpu().numpy()
if bbox_preds is not None:
bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)
bbox_preds[:, :, 0] = (
bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +
reg_norm_consts[0, 0])
bbox_preds[:, :, 1] = (
bbox_preds[:, :, 1] * reg_norm_consts[1, 1] +
reg_norm_consts[0, 1])
bbox_preds = bbox_preds.cpu().numpy()
result = [
dict(
relative_proposal_list=relative_proposal_list,
activity_scores=activity_scores,
completeness_scores=completeness_scores,
bbox_preds=bbox_preds)
]
return result
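# ---------------------------------------------------------------------------
# Illustrative shape sketch (not part of the original file). It mimics the
# reshape at the top of ``forward_train`` to show how the leading dimensions
# are folded into one clip axis before feature extraction; the
# (batch, proposals, segments, C, H, W) layout used here is an assumption made
# purely for illustration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover - usage sketch only
    in_channels = 3
    imgs = torch.randn(2, 8, 9, in_channels, 224, 224)
    folded = imgs.reshape((-1, in_channels) + imgs.shape[4:])
    print(folded.shape)  # torch.Size([144, 3, 224, 224])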
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/ssn.py |
import math
import numpy as np
import torch
import torch.nn as nn
from ...localization import temporal_iop, temporal_iou
from ..builder import build_loss
from ..registry import LOCALIZERS
from .base import BaseLocalizer
from .utils import post_processing
@LOCALIZERS.register_module()
class BMN(BaseLocalizer):
"""Boundary Matching Network for temporal action proposal generation.
Please refer `BMN: Boundary-Matching Network for Temporal Action Proposal
Generation <https://arxiv.org/abs/1907.09702>`_.
Code Reference https://github.com/JJBOY/BMN-Boundary-Matching-Network
Args:
temporal_dim (int): Total frames selected for each video.
boundary_ratio (float): Ratio for determining video boundaries.
num_samples (int): Number of samples for each proposal.
num_samples_per_bin (int): Number of bin samples for each sample.
feat_dim (int): Feature dimension.
soft_nms_alpha (float): Soft NMS alpha.
soft_nms_low_threshold (float): Soft NMS low threshold.
soft_nms_high_threshold (float): Soft NMS high threshold.
        post_process_top_k (int): Number of top-scoring proposals to keep in
            the post processing step.
feature_extraction_interval (int):
Interval used in feature extraction. Default: 16.
loss_cls (dict): Config for building loss.
Default: ``dict(type='BMNLoss')``.
hidden_dim_1d (int): Hidden dim for 1d conv. Default: 256.
hidden_dim_2d (int): Hidden dim for 2d conv. Default: 128.
hidden_dim_3d (int): Hidden dim for 3d conv. Default: 512.
"""
def __init__(self,
temporal_dim,
boundary_ratio,
num_samples,
num_samples_per_bin,
feat_dim,
soft_nms_alpha,
soft_nms_low_threshold,
soft_nms_high_threshold,
post_process_top_k,
feature_extraction_interval=16,
loss_cls=dict(type='BMNLoss'),
hidden_dim_1d=256,
hidden_dim_2d=128,
hidden_dim_3d=512):
super(BaseLocalizer, self).__init__()
self.tscale = temporal_dim
self.boundary_ratio = boundary_ratio
self.num_samples = num_samples
self.num_samples_per_bin = num_samples_per_bin
self.feat_dim = feat_dim
self.soft_nms_alpha = soft_nms_alpha
self.soft_nms_low_threshold = soft_nms_low_threshold
self.soft_nms_high_threshold = soft_nms_high_threshold
self.post_process_top_k = post_process_top_k
self.feature_extraction_interval = feature_extraction_interval
self.loss_cls = build_loss(loss_cls)
self.hidden_dim_1d = hidden_dim_1d
self.hidden_dim_2d = hidden_dim_2d
self.hidden_dim_3d = hidden_dim_3d
self._get_interp1d_mask()
# Base Module
self.x_1d_b = nn.Sequential(
nn.Conv1d(
self.feat_dim,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True),
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True))
# Temporal Evaluation Module
self.x_1d_s = nn.Sequential(
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid())
self.x_1d_e = nn.Sequential(
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid())
# Proposal Evaluation Module
self.x_1d_p = nn.Sequential(
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1), nn.ReLU(inplace=True))
self.x_3d_p = nn.Sequential(
nn.Conv3d(
self.hidden_dim_1d,
self.hidden_dim_3d,
kernel_size=(self.num_samples, 1, 1)), nn.ReLU(inplace=True))
self.x_2d_p = nn.Sequential(
nn.Conv2d(self.hidden_dim_3d, self.hidden_dim_2d, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(
self.hidden_dim_2d,
self.hidden_dim_2d,
kernel_size=3,
padding=1), nn.ReLU(inplace=True),
nn.Conv2d(
self.hidden_dim_2d,
self.hidden_dim_2d,
kernel_size=3,
padding=1), nn.ReLU(inplace=True),
nn.Conv2d(self.hidden_dim_2d, 2, kernel_size=1), nn.Sigmoid())
self.anchors_tmins, self.anchors_tmaxs = self._temporal_anchors(
-0.5, 1.5)
self.match_map = self._match_map()
self.bm_mask = self._get_bm_mask()
def _match_map(self):
"""Generate match map."""
temporal_gap = 1. / self.tscale
match_map = []
for idx in range(self.tscale):
match_window = []
tmin = temporal_gap * idx
for jdx in range(1, self.tscale + 1):
tmax = tmin + temporal_gap * jdx
match_window.append([tmin, tmax])
match_map.append(match_window)
match_map = np.array(match_map)
match_map = np.transpose(match_map, [1, 0, 2])
match_map = np.reshape(match_map, [-1, 2])
return match_map
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
"""Generate temporal anchors.
Args:
            tmin_offset (float): Offset for the minimum value of the temporal
                anchor. Default: 0.
            tmax_offset (float): Offset for the maximum value of the temporal
                anchor. Default: 1.
Returns:
tuple[Sequence[float]]: The minimum and maximum values of temporal
anchors.
"""
temporal_gap = 1. / self.tscale
anchors_tmins = []
anchors_tmaxs = []
for i in range(self.tscale):
anchors_tmins.append(temporal_gap * (i + tmin_offset))
anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
return anchors_tmins, anchors_tmaxs
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# x.shape [batch_size, self.feat_dim, self.tscale]
base_feature = self.x_1d_b(x)
# base_feature.shape [batch_size, self.hidden_dim_1d, self.tscale]
start = self.x_1d_s(base_feature).squeeze(1)
# start.shape [batch_size, self.tscale]
end = self.x_1d_e(base_feature).squeeze(1)
# end.shape [batch_size, self.tscale]
confidence_map = self.x_1d_p(base_feature)
# [batch_size, self.hidden_dim_1d, self.tscale]
confidence_map = self._boundary_matching_layer(confidence_map)
        # [batch_size, self.hidden_dim_1d, self.num_samples, self.tscale, self.tscale]  # noqa
confidence_map = self.x_3d_p(confidence_map).squeeze(2)
# [batch_size, self.hidden_dim_3d, self.tscale, self.tscale]
confidence_map = self.x_2d_p(confidence_map)
# [batch_size, 2, self.tscale, self.tscale]
return confidence_map, start, end
def _boundary_matching_layer(self, x):
"""Generate matching layer."""
input_size = x.size()
out = torch.matmul(x,
self.sample_mask).reshape(input_size[0],
input_size[1],
self.num_samples,
self.tscale, self.tscale)
return out
def forward_test(self, raw_feature, video_meta):
"""Define the computation performed at every call when testing."""
confidence_map, start, end = self._forward(raw_feature)
start_scores = start[0].cpu().numpy()
end_scores = end[0].cpu().numpy()
cls_confidence = (confidence_map[0][1]).cpu().numpy()
reg_confidence = (confidence_map[0][0]).cpu().numpy()
max_start = max(start_scores)
max_end = max(end_scores)
# generate the set of start points and end points
start_bins = np.zeros(len(start_scores))
start_bins[0] = 1 # [1,0,0...,0,0]
end_bins = np.zeros(len(end_scores))
end_bins[-1] = 1 # [0,0,0...,0,1]
for idx in range(1, self.tscale - 1):
if start_scores[idx] > start_scores[
idx + 1] and start_scores[idx] > start_scores[idx - 1]:
start_bins[idx] = 1
elif start_scores[idx] > (0.5 * max_start):
start_bins[idx] = 1
if end_scores[idx] > end_scores[
idx + 1] and end_scores[idx] > end_scores[idx - 1]:
end_bins[idx] = 1
elif end_scores[idx] > (0.5 * max_end):
end_bins[idx] = 1
# iterate through all combinations of start_index and end_index
new_proposals = []
for idx in range(self.tscale):
for jdx in range(self.tscale):
start_index = jdx
end_index = start_index + idx + 1
if end_index < self.tscale and start_bins[
start_index] == 1 and end_bins[end_index] == 1:
tmin = start_index / self.tscale
tmax = end_index / self.tscale
tmin_score = start_scores[start_index]
tmax_score = end_scores[end_index]
cls_score = cls_confidence[idx, jdx]
reg_score = reg_confidence[idx, jdx]
score = tmin_score * tmax_score * cls_score * reg_score
new_proposals.append([
tmin, tmax, tmin_score, tmax_score, cls_score,
reg_score, score
])
new_proposals = np.stack(new_proposals)
video_info = dict(video_meta[0])
proposal_list = post_processing(new_proposals, video_info,
self.soft_nms_alpha,
self.soft_nms_low_threshold,
self.soft_nms_high_threshold,
self.post_process_top_k,
self.feature_extraction_interval)
output = [
dict(
video_name=video_info['video_name'],
proposal_list=proposal_list)
]
return output
def forward_train(self, raw_feature, label_confidence, label_start,
label_end):
"""Define the computation performed at every call when training."""
confidence_map, start, end = self._forward(raw_feature)
loss = self.loss_cls(confidence_map, start, end, label_confidence,
label_start, label_end,
self.bm_mask.to(raw_feature.device))
loss_dict = dict(loss=loss[0])
return loss_dict
def generate_labels(self, gt_bbox):
"""Generate training labels."""
match_score_confidence_list = []
match_score_start_list = []
match_score_end_list = []
for every_gt_bbox in gt_bbox:
gt_iou_map = []
for start, end in every_gt_bbox:
start = start.numpy()
end = end.numpy()
current_gt_iou_map = temporal_iou(self.match_map[:, 0],
self.match_map[:, 1], start,
end)
current_gt_iou_map = np.reshape(current_gt_iou_map,
[self.tscale, self.tscale])
gt_iou_map.append(current_gt_iou_map)
gt_iou_map = np.array(gt_iou_map).astype(np.float32)
gt_iou_map = np.max(gt_iou_map, axis=0)
gt_tmins = every_gt_bbox[:, 0]
gt_tmaxs = every_gt_bbox[:, 1]
gt_len_pad = 3 * (1. / self.tscale)
gt_start_bboxs = np.stack(
(gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1)
gt_end_bboxs = np.stack(
(gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1)
match_score_start = []
match_score_end = []
for anchor_tmin, anchor_tmax in zip(self.anchors_tmins,
self.anchors_tmaxs):
match_score_start.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_start_bboxs[:, 0], gt_start_bboxs[:,
1])))
match_score_end.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_confidence_list.append(gt_iou_map)
match_score_start_list.append(match_score_start)
match_score_end_list.append(match_score_end)
match_score_confidence_list = torch.Tensor(match_score_confidence_list)
match_score_start_list = torch.Tensor(match_score_start_list)
match_score_end_list = torch.Tensor(match_score_end_list)
return (match_score_confidence_list, match_score_start_list,
match_score_end_list)
def forward(self,
raw_feature,
gt_bbox=None,
video_meta=None,
return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
label_confidence, label_start, label_end = (
self.generate_labels(gt_bbox))
device = raw_feature.device
label_confidence = label_confidence.to(device)
label_start = label_start.to(device)
label_end = label_end.to(device)
return self.forward_train(raw_feature, label_confidence,
label_start, label_end)
return self.forward_test(raw_feature, video_meta)
@staticmethod
def _get_interp1d_bin_mask(seg_tmin, seg_tmax, tscale, num_samples,
num_samples_per_bin):
"""Generate sample mask for a boundary-matching pair."""
plen = float(seg_tmax - seg_tmin)
plen_sample = plen / (num_samples * num_samples_per_bin - 1.0)
total_samples = [
seg_tmin + plen_sample * i
for i in range(num_samples * num_samples_per_bin)
]
p_mask = []
for idx in range(num_samples):
bin_samples = total_samples[idx * num_samples_per_bin:(idx + 1) *
num_samples_per_bin]
bin_vector = np.zeros(tscale)
for sample in bin_samples:
sample_upper = math.ceil(sample)
sample_decimal, sample_down = math.modf(sample)
if 0 <= int(sample_down) <= (tscale - 1):
bin_vector[int(sample_down)] += 1 - sample_decimal
if 0 <= int(sample_upper) <= (tscale - 1):
bin_vector[int(sample_upper)] += sample_decimal
bin_vector = 1.0 / num_samples_per_bin * bin_vector
p_mask.append(bin_vector)
p_mask = np.stack(p_mask, axis=1)
return p_mask
def _get_interp1d_mask(self):
"""Generate sample mask for each point in Boundary-Matching Map."""
mask_mat = []
for start_index in range(self.tscale):
mask_mat_vector = []
for duration_index in range(self.tscale):
if start_index + duration_index < self.tscale:
p_tmin = start_index
p_tmax = start_index + duration_index
center_len = float(p_tmax - p_tmin) + 1
sample_tmin = p_tmin - (center_len * self.boundary_ratio)
sample_tmax = p_tmax + (center_len * self.boundary_ratio)
p_mask = self._get_interp1d_bin_mask(
sample_tmin, sample_tmax, self.tscale,
self.num_samples, self.num_samples_per_bin)
else:
p_mask = np.zeros([self.tscale, self.num_samples])
mask_mat_vector.append(p_mask)
mask_mat_vector = np.stack(mask_mat_vector, axis=2)
mask_mat.append(mask_mat_vector)
mask_mat = np.stack(mask_mat, axis=3)
mask_mat = mask_mat.astype(np.float32)
self.sample_mask = nn.Parameter(
torch.tensor(mask_mat).view(self.tscale, -1), requires_grad=False)
def _get_bm_mask(self):
"""Generate Boundary-Matching Mask."""
bm_mask = []
for idx in range(self.tscale):
mask_vector = [1] * (self.tscale - idx) + [0] * idx
bm_mask.append(mask_vector)
bm_mask = torch.tensor(bm_mask, dtype=torch.float)
return bm_mask
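# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). The argument
# values mirror a typical 100-snippet, 400-dim feature setup and are
# assumptions rather than values mandated by this file; building the model
# also relies on the ``BMNLoss`` registered elsewhere in the package.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover - usage sketch only
    model = BMN(
        temporal_dim=100,
        boundary_ratio=0.5,
        num_samples=32,
        num_samples_per_bin=3,
        feat_dim=400,
        soft_nms_alpha=0.4,
        soft_nms_low_threshold=0.5,
        soft_nms_high_threshold=0.9,
        post_process_top_k=100)
    raw_feature = torch.randn(1, 400, 100)  # (batch, feat_dim, temporal_dim)
    # One video with two ground-truth segments as normalized (start, end).
    gt_bbox = [torch.tensor([[0.1, 0.3], [0.55, 0.9]])]
    losses = model(raw_feature, gt_bbox=gt_bbox, return_loss=True)
    print(losses)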
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/bmn.py |