python_code | repo_name | file_path
---|---|---|
from .base import BaseLocalizer
from .bmn import BMN
from .bsn import PEM, TEM
from .ssn import SSN
__all__ = ['PEM', 'TEM', 'BMN', 'SSN', 'BaseLocalizer']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/__init__.py |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...localization import temporal_iop
from ..builder import build_loss
from ..registry import LOCALIZERS
from .base import BaseLocalizer
from .utils import post_processing
@LOCALIZERS.register_module()
class TEM(BaseLocalizer):
"""Temporal Evaluation Model for Boundary Sensetive Network.
Please refer `BSN: Boundary Sensitive Network for Temporal Action
Proposal Generation <http://arxiv.org/abs/1806.02964>`_.
Code reference
https://github.com/wzmsltw/BSN-boundary-sensitive-network
Args:
tem_feat_dim (int): Feature dimension.
tem_hidden_dim (int): Hidden layer dimension.
tem_match_threshold (float): Temporal evaluation match threshold.
loss_cls (dict): Config for building loss.
Default: ``dict(type='BinaryLogisticRegressionLoss')``.
loss_weight (float): Weight term for action_loss. Default: 2.
output_dim (int): Output dimension. Default: 3.
conv1_ratio (float): Ratio of conv1 layer output. Default: 1.0.
conv2_ratio (float): Ratio of conv2 layer output. Default: 1.0.
conv3_ratio (float): Ratio of conv3 layer output. Default: 0.01.
"""
def __init__(self,
temporal_dim,
boundary_ratio,
tem_feat_dim,
tem_hidden_dim,
tem_match_threshold,
loss_cls=dict(type='BinaryLogisticRegressionLoss'),
loss_weight=2,
output_dim=3,
conv1_ratio=1,
conv2_ratio=1,
conv3_ratio=0.01):
super(BaseLocalizer, self).__init__()
self.temporal_dim = temporal_dim
self.boundary_ratio = boundary_ratio
self.feat_dim = tem_feat_dim
self.c_hidden = tem_hidden_dim
self.match_threshold = tem_match_threshold
self.output_dim = output_dim
self.loss_cls = build_loss(loss_cls)
self.loss_weight = loss_weight
self.conv1_ratio = conv1_ratio
self.conv2_ratio = conv2_ratio
self.conv3_ratio = conv3_ratio
self.conv1 = nn.Conv1d(
in_channels=self.feat_dim,
out_channels=self.c_hidden,
kernel_size=3,
stride=1,
padding=1,
groups=1)
self.conv2 = nn.Conv1d(
in_channels=self.c_hidden,
out_channels=self.c_hidden,
kernel_size=3,
stride=1,
padding=1,
groups=1)
self.conv3 = nn.Conv1d(
in_channels=self.c_hidden,
out_channels=self.output_dim,
kernel_size=1,
stride=1,
padding=0)
self.anchors_tmins, self.anchors_tmaxs = self._temporal_anchors()
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
"""Generate temporal anchors.
Args:
tmin_offset (float): Offset for the minimum value of temporal anchor.
Default: 0.
tmax_offset (float): Offset for the maximum value of temporal anchor.
Default: 1.
Returns:
tuple[Sequence[float]]: The minimum and maximum values of temporal
anchors.
"""
temporal_gap = 1. / self.temporal_dim
anchors_tmins = []
anchors_tmaxs = []
for i in range(self.temporal_dim):
anchors_tmins.append(temporal_gap * (i + tmin_offset))
anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
return anchors_tmins, anchors_tmaxs
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = F.relu(self.conv1_ratio * self.conv1(x))
x = F.relu(self.conv2_ratio * self.conv2(x))
x = torch.sigmoid(self.conv3_ratio * self.conv3(x))
return x
def forward_train(self, raw_feature, label_action, label_start, label_end):
"""Define the computation performed at every call when training."""
tem_output = self._forward(raw_feature)
score_action = tem_output[:, 0, :]
score_start = tem_output[:, 1, :]
score_end = tem_output[:, 2, :]
loss_action = self.loss_cls(score_action, label_action,
self.match_threshold)
loss_start_small = self.loss_cls(score_start, label_start,
self.match_threshold)
loss_end_small = self.loss_cls(score_end, label_end,
self.match_threshold)
loss_dict = {
'loss_action': loss_action * self.loss_weight,
'loss_start': loss_start_small,
'loss_end': loss_end_small
}
return loss_dict
def forward_test(self, raw_feature, video_meta):
"""Define the computation performed at every call when testing."""
tem_output = self._forward(raw_feature).cpu().numpy()
batch_action = tem_output[:, 0, :]
batch_start = tem_output[:, 1, :]
batch_end = tem_output[:, 2, :]
video_meta_list = [dict(x) for x in video_meta]
video_results = []
for batch_idx, _ in enumerate(batch_action):
video_name = video_meta_list[batch_idx]['video_name']
video_action = batch_action[batch_idx]
video_start = batch_start[batch_idx]
video_end = batch_end[batch_idx]
video_result = np.stack((video_action, video_start, video_end,
self.anchors_tmins, self.anchors_tmaxs),
axis=1)
video_results.append((video_name, video_result))
return video_results
def generate_labels(self, gt_bbox):
"""Generate training labels."""
match_score_action_list = []
match_score_start_list = []
match_score_end_list = []
for every_gt_bbox in gt_bbox:
gt_tmins = every_gt_bbox[:, 0].cpu().numpy()
gt_tmaxs = every_gt_bbox[:, 1].cpu().numpy()
gt_lens = gt_tmaxs - gt_tmins
gt_len_pad = np.maximum(1. / self.temporal_dim,
self.boundary_ratio * gt_lens)
gt_start_bboxs = np.stack(
(gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1)
gt_end_bboxs = np.stack(
(gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1)
match_score_action = []
match_score_start = []
match_score_end = []
for anchor_tmin, anchor_tmax in zip(self.anchors_tmins,
self.anchors_tmaxs):
match_score_action.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax, gt_tmins,
gt_tmaxs)))
match_score_start.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_start_bboxs[:, 0], gt_start_bboxs[:,
1])))
match_score_end.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_action_list.append(match_score_action)
match_score_start_list.append(match_score_start)
match_score_end_list.append(match_score_end)
match_score_action_list = torch.Tensor(match_score_action_list)
match_score_start_list = torch.Tensor(match_score_start_list)
match_score_end_list = torch.Tensor(match_score_end_list)
return (match_score_action_list, match_score_start_list,
match_score_end_list)
def forward(self,
raw_feature,
gt_bbox=None,
video_meta=None,
return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
label_action, label_start, label_end = (
self.generate_labels(gt_bbox))
device = raw_feature.device
label_action = label_action.to(device)
label_start = label_start.to(device)
label_end = label_end.to(device)
return self.forward_train(raw_feature, label_action, label_start,
label_end)
return self.forward_test(raw_feature, video_meta)
@LOCALIZERS.register_module()
class PEM(BaseLocalizer):
"""Proposals Evaluation Model for Boundary Sensetive Network.
Please refer `BSN: Boundary Sensitive Network for Temporal Action
Proposal Generation <http://arxiv.org/abs/1806.02964>`_.
Code reference
https://github.com/wzmsltw/BSN-boundary-sensitive-network
Args:
pem_feat_dim (int): Feature dimension.
pem_hidden_dim (int): Hidden layer dimension.
pem_u_ratio_m (float): Ratio for medium score proposals to balance
data.
pem_u_ratio_l (float): Ratio for low score proposals to balance data.
pem_high_temporal_iou_threshold (float): High IoU threshold.
pem_low_temporal_iou_threshold (float): Low IoU threshold.
soft_nms_alpha (float): Soft NMS alpha.
soft_nms_low_threshold (float): Soft NMS low threshold.
soft_nms_high_threshold (float): Soft NMS high threshold.
post_process_top_k (int): Top k proposals in post process.
feature_extraction_interval (int):
Interval used in feature extraction. Default: 16.
fc1_ratio (float): Ratio for fc1 layer output. Default: 0.1.
fc2_ratio (float): Ratio for fc2 layer output. Default: 0.1.
output_dim (int): Output dimension. Default: 1.
"""
def __init__(self,
pem_feat_dim,
pem_hidden_dim,
pem_u_ratio_m,
pem_u_ratio_l,
pem_high_temporal_iou_threshold,
pem_low_temporal_iou_threshold,
soft_nms_alpha,
soft_nms_low_threshold,
soft_nms_high_threshold,
post_process_top_k,
feature_extraction_interval=16,
fc1_ratio=0.1,
fc2_ratio=0.1,
output_dim=1):
super(BaseLocalizer, self).__init__()
self.feat_dim = pem_feat_dim
self.hidden_dim = pem_hidden_dim
self.u_ratio_m = pem_u_ratio_m
self.u_ratio_l = pem_u_ratio_l
self.pem_high_temporal_iou_threshold = pem_high_temporal_iou_threshold
self.pem_low_temporal_iou_threshold = pem_low_temporal_iou_threshold
self.soft_nms_alpha = soft_nms_alpha
self.soft_nms_low_threshold = soft_nms_low_threshold
self.soft_nms_high_threshold = soft_nms_high_threshold
self.post_process_top_k = post_process_top_k
self.feature_extraction_interval = feature_extraction_interval
self.fc1_ratio = fc1_ratio
self.fc2_ratio = fc2_ratio
self.output_dim = output_dim
self.fc1 = nn.Linear(
in_features=self.feat_dim, out_features=self.hidden_dim, bias=True)
self.fc2 = nn.Linear(
in_features=self.hidden_dim,
out_features=self.output_dim,
bias=True)
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = torch.cat(list(x))
x = F.relu(self.fc1_ratio * self.fc1(x))
x = torch.sigmoid(self.fc2_ratio * self.fc2(x))
return x
def forward_train(self, bsp_feature, reference_temporal_iou):
"""Define the computation performed at every call when training."""
pem_output = self._forward(bsp_feature)
reference_temporal_iou = torch.cat(list(reference_temporal_iou))
device = pem_output.device
reference_temporal_iou = reference_temporal_iou.to(device)
anchors_temporal_iou = pem_output.view(-1)
u_hmask = (reference_temporal_iou >
self.pem_high_temporal_iou_threshold).float()
u_mmask = (
(reference_temporal_iou <= self.pem_high_temporal_iou_threshold)
& (reference_temporal_iou > self.pem_low_temporal_iou_threshold)
).float()
u_lmask = (reference_temporal_iou <=
self.pem_low_temporal_iou_threshold).float()
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = self.u_ratio_m * num_h / (num_m)
r_m = torch.min(r_m, torch.Tensor([1.0]).to(device))[0]
u_smmask = torch.rand(u_hmask.size()[0], device=device)
u_smmask = u_smmask * u_mmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = self.u_ratio_l * num_h / (num_l)
r_l = torch.min(r_l, torch.Tensor([1.0]).to(device))[0]
u_slmask = torch.rand(u_hmask.size()[0], device=device)
u_slmask = u_slmask * u_lmask
u_slmask = (u_slmask > (1. - r_l)).float()
temporal_iou_weights = u_hmask + u_smmask + u_slmask
temporal_iou_loss = F.smooth_l1_loss(anchors_temporal_iou,
reference_temporal_iou)
temporal_iou_loss = torch.sum(
temporal_iou_loss *
temporal_iou_weights) / torch.sum(temporal_iou_weights)
loss_dict = dict(temporal_iou_loss=temporal_iou_loss)
return loss_dict
def forward_test(self, bsp_feature, tmin, tmax, tmin_score, tmax_score,
video_meta):
"""Define the computation performed at every call when testing."""
pem_output = self._forward(bsp_feature).view(-1).cpu().numpy().reshape(
-1, 1)
tmin = tmin.view(-1).cpu().numpy().reshape(-1, 1)
tmax = tmax.view(-1).cpu().numpy().reshape(-1, 1)
tmin_score = tmin_score.view(-1).cpu().numpy().reshape(-1, 1)
tmax_score = tmax_score.view(-1).cpu().numpy().reshape(-1, 1)
score = np.array(pem_output * tmin_score * tmax_score).reshape(-1, 1)
result = np.concatenate(
(tmin, tmax, tmin_score, tmax_score, pem_output, score), axis=1)
result = result.reshape(-1, 6)
video_info = dict(video_meta[0])
proposal_list = post_processing(result, video_info,
self.soft_nms_alpha,
self.soft_nms_low_threshold,
self.soft_nms_high_threshold,
self.post_process_top_k,
self.feature_extraction_interval)
output = [
dict(
video_name=video_info['video_name'],
proposal_list=proposal_list)
]
return output
def forward(self,
bsp_feature,
reference_temporal_iou=None,
tmin=None,
tmax=None,
tmin_score=None,
tmax_score=None,
video_meta=None,
return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
return self.forward_train(bsp_feature, reference_temporal_iou)
return self.forward_test(bsp_feature, tmin, tmax, tmin_score,
tmax_score, video_meta)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/bsn.py |
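A minimal smoke-test sketch for the TEM defined in bsn.py above, assuming mmaction (and its registered BinaryLogisticRegressionLoss) is importable; the feature and temporal dimensions below are illustrative placeholders, not values taken from any shipped config.
import torch
from mmaction.models.localizers import TEM

# Illustrative sizes: 400-d snippet features over 100 temporal positions.
tem = TEM(
    temporal_dim=100,
    boundary_ratio=0.1,
    tem_feat_dim=400,
    tem_hidden_dim=512,
    tem_match_threshold=0.5)

raw_feature = torch.randn(2, 400, 100)               # (batch, feat_dim, temporal_dim)
gt_bbox = [torch.tensor([[0.1, 0.3], [0.6, 0.8]]),   # normalized (tmin, tmax) rows per video
           torch.tensor([[0.2, 0.5]])]

losses = tem(raw_feature, gt_bbox=gt_bbox, return_loss=True)
print(losses)            # {'loss_action': ..., 'loss_start': ..., 'loss_end': ...}

video_meta = [dict(video_name='video_0'), dict(video_name='video_1')]
results = tem(raw_feature, video_meta=video_meta, return_loss=False)
print(results[0][1].shape)   # (100, 5): action/start/end scores plus anchor tmin/tmax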
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.distributed as dist
import torch.nn as nn
from .. import builder
class BaseLocalizer(nn.Module, metaclass=ABCMeta):
"""Base class for localizers.
All localizers should subclass it. All subclasses should overwrite:
``forward_train``, supporting forward computation when training.
``forward_test``, supporting forward computation when testing.
"""
def __init__(self, backbone, cls_head, train_cfg=None, test_cfg=None):
super().__init__()
self.backbone = builder.build_backbone(backbone)
self.cls_head = builder.build_head(cls_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights()
def init_weights(self):
"""Weight initialization for model."""
self.backbone.init_weights()
self.cls_head.init_weights()
def extract_feat(self, imgs):
"""Extract features through a backbone.
Args:
imgs (torch.Tensor): The input images.
Returns:
torch.tensor: The extracted features.
"""
x = self.backbone(imgs)
return x
@abstractmethod
def forward_train(self, imgs, labels):
"""Defines the computation performed at training."""
@abstractmethod
def forward_test(self, imgs):
"""Defines the computation performed at testing."""
def forward(self, imgs, return_loss=True, **kwargs):
"""Define the computation performed at every call."""
if return_loss:
return self.forward_train(imgs, **kwargs)
return self.forward_test(imgs, **kwargs)
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
losses = self.forward(**data_batch)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
results = self.forward(return_loss=False, **data_batch)
outputs = dict(results=results)
return outputs
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/base.py |
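Since ``_parse_losses`` is a staticmethod, its reduction behavior can be checked in isolation. A small sketch (non-distributed run assumed; the loss dict is hypothetical):
import torch
from mmaction.models.localizers import BaseLocalizer

losses = dict(
    loss_action=torch.tensor([0.9, 1.1]),               # tensors are mean-reduced
    loss_start=[torch.tensor(0.4), torch.tensor(0.6)],  # lists are summed after mean-reduction
    acc=torch.tensor(0.7))                              # keys without 'loss' are logged only

loss, log_vars = BaseLocalizer._parse_losses(losses)
print(loss)       # tensor(2.) = 1.0 (loss_action) + 1.0 (loss_start)
print(log_vars)   # loss_action 1.0, loss_start 1.0, acc 0.7, loss 2.0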
from .post_processing import post_processing
__all__ = ['post_processing']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/utils/__init__.py |
from mmaction.localization import soft_nms
def post_processing(result, video_info, soft_nms_alpha, soft_nms_low_threshold,
soft_nms_high_threshold, post_process_top_k,
feature_extraction_interval):
"""Post process for temporal proposals generation.
Args:
result (np.ndarray): Proposals generated by network.
video_info (dict): Meta data of video. Required keys are
'duration_frame', 'duration_second'.
soft_nms_alpha (float): Alpha value of Gaussian decaying function.
soft_nms_low_threshold (float): Low threshold for soft nms.
soft_nms_high_threshold (float): High threshold for soft nms.
post_process_top_k (int): Top k values to be considered.
feature_extraction_interval (int): Interval used in feature extraction.
Returns:
list[dict]: The updated proposals, e.g.
[{'score': 0.9, 'segment': [0, 1]},
{'score': 0.8, 'segment': [0, 2]},
...].
"""
if len(result) > 1:
result = soft_nms(result, soft_nms_alpha, soft_nms_low_threshold,
soft_nms_high_threshold, post_process_top_k)
result = result[result[:, -1].argsort()[::-1]]
video_duration = float(
video_info['duration_frame'] // feature_extraction_interval *
feature_extraction_interval
) / video_info['duration_frame'] * video_info['duration_second']
proposal_list = []
for j in range(min(post_process_top_k, len(result))):
proposal = {}
proposal['score'] = float(result[j, -1])
proposal['segment'] = [
max(0, result[j, 0]) * video_duration,
min(1, result[j, 1]) * video_duration
]
proposal_list.append(proposal)
return proposal_list
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/utils/post_processing.py |
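A hedged sketch of calling post_processing directly; the two proposals and the video metadata are made up, and the six-column layout mirrors what PEM.forward_test concatenates above.
import numpy as np
from mmaction.models.localizers.utils import post_processing

# columns: tmin, tmax, tmin_score, tmax_score, pem_score, combined score
result = np.array([
    [0.10, 0.40, 0.9, 0.8, 0.95, 0.684],
    [0.55, 0.90, 0.7, 0.9, 0.80, 0.504],
])
# 300 frames over 10 s; the duration is snapped to a multiple of the
# 16-frame feature extraction interval before segments are rescaled to seconds.
video_info = dict(duration_frame=300, duration_second=10.0)

proposals = post_processing(
    result, video_info,
    soft_nms_alpha=0.75,
    soft_nms_low_threshold=0.65,
    soft_nms_high_threshold=0.9,
    post_process_top_k=100,
    feature_extraction_interval=16)
print(proposals)
# approximately:
# [{'score': 0.684, 'segment': [0.96, 3.84]},
#  {'score': 0.504, 'segment': [5.28, 8.64]}]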
from .bsn_utils import generate_bsp_feature, generate_candidate_proposals
from .proposal_utils import soft_nms, temporal_iop, temporal_iou
from .ssn_utils import (eval_ap, load_localize_proposal_file,
perform_regression, temporal_nms)
__all__ = [
'generate_candidate_proposals', 'generate_bsp_feature', 'temporal_iop',
'temporal_iou', 'soft_nms', 'load_localize_proposal_file',
'perform_regression', 'temporal_nms', 'eval_ap'
]
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/localization/__init__.py |
import os.path as osp
import numpy as np
from .proposal_utils import temporal_iop, temporal_iou
def generate_candidate_proposals(video_list,
video_infos,
tem_results_dir,
temporal_scale,
peak_threshold,
tem_results_ext='.csv',
result_dict=None):
"""Generate Candidate Proposals with given temporal evalutation results.
Each proposal file will contain:
'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'.
Args:
video_list (list[int]): List of video indexes to generate proposals.
video_infos (list[dict]): List of video_info dict that contains
'video_name', 'duration_frame', 'duration_second',
'feature_frame', and 'annotations'.
tem_results_dir (str): Directory to load temporal evaluation
results.
temporal_scale (int): The number (scale) on temporal axis.
peak_threshold (float): The threshold for proposal generation.
tem_results_ext (str): File extension for temporal evaluation
model output. Default: '.csv'.
result_dict (dict | None): The dict to save the results. Default: None.
Returns:
dict: A dict contains video_name as keys and proposal list as value.
If result_dict is not None, save the results to it.
"""
if tem_results_ext != '.csv':
raise NotImplementedError('Only support csv format now.')
tscale = temporal_scale
tgap = 1. / tscale
proposal_dict = {}
for video_index in video_list:
video_name = video_infos[video_index]['video_name']
tem_path = osp.join(tem_results_dir, video_name + tem_results_ext)
tem_results = np.loadtxt(
tem_path, dtype=np.float32, delimiter=',', skiprows=1)
start_scores = tem_results[:, 1]
end_scores = tem_results[:, 2]
max_start = max(start_scores)
max_end = max(end_scores)
start_bins = np.zeros(len(start_scores))
start_bins[[0, -1]] = 1
end_bins = np.zeros(len(end_scores))
end_bins[[0, -1]] = 1
for idx in range(1, tscale - 1):
if start_scores[idx] > start_scores[
idx + 1] and start_scores[idx] > start_scores[idx - 1]:
start_bins[idx] = 1
elif start_scores[idx] > (peak_threshold * max_start):
start_bins[idx] = 1
if end_scores[idx] > end_scores[
idx + 1] and end_scores[idx] > end_scores[idx - 1]:
end_bins[idx] = 1
elif end_scores[idx] > (peak_threshold * max_end):
end_bins[idx] = 1
tmin_list = []
tmin_score_list = []
tmax_list = []
tmax_score_list = []
for idx in range(tscale):
if start_bins[idx] == 1:
tmin_list.append(tgap / 2 + tgap * idx)
tmin_score_list.append(start_scores[idx])
if end_bins[idx] == 1:
tmax_list.append(tgap / 2 + tgap * idx)
tmax_score_list.append(end_scores[idx])
new_props = []
for tmax, tmax_score in zip(tmax_list, tmax_score_list):
for tmin, tmin_score in zip(tmin_list, tmin_score_list):
if tmin >= tmax:
break
new_props.append([tmin, tmax, tmin_score, tmax_score])
new_props = np.stack(new_props)
score = (new_props[:, 2] * new_props[:, 3]).reshape(-1, 1)
new_props = np.concatenate((new_props, score), axis=1)
new_props = new_props[new_props[:, -1].argsort()[::-1]]
video_info = video_infos[video_index]
video_frame = video_info['duration_frame']
video_second = video_info['duration_second']
feature_frame = video_info['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second
gt_tmins = []
gt_tmaxs = []
for annotations in video_info['annotations']:
gt_tmins.append(annotations['segment'][0] / corrected_second)
gt_tmaxs.append(annotations['segment'][1] / corrected_second)
new_iou_list = []
new_ioa_list = []
for new_prop in new_props:
new_iou = max(
temporal_iou(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs))
new_ioa = max(
temporal_iop(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs))
new_iou_list.append(new_iou)
new_ioa_list.append(new_ioa)
new_iou_list = np.array(new_iou_list).reshape(-1, 1)
new_ioa_list = np.array(new_ioa_list).reshape(-1, 1)
new_props = np.concatenate((new_props, new_iou_list), axis=1)
new_props = np.concatenate((new_props, new_ioa_list), axis=1)
proposal_dict[video_name] = new_props
if result_dict is not None:
result_dict[video_name] = new_props
return proposal_dict
def generate_bsp_feature(video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
top_k=1000,
bsp_boundary_ratio=0.2,
num_sample_start=8,
num_sample_end=8,
num_sample_action=16,
num_sample_interp=3,
tem_results_ext='.csv',
pgm_proposal_ext='.csv',
result_dict=None):
"""Generate Boundary-Sensitive Proposal Feature with given proposals.
Args:
video_list (list[int]): List of video indexes to generate bsp_feature.
video_infos (list[dict]): List of video_info dict that contains
'video_name'.
tem_results_dir (str): Directory to load temporal evaluation
results.
pgm_proposals_dir (str): Directory to load proposals.
top_k (int): Number of proposals to be considered. Default: 1000
bsp_boundary_ratio (float): Ratio for proposal boundary
(start/end). Default: 0.2.
num_sample_start (int): Num of samples for actionness in
start region. Default: 8.
num_sample_end (int): Num of samples for actionness in end region.
Default: 8.
num_sample_action (int): Num of samples for actionness in center
region. Default: 16.
num_sample_interp (int): Num of samples for interpolation for
each sample point. Default: 3.
tem_results_ext (str): File extension for temporal evaluation
model output. Default: '.csv'.
pgm_proposal_ext (str): File extension for proposals. Default: '.csv'.
result_dict (dict | None): The dict to save the results. Default: None.
Returns:
bsp_feature_dict (dict): A dict contains video_name as keys and
bsp_feature as value. If result_dict is not None, save the
results to it.
"""
if tem_results_ext != '.csv' or pgm_proposal_ext != '.csv':
raise NotImplementedError('Only support csv format now.')
bsp_feature_dict = {}
for video_index in video_list:
video_name = video_infos[video_index]['video_name']
# Load temporal evaluation results
tem_path = osp.join(tem_results_dir, video_name + tem_results_ext)
tem_results = np.loadtxt(
tem_path, dtype=np.float32, delimiter=',', skiprows=1)
score_action = tem_results[:, 0]
seg_tmins = tem_results[:, 3]
seg_tmaxs = tem_results[:, 4]
video_scale = len(tem_results)
video_gap = seg_tmaxs[0] - seg_tmins[0]
video_extend = int(video_scale / 4 + 10)
# Load proposals results
proposal_path = osp.join(pgm_proposals_dir,
video_name + pgm_proposal_ext)
pgm_proposals = np.loadtxt(
proposal_path, dtype=np.float32, delimiter=',', skiprows=1)
pgm_proposals = pgm_proposals[:top_k]
# Generate temporal sample points
boundary_zeros = np.zeros([video_extend])
score_action = np.concatenate(
(boundary_zeros, score_action, boundary_zeros))
begin_tp = []
middle_tp = []
end_tp = []
for i in range(video_extend):
begin_tp.append(-video_gap / 2 -
(video_extend - 1 - i) * video_gap)
end_tp.append(video_gap / 2 + seg_tmaxs[-1] + i * video_gap)
for i in range(video_scale):
middle_tp.append(video_gap / 2 + i * video_gap)
t_points = begin_tp + middle_tp + end_tp
bsp_feature = []
for pgm_proposal in pgm_proposals:
tmin = pgm_proposal[0]
tmax = pgm_proposal[1]
tlen = tmax - tmin
# Temporal range for start
tmin_0 = tmin - tlen * bsp_boundary_ratio
tmin_1 = tmin + tlen * bsp_boundary_ratio
# Temporal range for end
tmax_0 = tmax - tlen * bsp_boundary_ratio
tmax_1 = tmax + tlen * bsp_boundary_ratio
# Generate features at start boundary
tlen_start = (tmin_1 - tmin_0) / (num_sample_start - 1)
tlen_start_sample = tlen_start / num_sample_interp
t_new = [
tmin_0 - tlen_start / 2 + tlen_start_sample * i
for i in range(num_sample_start * num_sample_interp + 1)
]
y_new_start_action = np.interp(t_new, t_points, score_action)
y_new_start = [
np.mean(y_new_start_action[i * num_sample_interp:(i + 1) *
num_sample_interp + 1])
for i in range(num_sample_start)
]
# Generate features at end boundary
tlen_end = (tmax_1 - tmax_0) / (num_sample_end - 1)
tlen_end_sample = tlen_end / num_sample_interp
t_new = [
tmax_0 - tlen_end / 2 + tlen_end_sample * i
for i in range(num_sample_end * num_sample_interp + 1)
]
y_new_end_action = np.interp(t_new, t_points, score_action)
y_new_end = [
np.mean(y_new_end_action[i * num_sample_interp:(i + 1) *
num_sample_interp + 1])
for i in range(num_sample_end)
]
# Generate features for action
tlen_action = (tmax - tmin) / (num_sample_action - 1)
tlen_action_sample = tlen_action / num_sample_interp
t_new = [
tmin - tlen_action / 2 + tlen_action_sample * i
for i in range(num_sample_action * num_sample_interp + 1)
]
y_new_action = np.interp(t_new, t_points, score_action)
y_new_action = [
np.mean(y_new_action[i * num_sample_interp:(i + 1) *
num_sample_interp + 1])
for i in range(num_sample_action)
]
feature = np.concatenate([y_new_action, y_new_start, y_new_end])
bsp_feature.append(feature)
bsp_feature = np.array(bsp_feature)
bsp_feature_dict[video_name] = bsp_feature
if result_dict is not None:
result_dict[video_name] = bsp_feature
return bsp_feature_dict
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/localization/bsn_utils.py |
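The proposal generation in bsn_utils.py hinges on the start/end "bin" selection: a position becomes a boundary candidate if it is a local maximum of its score curve or exceeds peak_threshold times the curve maximum, and the first and last positions are always kept. A self-contained toy illustration of just that rule:
import numpy as np

scores = np.array([0.1, 0.3, 0.2, 0.05, 0.6, 0.4])
peak_threshold = 0.5

bins = np.zeros(len(scores))
bins[[0, -1]] = 1                        # boundary positions are always candidates
for idx in range(1, len(scores) - 1):
    if scores[idx] > scores[idx - 1] and scores[idx] > scores[idx + 1]:
        bins[idx] = 1                    # local maximum of the score curve
    elif scores[idx] > peak_threshold * scores.max():
        bins[idx] = 1                    # high absolute score
print(bins)   # [1. 1. 0. 0. 1. 1.]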
import numpy as np
def temporal_iou(proposal_min, proposal_max, gt_min, gt_max):
"""Compute IoU score between a groundtruth bbox and the proposals.
Args:
proposal_min (list[float]): List of temporal anchor min.
proposal_max (list[float]): List of temporal anchor max.
gt_min (float): Groundtruth temporal box min.
gt_max (float): Groundtruth temporal box max.
Returns:
list[float]: List of iou scores.
"""
len_anchors = proposal_max - proposal_min
int_tmin = np.maximum(proposal_min, gt_min)
int_tmax = np.minimum(proposal_max, gt_max)
inter_len = np.maximum(int_tmax - int_tmin, 0.)
union_len = len_anchors - inter_len + gt_max - gt_min
jaccard = np.divide(inter_len, union_len)
return jaccard
def temporal_iop(proposal_min, proposal_max, gt_min, gt_max):
"""Compute IoP score between a groundtruth bbox and the proposals.
Compute the IoP, which is defined as the overlap with the groundtruth
divided by the duration of the proposal.
Args:
proposal_min (list[float]): List of temporal anchor min.
proposal_max (list[float]): List of temporal anchor max.
gt_min (float): Groundtruth temporal box min.
gt_max (float): Groundtruth temporal box max.
Returns:
list[float]: List of intersection over anchor scores.
"""
len_anchors = np.array(proposal_max - proposal_min)
int_tmin = np.maximum(proposal_min, gt_min)
int_tmax = np.minimum(proposal_max, gt_max)
inter_len = np.maximum(int_tmax - int_tmin, 0.)
scores = np.divide(inter_len, len_anchors)
return scores
def soft_nms(proposals, alpha, low_threshold, high_threshold, top_k):
"""Soft NMS for temporal proposals.
Args:
proposals (np.ndarray): Proposals generated by network.
alpha (float): Alpha value of Gaussian decaying function.
low_threshold (float): Low threshold for soft nms.
high_threshold (float): High threshold for soft nms.
top_k (int): Top k values to be considered.
Returns:
np.ndarray: The updated proposals.
"""
proposals = proposals[proposals[:, -1].argsort()[::-1]]
tstart = list(proposals[:, 0])
tend = list(proposals[:, 1])
tscore = list(proposals[:, -1])
rstart = []
rend = []
rscore = []
while len(tscore) > 0 and len(rscore) <= top_k:
max_index = np.argmax(tscore)
max_width = tend[max_index] - tstart[max_index]
iou_list = temporal_iou(tstart[max_index], tend[max_index],
np.array(tstart), np.array(tend))
iou_exp_list = np.exp(-np.square(iou_list) / alpha)
for idx, _ in enumerate(tscore):
if idx != max_index:
current_iou = iou_list[idx]
if current_iou > low_threshold + (high_threshold -
low_threshold) * max_width:
tscore[idx] = tscore[idx] * iou_exp_list[idx]
rstart.append(tstart[max_index])
rend.append(tend[max_index])
rscore.append(tscore[max_index])
tstart.pop(max_index)
tend.pop(max_index)
tscore.pop(max_index)
rstart = np.array(rstart).reshape(-1, 1)
rend = np.array(rend).reshape(-1, 1)
rscore = np.array(rscore).reshape(-1, 1)
new_proposals = np.concatenate((rstart, rend, rscore), axis=1)
return new_proposals
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/localization/proposal_utils.py |
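A worked check of the two overlap measures in proposal_utils.py, using one anchor against two hand-picked ground-truth segments:
import numpy as np
from mmaction.localization import temporal_iou, temporal_iop

anchor_min, anchor_max = 0.2, 0.6
gt_min = np.array([0.3, 0.7])
gt_max = np.array([0.5, 0.9])

print(temporal_iou(anchor_min, anchor_max, gt_min, gt_max))
# intersection 0.2 / union (0.4 + 0.2 - 0.2) -> [0.5, 0.0]
print(temporal_iop(anchor_min, anchor_max, gt_min, gt_max))
# intersection 0.2 / anchor length 0.4 -> [0.5, 0.0]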
from itertools import groupby
import numpy as np
from ..core import average_precision_at_temporal_iou
from . import temporal_iou
def load_localize_proposal_file(filename):
"""Load the proposal file and split it into many parts which contain one
video's information separately.
Args:
filename(str): Path to the proposal file.
Returns:
list: List of all videos' information.
"""
lines = list(open(filename))
# Split the proposal file into many parts which contain one video's
# information separately.
groups = groupby(lines, lambda x: x.startswith('#'))
video_infos = [[x.strip() for x in list(g)] for k, g in groups if not k]
def parse_group(video_info):
"""Parse the video's information.
Template information of a video in a standard file:
# index
video_id
num_frames
fps
num_gts
label, start_frame, end_frame
label, start_frame, end_frame
...
num_proposals
label, best_iou, overlap_self, start_frame, end_frame
label, best_iou, overlap_self, start_frame, end_frame
...
Example of a standard annotation file:
.. code-block:: txt
# 0
video_validation_0000202
5666
1
3
8 130 185
8 832 1136
8 1303 1381
5
8 0.0620 0.0620 790 5671
8 0.1656 0.1656 790 2619
8 0.0833 0.0833 3945 5671
8 0.0960 0.0960 4173 5671
8 0.0614 0.0614 3327 5671
Args:
video_info (list): Information of the video.
Returns:
tuple[str, int, list, list]:
video_id (str): Name of the video.
num_frames (int): Number of frames in the video.
gt_boxes (list): List of the information of gt boxes.
proposal_boxes (list): List of the information of
proposal boxes.
"""
offset = 0
video_id = video_info[offset]
offset += 1
num_frames = int(float(video_info[1]) * float(video_info[2]))
num_gts = int(video_info[3])
offset = 4
gt_boxes = [x.split() for x in video_info[offset:offset + num_gts]]
offset += num_gts
num_proposals = int(video_info[offset])
offset += 1
proposal_boxes = [
x.split() for x in video_info[offset:offset + num_proposals]
]
return video_id, num_frames, gt_boxes, proposal_boxes
return [parse_group(video_info) for video_info in video_infos]
def perform_regression(detections):
"""Perform regression on detection results.
Args:
detections (list): Detection results before regression.
Returns:
list: Detection results after regression.
"""
starts = detections[:, 0]
ends = detections[:, 1]
centers = (starts + ends) / 2
durations = ends - starts
new_centers = centers + durations * detections[:, 3]
new_durations = durations * np.exp(detections[:, 4])
new_detections = np.concatenate(
(np.clip(new_centers - new_durations / 2, 0,
1)[:, None], np.clip(new_centers + new_durations / 2, 0,
1)[:, None], detections[:, 2:]),
axis=1)
return new_detections
def temporal_nms(detections, threshold):
"""Parse the video's information.
Args:
detections (list): Detection results before NMS.
threshold (float): Threshold of NMS.
Returns:
list: Detection results after NMS.
"""
starts = detections[:, 0]
ends = detections[:, 1]
scores = detections[:, 2]
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
ious = temporal_iou(starts[order[1:]], ends[order[1:]], starts[i],
ends[i])
idxs = np.where(ious <= threshold)[0]
order = order[idxs + 1]
return detections[keep, :]
def eval_ap(detections, gt_by_cls, iou_range):
"""Evaluate average precisions.
Args:
detections (dict): Results of detections.
gt_by_cls (dict): Information of groundtruth.
iou_range (list): Ranges of iou.
Returns:
list: Average precision values of classes at ious.
"""
ap_values = np.zeros((len(detections), len(iou_range)))
for iou_idx, min_overlap in enumerate(iou_range):
for class_idx, _ in enumerate(detections):
ap = average_precision_at_temporal_iou(gt_by_cls[class_idx],
detections[class_idx],
[min_overlap])
ap_values[class_idx, iou_idx] = ap
return ap_values
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/localization/ssn_utils.py |
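A small sanity check of temporal_nms from ssn_utils.py with hand-picked detections; the first two segments overlap heavily, so the lower-scored one is suppressed at threshold 0.5:
import numpy as np
from mmaction.localization import temporal_nms

dets = np.array([
    # start, end, score (any extra regression columns are carried along unchanged)
    [0.10, 0.50, 0.9],
    [0.12, 0.52, 0.8],   # IoU about 0.9 with the first row -> suppressed
    [0.60, 0.90, 0.7],
])
print(temporal_nms(dets, threshold=0.5))   # keeps rows 0 and 2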
import argparse
import mmcv
import numpy as np
import torch
from mmcv.runner import load_checkpoint
from mmaction.models import build_model
try:
import onnx
import onnxruntime as rt
except ImportError as e:
raise ImportError(f'Please install onnx and onnxruntime first. {e}')
try:
from mmcv.onnx.symbolic import register_extra_symbolics
except ModuleNotFoundError:
raise NotImplementedError('Please update mmcv to version >= 1.0.4')
def _convert_batchnorm(module):
"""Convert the syncBNs into normal BN3ds."""
module_output = module
if isinstance(module, torch.nn.SyncBatchNorm):
module_output = torch.nn.BatchNorm3d(module.num_features, module.eps,
module.momentum, module.affine,
module.track_running_stats)
if module.affine:
module_output.weight.data = module.weight.data.clone().detach()
module_output.bias.data = module.bias.data.clone().detach()
# keep requires_grad unchanged
module_output.weight.requires_grad = module.weight.requires_grad
module_output.bias.requires_grad = module.bias.requires_grad
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
for name, child in module.named_children():
module_output.add_module(name, _convert_batchnorm(child))
del module
return module_output
def pytorch2onnx(model,
input_shape,
opset_version=11,
show=False,
output_file='tmp.onnx',
verify=False):
"""Convert pytorch model to onnx model.
Args:
model (:obj:`nn.Module`): The pytorch model to be exported.
input_shape (tuple[int]): The input tensor shape of the model.
opset_version (int): Opset version of onnx used. Default: 11.
show (bool): Determines whether to print the onnx model architecture.
Default: False.
output_file (str): Output onnx model name. Default: 'tmp.onnx'.
verify (bool): Determines whether to verify the onnx model.
Default: False.
"""
model.cpu().eval()
one_img = torch.randn(input_shape)
register_extra_symbolics(opset_version)
torch.onnx.export(
model,
one_img,
output_file,
export_params=True,
keep_initializers_as_inputs=True,
verbose=show,
opset_version=opset_version)
print(f'Successfully exported ONNX model: {output_file}')
if verify:
# check by onnx
onnx_model = onnx.load(output_file)
onnx.checker.check_model(onnx_model)
# check the numerical value
# get pytorch output
pytorch_result = model(one_img)[0].detach().numpy()
# get onnx output
input_all = [node.name for node in onnx_model.graph.input]
input_initializer = [
node.name for node in onnx_model.graph.initializer
]
net_feed_input = list(set(input_all) - set(input_initializer))
assert len(net_feed_input) == 1
sess = rt.InferenceSession(output_file)
onnx_result = sess.run(None,
{net_feed_input[0]: one_img.detach().numpy()
})[0]
# only compare part of results
assert np.allclose(
pytorch_result[:, 4], onnx_result[:, 4]
), 'The outputs are different between Pytorch and ONNX'
print('The numerical values are same between Pytorch and ONNX')
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMAction2 models to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--show', action='store_true', help='show onnx graph')
parser.add_argument('--output-file', type=str, default='tmp.onnx')
parser.add_argument('--opset-version', type=int, default=11)
parser.add_argument(
'--verify',
action='store_true',
help='verify the onnx model output against pytorch output')
parser.add_argument(
'--is-localizer',
action='store_true',
help='whether it is a localizer')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1, 3, 8, 224, 224],
help='input video size')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
assert args.opset_version == 11, 'MMAction2 only supports opset 11 now'
cfg = mmcv.Config.fromfile(args.config)
# import modules from string list.
if not args.is_localizer:
cfg.model.backbone.pretrained = None
# build the model
model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
model = _convert_batchnorm(model)
# onnx.export does not support kwargs
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
elif hasattr(model, '_forward') and args.is_localizer:
model.forward = model._forward
else:
raise NotImplementedError(
'Please implement the forward method for exporting.')
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
# convert model to onnx file
pytorch2onnx(
model,
args.shape,
opset_version=args.opset_version,
show=args.show,
output_file=args.output_file,
verify=args.verify)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/pytorch2onnx.py |
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/__init__.py |
import argparse
import os
import os.path as osp
import cv2
import numpy as np
def flow_to_img(raw_flow, bound=20.):
"""Convert flow to gray image.
Args:
raw_flow (np.ndarray[float]): Estimated flow with the shape (w, h).
bound (float): Bound for the flow-to-image normalization. Default: 20.
Returns:
np.ndarray[uint8]: The converted flow image, with shape (w, h).
"""
flow = np.clip(raw_flow, -bound, bound)
flow += bound
flow *= (255 / float(2 * bound))
flow = flow.astype(np.uint8)
return flow
def generate_flow(frames, method='tvl1'):
"""Estimate flow with given frames.
Args:
frames (list[np.ndarray[uint8]]): List of rgb frames, with shape
(w, h, 3).
method (str): Use which method to generate flow. Options are 'tvl1'
and 'farneback'. Default: 'tvl1'.
Returns:
list[np.ndarray[float]]: The result list of np.ndarray[float], with
shape (w, h, 2).
"""
assert method in ['tvl1', 'farneback']
gray_frames = [cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) for frame in frames]
if method == 'tvl1':
tvl1 = cv2.optflow.DualTVL1OpticalFlow_create()
def op(x, y):
return tvl1.calc(x, y, None)
elif method == 'farneback':
def op(x, y):
return cv2.calcOpticalFlowFarneback(x, y, None, 0.5, 3, 15, 3, 5,
1.2, 0)
gray_st = gray_frames[:-1]
gray_ed = gray_frames[1:]
flow = [op(x, y) for x, y in zip(gray_st, gray_ed)]
return flow
def extract_dense_flow(path,
dest,
bound=20.,
save_rgb=False,
start_idx=0,
rgb_tmpl='img_{:05d}.jpg',
flow_tmpl='{}_{:05d}.jpg',
method='tvl1'):
"""Extract dense flow given video or frames, save them as gray-scale
images.
Args:
path (str): Location of the input video.
dest (str): The directory to store the extracted flow images.
bound (float): Bound for the flow-to-image normalization. Default: 20.
save_rgb (bool): Save extracted RGB frames. Default: False.
start_idx (int): The starting frame index if use frames as input, the
first image is path.format(start_idx). Default: 0.
rgb_tmpl (str): The template of RGB frame names, Default:
'img_{:05d}.jpg'.
flow_tmpl (str): The template of Flow frame names, Default:
'{}_{:05d}.jpg'.
method (str): Use which method to generate flow. Options are 'tvl1'
and 'farneback'. Default: 'tvl1'.
"""
frames = []
assert osp.exists(path)
video = cv2.VideoCapture(path)
flag, f = video.read()
while flag:
frames.append(f)
flag, f = video.read()
flow = generate_flow(frames, method=method)
flow_x = [flow_to_img(x[:, :, 0], bound) for x in flow]
flow_y = [flow_to_img(x[:, :, 1], bound) for x in flow]
if not osp.exists(dest):
os.system('mkdir -p ' + dest)
flow_x_names = [
osp.join(dest, flow_tmpl.format('x', ind + start_idx))
for ind in range(len(flow_x))
]
flow_y_names = [
osp.join(dest, flow_tmpl.format('y', ind + start_idx))
for ind in range(len(flow_y))
]
num_frames = len(flow)
for i in range(num_frames):
cv2.imwrite(flow_x_names[i], flow_x[i])
cv2.imwrite(flow_y_names[i], flow_y[i])
if save_rgb:
img_names = [
osp.join(dest, rgb_tmpl.format(ind + start_idx))
for ind in range(len(frames))
]
for frame, name in zip(frames, img_names):
cv2.imwrite(name, frame)
def parse_args():
parser = argparse.ArgumentParser(description='Extract flow and RGB images')
parser.add_argument(
'--input',
help='videos for frame extraction, can be'
'single video or a video list, the video list should be a txt file '
'and just consists of filenames without directories')
parser.add_argument(
'--prefix',
default='',
help='the prefix of input '
'videos, used when input is a video list')
parser.add_argument(
'--dest',
default='',
help='the destination to save '
'extracted frames')
parser.add_argument(
'--save-rgb', action='store_true', help='also save '
'rgb frames')
parser.add_argument(
'--rgb-tmpl',
default='img_{:05d}.jpg',
help='template filename of rgb frames')
parser.add_argument(
'--flow-tmpl',
default='{}_{:05d}.jpg',
help='template filename of flow frames')
parser.add_argument(
'--start-idx',
type=int,
default=1,
help='the start '
'index of extracted frames')
parser.add_argument(
'--method',
default='tvl1',
help='use which method to '
'generate flow')
parser.add_argument(
'--bound', type=float, default=20, help='maximum of '
'optical flow')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if args.input.endswith('.txt'):
lines = open(args.input).readlines()
lines = [x.strip() for x in lines]
videos = [osp.join(args.prefix, x) for x in lines]
dests = [osp.join(args.dest, x.split('.')[0]) for x in lines]
for video, dest in zip(videos, dests):
extract_dense_flow(video, dest, args.bound, args.save_rgb,
args.start_idx, args.rgb_tmpl, args.flow_tmpl,
args.method)
else:
extract_dense_flow(args.input, args.dest, args.bound, args.save_rgb,
args.start_idx, args.rgb_tmpl, args.flow_tmpl,
args.method)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/flow_extraction.py |
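flow_extraction.py can also be driven from Python rather than the CLI. A hedged sketch, assuming the repo root is on PYTHONPATH and opencv-contrib-python is installed for the TV-L1 optical flow operator; the file names are hypothetical:
from tools.flow_extraction import extract_dense_flow

extract_dense_flow(
    path='demo.mp4',      # hypothetical input video
    dest='flow/demo',     # x_00001.jpg / y_00001.jpg ... are written here
    bound=20.,
    save_rgb=True,        # also dump img_00001.jpg ...
    start_idx=1,
    method='tvl1')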
import argparse
import os
import os.path as osp
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.fileio.io import file_handlers
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmcv.runner.fp16_utils import wrap_fp16_model
from mmaction.apis import multi_gpu_test, single_gpu_test
from mmaction.datasets import build_dataloader, build_dataset
from mmaction.models import build_model
def parse_args():
parser = argparse.ArgumentParser(
description='MMAction2 test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--videos_per_gpu', type=int, default=2, help='the updated number of videos for each GPU')
parser.add_argument('--workers_per_gpu', type=int, default=2, help='the updated number of workers for each GPU')
parser.add_argument(
'--out',
default=None,
help='output result file in pkl/yaml/json format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase '
'the inference speed')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g.,'
' "top_k_accuracy", "mean_class_accuracy" for video dataset')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
default={},
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
default={},
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
parser.add_argument(
'--average-clips',
choices=['score', 'prob', None],
default=None,
help='average type when averaging test clips')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both '
'specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
# Load output_config from cfg
output_config = cfg.get('output_config', {})
# Overwrite output_config from args.out
output_config = Config._merge_a_into_b(dict(out=args.out), output_config)
# Load eval_config from cfg
eval_config = cfg.get('eval_config', {})
# Overwrite eval_config from args.eval
eval_config = Config._merge_a_into_b(dict(metrics=args.eval), eval_config)
# Add options from args.eval_options
eval_config = Config._merge_a_into_b(args.eval_options, eval_config)
assert output_config or eval_config, \
('Please specify at least one operation (save or eval the '
'results) with the argument "--out" or "--eval"')
if output_config.get('out', None):
out = output_config['out']
# make sure the dirname of the output path exists
mmcv.mkdir_or_exist(osp.dirname(out))
_, suffix = osp.splitext(out)
assert suffix[1:] in file_handlers, \
'The format of the output file should be json, pickle or yaml'
# set cudnn benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.data.test.test_mode = True
if cfg.test_cfg is None:
cfg.test_cfg = dict(average_clips=args.average_clips)
else:
# You can set average_clips during testing, it will override the
# original setting
if args.average_clips is not None:
cfg.test_cfg.average_clips = args.average_clips
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
videos_per_gpu = min(args.videos_per_gpu, cfg.data.get('videos_per_gpu', 1))
workers_per_gpu = min(args.workers_per_gpu, cfg.data.get('workers_per_gpu', 1))
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=videos_per_gpu,
workers_per_gpu=workers_per_gpu,
dist=distributed,
shuffle=False,
pin_memory=False)
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
# build the model and load checkpoint
model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if output_config.get('out', None):
out = output_config['out']
print(f'\nwriting results to {out}')
dataset.dump_results(outputs, **output_config)
if eval_config:
eval_res = dataset.evaluate(outputs, **eval_config)
for name, val in eval_res.items():
print(f'{name}: {val:.04f}')
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/test.py |
import argparse
import subprocess
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
out_file_name = out_file[:-4]
else:
out_file_name = out_file
final_file = out_file_name + f'-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/publish_model.py |
import argparse
import os
import os.path as osp
import mmcv
import numpy as np
import torch.multiprocessing as mp
from mmaction.localization import (generate_bsp_feature,
generate_candidate_proposals)
def load_video_infos(ann_file):
"""Load the video annotations.
Args:
ann_file (str): A json file path of the annotation file.
Returns:
list[dict]: A list containing annotations for videos.
"""
video_infos = []
anno_database = mmcv.load(ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos
def generate_proposals(ann_file, tem_results_dir, pgm_proposals_dir,
pgm_proposals_thread, **kwargs):
"""Generate proposals using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results
pgm_proposals_dir (str): Directory to save generated proposals.
pgm_proposals_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_candidate_proposals".
"""
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = num_videos // pgm_proposals_thread
processes = []
manager = mp.Manager()
result_dict = manager.dict()
kwargs['result_dict'] = result_dict
for tid in range(pgm_proposals_thread - 1):
tmp_video_list = range(tid * num_videos_per_thread,
(tid + 1) * num_videos_per_thread)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range((pgm_proposals_thread - 1) * num_videos_per_thread,
num_videos)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
# save results
os.makedirs(pgm_proposals_dir, exist_ok=True)
prog_bar = mmcv.ProgressBar(num_videos)
header = 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'
for video_name in result_dict:
proposals = result_dict[video_name]
proposal_path = osp.join(pgm_proposals_dir, video_name + '.csv')
np.savetxt(
proposal_path,
proposals,
header=header,
delimiter=',',
comments='')
prog_bar.update()
def generate_features(ann_file, tem_results_dir, pgm_proposals_dir,
pgm_features_dir, pgm_features_thread, **kwargs):
"""Generate proposals features using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results.
pgm_proposals_dir (str): Directory to read generated proposals.
pgm_features_dir (str): Directory to save generated features.
pgm_features_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_bsp_feature".
"""
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = num_videos // pgm_features_thread
processes = []
manager = mp.Manager()
feature_return_dict = manager.dict()
kwargs['result_dict'] = feature_return_dict
for tid in range(pgm_features_thread - 1):
tmp_video_list = range(tid * num_videos_per_thread,
(tid + 1) * num_videos_per_thread)
p = mp.Process(
target=generate_bsp_feature,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range((pgm_features_thread - 1) * num_videos_per_thread,
num_videos)
p = mp.Process(
target=generate_bsp_feature,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
# save results
os.makedirs(pgm_features_dir, exist_ok=True)
prog_bar = mmcv.ProgressBar(num_videos)
for video_name in feature_return_dict.keys():
bsp_feature = feature_return_dict[video_name]
feature_path = osp.join(pgm_features_dir, video_name + '.npy')
np.save(feature_path, bsp_feature)
prog_bar.update()
def parse_args():
parser = argparse.ArgumentParser(description='Proposal generation module')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'--mode',
choices=['train', 'test'],
default='test',
help='train or test')
args = parser.parse_args()
return args
def main():
print('Begin Proposal Generation Module')
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
tem_results_dir = cfg.tem_results_dir
pgm_proposals_dir = cfg.pgm_proposals_dir
pgm_features_dir = cfg.pgm_features_dir
if args.mode == 'test':
generate_proposals(cfg.ann_file_val, tem_results_dir,
pgm_proposals_dir, **cfg.pgm_proposals_cfg)
print('\nFinish proposal generation')
generate_features(cfg.ann_file_val, tem_results_dir, pgm_proposals_dir,
pgm_features_dir, **cfg.pgm_features_test_cfg)
print('\nFinish feature generation')
elif args.mode == 'train':
generate_proposals(cfg.ann_file_train, tem_results_dir,
pgm_proposals_dir, **cfg.pgm_proposals_cfg)
print('\nFinish proposal generation')
generate_features(cfg.ann_file_train, tem_results_dir,
pgm_proposals_dir, pgm_features_dir,
**cfg.pgm_features_train_cfg)
print('\nFinish feature generation')
print('Finish Proposal Generation Module')
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/bsn_proposal_generation.py |
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import init_dist, set_random_seed
from mmcv.utils import get_git_hash
from mmaction import __version__
from mmaction.apis import train_model
from mmaction.datasets import build_dataset
from mmaction.models import build_model
from mmaction.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a recognizer')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--validate',
action='store_true',
help='whether to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
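# Example invocations (config paths are hypothetical; all flags come from
# parse_args above):
#   $ python tools/train.py configs/recognition/tsn/some_config.py \
#       --work-dir work_dirs/tsn_example --validate --seed 0 --deterministic
#   $ python -m torch.distributed.launch --nproc_per_node=4 tools/train.py \
#       configs/recognition/tsn/some_config.py --launcher pytorch --validate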
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority:
# CLI > config file > default (base filename)
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# The flag is used to determine whether it is omnisource training
cfg.setdefault('omnisource', False)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config: {cfg.text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['config_name'] = osp.basename(args.config)
meta['work_dir'] = osp.basename(cfg.work_dir.rstrip('/\\'))
model = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
if cfg.omnisource:
# If omnisource flag is set, cfg.data.train should be a list
assert type(cfg.data.train) is list
datasets = [build_dataset(dataset) for dataset in cfg.data.train]
else:
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
        # For simplicity, omnisource is not compatible with val workflow,
# we recommend you to use `--validate`
assert not cfg.omnisource
if args.validate:
warnings.warn('val workflow is duplicated with `--validate`, '
'it is recommended to use `--validate`. see '
'https://github.com/open-mmlab/mmaction2/pull/123')
val_dataset = copy.deepcopy(cfg.data.val)
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmaction version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmaction_version=__version__ + get_git_hash(digits=7),
config=cfg.text)
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=args.validate,
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/train.py |
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import init_dist, set_random_seed
from mmcv.utils import get_git_hash
import numpy as np
from mmaction import __version__
from mmaction.apis import train_model
from mmaction.datasets import build_dataset
from mmaction.models import build_model
from mmaction.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a recognizer')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--validate',
action='store_true',
help='whether to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--weight_factor', nargs='+', type=float, default=[0.1, 0.2, 0.3, 0.4])
parser.add_argument('--batch_size', type=int, default=8)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
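# Example invocation (the config path is hypothetical): sweep the debias head
# loss factor over several values with a smaller per-GPU batch size.
#   $ python tools/hypertune.py configs/recognition/some_debias_config.py \
#       --work-dir work_dirs/hypertune --validate \
#       --weight_factor 0.1 0.2 0.3 0.4 --batch_size 4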
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
# for hyperparameter searching
cfg.log_config.hooks = [dict(type='TextLoggerHook')]
cfg.checkpoint_config.interval = 10
cfg.evaluation.interval = 10
cfg.data.videos_per_gpu = args.batch_size
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority:
# CLI > config file > default (base filename)
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# The flag is used to determine whether it is omnisource training
cfg.setdefault('omnisource', False)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config: {cfg.text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['config_name'] = osp.basename(args.config)
meta['work_dir'] = osp.basename(cfg.work_dir.rstrip('/\\'))
if cfg.omnisource:
# If omnisource flag is set, cfg.data.train should be a list
assert type(cfg.data.train) is list
datasets = [build_dataset(dataset) for dataset in cfg.data.train]
else:
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
        # For simplicity, omnisource is not compatible with val workflow,
# we recommend you to use `--validate`
assert not cfg.omnisource
if args.validate:
warnings.warn('val workflow is duplicated with `--validate`, '
'it is recommended to use `--validate`. see '
'https://github.com/open-mmlab/mmaction2/pull/123')
val_dataset = copy.deepcopy(cfg.data.val)
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmaction version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmaction_version=__version__ + get_git_hash(digits=7),
config=cfg.text)
    # search for hyperparameters
save_dir = cfg.work_dir
for v in args.weight_factor:
print("searching for v=%.1f"%(v))
# change the hyperparameter
cfg.model.debias_head.loss_factor = v
cfg.work_dir = osp.join(save_dir, 'search_%.1f'%(v))
# build the model
model = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
# training and validation
train_model(model, datasets, cfg, distributed=distributed, validate=args.validate, timestamp=timestamp, meta=meta)
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/hypertune.py |
import argparse
import mmcv
from mmcv import Config, DictAction
from mmaction.datasets import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl/yaml/json format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('results', help='Results in pkl/yaml/json format')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g.,'
' "top_k_accuracy", "mean_class_accuracy" for video dataset')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
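# Example invocation (file names are hypothetical): re-evaluate previously
# dumped test results without running inference again.
#   $ python tools/analysis/eval_metric.py configs/recognition/some_config.py \
#       results.pkl --eval top_k_accuracy mean_class_accuracy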
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
assert args.eval is not None
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.results)
kwargs = {} if args.eval_options is None else args.eval_options
eval_kwargs = cfg.get('evaluation', {}).copy()
    # hard-coded way to remove EpochEvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule',
'key_indicator'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metrics=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/analysis/eval_metric.py |
import argparse
import time
import torch
from mmcv import Config
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel
from mmcv.runner.fp16_utils import wrap_fp16_model
from mmaction.datasets import build_dataloader, build_dataset
from mmaction.models import build_model
def parse_args():
parser = argparse.ArgumentParser(
description='MMAction2 benchmark a recognizer')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'--log-interval', default=10, help='interval of logging')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
        'the inference speed')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.backbone.pretrained = None
cfg.data.test.test_mode = True
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
data_loader = build_dataloader(
dataset,
videos_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=False,
shuffle=False)
# build the model and load checkpoint
model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
model = MMDataParallel(model, device_ids=[0])
model.eval()
# the first several iterations may be very slow so skip them
num_warmup = 5
pure_inf_time = 0
    # benchmark with the first 200 videos and take the average
for i, data in enumerate(data_loader):
torch.cuda.synchronize()
start_time = time.perf_counter()
with torch.no_grad():
model(return_loss=False, **data)
torch.cuda.synchronize()
elapsed = time.perf_counter() - start_time
if i >= num_warmup:
pure_inf_time += elapsed
if (i + 1) % args.log_interval == 0:
fps = (i + 1 - num_warmup) / pure_inf_time
print(
                    f'Done video [{i + 1:<3}/ 200], fps: {fps:.1f} video / s')
            if (i + 1) == 200:
                # `elapsed` was already accumulated above; avoid counting it twice
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Overall fps: {fps:.1f} video / s')
                break
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/analysis/benchmark.py |
import argparse
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='arguments in dict')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/analysis/print_config.py |
import argparse
from mmcv import load
from scipy.special import softmax
from mmaction.core.evaluation import (get_weighted_score, mean_class_accuracy,
top_k_accuracy)
def parse_args():
parser = argparse.ArgumentParser(description='Fusing multiple scores')
parser.add_argument(
'--scores',
nargs='+',
help='list of scores',
default=['demo/fuse/rgb.pkl', 'demo/fuse/flow.pkl'])
parser.add_argument(
'--coefficients',
nargs='+',
type=float,
help='coefficients of each score file',
default=[1.0, 1.0])
parser.add_argument(
'--datalist',
help='list of testing data',
default='demo/fuse/data_list.txt')
parser.add_argument('--apply-softmax', action='store_true')
args = parser.parse_args()
return args
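# Example invocation using the default demo files referenced above; the
# coefficients weight each score file before the late fusion.
#   $ python tools/analysis/report_accuracy.py \
#       --scores demo/fuse/rgb.pkl demo/fuse/flow.pkl \
#       --coefficients 1.0 1.0 --datalist demo/fuse/data_list.txt \
#       --apply-softmax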
def main():
args = parse_args()
assert len(args.scores) == len(args.coefficients)
score_list = args.scores
score_list = [load(f) for f in score_list]
if args.apply_softmax:
def apply_softmax(scores):
return [softmax(score) for score in scores]
score_list = [apply_softmax(scores) for scores in score_list]
weighted_scores = get_weighted_score(score_list, args.coefficients)
data = open(args.datalist).readlines()
labels = [int(x.strip().split()[-1]) for x in data]
mean_class_acc = mean_class_accuracy(weighted_scores, labels)
top_1_acc, top_5_acc = top_k_accuracy(weighted_scores, labels, (1, 5))
print(f'Mean Class Accuracy: {mean_class_acc:.04f}')
print(f'Top 1 Accuracy: {top_1_acc:.04f}')
print(f'Top 5 Accuracy: {top_5_acc:.04f}')
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/analysis/report_accuracy.py |
"""This file is for benchmark dataloading process. The command line to run this
file is:
$ python -m cProfile -o program.prof tools/analysis/bench_processing.py
configs/task/method/[config filename]
It use cProfile to record cpu running time and output to program.prof
To visualize cProfile output program.prof, use Snakeviz and run:
$ snakeviz program.prof
"""
import argparse
import os
import mmcv
from mmcv import Config
from mmaction import __version__
from mmaction.datasets import build_dataloader, build_dataset
from mmaction.utils import get_root_logger
def main():
parser = argparse.ArgumentParser(description='Benchmark dataloading')
parser.add_argument('config', help='train config file path')
args = parser.parse_args()
cfg = Config.fromfile(args.config)
# init logger before other steps
logger = get_root_logger()
logger.info(f'MMAction2 Version: {__version__}')
logger.info(f'Config: {cfg.text}')
# create bench data list
ann_file_bench = 'benchlist.txt'
if not os.path.exists(ann_file_bench):
with open(cfg.ann_file_train) as f:
lines = f.readlines()[:256]
with open(ann_file_bench, 'w') as f1:
f1.writelines(lines)
cfg.data.train.ann_file = ann_file_bench
dataset = build_dataset(cfg.data.train)
data_loader = build_dataloader(
dataset,
videos_per_gpu=cfg.data.videos_per_gpu,
workers_per_gpu=0,
num_gpus=1,
dist=False)
# Start progress bar after first 5 batches
prog_bar = mmcv.ProgressBar(
len(dataset) - 5 * cfg.data.videos_per_gpu, start=False)
for i, data in enumerate(data_loader):
if i == 5:
prog_bar.start()
for _ in data['imgs']:
if i < 5:
continue
prog_bar.update()
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/analysis/bench_processing.py |
import argparse
from mmcv import Config
from mmaction.models import build_recognizer
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = argparse.ArgumentParser(description='Train a recognizer')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[340, 256],
help='input image size')
args = parser.parse_args()
return args
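# Example invocations (config paths are hypothetical). Two values are expanded
# to an input shape of (1, 3, H, W); pass five values, e.g. (N, C, T, H, W),
# for 3D recognizers.
#   $ python tools/analysis/get_flops.py configs/recognition/tsn_config.py \
#       --shape 224 224
#   $ python tools/analysis/get_flops.py configs/recognition/i3d_config.py \
#       --shape 1 3 32 224 224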
def main():
args = parse_args()
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (
1,
3,
) + tuple(args.shape)
elif len(args.shape) == 4:
# n, c, h, w = args.shape
input_shape = tuple(args.shape)
elif len(args.shape) == 5:
# n, c, t, h, w = args.shape
input_shape = tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = Config.fromfile(args.config)
model = build_recognizer(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
model = model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/analysis/get_flops.py |
import argparse
import os
import os.path as osp
import mmcv
import numpy as np
from mmaction.core import ActivityNetDetection
args = None
def cuhk17_top1():
"""Assign label for each proposal with the cuhk17 result, which is the #2
entry in http://activity-net.org/challenges/2017/evaluation.html."""
if not osp.exists('cuhk_anet17_pred.json'):
os.system('wget https://download.openmmlab.com/'
'mmaction/localization/cuhk_anet17_pred.json')
proposal = mmcv.load(args.proposal)
results = proposal['results']
cuhk_pred = mmcv.load('cuhk_anet17_pred.json')['results']
def get_topk(preds, k):
preds.sort(key=lambda x: x['score'])
return preds[-k:]
for k, v in results.items():
action_pred = cuhk_pred[k]
top1 = get_topk(action_pred, 1)
top1_label = top1[0]['label']
new_value = []
for item in v:
x = dict(label=top1_label)
x.update(item)
new_value.append(x)
results[k] = new_value
proposal['results'] = results
mmcv.dump(proposal, args.det_output)
cls_funcs = {'cuhk17_top1': cuhk17_top1}
def parse_args():
parser = argparse.ArgumentParser(description='Report detection mAP for'
'ActivityNet proposal file')
parser.add_argument('--proposal', type=str, help='proposal file')
parser.add_argument(
'--gt',
type=str,
default='data/ActivityNet/'
'anet_anno_val.json',
help='groundtruth file')
parser.add_argument(
'--cls',
type=str,
default='cuhk17_top1',
choices=['cuhk17_top1'],
help='the way to assign label for each '
'proposal')
parser.add_argument(
'--det-output',
type=str,
default='det_result.json',
help='the path to store detection results')
args = parser.parse_args()
return args
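# Example invocation (the proposal file name is hypothetical): assign the
# CUHK17 top-1 video-level label to every proposal and report detection mAP.
#   $ python tools/analysis/report_map.py --proposal results.json \
#       --gt data/ActivityNet/anet_anno_val.json --cls cuhk17_top1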
def main():
global args, cls_funcs
args = parse_args()
func = cls_funcs[args.cls]
func()
anet_detection = ActivityNetDetection(
args.gt,
args.det_output,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
verbose=True)
mAP, average_mAP = anet_detection.evaluate()
print('[RESULTS] Performance on ActivityNet detection task.\n'
f'mAP: {mAP}\nAverage-mAP: {average_mAP}')
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/analysis/report_map.py |
import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def cal_train_time(log_dicts, args):
for i, log_dict in enumerate(log_dicts):
print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}')
all_times = []
for epoch in log_dict.keys():
if args.include_outliers:
all_times.append(log_dict[epoch]['time'])
else:
all_times.append(log_dict[epoch]['time'][1:])
all_times = np.array(all_times)
epoch_ave_time = all_times.mean(-1)
slowest_epoch = epoch_ave_time.argmax()
fastest_epoch = epoch_ave_time.argmin()
std_over_epoch = epoch_ave_time.std()
print(f'slowest epoch {slowest_epoch + 1}, '
f'average time is {epoch_ave_time[slowest_epoch]:.4f}')
print(f'fastest epoch {fastest_epoch + 1}, '
f'average time is {epoch_ave_time[fastest_epoch]:.4f}')
print(f'time std over epochs is {std_over_epoch:.4f}')
print(f'average iter time: {np.mean(all_times):.4f} s/iter')
print()
def plot_curve(log_dicts, args):
if args.backend is not None:
plt.switch_backend(args.backend)
sns.set_style(args.style)
# if legend is None, use {filename}_{key} as legend
legend = args.legend
if legend is None:
legend = []
for json_log in args.json_logs:
for metric in args.keys:
legend.append(f'{json_log}_{metric}')
assert len(legend) == (len(args.json_logs) * len(args.keys))
metrics = args.keys
num_metrics = len(metrics)
for i, log_dict in enumerate(log_dicts):
epochs = list(log_dict.keys())
for j, metric in enumerate(metrics):
print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
if metric not in log_dict[epochs[0]]:
raise KeyError(
f'{args.json_logs[i]} does not contain metric {metric}')
xs = []
ys = []
num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1]
for epoch in epochs:
iters = log_dict[epoch]['iter']
if log_dict[epoch]['mode'][-1] == 'val':
iters = iters[:-1]
xs.append(np.array(iters) + (epoch - 1) * num_iters_per_epoch)
ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
xs = np.concatenate(xs)
ys = np.concatenate(ys)
plt.xlabel('iter')
plt.plot(xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
plt.legend()
if args.title is not None:
plt.title(args.title)
if args.out is None:
plt.show()
else:
print(f'save curve to: {args.out}')
plt.savefig(args.out)
plt.cla()
def add_plot_parser(subparsers):
parser_plt = subparsers.add_parser(
'plot_curve', help='parser for plotting curves')
parser_plt.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_plt.add_argument(
'--keys',
type=str,
nargs='+',
default=['top1_acc'],
help='the metric that you want to plot')
parser_plt.add_argument('--title', type=str, help='title of figure')
parser_plt.add_argument(
'--legend',
type=str,
nargs='+',
default=None,
help='legend of each plot')
parser_plt.add_argument(
'--backend', type=str, default=None, help='backend of plt')
parser_plt.add_argument(
'--style', type=str, default='dark', help='style of plt')
parser_plt.add_argument('--out', type=str, default=None)
def add_time_parser(subparsers):
parser_time = subparsers.add_parser(
'cal_train_time',
help='parser for computing the average time per training iteration')
parser_time.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_time.add_argument(
'--include-outliers',
action='store_true',
help='include the first value of every epoch when computing '
'the average time')
def parse_args():
parser = argparse.ArgumentParser(description='Analyze Json Log')
# currently only support plot curve and calculate average train time
subparsers = parser.add_subparsers(dest='task', help='task parser')
add_plot_parser(subparsers)
add_time_parser(subparsers)
args = parser.parse_args()
return args
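# Example invocations (the log file name and metric keys are illustrative and
# depend on what was actually logged):
#   $ python tools/analysis/analyze_logs.py plot_curve some_run.log.json \
#       --keys loss_cls top1_acc --out curves.pdf
#   $ python tools/analysis/analyze_logs.py cal_train_time some_run.log.json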
def load_json_logs(json_logs):
# load and convert json_logs to log_dict, key is epoch, value is a sub dict
    # keys of sub dict are different metrics, e.g. memory, top1_acc
# value of sub dict is a list of corresponding values of all iterations
log_dicts = [dict() for _ in json_logs]
for json_log, log_dict in zip(json_logs, log_dicts):
with open(json_log, 'r') as log_file:
for line in log_file:
log = json.loads(line.strip())
# skip lines without `epoch` field
if 'epoch' not in log:
continue
epoch = log.pop('epoch')
if epoch not in log_dict:
log_dict[epoch] = defaultdict(list)
for k, v in log.items():
log_dict[epoch][k].append(v)
return log_dicts
def main():
args = parse_args()
json_logs = args.json_logs
for json_log in json_logs:
assert json_log.endswith('.json')
log_dicts = load_json_logs(json_logs)
eval(args.task)(log_dicts, args)
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/analysis/analyze_logs.py |
import argparse
import glob
import os
import os.path as osp
from multiprocessing import Pool
import mmcv
def extract_audio_wav(line):
"""Extract the audio wave from video streams using FFMPEG."""
video_id, _ = osp.splitext(osp.basename(line))
video_dir = osp.dirname(line)
video_rel_dir = osp.relpath(video_dir, args.root)
dst_dir = osp.join(args.dst_root, video_rel_dir)
os.popen(f'mkdir -p {dst_dir}')
try:
if osp.exists(f'{dst_dir}/{video_id}.wav'):
return
cmd = f'ffmpeg -i ./{line} -map 0:a -y {dst_dir}/{video_id}.wav'
os.popen(cmd)
except BaseException:
with open('extract_wav_err_file.txt', 'a+') as f:
f.write(f'{line}\n')
def parse_args():
parser = argparse.ArgumentParser(description='Extract audios')
parser.add_argument('root', type=str, help='source video directory')
parser.add_argument('dst_root', type=str, help='output audio directory')
parser.add_argument(
'--level', type=int, default=2, help='directory level of data')
parser.add_argument(
'--ext',
type=str,
default='mp4',
choices=['avi', 'mp4', 'webm'],
help='video file extensions')
parser.add_argument(
'--num-worker', type=int, default=8, help='number of workers')
args = parser.parse_args()
return args
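# Example invocation (directories are hypothetical): extract .wav audio from
# all two-level mp4 videos with 8 worker processes.
#   $ python tools/data/extract_audio.py data/kinetics400/videos_train \
#       data/kinetics400/audios --level 2 --ext mp4 --num-worker 8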
if __name__ == '__main__':
args = parse_args()
    mmcv.mkdir_or_exist(args.dst_root)
    print('Reading videos from folder: ', args.root)
    print('Extension of videos: ', args.ext)
    fullpath_list = glob.glob(args.root + '/*' * args.level + '.' +
                              args.ext)
    done_fullpath_list = glob.glob(args.dst_root + '/*' * args.level + '.wav')
print('Total number of videos found: ', len(fullpath_list))
    print('Total number of videos already extracted: ',
          len(done_fullpath_list))
pool = Pool(args.num_worker)
pool.map(extract_audio_wav, fullpath_list)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/extract_audio.py |
import argparse
import mmcv
def parse_args():
parser = argparse.ArgumentParser(
description='Convert txt annotation list to json')
parser.add_argument(
'annofile', type=str, help='the txt annotation file to convert')
parser.add_argument(
'--format',
type=str,
default='rawframes',
choices=['rawframes', 'videos'],
help='the format of the txt annotation file')
parser.add_argument(
'--output',
type=str,
default=None,
help=(
'the output file name, use annofile.replace(\'.txt\', \'.json\') '
'if the arg value is None'))
args = parser.parse_args()
return args
def lines2dictlist(lines, format):
"""Convert lines in 'txt' format to dictionaries in 'json' format.
Currently support single-label and multi-label.
Example of a single-label rawframes annotation txt file:
.. code-block:: txt
(frame_dir num_frames label)
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
Example of a multi-label rawframes annotation txt file:
.. code-block:: txt
(frame_dir num_frames label1 label2 ...)
some/directory-1 163 1 3 5
some/directory-2 122 1 2
some/directory-3 258 2
Example of a single-label videos annotation txt file:
.. code-block:: txt
(filename label)
some/path/000.mp4 1
some/path/001.mp4 1
some/path/002.mp4 2
Example of a multi-label videos annotation txt file:
.. code-block:: txt
(filename label1 label2 ...)
some/path/000.mp4 1 3 5
some/path/001.mp4 1 4 8
some/path/002.mp4 2 4 9
Args:
lines (list): List of lines in 'txt' label format.
format (str): Data format, choices are 'rawframes' and 'videos'.
Returns:
list[dict]: For rawframes format, each dict has keys: frame_dir,
            total_frames, label; for videos format, each dict has keys:
filename, label.
"""
lines = [x.split() for x in lines]
if format == 'rawframes':
data = [
dict(
frame_dir=line[0],
total_frames=int(line[1]),
label=[int(x) for x in line[2:]]) for line in lines
]
elif format == 'videos':
data = [
dict(filename=line[0], label=[int(x) for x in line[1:]])
for line in lines
]
return data
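# A minimal sketch of the conversion above (the paths are made up):
#   >>> lines2dictlist(['some/directory-1 163 1'], 'rawframes')
#   [{'frame_dir': 'some/directory-1', 'total_frames': 163, 'label': [1]}]
#   >>> lines2dictlist(['some/path/000.mp4 1 3 5'], 'videos')
#   [{'filename': 'some/path/000.mp4', 'label': [1, 3, 5]}]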
if __name__ == '__main__':
# convert txt anno list to json
args = parse_args()
lines = open(args.annofile).readlines()
lines = [x.strip() for x in lines]
result = lines2dictlist(lines, args.format)
if args.output is None:
args.output = args.annofile.replace('.txt', '.json')
mmcv.dump(result, args.output)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/anno_txt2json.py |
import argparse
import glob
import json
import os.path as osp
import random
from mmcv.runner import set_random_seed
from tools.data.anno_txt2json import lines2dictlist
from tools.data.parse_file_list import (parse_directory, parse_hmdb51_split,
parse_jester_splits,
parse_kinetics_splits,
parse_mit_splits, parse_mmit_splits,
parse_sthv1_splits, parse_sthv2_splits,
parse_ucf101_splits)
def parse_args():
parser = argparse.ArgumentParser(description='Build file list')
parser.add_argument(
'dataset',
type=str,
choices=[
'ucf101', 'kinetics10', 'kinetics400', 'kinetics600', 'kinetics700', 'thumos14',
'sthv1', 'sthv2', 'mit', 'mmit', 'activitynet', 'hmdb51', 'jester'
],
help='dataset to be built file list')
parser.add_argument(
'src_folder', type=str, help='root directory for the frames or videos')
parser.add_argument(
'--rgb-prefix', type=str, default='img_', help='prefix of rgb frames')
parser.add_argument(
'--flow-x-prefix',
type=str,
default='flow_x_',
help='prefix of flow x frames')
parser.add_argument(
'--flow-y-prefix',
type=str,
default='flow_y_',
help='prefix of flow y frames')
parser.add_argument(
'--num-split',
type=int,
default=3,
help='number of split to file list')
parser.add_argument(
'--subset',
type=str,
default='train',
choices=['train', 'val', 'test'],
help='subset to generate file list')
parser.add_argument(
'--level',
type=int,
default=2,
choices=[1, 2],
help='directory level of data')
parser.add_argument(
'--format',
type=str,
default='rawframes',
choices=['rawframes', 'videos'],
help='data format')
parser.add_argument(
'--out-root-path',
type=str,
default='data/',
help='root path for output')
parser.add_argument(
'--output-format',
type=str,
default='txt',
choices=['txt', 'json'],
help='built file list format')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--shuffle',
action='store_true',
default=False,
help='whether to shuffle the file list')
args = parser.parse_args()
return args
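# Example invocation (paths are hypothetical): build a shuffled rawframe list
# for the kinetics400 training subset. Kinetics provides a single
# train/val/test split, so --num-split 1 matches the assertion in main().
#   $ python tools/data/build_file_list.py kinetics400 \
#       data/kinetics400/rawframes_train --num-split 1 --level 2 \
#       --format rawframes --subset train --shuffle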
def build_file_list(splits, frame_info, shuffle=False):
"""Build file list for a certain data split.
Args:
splits (tuple): Data split to generate file list.
frame_info (dict): Dict mapping from frames to path. e.g.,
'Skiing/v_Skiing_g18_c02': ('data/ucf101/rawframes/Skiing/v_Skiing_g18_c02', 0, 0). # noqa: E501
shuffle (bool): Whether to shuffle the file list.
Returns:
tuple: RGB file list for training and testing, together with
Flow file list for training and testing.
"""
def build_list(split):
"""Build RGB and Flow file list with a given split.
Args:
            split (list): Split to generate the file list from.
Returns:
tuple[list, list]: (rgb_list, flow_list), rgb_list is the
generated file list for rgb, flow_list is the generated
file list for flow.
"""
rgb_list, flow_list = list(), list()
for item in split:
if item[0] not in frame_info:
continue
elif frame_info[item[0]][1] > 0:
# rawframes
rgb_cnt = frame_info[item[0]][1]
flow_cnt = frame_info[item[0]][2]
if isinstance(item[1], int):
rgb_list.append(f'{item[0]} {rgb_cnt} {item[1]}\n')
flow_list.append(f'{item[0]} {flow_cnt} {item[1]}\n')
elif isinstance(item[1], list):
# only for multi-label datasets like mmit
rgb_list.append(f'{item[0]} {rgb_cnt} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
                    flow_list.append(f'{item[0]} {flow_cnt} ' +
                                     ' '.join([str(digit)
                                               for digit in item[1]]) + '\n')
else:
raise ValueError(
'frame_info should be ' +
'[`video`(str), `label`(int)|`labels(list[int])`')
else:
# videos
if isinstance(item[1], int):
rgb_list.append(f'{frame_info[item[0]][0]} {item[1]}\n')
flow_list.append(f'{frame_info[item[0]][0]} {item[1]}\n')
elif isinstance(item[1], list):
# only for multi-label datasets like mmit
rgb_list.append(f'{frame_info[item[0]][0]} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
flow_list.append(
f'{frame_info[item[0]][0]} ' +
' '.join([str(digit) for digit in item[1]]) + '\n')
else:
raise ValueError(
'frame_info should be ' +
'[`video`(str), `label`(int)|`labels(list[int])`')
if shuffle:
random.shuffle(rgb_list)
random.shuffle(flow_list)
return rgb_list, flow_list
train_rgb_list, train_flow_list = build_list(splits[0])
test_rgb_list, test_flow_list = build_list(splits[1])
return (train_rgb_list, test_rgb_list), (train_flow_list, test_flow_list)
def main():
args = parse_args()
if args.seed is not None:
print(f'Set random seed to {args.seed}')
set_random_seed(args.seed)
if args.format == 'rawframes':
frame_info = parse_directory(
args.src_folder,
rgb_prefix=args.rgb_prefix,
flow_x_prefix=args.flow_x_prefix,
flow_y_prefix=args.flow_y_prefix,
level=args.level)
elif args.format == 'videos':
if args.level == 1:
# search for one-level directory
video_list = glob.glob(osp.join(args.src_folder, '*'))
elif args.level == 2:
# search for two-level directory
video_list = glob.glob(osp.join(args.src_folder, '*', '*'))
else:
raise ValueError(f'level must be 1 or 2, but got {args.level}')
frame_info = {}
for video in video_list:
video_path = osp.relpath(video, args.src_folder)
# video_id: (video_relative_path, -1, -1)
frame_info[osp.splitext(video_path)[0]] = (video_path, -1, -1)
else:
raise NotImplementedError('only rawframes and videos are supported')
if args.dataset == 'ucf101':
splits = parse_ucf101_splits(args.level)
elif args.dataset == 'sthv1':
splits = parse_sthv1_splits(args.level)
elif args.dataset == 'sthv2':
splits = parse_sthv2_splits(args.level)
elif args.dataset == 'mit':
splits = parse_mit_splits()
elif args.dataset == 'mmit':
splits = parse_mmit_splits()
elif args.dataset in ['kinetics10', 'kinetics400', 'kinetics600', 'kinetics700']:
splits = parse_kinetics_splits(args.level, args.dataset)
elif args.dataset == 'hmdb51':
splits = parse_hmdb51_split(args.level)
elif args.dataset == 'jester':
splits = parse_jester_splits(args.level)
else:
raise ValueError(
f"Supported datasets are 'ucf101, sthv1, sthv2', 'jester', "
f"'mmit', 'mit', 'kinetics10', 'kinetics400', 'kinetics600', 'kinetics700', but "
f'got {args.dataset}')
assert len(splits) == args.num_split
out_path = args.out_root_path + args.dataset
if len(splits) > 1:
for i, split in enumerate(splits):
file_lists = build_file_list(
split, frame_info, shuffle=args.shuffle)
train_name = f'{args.dataset}_train_split_{i+1}_{args.format}.txt'
val_name = f'{args.dataset}_val_split_{i+1}_{args.format}.txt'
if args.output_format == 'txt':
with open(osp.join(out_path, train_name), 'w') as f:
f.writelines(file_lists[0][0])
with open(osp.join(out_path, val_name), 'w') as f:
f.writelines(file_lists[0][1])
elif args.output_format == 'json':
train_list = lines2dictlist(file_lists[0][0], args.format)
val_list = lines2dictlist(file_lists[0][1], args.format)
train_name = train_name.replace('.txt', '.json')
val_name = val_name.replace('.txt', '.json')
with open(osp.join(out_path, train_name), 'w') as f:
json.dump(train_list, f)
with open(osp.join(out_path, val_name), 'w') as f:
json.dump(val_list, f)
else:
lists = build_file_list(splits[0], frame_info, shuffle=args.shuffle)
if args.subset == 'train':
ind = 0
elif args.subset == 'val':
ind = 1
elif args.subset == 'test':
ind = 2
else:
raise ValueError(f"subset must be in ['train', 'val', 'test'], "
f'but got {args.subset}.')
filename = f'{args.dataset}_{args.subset}_list_{args.format}.txt'
if args.output_format == 'txt':
with open(osp.join(out_path, filename), 'w') as f:
f.writelines(lists[0][ind])
elif args.output_format == 'json':
data_list = lines2dictlist(lists[0][ind], args.format)
filename = filename.replace('.txt', '.json')
with open(osp.join(out_path, filename), 'w') as f:
json.dump(data_list, f)
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/build_file_list.py |
import argparse
import glob
import os
import os.path as osp
import sys
import warnings
from multiprocessing import Pool
import mmcv
import numpy as np
def extract_frame(vid_item):
"""Generate optical flow using dense flow.
Args:
vid_item (list): Video item containing video full path,
video (short) path, video id.
Returns:
bool: Whether generate optical flow successfully.
"""
full_path, vid_path, vid_id, method, task = vid_item
if '/' in vid_path:
act_name = osp.basename(osp.dirname(vid_path))
out_full_path = osp.join(args.out_dir, act_name)
else:
out_full_path = args.out_dir
if task == 'rgb':
if args.use_opencv:
# Not like using denseflow,
# Use OpenCV will not make a sub directory with the video name
video_name = osp.splitext(osp.basename(vid_path))[0]
out_full_path = osp.join(out_full_path, video_name)
vr = mmcv.VideoReader(full_path)
for i in range(len(vr)):
if vr[i] is not None:
                    h, w, c = np.shape(vr[i])  # np.shape gives (h, w, c)
if args.new_short == 0:
if args.new_width == 0 or args.new_height == 0:
# Keep original shape
out_img = vr[i]
else:
out_img = mmcv.imresize(vr[i], (args.new_width,
args.new_height))
else:
if min(h, w) == h:
new_h = args.new_short
new_w = int((new_h / h) * w)
else:
new_w = args.new_short
new_h = int((new_w / w) * h)
                        # mmcv.imresize expects the target size as (w, h)
                        out_img = mmcv.imresize(vr[i], (new_w, new_h))
mmcv.imwrite(out_img,
f'{out_full_path}/img_{i + 1:05d}.jpg')
else:
warnings.warn(
'Length inconsistent!'
f'Early stop with {i + 1} out of {len(vr)} frames.')
break
else:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -nw={args.new_width} -nh={args.new_height} -v')
else:
cmd = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
os.system(cmd)
elif task == 'flow':
if args.input_frames:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
                    f' -nw={args.new_width} -nh={args.new_height} -v --if')
else:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v --if')
else:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
                    f' -nw={args.new_width} -nh={args.new_height} -v')
else:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v')
os.system(cmd)
else:
if args.new_short == 0:
cmd_rgb = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -nw={args.new_width} -nh={args.new_height} -v')
cmd_flow = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -nw={args.new_width} -nh={args.new_height} -v')
else:
cmd_rgb = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
cmd_flow = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v')
os.system(cmd_rgb)
os.system(cmd_flow)
print(f'{task} {vid_id} {vid_path} {method} done')
sys.stdout.flush()
return True
def parse_args():
parser = argparse.ArgumentParser(description='extract optical flows')
parser.add_argument('src_dir', type=str, help='source video directory')
parser.add_argument('out_dir', type=str, help='output rawframe directory')
parser.add_argument(
'--task',
type=str,
default='flow',
choices=['rgb', 'flow', 'both'],
help='which type of frames to be extracted')
parser.add_argument(
'--level',
type=int,
choices=[1, 2],
default=2,
help='directory level of data')
parser.add_argument(
'--num-worker',
type=int,
default=8,
help='number of workers to build rawframes')
parser.add_argument(
'--flow-type',
type=str,
default=None,
choices=[None, 'tvl1', 'warp_tvl1', 'farn', 'brox'],
help='flow type to be generated')
parser.add_argument(
'--out-format',
type=str,
default='jpg',
choices=['jpg', 'h5', 'png'],
help='output format')
parser.add_argument(
'--ext',
type=str,
default='avi',
choices=['avi', 'mp4', 'webm'],
help='video file extensions')
parser.add_argument(
'--mixed-ext',
action='store_true',
help='process video files with mixed extensions')
parser.add_argument(
'--new-width', type=int, default=0, help='resize image width')
parser.add_argument(
'--new-height', type=int, default=0, help='resize image height')
parser.add_argument(
'--new-short',
type=int,
default=0,
help='resize image short side length keeping ratio')
parser.add_argument('--num-gpu', type=int, default=8, help='number of GPU')
parser.add_argument(
'--resume',
action='store_true',
default=False,
help='resume optical flow extraction instead of overwriting')
parser.add_argument(
'--use-opencv',
action='store_true',
help='Whether to use opencv to extract rgb frames')
parser.add_argument(
'--input-frames',
action='store_true',
help='Whether to extract flow frames based on rgb frames')
args = parser.parse_args()
return args
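# Example invocations (directories are hypothetical):
#   # RGB frames only, decoded with OpenCV, short side resized to 256
#   $ python tools/data/build_rawframes.py data/ucf101/videos \
#       data/ucf101/rawframes --task rgb --level 2 --ext avi \
#       --use-opencv --new-short 256
#   # TV-L1 optical flow extracted with denseflow
#   $ python tools/data/build_rawframes.py data/ucf101/videos \
#       data/ucf101/rawframes --task flow --level 2 --ext avi --flow-type tvl1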
if __name__ == '__main__':
args = parse_args()
if not osp.isdir(args.out_dir):
print(f'Creating folder: {args.out_dir}')
os.makedirs(args.out_dir)
if args.level == 2:
classes = os.listdir(args.src_dir)
for classname in classes:
new_dir = osp.join(args.out_dir, classname)
if not osp.isdir(new_dir):
print(f'Creating folder: {new_dir}')
os.makedirs(new_dir)
if args.input_frames:
print('Reading rgb frames from folder: ', args.src_dir)
fullpath_list = glob.glob(args.src_dir + '/*' * args.level)
done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level)
print('Total number of rgb frame folders found: ', len(fullpath_list))
else:
print('Reading videos from folder: ', args.src_dir)
if args.mixed_ext:
print('Extension of videos is mixed')
fullpath_list = glob.glob(args.src_dir + '/*' * args.level)
done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level)
else:
print('Extension of videos: ', args.ext)
fullpath_list = glob.glob(args.src_dir + '/*' * args.level + '.' +
args.ext)
done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level)
print('Total number of videos found: ', len(fullpath_list))
if args.resume:
fullpath_list = set(fullpath_list).difference(set(done_fullpath_list))
fullpath_list = list(fullpath_list)
print('Resuming. number of videos to be done: ', len(fullpath_list))
if args.level == 2:
vid_list = list(
map(
lambda p: osp.join(
osp.basename(osp.dirname(p)), osp.basename(p)),
fullpath_list))
elif args.level == 1:
vid_list = list(map(lambda p: osp.basename(p), fullpath_list))
pool = Pool(args.num_worker)
pool.map(
extract_frame,
zip(fullpath_list, vid_list, range(len(vid_list)),
len(vid_list) * [args.flow_type],
len(vid_list) * [args.task]))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/build_rawframes.py |
import os, argparse
import cv2
from tqdm import tqdm
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Check the data')
parser.add_argument('video_path', type=str, help='The video path')
parser.add_argument('dataset_list', type=str, help='The list file of dataset.')
parser.add_argument('split', type=str, choices=['train', 'test', 'val'], help='The split of the data.')
args = parser.parse_args()
# parse the filelist into list
filelist, labels = [], []
assert os.path.exists(args.dataset_list), 'File list does not exist! %s'%(args.dataset_list)
with open(args.dataset_list, 'r') as f:
for line in f.readlines():
filename, label = line.strip().split(' ')
filelist.append(filename)
labels.append(label)
# checking
valid_files, invalid_files, valid_labels = [], [], []
for filename, label in tqdm(zip(filelist, labels), total=len(filelist), desc=args.split):
videofile = os.path.join(args.video_path, filename)
if not os.path.exists(videofile):
# file not exist
invalid_files.append(filename)
else:
# file cannot be read
cap = cv2.VideoCapture(videofile)
ret, frame = cap.read()
if not ret:
invalid_files.append(filename)
else:
valid_files.append(filename)
valid_labels.append(label)
cap.release()
# print
print('Valid file number: %d, Invalid number: %d'%(len(valid_files), len(invalid_files)))
if len(invalid_files) > 0:
tmp_listfile = os.path.join(os.path.dirname(args.dataset_list), args.dataset_list.split('/')[-1][:-4] + '_temp.txt')
with open(tmp_listfile, 'w') as f:
for filename, label in zip(valid_files, valid_labels):
f.writelines('%s %s\n'%(filename, label))
print('\nFollowing files are invalid: ')
for filename in invalid_files:
            print(filename)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/data_check.py |
import argparse
import glob
import os
import os.path as osp
import sys
from multiprocessing import Pool
def encode_video(frame_dir_item):
"""Encode frames to video using ffmpeg.
Args:
frame_dir_item (list): Rawframe item containing raw frame directory
full path, rawframe directory (short) path, rawframe directory id.
Returns:
bool: Whether synthesize video successfully.
"""
full_path, frame_dir_path, frame_dir_id = frame_dir_item
out_full_path = args.out_dir
img_name_tmpl = args.filename_tmpl + '.' + args.in_format
img_path = osp.join(full_path, img_name_tmpl)
out_vid_name = frame_dir_path + '.' + args.ext
out_vid_path = osp.join(out_full_path, out_vid_name)
cmd = osp.join(
f"ffmpeg -start_number {args.start_idx} -r {args.fps} -i '{img_path}' "
f"-vcodec {args.vcodec} '{out_vid_path}'")
os.system(cmd)
print(f'{frame_dir_id} {frame_dir_path} done')
sys.stdout.flush()
return True
def parse_args():
parser = argparse.ArgumentParser(description='synthesize videos')
parser.add_argument('src_dir', type=str, help='source rawframe directory')
parser.add_argument('out_dir', type=str, help='output video directory')
parser.add_argument(
'--fps', type=int, default=30, help='fps of videos to be synthesized')
parser.add_argument(
'--level',
type=int,
choices=[1, 2],
default=2,
help='directory level of data')
parser.add_argument(
'--num-worker',
type=int,
default=8,
help='number of workers to build videos')
parser.add_argument(
'--in-format',
type=str,
default='jpg',
choices=['jpg', 'png'],
help='input format')
parser.add_argument(
'--start-idx', type=int, default=0, help='starting index of rawframes')
parser.add_argument(
'--filename-tmpl',
type=str,
default='img_%05d',
help='filename template of rawframes')
parser.add_argument(
'--vcodec', type=str, default='mpeg4', help='coding method of videos')
parser.add_argument(
'--ext',
type=str,
default='mp4',
choices=['mp4', 'avi'],
help='video file extensions')
parser.add_argument('--num-gpu', type=int, default=8, help='number of GPU')
parser.add_argument(
'--resume',
action='store_true',
default=False,
help='resume optical flow extraction instead of overwriting')
args = parser.parse_args()
return args
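# Example invocation (directories are hypothetical): re-encode extracted
# rawframes back into mp4 videos at 30 fps.
#   $ python tools/data/build_videos.py data/ucf101/rawframes \
#       data/ucf101/videos_rebuilt --fps 30 --level 2 \
#       --filename-tmpl 'img_%05d' --start-idx 1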
if __name__ == '__main__':
args = parse_args()
if not osp.isdir(args.out_dir):
print(f'Creating folder: {args.out_dir}')
os.makedirs(args.out_dir)
if args.level == 2:
classes = os.listdir(args.src_dir)
for classname in classes:
new_dir = osp.join(args.out_dir, classname)
if not osp.isdir(new_dir):
print(f'Creating folder: {new_dir}')
os.makedirs(new_dir)
print('Reading rgb frames from folder: ', args.src_dir)
print('Input format of rgb frames: ', args.in_format)
fullpath_list = glob.glob(args.src_dir + '/*' * args.level)
done_fullpath_list = glob.glob(args.src_dir + '/*' * args.level + '.' +
args.ext)
print('Total number of rgb frame folders found: ', len(fullpath_list))
if args.resume:
fullpath_list = set(fullpath_list).difference(set(done_fullpath_list))
fullpath_list = list(fullpath_list)
print('Resuming. number of videos to be synthesized: ',
len(fullpath_list))
if args.level == 2:
frame_dir_list = list(
map(
lambda p: osp.join(
osp.basename(osp.dirname(p)), osp.basename(p)),
fullpath_list))
elif args.level == 1:
frame_dir_list = list(map(lambda p: osp.basename(p), fullpath_list))
pool = Pool(args.num_worker)
pool.map(encode_video,
zip(fullpath_list, frame_dir_list, range(len(frame_dir_list))))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/build_videos.py |
import csv
import fnmatch
import glob
import json
import os
import os.path as osp
def parse_directory(path,
rgb_prefix='img_',
flow_x_prefix='flow_x_',
flow_y_prefix='flow_y_',
level=1):
"""Parse directories holding extracted frames from standard benchmarks.
Args:
path (str): Directory path to parse frames.
rgb_prefix (str): Prefix of generated rgb frames name.
default: 'img_'.
flow_x_prefix (str): Prefix of generated flow x name.
default: `flow_x_`.
flow_y_prefix (str): Prefix of generated flow y name.
default: `flow_y_`.
level (int): Directory level for glob searching. Options are 1 and 2.
default: 1.
Returns:
dict: frame info dict with video id as key and tuple(path(str),
rgb_num(int), flow_x_num(int)) as value.
"""
print(f'parse frames under directory {path}')
if level == 1:
# Only search for one-level directory
def locate_directory(x):
return osp.basename(x)
frame_dirs = glob.glob(osp.join(path, '*'))
elif level == 2:
# search for two-level directory
def locate_directory(x):
return osp.join(osp.basename(osp.dirname(x)), osp.basename(x))
frame_dirs = glob.glob(osp.join(path, '*', '*'))
else:
raise ValueError('level can be only 1 or 2')
def count_files(directory, prefix_list):
"""Count file number with a given directory and prefix.
Args:
            directory (str): Data directory to be searched.
            prefix_list (list): List of prefixes.
        Returns:
            list[int]: Number of files with each prefix.
"""
lst = os.listdir(directory)
cnt_list = [len(fnmatch.filter(lst, x + '*')) for x in prefix_list]
return cnt_list
# check RGB
frame_dict = {}
for i, frame_dir in enumerate(frame_dirs):
total_num = count_files(frame_dir,
(rgb_prefix, flow_x_prefix, flow_y_prefix))
dir_name = locate_directory(frame_dir)
num_x = total_num[1]
num_y = total_num[2]
if num_x != num_y:
raise ValueError(f'x and y direction have different number '
f'of flow images in video directory: {frame_dir}')
if i % 200 == 0:
print(f'{i} videos parsed')
frame_dict[dir_name] = (frame_dir, total_num[0], num_x)
print('frame directory analysis done')
return frame_dict
def parse_ucf101_splits(level):
"""Parse UCF-101 dataset into "train", "val", "test" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of UCF-101.
"""
class_index_file = 'data/ucf101/annotations/classInd.txt'
train_file_template = 'data/ucf101/annotations/trainlist{:02d}.txt'
test_file_template = 'data/ucf101/annotations/testlist{:02d}.txt'
with open(class_index_file, 'r') as fin:
class_index = [x.strip().split() for x in fin]
class_mapping = {x[1]: int(x[0]) - 1 for x in class_index}
def line_to_map(line):
"""A function to map line string to video and label.
Args:
            line (str): A line from the UCF-101 split annotation file.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
"""
items = line.strip().split()
video = osp.splitext(items[0])[0]
if level == 1:
video = osp.basename(video)
            label = class_mapping[osp.dirname(items[0])]
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
label = class_mapping[osp.dirname(items[0])]
return video, label
splits = []
for i in range(1, 4):
with open(train_file_template.format(i), 'r') as fin:
train_list = [line_to_map(x) for x in fin]
with open(test_file_template.format(i), 'r') as fin:
test_list = [line_to_map(x) for x in fin]
splits.append((train_list, test_list))
return splits
def parse_jester_splits(level):
"""Parse Jester into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Jester dataset.
"""
# Read the annotations
class_index_file = 'data/jester/annotations/jester-v1-labels.csv'
train_file = 'data/jester/annotations/jester-v1-train.csv'
val_file = 'data/jester/annotations/jester-v1-validation.csv'
test_file = 'data/jester/annotations/jester-v1-test.csv'
with open(class_index_file, 'r') as fin:
class_index = [x.strip() for x in fin]
class_mapping = {class_index[idx]: idx for idx in range(len(class_index))}
def line_to_map(line, test_mode=False):
items = line.strip().split(';')
video = items[0]
if level == 1:
video = osp.basename(video)
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
if test_mode:
return video
else:
label = class_mapping[items[1]]
return video, label
with open(train_file, 'r') as fin:
train_list = [line_to_map(x) for x in fin]
with open(val_file, 'r') as fin:
val_list = [line_to_map(x) for x in fin]
with open(test_file, 'r') as fin:
test_list = [line_to_map(x, test_mode=True) for x in fin]
splits = ((train_list, val_list, test_list), )
return splits
def parse_sthv1_splits(level):
"""Parse Something-Something dataset V1 into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Something-Something V1 dataset.
"""
# Read the annotations
# yapf: disable
class_index_file = 'data/sthv1/annotations/something-something-v1-labels.csv' # noqa
# yapf: enable
train_file = 'data/sthv1/annotations/something-something-v1-train.csv'
val_file = 'data/sthv1/annotations/something-something-v1-validation.csv'
test_file = 'data/sthv1/annotations/something-something-v1-test.csv'
with open(class_index_file, 'r') as fin:
class_index = [x.strip() for x in fin]
class_mapping = {class_index[idx]: idx for idx in range(len(class_index))}
def line_to_map(line, test_mode=False):
items = line.strip().split(';')
video = items[0]
if level == 1:
video = osp.basename(video)
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
if test_mode:
return video
else:
label = class_mapping[items[1]]
return video, label
with open(train_file, 'r') as fin:
train_list = [line_to_map(x) for x in fin]
with open(val_file, 'r') as fin:
val_list = [line_to_map(x) for x in fin]
with open(test_file, 'r') as fin:
test_list = [line_to_map(x, test_mode=True) for x in fin]
splits = ((train_list, val_list, test_list), )
return splits
def parse_sthv2_splits(level):
"""Parse Something-Something dataset V2 into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Something-Something V2 dataset.
"""
# Read the annotations
# yapf: disable
class_index_file = 'data/sthv2/annotations/something-something-v2-labels.json' # noqa
# yapf: enable
train_file = 'data/sthv2/annotations/something-something-v2-train.json'
val_file = 'data/sthv2/annotations/something-something-v2-validation.json'
test_file = 'data/sthv2/annotations/something-something-v2-test.json'
with open(class_index_file, 'r') as fin:
class_mapping = json.loads(fin.read())
def line_to_map(item, test_mode=False):
video = item['id']
if level == 1:
video = osp.basename(video)
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
if test_mode:
return video
else:
template = item['template'].replace('[', '')
template = template.replace(']', '')
label = int(class_mapping[template])
return video, label
with open(train_file, 'r') as fin:
items = json.loads(fin.read())
train_list = [line_to_map(item) for item in items]
with open(val_file, 'r') as fin:
items = json.loads(fin.read())
val_list = [line_to_map(item) for item in items]
with open(test_file, 'r') as fin:
items = json.loads(fin.read())
test_list = [line_to_map(item, test_mode=True) for item in items]
splits = ((train_list, val_list, test_list), )
return splits
def parse_mmit_splits():
"""Parse Multi-Moments in Time dataset into "train", "val" splits.
Returns:
list: "train", "val", "test" splits of Multi-Moments in Time.
"""
# Read the annotations
def line_to_map(x):
video = osp.splitext(x[0])[0]
labels = [int(digit) for digit in x[1:]]
return video, labels
csv_reader = csv.reader(open('data/mmit/annotations/trainingSet.csv'))
train_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open('data/mmit/annotations/validationSet.csv'))
val_list = [line_to_map(x) for x in csv_reader]
    test_list = val_list  # no test annotations are released, so reuse val
splits = ((train_list, val_list, test_list), )
return splits
def parse_kinetics_splits(level, dataset):
"""Parse Kinetics dataset into "train", "val", "test" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
dataset (str): Denotes the version of Kinetics that needs to be parsed,
choices are "kinetics400", "kinetics600" and "kinetics700".
Returns:
list: "train", "val", "test" splits of Kinetics.
"""
def convert_label(s, keep_whitespaces=False):
"""Convert label name to a formal string.
Remove redundant '"' and convert whitespace to '_'.
Args:
s (str): String to be converted.
keep_whitespaces(bool): Whether to keep whitespace. Default: False.
Returns:
str: Converted string.
"""
if not keep_whitespaces:
return s.replace('"', '').replace(' ', '_')
else:
return s.replace('"', '')
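    # For example, a minimal sketch of the conversion above:
    #   convert_label('"riding a bike"')        -> 'riding_a_bike'
    #   convert_label('"riding a bike"', True)  -> 'riding a bike'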
def line_to_map(x, test=False):
"""A function to map line string to video and label.
Args:
x (str): A single line from Kinetics csv file.
test (bool): Indicate whether the line comes from test
annotation file.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
"""
if test:
# video = f'{x[0]}_{int(x[1]):06d}_{int(x[2]):06d}'
video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
label = -1 # label unknown
return video, label
else:
video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
if level == 2:
video = f'{convert_label(x[0])}/{video}'
else:
assert level == 1
label = class_mapping[convert_label(x[0])]
return video, label
train_file = f'data/{dataset}/annotations/kinetics_train.csv'
val_file = f'data/{dataset}/annotations/kinetics_val.csv'
test_file = f'data/{dataset}/annotations/kinetics_test.csv'
csv_reader = csv.reader(open(train_file))
# skip the first line
next(csv_reader)
labels_sorted = sorted(set([convert_label(row[0]) for row in csv_reader]))
class_mapping = {label: i for i, label in enumerate(labels_sorted)}
csv_reader = csv.reader(open(train_file))
next(csv_reader)
train_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open(val_file))
next(csv_reader)
val_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open(test_file))
next(csv_reader)
test_list = [line_to_map(x, test=True) for x in csv_reader]
splits = ((train_list, val_list, test_list), )
return splits
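# Hedged usage sketch (YouTube ids are hypothetical): every entry pairs a
# trimmed-clip name of the form "{id}_{start:06d}_{end:06d}" with a class
# index, prefixed by the class directory when level == 2; test labels are -1.
#
#   (train, val, test), = parse_kinetics_splits(level=2, dataset='kinetics400')
#   # train[0] -> ('abseiling/xxxxxxxxxxx_000000_000010', 0)
#   # test[0]  -> ('yyyyyyyyyyy_000012_000022', -1)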
def parse_mit_splits():
"""Parse Moments in Time dataset into "train", "val" splits.
Returns:
list: "train", "val", "test" splits of Moments in Time.
"""
# Read the annotations
class_mapping = {}
with open('data/mit/annotations/moments_categories.txt') as f_cat:
for line in f_cat.readlines():
cat, digit = line.rstrip().split(',')
class_mapping[cat] = int(digit)
def line_to_map(x):
video = osp.splitext(x[0])[0]
label = class_mapping[osp.dirname(x[0])]
return video, label
csv_reader = csv.reader(open('data/mit/annotations/trainingSet.csv'))
train_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open('data/mit/annotations/validationSet.csv'))
val_list = [line_to_map(x) for x in csv_reader]
test_list = val_list # no test for mit
splits = ((train_list, val_list, test_list), )
return splits
def parse_hmdb51_split(level):
    """Parse HMDB51 into three official (train, test) splits of (video, label)
    pairs; ``level`` has the same meaning as in the other parsers above."""
train_file_template = 'data/hmdb51/annotations/trainlist{:02d}.txt'
test_file_template = 'data/hmdb51/annotations/testlist{:02d}.txt'
class_index_file = 'data/hmdb51/annotations/classInd.txt'
def generate_class_index_file():
"""This function will generate a `ClassInd.txt` for HMDB51 in a format
like UCF101, where class id starts with 1."""
frame_path = 'data/hmdb51/rawframes'
annotation_dir = 'data/hmdb51/annotations'
class_list = sorted(os.listdir(frame_path))
class_dict = dict()
with open(class_index_file, 'w') as f:
content = []
for class_id, class_name in enumerate(class_list):
# like `ClassInd.txt` in UCF-101, the class_id begins with 1
class_dict[class_name] = class_id + 1
cur_line = ' '.join([str(class_id + 1), class_name])
content.append(cur_line)
content = '\n'.join(content)
f.write(content)
for i in range(1, 4):
train_content = []
test_content = []
for class_name in class_dict:
filename = class_name + f'_test_split{i}.txt'
filename_path = osp.join(annotation_dir, filename)
with open(filename_path, 'r') as fin:
for line in fin:
video_info = line.strip().split()
video_name = video_info[0]
if video_info[1] == '1':
target_line = ' '.join([
osp.join(class_name, video_name),
str(class_dict[class_name])
])
train_content.append(target_line)
elif video_info[1] == '2':
target_line = ' '.join([
osp.join(class_name, video_name),
str(class_dict[class_name])
])
test_content.append(target_line)
train_content = '\n'.join(train_content)
test_content = '\n'.join(test_content)
with open(train_file_template.format(i), 'w') as fout:
fout.write(train_content)
with open(test_file_template.format(i), 'w') as fout:
fout.write(test_content)
if not osp.exists(class_index_file):
generate_class_index_file()
with open(class_index_file, 'r') as fin:
class_index = [x.strip().split() for x in fin]
class_mapping = {x[1]: int(x[0]) - 1 for x in class_index}
def line_to_map(line):
items = line.strip().split()
video = osp.splitext(items[0])[0]
if level == 1:
video = osp.basename(video)
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
label = class_mapping[osp.dirname(items[0])]
return video, label
splits = []
for i in range(1, 4):
with open(train_file_template.format(i), 'r') as fin:
train_list = [line_to_map(x) for x in fin]
with open(test_file_template.format(i), 'r') as fin:
test_list = [line_to_map(x) for x in fin]
splits.append((train_list, test_list))
return splits
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/parse_file_list.py |
import argparse
import os.path as osp
from tools.data.parse_file_list import parse_directory
from mmaction.localization import load_localize_proposal_file
def process_norm_proposal_file(norm_proposal_file, frame_dict):
"""Process the normalized proposal file and denormalize it.
Args:
norm_proposal_file (str): Name of normalized proposal file.
frame_dict (dict): Information of frame folders.
"""
proposal_file = norm_proposal_file.replace('normalized_', '')
norm_proposals = load_localize_proposal_file(norm_proposal_file)
processed_proposal_list = []
for idx, norm_proposal in enumerate(norm_proposals):
video_id = norm_proposal[0]
frame_info = frame_dict[video_id]
num_frames = frame_info[1]
frame_path = osp.basename(frame_info[0])
gt = [[
int(x[0]),
int(float(x[1]) * num_frames),
int(float(x[2]) * num_frames)
] for x in norm_proposal[2]]
proposal = [[
int(x[0]),
float(x[1]),
float(x[2]),
int(float(x[3]) * num_frames),
int(float(x[4]) * num_frames)
] for x in norm_proposal[3]]
gt_dump = '\n'.join(['{} {} {}'.format(*x) for x in gt])
gt_dump += '\n' if len(gt) else ''
proposal_dump = '\n'.join(
['{} {:.04f} {:.04f} {} {}'.format(*x) for x in proposal])
proposal_dump += '\n' if len(proposal) else ''
processed_proposal_list.append(
f'# {idx}\n{frame_path}\n{num_frames}\n1'
f'\n{len(gt)}\n{gt_dump}{len(proposal)}\n{proposal_dump}')
with open(proposal_file, 'w') as f:
f.writelines(processed_proposal_list)
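# A sketch of one block in the denormalized file written above (all values are
# hypothetical). Per video it stores: an index line, the frame directory name,
# the frame count, a constant 1, the ground-truth count and lines
# ("label start end"), then the proposal count and lines
# ("label score score start end"):
#
#   # 0
#   video_test_0000001
#   3000
#   1
#   1
#   7 60 420
#   1
#   7 0.8531 0.9012 55 410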
def parse_args():
parser = argparse.ArgumentParser(description='Denormalize proposal file')
parser.add_argument(
'dataset',
type=str,
choices=['thumos14'],
help='dataset to be denormalize proposal file')
parser.add_argument(
'--norm-proposal-file',
type=str,
help='normalized proposal file to be denormalize')
parser.add_argument(
'--data-prefix',
type=str,
help='path to a directory where rawframes are held')
args = parser.parse_args()
return args
def main():
args = parse_args()
print(f'Converting from {args.norm_proposal_file}.')
frame_dict = parse_directory(args.data_prefix)
process_norm_proposal_file(args.norm_proposal_file, frame_dict)
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/denormalize_proposal_file.py |
import argparse
import glob
import os
import os.path as osp
import sys
from multiprocessing import Pool
import mmcv
import numpy as np
from scipy.io import wavfile
try:
import librosa
import lws
except ImportError:
    print('Please install librosa and lws first.')
sys.path.append('..')
SILENCE_THRESHOLD = 2
FMIN = 125
FMAX = 7600
FRAME_SHIFT_MS = None
MIN_LEVEL_DB = -100
REF_LEVEL_DB = 20
RESCALING = True
RESCALING_MAX = 0.999
ALLOW_CLIPPING_IN_NORMALIZATION = True
LOG_SCALE_MIN = -32.23619130191664
NORM_AUDIO = True
class AudioTools:
"""All methods related to audio feature extraction. Code Reference:
<https://github.com/r9y9/deepvoice3_pytorch>`_,
<https://pypi.org/project/lws/1.2.6/>`_.
Args:
frame_rate (int): The frame rate per second of the video. Default: 30.
sample_rate (int): The sample rate for audio sampling. Default: 16000.
num_mels (int): Number of channels of the melspectrogram. Default: 80.
fft_size (int): fft_size / sample_rate is window size. Default: 1280.
hop_size (int): hop_size / sample_rate is step size. Default: 320.
"""
def __init__(self,
frame_rate=30,
sample_rate=16000,
num_mels=80,
fft_size=1280,
hop_size=320):
self.frame_rate = frame_rate
self.sample_rate = sample_rate
self.silence_threshold = SILENCE_THRESHOLD
self.num_mels = num_mels
self.fmin = FMIN
self.fmax = FMAX
self.fft_size = fft_size
self.hop_size = hop_size
self.frame_shift_ms = FRAME_SHIFT_MS
self.min_level_db = MIN_LEVEL_DB
self.ref_level_db = REF_LEVEL_DB
self.rescaling = RESCALING
self.rescaling_max = RESCALING_MAX
self.allow_clipping_in_normalization = ALLOW_CLIPPING_IN_NORMALIZATION
self.log_scale_min = LOG_SCALE_MIN
self.norm_audio = NORM_AUDIO
def load_wav(self, path):
"""Load an audio file into numpy array."""
return librosa.core.load(path, sr=self.sample_rate)[0]
def audio_normalize(self, samples, desired_rms=0.1, eps=1e-4):
"""RMS normalize the audio data."""
rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))
samples = samples * (desired_rms / rms)
return samples
def generate_spectrogram_magphase(self, audio, with_phase=False):
"""Separate a complex-valued spectrogram D into its magnitude (S)
and phase (P) components, so that D = S * P.
Args:
audio (np.ndarray): The input audio signal.
with_phase (bool): Determines whether to output the
phase components. Default: False.
Returns:
np.ndarray: magnitude and phase component of the complex-valued
spectrogram.
"""
spectro = librosa.core.stft(
audio,
hop_length=self.get_hop_size(),
n_fft=self.fft_size,
center=True)
spectro_mag, spectro_phase = librosa.core.magphase(spectro)
spectro_mag = np.expand_dims(spectro_mag, axis=0)
if with_phase:
spectro_phase = np.expand_dims(np.angle(spectro_phase), axis=0)
return spectro_mag, spectro_phase
else:
return spectro_mag
def save_wav(self, wav, path):
"""Save the wav to disk."""
# 32767 = (2 ^ 15 - 1) maximum of int16
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
wavfile.write(path, self.sample_rate, wav.astype(np.int16))
def trim(self, quantized):
"""Trim the audio wavfile."""
start, end = self.start_and_end_indices(quantized,
self.silence_threshold)
return quantized[start:end]
def adjust_time_resolution(self, quantized, mel):
"""Adjust time resolution by repeating features.
Args:
quantized (np.ndarray): (T,)
mel (np.ndarray): (N, D)
Returns:
tuple: Tuple of (T,) and (T, D)
"""
assert quantized.ndim == 1
assert mel.ndim == 2
upsample_factor = quantized.size // mel.shape[0]
mel = np.repeat(mel, upsample_factor, axis=0)
n_pad = quantized.size - mel.shape[0]
if n_pad != 0:
assert n_pad > 0
mel = np.pad(
mel, [(0, n_pad), (0, 0)], mode='constant', constant_values=0)
# trim
start, end = self.start_and_end_indices(quantized,
self.silence_threshold)
return quantized[start:end], mel[start:end, :]
def start_and_end_indices(self, quantized, silence_threshold=2):
"""Trim the audio file when reaches the silence threshold."""
for start in range(quantized.size):
if abs(quantized[start] - 127) > silence_threshold:
break
for end in range(quantized.size - 1, 1, -1):
if abs(quantized[end] - 127) > silence_threshold:
break
assert abs(quantized[start] - 127) > silence_threshold
assert abs(quantized[end] - 127) > silence_threshold
return start, end
def melspectrogram(self, y):
"""Generate the melspectrogram."""
D = self._lws_processor().stft(y).T
S = self._amp_to_db(self._linear_to_mel(np.abs(D))) - self.ref_level_db
if not self.allow_clipping_in_normalization:
assert S.max() <= 0 and S.min() - self.min_level_db >= 0
return self._normalize(S)
def get_hop_size(self):
"""Calculate the hop size."""
hop_size = self.hop_size
if hop_size is None:
assert self.frame_shift_ms is not None
hop_size = int(self.frame_shift_ms / 1000 * self.sample_rate)
return hop_size
def _lws_processor(self):
"""Perform local weighted sum.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_.
"""
return lws.lws(self.fft_size, self.get_hop_size(), mode='speech')
def lws_num_frames(self, length, fsize, fshift):
"""Compute number of time frames of lws spectrogram.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_.
"""
pad = (fsize - fshift)
if length % fshift == 0:
M = (length + pad * 2 - fsize) // fshift + 1
else:
M = (length + pad * 2 - fsize) // fshift + 2
return M
def lws_pad_lr(self, x, fsize, fshift):
"""Compute left and right padding lws internally uses.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_.
"""
M = self.lws_num_frames(len(x), fsize, fshift)
pad = (fsize - fshift)
T = len(x) + 2 * pad
r = (M - 1) * fshift + fsize - T
return pad, pad + r
def _linear_to_mel(self, spectrogram):
"""Warp linear scale spectrograms to the mel scale.
Please refer to <https://github.com/r9y9/deepvoice3_pytorch>`_
"""
global _mel_basis
_mel_basis = self._build_mel_basis()
return np.dot(_mel_basis, spectrogram)
def _build_mel_basis(self):
"""Build mel filters.
Please refer to <https://github.com/r9y9/deepvoice3_pytorch>`_
"""
assert self.fmax <= self.sample_rate // 2
return librosa.filters.mel(
self.sample_rate,
self.fft_size,
fmin=self.fmin,
fmax=self.fmax,
n_mels=self.num_mels)
def _amp_to_db(self, x):
min_level = np.exp(self.min_level_db / 20 * np.log(10))
return 20 * np.log10(np.maximum(min_level, x))
def _db_to_amp(self, x):
return np.power(10.0, x * 0.05)
def _normalize(self, S):
return np.clip((S - self.min_level_db) / -self.min_level_db, 0, 1)
def _denormalize(self, S):
return (np.clip(S, 0, 1) * -self.min_level_db) + self.min_level_db
def read_audio(self, audio_path):
wav = self.load_wav(audio_path)
if self.norm_audio:
wav = self.audio_normalize(wav)
else:
wav = wav / np.abs(wav).max()
return wav
def audio_to_spectrogram(self, wav):
if self.melspectrogram:
spectrogram = self.melspectrogram(wav).astype(np.float32).T
else:
spectrogram = self.generate_spectrogram_magphase(wav)
return spectrogram
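# Hedged usage sketch for the class above ('sample.wav' is a hypothetical path):
#
#   audio_tools = AudioTools(fft_size=512, hop_size=256)
#   wav = audio_tools.read_audio('sample.wav')     # 1-D normalized waveform
#   spec = audio_tools.audio_to_spectrogram(wav)   # (num_frames, num_mels) array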
def extract_audio_feature(wav_path, audio_tools, mel_out_dir):
file_name, _ = osp.splitext(osp.basename(wav_path))
# Write the spectrograms to disk:
mel_filename = os.path.join(mel_out_dir, file_name + '.npy')
if not os.path.exists(mel_filename):
try:
wav = audio_tools.read_audio(wav_path)
spectrogram = audio_tools.audio_to_spectrogram(wav)
np.save(
mel_filename,
spectrogram.astype(np.float32),
allow_pickle=False)
except BaseException:
print(f'Read audio [{wav_path}] failed.')
if __name__ == '__main__':
audio_tools = AudioTools(
fft_size=512, hop_size=256) # window_size:32ms hop_size:16ms
parser = argparse.ArgumentParser()
parser.add_argument('audio_home_path', type=str)
parser.add_argument('spectrogram_save_path', type=str)
parser.add_argument('--level', type=int, default=1)
parser.add_argument('--ext', default='.m4a')
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--part', type=str, default='1/1')
args = parser.parse_args()
mmcv.mkdir_or_exist(args.spectrogram_save_path)
files = glob.glob(
osp.join(args.audio_home_path, '*/' * args.level, '*' + args.ext))
print(f'found {len(files)} files.')
files = sorted(files)
if args.part is not None:
[this_part, num_parts] = [int(i) for i in args.part.split('/')]
part_len = len(files) // num_parts
p = Pool(args.num_workers)
for file in files[part_len * (this_part - 1):(
part_len * this_part) if this_part != num_parts else len(files)]:
p.apply_async(
extract_audio_feature,
args=(file, audio_tools, args.spectrogram_save_path))
p.close()
p.join()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/build_audio_features.py |
import argparse
import glob
import os
import os.path as osp
import sys
from multiprocessing import Pool
def resize_videos(vid_item):
"""Generate resized video cache.
Args:
vid_item (list): Video item containing video full path,
video relative path.
Returns:
bool: Whether generate video cache successfully.
"""
full_path, vid_path = vid_item
out_full_path = osp.join(args.out_dir, vid_path)
dir_name = osp.dirname(vid_path)
out_dir = osp.join(args.out_dir, dir_name)
if not osp.exists(out_dir):
os.makedirs(out_dir)
result = os.popen(
f'ffprobe -hide_banner -loglevel error -select_streams v:0 -show_entries stream=width,height -of csv=p=0 {full_path}' # noqa:E501
)
w, h = [int(d) for d in result.readline().rstrip().split(',')]
if w > h:
cmd = (f'ffmpeg -hide_banner -loglevel error -i {full_path} '
f'-vf {"mpdecimate," if args.remove_dup else ""}'
f'scale=-2:{args.scale} '
f'{"-vsync vfr" if args.remove_dup else ""} '
f'-c:v libx264 {"-g 16" if args.dense else ""} '
f'-an {out_full_path} -y')
else:
cmd = (f'ffmpeg -hide_banner -loglevel error -i {full_path} '
f'-vf {"mpdecimate," if args.remove_dup else ""}'
f'scale={args.scale}:-2 '
f'{"-vsync vfr" if args.remove_dup else ""} '
f'-c:v libx264 {"-g 16" if args.dense else ""} '
f'-an {out_full_path} -y')
os.popen(cmd)
print(f'{vid_path} done')
sys.stdout.flush()
return True
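# A rough sketch of the command rendered above for a landscape video with the
# default flags (paths hypothetical, whitespace simplified):
#
#   ffmpeg -hide_banner -loglevel error -i src/cls/video.mp4 \
#       -vf scale=-2:256 -c:v libx264 -an out/cls/video.mp4 -y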
def parse_args():
parser = argparse.ArgumentParser(
description='Generate the resized cache of original videos')
parser.add_argument('src_dir', type=str, help='source video directory')
parser.add_argument('out_dir', type=str, help='output video directory')
parser.add_argument(
'--dense',
action='store_true',
help='whether to generate a faster cache')
parser.add_argument(
'--level',
type=int,
choices=[1, 2],
default=2,
help='directory level of data')
parser.add_argument(
'--remove-dup',
action='store_true',
help='whether to remove duplicated frames')
parser.add_argument(
'--ext',
type=str,
default='mp4',
choices=['avi', 'mp4', 'webm'],
help='video file extensions')
parser.add_argument(
'--scale',
type=int,
default=256,
help='resize image short side length keeping ratio')
parser.add_argument(
'--num-worker', type=int, default=8, help='number of workers')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if not osp.isdir(args.out_dir):
print(f'Creating folder: {args.out_dir}')
os.makedirs(args.out_dir)
print('Reading videos from folder: ', args.src_dir)
print('Extension of videos: ', args.ext)
fullpath_list = glob.glob(args.src_dir + '/*' * args.level + '.' +
args.ext)
    done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level + '.' +
                                   args.ext)
print('Total number of videos found: ', len(fullpath_list))
print('Total number of videos transfer finished: ',
len(done_fullpath_list))
if args.level == 2:
vid_list = list(
map(
lambda p: osp.join(
osp.basename(osp.dirname(p)), osp.basename(p)),
fullpath_list))
elif args.level == 1:
vid_list = list(map(lambda p: osp.basename(p), fullpath_list))
pool = Pool(args.num_worker)
pool.map(resize_videos, zip(fullpath_list, vid_list))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/resize_video.py |
import os
import os.path as osp
import sys
from subprocess import check_output
import mmcv
def get_duration(vid_name):
command = f'ffprobe -i {vid_name} 2>&1 | grep "Duration"'
output = str(check_output(command, shell=True))
output = output.split(',')[0].split('Duration:')[1].strip()
h, m, s = output.split(':')
duration = int(h) * 3600 + int(m) * 60 + float(s)
return duration
def trim(vid_name):
try:
lt = get_duration(vid_name)
except Exception:
print(f'get_duration failed for video {vid_name}', flush=True)
return
i = 0
name, _ = osp.splitext(vid_name)
# We output 10-second clips into the folder `name`
dest = name
mmcv.mkdir_or_exist(dest)
    command_tmpl = ('ffmpeg -y -loglevel error -i {} -ss {} -t {} -crf 18 '
                    '-c:v libx264 {}/part_{}.mp4')
while i * 10 < lt:
os.system(command_tmpl.format(vid_name, i * 10, 10, dest, i))
i += 1
    # remove the raw video after decomposing it into 10-second clips to save space
os.remove(vid_name)
if __name__ == '__main__':
vid_name = sys.argv[1]
trim(vid_name)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/omnisource/trim_raw_video.py |
import argparse
import multiprocessing
import os
import os.path as osp
import numpy as np
import scipy.interpolate
from mmcv import dump, load
args = None
def parse_args():
parser = argparse.ArgumentParser(description='ANet Feature Prepare')
parser.add_argument('--rgb', default='', help='rgb feature root')
parser.add_argument('--flow', default='', help='flow feature root')
parser.add_argument('--dest', default='', help='dest root')
parser.add_argument('--output-format', default='csv')
args = parser.parse_args()
return args
def pool_feature(data, num_proposals=100, num_sample_bins=3, pool_type='mean'):
"""Pool features with arbitrary temporal length.
Args:
data (list[np.ndarray] | np.ndarray): Features of an untrimmed video,
with arbitrary temporal length.
num_proposals (int): The temporal dim of pooled feature. Default: 100.
num_sample_bins (int): How many points to sample to get the feature
vector at one timestamp. Default: 3.
pool_type (str): Type of pooling to pool features. Choices are
['mean', 'max']. Default: 'mean'.
Returns:
np.ndarray: The pooled feature with shape num_proposals x feature_dim.
"""
if len(data) == 1:
return np.concatenate([data] * num_proposals)
x_range = list(range(len(data)))
f = scipy.interpolate.interp1d(x_range, data, axis=0)
eps = 1e-4
start, end = eps, len(data) - 1 - eps
anchor_size = (end - start) / num_proposals
ptr = start
feature = []
for i in range(num_proposals):
x_new = [
ptr + i / num_sample_bins * anchor_size
for i in range(num_sample_bins)
]
y_new = f(x_new)
if pool_type == 'mean':
y_new = np.mean(y_new, axis=0)
elif pool_type == 'max':
y_new = np.max(y_new, axis=0)
else:
raise NotImplementedError('Unsupported pool type')
feature.append(y_new)
ptr += anchor_size
feature = np.stack(feature)
return feature
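# Minimal sketch of the pooling above: a variable-length (T, C) snippet feature
# is resampled to a fixed (num_proposals, C) matrix (shapes are hypothetical):
#
#   feat = np.random.rand(347, 200)   # 347 snippets, 200-D per-snippet feature
#   pooled = pool_feature(feat)       # -> shape (100, 200)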
def merge_feat(name):
# concatenate rgb feat and flow feat for a single sample
rgb_feat = load(osp.join(args.rgb, name))
flow_feat = load(osp.join(args.flow, name))
rgb_feat = pool_feature(rgb_feat)
flow_feat = pool_feature(flow_feat)
feat = np.concatenate([rgb_feat, flow_feat], axis=-1)
if not osp.exists(args.dest):
os.system(f'mkdir -p {args.dest}')
if args.output_format == 'pkl':
dump(feat, osp.join(args.dest, name))
elif args.output_format == 'csv':
feat = feat.tolist()
lines = []
line0 = ','.join([f'f{i}' for i in range(400)])
lines.append(line0)
for line in feat:
lines.append(','.join([f'{x:.4f}' for x in line]))
with open(osp.join(args.dest, name.replace('.pkl', '.csv')), 'w') as f:
f.write('\n'.join(lines))
def main():
global args
args = parse_args()
rgb_feat = os.listdir(args.rgb)
flow_feat = os.listdir(args.flow)
assert set(rgb_feat) == set(flow_feat)
pool = multiprocessing.Pool(32)
pool.map(merge_feat, rgb_feat)
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/activitynet/activitynet_feature_postprocessing.py |
# This scripts is copied from
# https://github.com/activitynet/ActivityNet/blob/master/Crawler/Kinetics/download.py # noqa: E501
# The code is licensed under the MIT licence.
import os
import ssl
import subprocess
import mmcv
from joblib import Parallel, delayed
ssl._create_default_https_context = ssl._create_unverified_context
data_file = '../../../data/ActivityNet'
video_list = f'{data_file}/video_info_new.csv'
anno_file = f'{data_file}/anet_anno_action.json'
output_dir = f'{data_file}/videos'
def download_clip(video_identifier,
output_filename,
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
"""Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
"""
# Defensive argument checking.
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
if not os.path.exists(output_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',
'-f', 'mp4', '-o',
'"%s"' % output_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
attempts += 1
if attempts == num_attempts:
return status, 'Fail'
else:
break
# Check if the video was successfully saved.
status = os.path.exists(output_filename)
return status, 'Downloaded'
def download_clip_wrapper(youtube_id, output_dir):
"""Wrapper for parallel processing purposes."""
# we do this to align with names in annotations
output_filename = os.path.join(output_dir, 'v_' + youtube_id + '.mp4')
if os.path.exists(output_filename):
status = tuple(['v_' + youtube_id, True, 'Exists'])
return status
downloaded, log = download_clip(youtube_id, output_filename)
status = tuple(['v_' + youtube_id, downloaded, log])
return status
def parse_activitynet_annotations(input_csv):
"""Returns a list of YoutubeID.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'video,numFrame,seconds,fps,rfps,subset,featureFrame'
returns:
-------
youtube_ids: list
List of all YoutubeIDs in ActivityNet.
"""
lines = open(input_csv).readlines()
lines = lines[1:]
# YoutubeIDs do not have prefix `v_`
youtube_ids = [x.split(',')[0][2:] for x in lines]
return youtube_ids
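# Sketch of the parsing above (the id is hypothetical): each CSV line starts
# with a 'v_'-prefixed name and the prefix is stripped, e.g.
#   'v_abcdefghijk,3120,82.73,...' -> 'abcdefghijk'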
def main(input_csv, output_dir, anno_file, num_jobs=24):
# Reading and parsing ActivityNet.
youtube_ids = parse_activitynet_annotations(input_csv)
# Creates folders where videos will be saved later.
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Download all clips.
if num_jobs == 1:
status_list = []
for index in youtube_ids:
status_list.append(download_clip_wrapper(index, output_dir))
else:
status_list = Parallel(n_jobs=num_jobs)(
delayed(download_clip_wrapper)(index, output_dir)
for index in youtube_ids)
# Save download report.
mmcv.dump(status_list, 'download_report.json')
annotation = mmcv.load(anno_file)
downloaded = {status[0]: status[1] for status in status_list}
annotation = {k: v for k, v in annotation.items() if downloaded[k]}
anno_file_bak = anno_file.replace('.json', '_bak.json')
os.system(f'mv {anno_file} {anno_file_bak}')
mmcv.dump(annotation, anno_file)
if __name__ == '__main__':
main(video_list, output_dir, anno_file, 24)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/activitynet/download.py |
import os
import os.path as osp
import mmcv
data_file = '../../../data/ActivityNet'
video_list = f'{data_file}/video_info_new.csv'
anno_file = f'{data_file}/anet_anno_action.json'
rawframe_dir = f'{data_file}/rawframes'
action_name_list = 'action_name.csv'
def generate_rawframes_filelist():
anet_annotations = mmcv.load(anno_file)
videos = open(video_list).readlines()
videos = [x.strip().split(',') for x in videos]
attr_names = videos[0][1:]
# the first line is 'video,numFrame,seconds,fps,rfps,subset,featureFrame'
attr_names = [x.lower() for x in attr_names]
attr_types = [int, float, float, float, str, int]
video_annos = {}
for line in videos[1:]:
name = line[0]
data = {}
for attr_name, attr_type, attr_val in zip(attr_names, attr_types,
line[1:]):
data[attr_name] = attr_type(attr_val)
video_annos[name] = data
# only keep downloaded videos
video_annos = {
k: v
for k, v in video_annos.items() if k in anet_annotations
}
# update numframe
for video in video_annos:
pth = osp.join(rawframe_dir, video)
num_imgs = len(os.listdir(pth))
# one more rgb img than flow
assert (num_imgs - 1) % 3 == 0
num_frames = (num_imgs - 1) // 3
video_annos[video]['numframe'] = num_frames
anet_labels = open(action_name_list).readlines()
anet_labels = [x.strip() for x in anet_labels[1:]]
train_videos, val_videos = {}, {}
for k, video in video_annos.items():
if video['subset'] == 'training':
train_videos[k] = video
elif video['subset'] == 'validation':
val_videos[k] = video
def simple_label(video_idx):
anno = anet_annotations[video_idx]
label = anno['annotations'][0]['label']
return anet_labels.index(label)
train_lines = [
k + ' ' + str(train_videos[k]['numframe']) + ' ' +
str(simple_label(k)) for k in train_videos
]
val_lines = [
k + ' ' + str(val_videos[k]['numframe']) + ' ' + str(simple_label(k))
for k in val_videos
]
with open(osp.join(data_file, 'anet_train_video.txt'), 'w') as fout:
fout.write('\n'.join(train_lines))
with open(osp.join(data_file, 'anet_val_video.txt'), 'w') as fout:
fout.write('\n'.join(val_lines))
def clip_list(k, anno, vidanno):
num_seconds = anno['duration_second']
num_frames = vidanno['numframe']
fps = num_frames / num_seconds
segs = anno['annotations']
lines = []
for seg in segs:
segment = seg['segment']
label = seg['label']
label = anet_labels.index(label)
start, end = int(segment[0] * fps), int(segment[1] * fps)
if end > num_frames - 1:
end = num_frames - 1
newline = f'{k} {start} {end - start + 1} {label}'
lines.append(newline)
return lines
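    # Each clip line produced above follows the rawframe annotation format
    # "video_id start_frame num_frames label", e.g. (hypothetical values):
    #   'v_abcdefghijk 120 240 37'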
train_clips, val_clips = [], []
for k in train_videos:
train_clips.extend(clip_list(k, anet_annotations[k], train_videos[k]))
for k in val_videos:
val_clips.extend(clip_list(k, anet_annotations[k], val_videos[k]))
with open(osp.join(data_file, 'anet_train_clip.txt'), 'w') as fout:
fout.write('\n'.join(train_clips))
with open(osp.join(data_file, 'anet_val_clip.txt'), 'w') as fout:
fout.write('\n'.join(val_clips))
if __name__ == '__main__':
generate_rawframes_filelist()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/activitynet/generate_rawframes_filelist.py |
"""This file converts the output proposal file of proposal generator (BSN, BMN)
into the input proposal file of action classifier (Currently supports SSN and
P-GCN, not including TSN, I3D etc.)."""
import argparse
import mmcv
import numpy as np
from mmaction.core import pairwise_temporal_iou
def load_annotations(ann_file):
"""Load the annotation according to ann_file into video_infos."""
video_infos = []
anno_database = mmcv.load(ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos
def import_ground_truth(video_infos, activity_index):
"""Read ground truth data from video_infos."""
ground_truth = {}
for video_info in video_infos:
video_id = video_info['video_name'][2:]
this_video_ground_truths = []
for ann in video_info['annotations']:
t_start, t_end = ann['segment']
label = activity_index[ann['label']]
this_video_ground_truths.append([t_start, t_end, label])
ground_truth[video_id] = np.array(this_video_ground_truths)
return ground_truth
def import_proposals(result_dict):
"""Read predictions from result dict."""
proposals = {}
num_proposals = 0
for video_id in result_dict:
result = result_dict[video_id]
this_video_proposals = []
for proposal in result:
t_start, t_end = proposal['segment']
score = proposal['score']
this_video_proposals.append([t_start, t_end, score])
num_proposals += 1
proposals[video_id] = np.array(this_video_proposals)
return proposals, num_proposals
def dump_formatted_proposal(video_idx, video_id, num_frames, fps, gts,
proposals, tiou, t_overlap_self,
formatted_proposal_file):
"""dump the formatted proposal file, which is the input proposal file of
action classifier (e.g: SSN).
Args:
video_idx (int): Index of video.
video_id (str): ID of video.
num_frames (int): Total frames of the video.
fps (float): Fps of the video.
gts (np.ndarray[float]): t_start, t_end and label of groundtruths.
proposals (np.ndarray[float]): t_start, t_end and score of proposals.
tiou (np.ndarray[float]): 2-dim array with IoU ratio.
t_overlap_self (np.ndarray[float]): 2-dim array with overlap_self
(union / self_len) ratio.
formatted_proposal_file (open file object): Open file object of
formatted_proposal_file.
"""
formatted_proposal_file.write(
f'#{video_idx}\n{video_id}\n{num_frames}\n{fps}\n{gts.shape[0]}\n')
for gt in gts:
formatted_proposal_file.write(f'{int(gt[2])} {gt[0]} {gt[1]}\n')
formatted_proposal_file.write(f'{proposals.shape[0]}\n')
best_iou = np.amax(tiou, axis=0)
best_iou_index = np.argmax(tiou, axis=0)
best_overlap = np.amax(t_overlap_self, axis=0)
best_overlap_index = np.argmax(t_overlap_self, axis=0)
for i in range(proposals.shape[0]):
index_iou = best_iou_index[i]
index_overlap = best_overlap_index[i]
label_iou = gts[index_iou][2]
label_overlap = gts[index_overlap][2]
if label_iou != label_overlap:
label = label_iou if label_iou != 0 else label_overlap
else:
label = label_iou
if best_iou[i] == 0 and best_overlap[i] == 0:
formatted_proposal_file.write(
f'0 0 0 {proposals[i][0]} {proposals[i][1]}\n')
else:
formatted_proposal_file.write(
f'{int(label)} {best_iou[i]} {best_overlap[i]} '
f'{proposals[i][0]} {proposals[i][1]}\n')
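# Note on the label choice above: when the IoU-best and overlap-best ground
# truths disagree, the IoU-best label is kept unless it equals 0, in which
# case the overlap-best label is used instead. A sketch of one proposal line
# in the resulting SSN-style file (hypothetical values):
#   '37 0.8531 0.9012 12.4 27.9'   # label, best IoU, best overlap, t_start, t_end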
def parse_args():
parser = argparse.ArgumentParser(description='convert proposal format')
parser.add_argument(
'--ann-file',
type=str,
default='../../../data/ActivityNet/anet_anno_val.json',
help='name of annotation file')
parser.add_argument(
'--activity-index-file',
type=str,
default='../../../data/ActivityNet/anet_activity_indexes_val.txt',
help='name of activity index file')
parser.add_argument(
'--proposal-file',
type=str,
default='../../../results.json',
help='name of proposal file, which is the'
'output of proposal generator (BMN)')
parser.add_argument(
'--formatted-proposal-file',
type=str,
default='../../../anet_val_formatted_proposal.txt',
help='name of formatted proposal file, which is the'
'input of action classifier (SSN)')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
formatted_proposal_file = open(args.formatted_proposal_file, 'w')
# The activity index file is constructed according to
# 'https://github.com/activitynet/ActivityNet/blob/master/Evaluation/eval_classification.py'
activity_index, class_idx = {}, 0
for line in open(args.activity_index_file).readlines():
activity_index[line.strip()] = class_idx
class_idx += 1
video_infos = load_annotations(args.ann_file)
ground_truth = import_ground_truth(video_infos, activity_index)
proposal, num_proposals = import_proposals(
mmcv.load(args.proposal_file)['results'])
video_idx = 0
for video_info in video_infos:
video_id = video_info['video_name'][2:]
num_frames = video_info['duration_frame']
fps = video_info['fps']
tiou, t_overlap = pairwise_temporal_iou(
proposal[video_id][:, :2].astype(float),
ground_truth[video_id][:, :2].astype(float),
calculate_overlap_self=True)
dump_formatted_proposal(video_idx, video_id, num_frames, fps,
ground_truth[video_id], proposal[video_id],
tiou, t_overlap, formatted_proposal_file)
video_idx += 1
formatted_proposal_file.close()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/activitynet/convert_proposal_format.py |
import argparse
import os
import os.path as osp
import pickle
import mmcv
import numpy as np
import torch
from mmaction.datasets.pipelines import Compose
from mmaction.models import build_model
def parse_args():
parser = argparse.ArgumentParser(description='Extract TSN Feature')
parser.add_argument('--data-prefix', default='', help='dataset prefix')
parser.add_argument('--output-prefix', default='', help='output prefix')
parser.add_argument(
'--data-list',
help='video list of the dataset, the format should be '
'`frame_dir num_frames output_file`')
parser.add_argument(
'--frame-interval',
type=int,
default=16,
help='the sampling frequency of frame in the untrimed video')
parser.add_argument('--modality', default='RGB', choices=['RGB', 'Flow'])
parser.add_argument('--ckpt', help='checkpoint for feature extraction')
parser.add_argument(
'--part',
type=int,
default=0,
help='which part of dataset to forward(alldata[part::total])')
parser.add_argument(
'--total', type=int, default=1, help='how many parts exist')
args = parser.parse_args()
return args
def main():
args = parse_args()
args.is_rgb = args.modality == 'RGB'
args.clip_len = 1 if args.is_rgb else 5
args.input_format = 'NCHW' if args.is_rgb else 'NCHW_Flow'
rgb_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_bgr=False)
flow_norm_cfg = dict(mean=[128, 128], std=[128, 128])
args.img_norm_cfg = rgb_norm_cfg if args.is_rgb else flow_norm_cfg
args.f_tmpl = 'img_{:05d}.jpg' if args.is_rgb else 'flow_{}_{:05d}.jpg'
args.in_channels = args.clip_len * (3 if args.is_rgb else 2)
# max batch_size for one forward
args.batch_size = 200
# define the data pipeline for Untrimmed Videos
data_pipeline = [
dict(
type='UntrimmedSampleFrames',
clip_len=args.clip_len,
frame_interval=args.frame_interval,
start_index=0),
dict(type='FrameSelector'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=256),
dict(type='Normalize', **args.img_norm_cfg),
dict(type='FormatShape', input_format=args.input_format),
dict(type='Collect', keys=['imgs'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data_pipeline = Compose(data_pipeline)
# define TSN R50 model, the model is used as the feature extractor
model = dict(
type='Recognizer2D',
backbone=dict(
type='ResNet',
depth=50,
in_channels=args.in_channels,
norm_eval=False),
cls_head=dict(
type='TSNHead',
num_classes=200,
in_channels=2048,
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1)))
model = build_model(model, test_cfg=dict(average_clips=None))
# load pretrained weight into the feature extractor
state_dict = torch.load(args.ckpt)['state_dict']
model.load_state_dict(state_dict)
model = model.cuda()
model.eval()
data = open(args.data_list).readlines()
data = [x.strip() for x in data]
data = data[args.part::args.total]
# enumerate Untrimmed videos, extract feature from each of them
prog_bar = mmcv.ProgressBar(len(data))
if not osp.exists(args.output_prefix):
os.system(f'mkdir -p {args.output_prefix}')
for item in data:
frame_dir, length, label = item.split()
output_file = osp.basename(frame_dir) + '.pkl'
frame_dir = osp.join(args.data_prefix, frame_dir)
output_file = osp.join(args.output_prefix, output_file)
assert output_file.endswith('.pkl')
length = int(length)
        # prepare a pseudo sample
tmpl = dict(
frame_dir=frame_dir,
total_frames=length,
filename_tmpl=args.f_tmpl,
start_index=0,
modality=args.modality)
sample = data_pipeline(tmpl)
imgs = sample['imgs']
shape = imgs.shape
# the original shape should be N_seg * C * H * W, resize it to N_seg *
# 1 * C * H * W so that the network return feature of each frame (No
# score average among segments)
imgs = imgs.reshape((shape[0], 1) + shape[1:])
imgs = imgs.cuda()
def forward_data(model, data):
# chop large data into pieces and extract feature from them
results = []
start_idx = 0
num_clip = data.shape[0]
while start_idx < num_clip:
with torch.no_grad():
part = data[start_idx:start_idx + args.batch_size]
feat = model.forward(part, return_loss=False)
results.append(feat)
start_idx += args.batch_size
return np.concatenate(results)
feat = forward_data(model, imgs)
with open(output_file, 'wb') as fout:
pickle.dump(feat, fout)
prog_bar.update()
if __name__ == '__main__':
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/activitynet/tsn_feature_extraction.py |
"""This file processes the annotation files and generates proper annotation
files for localizers."""
import json
import numpy as np
def load_json(file):
with open(file) as json_file:
data = json.load(json_file)
return data
data_file = '../../../data/ActivityNet'
info_file = f'{data_file}/video_info_new.csv'
ann_file = f'{data_file}/anet_anno_action.json'
anno_database = load_json(ann_file)
video_record = np.loadtxt(info_file, dtype=str, delimiter=',', skiprows=1)
video_dict_train = {}
video_dict_val = {}
video_dict_test = {}
video_dict_full = {}
for i in range(len(video_record)):
video_name = video_record[i][0]
video_info = anno_database[video_name]
video_subset = video_record[i][5]
    video_info['fps'] = video_record[i][3].astype(float)
    video_info['rfps'] = video_record[i][4].astype(float)
video_dict_full[video_name] = video_info
if video_subset == 'training':
video_dict_train[video_name] = video_info
elif video_subset == 'testing':
video_dict_test[video_name] = video_info
elif video_subset == 'validation':
video_dict_val[video_name] = video_info
print(f'full subset video numbers: {len(video_record)}')
with open(f'{data_file}/anet_anno_train.json', 'w') as result_file:
json.dump(video_dict_train, result_file)
with open(f'{data_file}/anet_anno_val.json', 'w') as result_file:
json.dump(video_dict_val, result_file)
with open(f'{data_file}/anet_anno_test.json', 'w') as result_file:
json.dump(video_dict_test, result_file)
with open(f'{data_file}/anet_anno_full.json', 'w') as result_file:
json.dump(video_dict_full, result_file)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/activitynet/process_annotations.py |
# ------------------------------------------------------------------------------
# Adapted from https://github.com/activitynet/ActivityNet/
# Original licence: Copyright (c) Microsoft, under the MIT License.
# ------------------------------------------------------------------------------
import argparse
import glob
import json
import os
import shutil
import ssl
import subprocess
import uuid
from collections import OrderedDict
import pandas as pd
from joblib import Parallel, delayed
ssl._create_default_https_context = ssl._create_unverified_context
def create_video_folders(dataset, output_dir, tmp_dir):
"""Creates a directory for each label name in the dataset."""
if 'label-name' not in dataset.columns:
this_dir = os.path.join(output_dir, 'test')
if not os.path.exists(this_dir):
os.makedirs(this_dir)
# I should return a dict but ...
return this_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
label_to_dir = {}
for label_name in dataset['label-name'].unique():
this_dir = os.path.join(output_dir, label_name)
if not os.path.exists(this_dir):
os.makedirs(this_dir)
label_to_dir[label_name] = this_dir
return label_to_dir
def construct_video_filename(row, label_to_dir, trim_format='%06d'):
"""Given a dataset row, this function constructs the output filename for a
given video."""
basename = '%s_%s_%s.mp4' % (row['video-id'],
trim_format % row['start-time'],
trim_format % row['end-time'])
if not isinstance(label_to_dir, dict):
dirname = label_to_dir
else:
dirname = label_to_dir[row['label-name']]
output_filename = os.path.join(dirname, basename)
return output_filename
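# Minimal sketch of the filename built above (the YouTube id is hypothetical):
#
#   row = {'video-id': 'abcdefghijk', 'start-time': 4, 'end-time': 14,
#          'label-name': 'abseiling'}
#   construct_video_filename(row, {'abseiling': 'videos/abseiling'})
#   # -> 'videos/abseiling/abcdefghijk_000004_000014.mp4'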
def download_clip(video_identifier,
output_filename,
start_time,
end_time,
tmp_dir='/tmp/kinetics',
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
"""Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
start_time: float
Indicates the begining time in seconds from where the video
will be trimmed.
end_time: float
Indicates the ending time in seconds of the trimmed video.
"""
# Defensive argument checking.
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
# Construct command line for getting the direct video link.
tmp_filename = os.path.join(tmp_dir, '%s.%%(ext)s' % uuid.uuid4())
if not os.path.exists(output_filename):
if not os.path.exists(tmp_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings',
'--no-check-certificate', '-f', 'mp4', '-o',
'"%s"' % tmp_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
attempts += 1
if attempts == num_attempts:
return status, err.output
else:
break
tmp_filename = glob.glob('%s*' % tmp_filename.split('.')[0])[0]
# Construct command to trim the videos (ffmpeg required).
command = [
'ffmpeg', '-i',
'"%s"' % tmp_filename, '-ss',
str(start_time), '-t',
str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy',
'-threads', '1', '-loglevel', 'panic',
'"%s"' % output_filename
]
command = ' '.join(command)
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
return status, err.output
# Check if the video was successfully saved.
status = os.path.exists(output_filename)
os.remove(tmp_filename)
return status, 'Downloaded'
def download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir):
"""Wrapper for parallel processing purposes."""
output_filename = construct_video_filename(row, label_to_dir, trim_format)
clip_id = os.path.basename(output_filename).split('.mp4')[0]
if os.path.exists(output_filename):
status = tuple([clip_id, True, 'Exists'])
return status
downloaded, log = download_clip(
row['video-id'],
output_filename,
row['start-time'],
row['end-time'],
tmp_dir=tmp_dir)
status = tuple([clip_id, downloaded, log])
return status
def parse_kinetics_annotations(input_csv, ignore_is_cc=False):
"""Returns a parsed DataFrame.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'YouTube Identifier,Start time,End time,Class label'
returns:
-------
dataset: DataFrame
Pandas with the following columns:
'video-id', 'start-time', 'end-time', 'label-name'
"""
df = pd.read_csv(input_csv)
if 'youtube_id' in df.columns:
columns = OrderedDict([('youtube_id', 'video-id'),
('time_start', 'start-time'),
('time_end', 'end-time'),
('label', 'label-name')])
df.rename(columns=columns, inplace=True)
if ignore_is_cc:
df = df.loc[:, df.columns.tolist()[:-1]]
return df
def main(input_csv,
output_dir,
trim_format='%06d',
num_jobs=24,
tmp_dir='/tmp/kinetics'):
# Reading and parsing Kinetics.
dataset = parse_kinetics_annotations(input_csv)
# Creates folders where videos will be saved later.
label_to_dir = create_video_folders(dataset, output_dir, tmp_dir)
# Download all clips.
if num_jobs == 1:
status_list = []
for i, row in dataset.iterrows():
status_list.append(
download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir))
else:
status_list = Parallel(
n_jobs=num_jobs)(delayed(download_clip_wrapper)(
row, label_to_dir, trim_format, tmp_dir)
for i, row in dataset.iterrows())
# Clean tmp dir.
shutil.rmtree(tmp_dir)
# Save download report.
with open('download_report.json', 'w') as fobj:
fobj.write(json.dumps(status_list))
if __name__ == '__main__':
description = 'Helper script for downloading and trimming kinetics videos.'
p = argparse.ArgumentParser(description=description)
p.add_argument(
'input_csv',
type=str,
help=('CSV file containing the following format: '
'YouTube Identifier,Start time,End time,Class label'))
p.add_argument(
'output_dir',
type=str,
help='Output directory where videos will be saved.')
p.add_argument(
'-f',
'--trim-format',
type=str,
default='%06d',
help=('This will be the format for the '
'filename of trimmed videos: '
'videoid_%0xd(start_time)_%0xd(end_time).mp4'))
p.add_argument('-n', '--num-jobs', type=int, default=24)
p.add_argument('-t', '--tmp-dir', type=str, default='/tmp/kinetics')
# help='CSV file of the previous version of Kinetics.')
main(**vars(p.parse_args()))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download.py |
# ------------------------------------------------------------------------------
# Adapted from https://github.com/activitynet/ActivityNet/
# Original licence: Copyright (c) Microsoft, under the MIT License.
# ------------------------------------------------------------------------------
import argparse
import glob
import json
import os
import shutil
import ssl
import subprocess
import uuid
from collections import OrderedDict
import pandas as pd
from joblib import Parallel, delayed
ssl._create_default_https_context = ssl._create_unverified_context
def create_video_folders(dataset, output_dir, tmp_dir):
"""Creates a directory for each label name in the dataset."""
if 'label-name' not in dataset.columns:
this_dir = os.path.join(output_dir, 'test')
if not os.path.exists(this_dir):
os.makedirs(this_dir)
# I should return a dict but ...
return this_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
label_to_dir = {}
for label_name in dataset['label-name'].unique():
this_dir = os.path.join(output_dir, label_name)
if not os.path.exists(this_dir):
os.makedirs(this_dir)
label_to_dir[label_name] = this_dir
return label_to_dir
def construct_video_filename(row, label_to_dir, trim_format='%06d'):
"""Given a dataset row, this function constructs the output filename for a
given video."""
basename = '%s_%s_%s.mp4' % (row['video-id'],
trim_format % row['start-time'],
trim_format % row['end-time'])
if not isinstance(label_to_dir, dict):
dirname = label_to_dir
else:
dirname = label_to_dir[row['label-name']]
output_filename = os.path.join(dirname, basename)
return output_filename
def download_clip(video_identifier,
output_filename,
start_time,
end_time,
tmp_dir='/tmp/kinetics',
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
"""Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
start_time: float
Indicates the begining time in seconds from where the video
will be trimmed.
end_time: float
Indicates the ending time in seconds of the trimmed video.
"""
# Defensive argument checking.
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
# Construct command line for getting the direct video link.
tmp_filename = os.path.join(tmp_dir, '%s.%%(ext)s' % uuid.uuid4())
if not os.path.exists(output_filename):
if not os.path.exists(tmp_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings',
'--no-check-certificate', '-f', 'mp4', '-o',
'"%s"' % tmp_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
attempts += 1
if attempts == num_attempts:
return status, err.output
else:
break
tmp_filename = glob.glob('%s*' % tmp_filename.split('.')[0])[0]
# Construct command to trim the videos (ffmpeg required).
command = [
'ffmpeg', '-i',
'"%s"' % tmp_filename, '-ss',
str(start_time), '-t',
str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy',
'-threads', '1', '-loglevel', 'panic',
'"%s"' % output_filename
]
command = ' '.join(command)
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
return status, err.output
# Check if the video was successfully saved.
status = os.path.exists(output_filename)
os.remove(tmp_filename)
return status, 'Downloaded'
def download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir):
"""Wrapper for parallel processing purposes."""
output_filename = construct_video_filename(row, label_to_dir, trim_format)
clip_id = os.path.basename(output_filename).split('.mp4')[0]
if os.path.exists(output_filename):
status = tuple([clip_id, True, 'Exists'])
return status
downloaded, log = download_clip(
row['video-id'],
output_filename,
row['start-time'],
row['end-time'],
tmp_dir=tmp_dir)
status = tuple([clip_id, downloaded, log])
return status
def parse_kinetics_annotations(input_csv, ignore_is_cc=False):
"""Returns a parsed DataFrame.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'YouTube Identifier,Start time,End time,Class label'
returns:
-------
dataset: DataFrame
Pandas with the following columns:
'video-id', 'start-time', 'end-time', 'label-name'
"""
df = pd.read_csv(input_csv)
if 'youtube_id' in df.columns:
columns = OrderedDict([('youtube_id', 'video-id'),
('time_start', 'start-time'),
('time_end', 'end-time'),
('label', 'label-name')])
df.rename(columns=columns, inplace=True)
if ignore_is_cc:
df = df.loc[:, df.columns.tolist()[:-1]]
return df
def setup_new_annotations(input_csv, output_dir, subset):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
anno_dir = os.path.join(output_dir, '../annotations')
if not os.path.exists(anno_dir):
os.makedirs(anno_dir)
anno_file = os.path.join(anno_dir, input_csv.split('/')[-1])
fout = open(anno_file, 'w')
# read original annotation file
with open(input_csv, 'r') as f:
# write subset lines
for idx, line in enumerate(f.readlines()):
line_list = line.strip().split(',')
if idx > 0 and line_list[0] not in subset:
continue
fout.writelines("%s,%s,%s,%s,%s\n"%(line_list[0], line_list[1], line_list[2], line_list[3], line_list[4]))
fout.close()
return anno_file
def main(input_csv,
output_dir,
subset_file,
trim_format='%06d',
num_jobs=24,
tmp_dir='/tmp/kinetics'):
# read the subset class list
subset = []
with open(subset_file, 'r') as f:
for line in f.readlines():
subset.append(line.strip())
# setup the annotations of the subset
real_csv = setup_new_annotations(input_csv, output_dir, subset)
# Reading and parsing Kinetics.
dataset = parse_kinetics_annotations(real_csv)
# Creates folders where videos will be saved later.
label_to_dir = create_video_folders(dataset, output_dir, tmp_dir)
# Download all clips.
if num_jobs == 1:
status_list = []
for i, row in dataset.iterrows():
status_list.append(
download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir))
else:
status_list = Parallel(
n_jobs=num_jobs)(delayed(download_clip_wrapper)(
row, label_to_dir, trim_format, tmp_dir)
for i, row in dataset.iterrows())
# Clean tmp dir.
shutil.rmtree(tmp_dir)
# Save download report.
with open('download_report.json', 'w') as fobj:
fobj.write(json.dumps(status_list))
if __name__ == '__main__':
description = 'Helper script for downloading and trimming kinetics videos.'
p = argparse.ArgumentParser(description=description)
p.add_argument(
'input_csv',
type=str,
help=('CSV file containing the following format: '
'YouTube Identifier,Start time,End time,Class label'))
p.add_argument(
'output_dir',
type=str,
help='Output directory where videos will be saved.')
p.add_argument('-s', '--subset_file', type=str, help='the subset file of kinetics classes.')
p.add_argument(
'-f',
'--trim-format',
type=str,
default='%06d',
help=('This will be the format for the '
'filename of trimmed videos: '
'videoid_%0xd(start_time)_%0xd(end_time).mp4'))
p.add_argument('-n', '--num-jobs', type=int, default=24)
p.add_argument('-t', '--tmp-dir', type=str, default='/tmp/kinetics')
# help='CSV file of the previous version of Kinetics.')
main(**vars(p.parse_args()))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_subset.py |
import argparse
import os.path as osp
import subprocess
import mmcv
from joblib import Parallel, delayed
URL_PREFIX = 'https://s3.amazonaws.com/ava-dataset/trainval/'
def download_video(video_url, output_dir, num_attempts=5):
video_file = osp.basename(video_url)
output_file = osp.join(output_dir, video_file)
status = False
if not osp.exists(output_file):
command = ['wget', '-c', video_url, '-P', output_dir]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
attempts += 1
if attempts == num_attempts:
return status, 'Downloading Failed'
else:
break
status = osp.exists(output_file)
return status, 'Downloaded'
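# Usage sketch (hypothetical filename): a single AVA video can be fetched with
#   ok, msg = download_video(URL_PREFIX + 'someVideo.mp4', 'data/ava/videos')
# wget's '-c' flag resumes partial downloads; after num_attempts failed calls the
# function returns (False, 'Downloading Failed'), otherwise (True, 'Downloaded').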
def main(source_file, output_dir, num_jobs=24, num_attempts=5):
mmcv.mkdir_or_exist(output_dir)
video_list = open(source_file).read().strip().split('\n')
video_list = [osp.join(URL_PREFIX, video) for video in video_list]
if num_jobs == 1:
status_list = []
for video in video_list:
            status_list.append(download_video(video, output_dir, num_attempts))
else:
status_list = Parallel(n_jobs=num_jobs)(
delayed(download_video)(video, output_dir, num_attempts)
for video in video_list)
mmcv.dump(status_list, 'download_report.json')
if __name__ == '__main__':
description = 'Helper script for downloading AVA videos'
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'source_file', type=str, help='TXT file containing the video filename')
parser.add_argument(
'output_dir',
type=str,
help='Output directory where videos will be saved')
parser.add_argument('-n', '--num-jobs', type=int, default=24)
parser.add_argument('--num-attempts', type=int, default=5)
main(**vars(parser.parse_args()))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos_parallel.py |
# ------------------------------------------------------------------------------
# Adapted from https://github.com/activitynet/ActivityNet/
# Original licence: Copyright (c) Microsoft, under the MIT License.
# ------------------------------------------------------------------------------
import argparse
import glob
import os
import shutil
import ssl
import subprocess
import uuid
import mmcv
from joblib import Parallel, delayed
ssl._create_default_https_context = ssl._create_unverified_context
args = None
def create_video_folders(dataset, output_dir, tmp_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
def construct_video_filename(item, trim_format, output_dir):
    """Given an annotation item (youtube_id, start_time, end_time), construct the
    output filename of the trimmed clip."""
youtube_id, start_time, end_time = item
start_time, end_time = int(start_time * 10), int(end_time * 10)
basename = '%s_%s_%s.mp4' % (youtube_id, trim_format % start_time,
trim_format % end_time)
output_filename = os.path.join(output_dir, basename)
return output_filename
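# Example (sketch): with trim_format '%06d' and item ('dQw4w9WgXcQ', 1.5, 4.0),
# the times become 15 and 40 (seconds are multiplied by 10 to preserve one decimal
# place), so the clip path is <output_dir>/dQw4w9WgXcQ_000015_000040.mp4.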
def download_clip(video_identifier,
output_filename,
start_time,
end_time,
tmp_dir='/tmp/hvu',
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
"""Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
start_time: float
        Indicates the beginning time in seconds from where the video
will be trimmed.
end_time: float
Indicates the ending time in seconds of the trimmed video.
"""
# Defensive argument checking.
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
tmp_filename = os.path.join(tmp_dir, '%s.%%(ext)s' % uuid.uuid4())
if not os.path.exists(output_filename):
if not os.path.exists(tmp_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings',
'--no-check-certificate', '-f', 'mp4', '-o',
'"%s"' % tmp_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
attempts += 1
if attempts == num_attempts:
return status, 'Downloading Failed'
else:
break
tmp_filename = glob.glob('%s*' % tmp_filename.split('.')[0])[0]
# Construct command to trim the videos (ffmpeg required).
command = [
'ffmpeg', '-i',
'"%s"' % tmp_filename, '-ss',
str(start_time), '-t',
str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy',
'-threads', '1', '-loglevel', 'panic',
'"%s"' % output_filename
]
command = ' '.join(command)
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return status, 'Trimming Failed'
# Check if the video was successfully saved.
status = os.path.exists(output_filename)
os.remove(tmp_filename)
return status, 'Downloaded'
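# Usage sketch (hypothetical id/path): the clip is produced in two stages -- the
# full video is pulled with youtube-dl into tmp_dir, then ffmpeg cuts
# [start_time, end_time) re-encoding video with libx264 and copying audio:
#   ok, msg = download_clip('dQw4w9WgXcQ', '/data/hvu/videos/clip.mp4', 5.0, 15.0)
# msg is one of 'Downloaded', 'Downloading Failed' or 'Trimming Failed'.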
def download_clip_wrapper(item, trim_format, tmp_dir, output_dir):
"""Wrapper for parallel processing purposes."""
output_filename = construct_video_filename(item, trim_format, output_dir)
clip_id = os.path.basename(output_filename).split('.mp4')[0]
if os.path.exists(output_filename):
status = tuple([clip_id, True, 'Exists'])
return status
youtube_id, start_time, end_time = item
downloaded, log = download_clip(
youtube_id, output_filename, start_time, end_time, tmp_dir=tmp_dir)
status = tuple([clip_id, downloaded, log])
return status
def parse_hvu_annotations(input_csv):
"""Returns a parsed DataFrame.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'Tags, youtube_id, time_start, time_end'
returns:
-------
dataset: List of tuples. Each tuple consists of
(youtube_id, time_start, time_end). The type of time is float.
"""
lines = open(input_csv).readlines()
lines = [x.strip().split(',')[1:] for x in lines[1:]]
lines = [(x[0], float(x[1]), float(x[2])) for x in lines]
return lines
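# Example (sketch, assuming the tag field contains no commas): a row such as
#   'driving|car|road,dQw4w9WgXcQ,5.0,15.0'
# is parsed into ('dQw4w9WgXcQ', 5.0, 15.0); the tag field is discarded by the
# downloader and handled separately by generate_file_list.py.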
def main(input_csv,
output_dir,
trim_format='%06d',
num_jobs=24,
tmp_dir='/tmp/hvu'):
# Reading and parsing HVU.
dataset = parse_hvu_annotations(input_csv)
# Creates folders where videos will be saved later.
create_video_folders(dataset, output_dir, tmp_dir)
# Download all clips.
if num_jobs == 1:
status_lst = []
for item in dataset:
status_lst.append(
download_clip_wrapper(item, trim_format, tmp_dir, output_dir))
else:
status_lst = Parallel(n_jobs=num_jobs)(
delayed(download_clip_wrapper)(item, trim_format, tmp_dir,
output_dir) for item in dataset)
# Clean tmp dir.
shutil.rmtree(tmp_dir)
# Save download report.
mmcv.dump(status_lst, 'download_report.json')
if __name__ == '__main__':
description = 'Helper script for downloading and trimming HVU videos.'
p = argparse.ArgumentParser(description=description)
p.add_argument(
'input_csv',
type=str,
help=('CSV file containing the following format: '
'Tags, youtube_id, time_start, time_end'))
p.add_argument(
'output_dir',
type=str,
help='Output directory where videos will be saved.')
p.add_argument(
'-f',
'--trim-format',
type=str,
default='%06d',
help=('This will be the format for the '
'filename of trimmed videos: '
'videoid_%0xd(start_time)_%0xd(end_time).mp4. '
              'Note that start_time and end_time are multiplied by 10 '
              'because the annotation timestamps keep one decimal place.'))
p.add_argument('-n', '--num-jobs', type=int, default=24)
p.add_argument('-t', '--tmp-dir', type=str, default='/tmp/hvu')
main(**vars(p.parse_args()))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/hvu/download.py |
import mmcv
tag_list = '../../../data/hvu/annotations/hvu_categories.csv'
lines = open(tag_list).readlines()
lines = [x.strip().split(',') for x in lines[1:]]
tag_categories = {}
for line in lines:
tag, category = line
tag_categories.setdefault(category, []).append(tag)
for k in tag_categories:
tag_categories[k].sort()
mmcv.dump(tag_categories, 'hvu_tags.json')
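# The resulting hvu_tags.json maps each category to its sorted tag list, roughly
# (illustrative values):
#   {"action": ["abseiling", ...], "object": ["car", ...], "scene": [...], ...}
# generate_file_list.py later uses a tag's index within these sorted lists as its
# per-category label id.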
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/hvu/parse_tag_list.py |
import argparse
import os.path as osp
import mmcv
def main(annotation_file, category):
assert category in [
'action', 'attribute', 'concept', 'event', 'object', 'scene'
]
data = mmcv.load(annotation_file)
basename = osp.basename(annotation_file)
dirname = osp.dirname(annotation_file)
basename = basename.replace('hvu', f'hvu_{category}')
target_file = osp.join(dirname, basename)
result = []
for item in data:
label = item['label']
if category in label:
item['label'] = label[category]
result.append(item)
    mmcv.dump(result, target_file)
if __name__ == '__main__':
description = 'Helper script for generating HVU per-category file list.'
p = argparse.ArgumentParser(description=description)
p.add_argument(
'annotation_file',
type=str,
help=('The annotation file which contains tags of all categories.'))
p.add_argument(
'category',
type=str,
choices=['action', 'attribute', 'concept', 'event', 'object', 'scene'],
help='The tag category that you want to generate file list for.')
main(**vars(p.parse_args()))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_sub_file_list.py |
import argparse
import fnmatch
import glob
import os
import os.path as osp
import mmcv
annotation_root = '../../data/hvu/annotations'
tag_file = 'hvu_tags.json'
args = None
def parse_directory(path,
rgb_prefix='img_',
flow_x_prefix='flow_x_',
flow_y_prefix='flow_y_',
level=1):
"""Parse directories holding extracted frames from standard benchmarks.
Args:
path (str): Directory path to parse frames.
rgb_prefix (str): Prefix of generated rgb frames name.
default: 'img_'.
flow_x_prefix (str): Prefix of generated flow x name.
default: `flow_x_`.
flow_y_prefix (str): Prefix of generated flow y name.
default: `flow_y_`.
level (int): Directory level for glob searching. Options are 1 and 2.
default: 1.
Returns:
dict: frame info dict with video id as key and tuple(path(str),
rgb_num(int), flow_x_num(int)) as value.
"""
print(f'parse frames under directory {path}')
if level == 1:
# Only search for one-level directory
def locate_directory(x):
return osp.basename(x)
frame_dirs = glob.glob(osp.join(path, '*'))
elif level == 2:
# search for two-level directory
def locate_directory(x):
return osp.join(osp.basename(osp.dirname(x)), osp.basename(x))
frame_dirs = glob.glob(osp.join(path, '*', '*'))
else:
raise ValueError('level can be only 1 or 2')
def count_files(directory, prefix_list):
"""Count file number with a given directory and prefix.
Args:
directory (str): Data directory to be search.
prefix_list (list): List or prefix.
Returns:
list (int): Number list of the file with the prefix.
"""
lst = os.listdir(directory)
cnt_list = [len(fnmatch.filter(lst, x + '*')) for x in prefix_list]
return cnt_list
# check RGB
frame_dict = {}
for i, frame_dir in enumerate(frame_dirs):
total_num = count_files(frame_dir,
(rgb_prefix, flow_x_prefix, flow_y_prefix))
dir_name = locate_directory(frame_dir)
num_x = total_num[1]
num_y = total_num[2]
if num_x != num_y:
raise ValueError(f'x and y direction have different number '
f'of flow images in video directory: {frame_dir}')
if i % 200 == 0:
print(f'{i} videos parsed')
frame_dict[dir_name] = (frame_dir, total_num[0], num_x)
print('frame directory analysis done')
return frame_dict
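# Example (sketch): for a frame folder 'abc_000015_000040' holding 120 rgb frames
# ('img_*') and 118 flow frames per direction, the returned dict contains
#   {'abc_000015_000040': ('<path>/abc_000015_000040', 120, 118)}
# i.e. (directory path, number of rgb frames, number of flow-x frames).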
def parse_args():
parser = argparse.ArgumentParser(description='build file list for HVU')
parser.add_argument('--input_csv', type=str, help='path of input csv file')
parser.add_argument(
'--src_dir', type=str, help='source video / frames directory')
parser.add_argument(
'--output',
type=str,
        help='output filename, should end with .json')
parser.add_argument(
'--mode',
type=str,
choices=['frames', 'videos'],
help='generate file list for frames or videos')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
tag_cates = mmcv.load(tag_file)
tag2category = {}
for k in tag_cates:
for tag in tag_cates[k]:
tag2category[tag] = k
data_list = open(args.input_csv).readlines()
data_list = [x.strip().split(',') for x in data_list[1:]]
if args.mode == 'videos':
downloaded = os.listdir(args.src_dir)
downloaded = [x.split('.')[0] for x in downloaded]
downloaded_set = set(downloaded)
else:
parse_result = parse_directory(args.src_dir)
downloaded_set = set(parse_result)
def parse_line(line):
tags, youtube_id, start, end = line
start, end = int(float(start) * 10), int(float(end) * 10)
newname = f'{youtube_id}_{start:06d}_{end:06d}'
tags = tags.split('|')
all_tags = {}
for tag in tags:
category = tag2category[tag]
all_tags.setdefault(category,
[]).append(tag_cates[category].index(tag))
return newname, all_tags
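    # Example (sketch with hypothetical tags): the row
    #   ('driving|car', 'dQw4w9WgXcQ', '5.0', '15.0')
    # maps to ('dQw4w9WgXcQ_000050_000150',
    #          {'action': [<index of 'driving'>], 'object': [<index of 'car'>]})
    # where each index is the tag's position inside its sorted per-category list.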
data_list = [parse_line(line) for line in data_list]
data_list = [line for line in data_list if line[0] in downloaded_set]
if args.mode == 'frames':
result = [
dict(
frame_dir=k[0], total_frames=parse_result[k[0]][1], label=k[1])
for k in data_list
]
elif args.mode == 'videos':
result = [dict(filename=k[0] + '.mp4', label=k[1]) for k in data_list]
mmcv.dump(result, args.output)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_file_list.py |
import os, argparse
from sklearn.utils import shuffle
def parse_args():
parser = argparse.ArgumentParser(description='Build file list')
parser.add_argument('dataset', type=str, choices=['mimetics10', 'mimetics'], help='dataset to be built file list')
parser.add_argument('src_folder', type=str, help='root directory for the frames or videos')
parser.add_argument('list_file', type=str, help='file list result')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
filelist, labels = [], []
for cls_id, labelname in enumerate(sorted(os.listdir(args.src_folder))):
video_path = os.path.join(args.src_folder, labelname)
for videoname in os.listdir(video_path):
# get the video file
video_file = os.path.join(labelname, videoname)
filelist.append(video_file)
# get the label
labels.append(str(cls_id))
filelist, labels = shuffle(filelist, labels)
with open(args.list_file, 'w') as f:
for filepath, label in zip(filelist, labels):
print(filepath, label)
f.writelines('%s %s\n'%(filepath, label))
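# Each written line is '<class_folder>/<video_file> <class_id>' (illustratively
# 'archery/v_abc123.mp4 0'); rows are shuffled and class ids follow the
# alphabetical order of the class folders under src_folder.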
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/mimetics/build_file_list.py |
# This scripts is copied from
# https://github.com/activitynet/ActivityNet/blob/master/Crawler/Kinetics/download.py # noqa: E501
# The code is licensed under the MIT licence.
import argparse
import os
import ssl
import subprocess
import mmcv
from joblib import Parallel, delayed
import youtube_dl
import glob
ssl._create_default_https_context = ssl._create_unverified_context
# def download(video_identifier,
# output_filename,
# num_attempts=5,
# url_base='https://www.youtube.com/watch?v='):
# """Download a video from youtube if exists and is not blocked.
# arguments:
# ---------
# video_identifier: str
# Unique YouTube video identifier (11 characters)
# output_filename: str
# File path where the video will be stored.
# """
# # Defensive argument checking.
# assert isinstance(video_identifier, str), 'video_identifier must be string'
# assert isinstance(output_filename, str), 'output_filename must be string'
# assert len(video_identifier) == 11, 'video_identifier must have length 11'
# status = False
# if not os.path.exists(output_filename):
# command = [
# 'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',
# '-f', 'mp4', '-o',
# '"%s"' % output_filename,
# '"%s"' % (url_base + video_identifier)
# ]
# command = ' '.join(command)
# print(command)
# attempts = 0
# while True:
# try:
# subprocess.check_output(
# command, shell=True, stderr=subprocess.STDOUT)
# except subprocess.CalledProcessError:
# attempts += 1
# if attempts == num_attempts:
# return status, 'Fail'
# else:
# break
# # Check if the video was successfully saved.
# status = os.path.exists(output_filename)
# return status, 'Downloaded'
# def download_wrapper(youtube_id, output_dir):
# """Wrapper for parallel processing purposes."""
# # we do this to align with names in annotations
# output_filename = os.path.join(output_dir, youtube_id + '.mp4')
# if os.path.exists(output_filename):
# status = tuple([youtube_id, True, 'Exists'])
# return status
# downloaded, log = download(youtube_id, output_filename)
# status = tuple([youtube_id, downloaded, log])
# return status
def main(input, output_dir, num_jobs=24):
    # Read and parse the GYM annotation file.
youtube_ids = mmcv.load(input).keys()
# Creates folders where videos will be saved later.
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Download all clips.
    # Note: youtube_dl option names use underscores (no_warnings, ignoreerrors,
    # nocheckcertificate); unrecognized keys are silently ignored.
    ydl_opt = {'outtmpl': output_dir + '%(id)s.%(ext)s',
               'format': 'mp4',
               'quiet': True,
               'no_warnings': True,
               'ignoreerrors': True,
               'nocheckcertificate': True}
ydl = youtube_dl.YoutubeDL(ydl_opt)
url_list = []
for vid in youtube_ids:
url = "https://www.youtube.com/watch?v=" + vid + "&has_verified=1"
url_list.append(url)
print('Number of videos to be downloaded: %d'%(len(url_list)))
failed_files = './failed_list.txt'
with open(failed_files, 'w') as f:
for vid, url in zip(youtube_ids, url_list):
filename = os.path.join(output_dir, vid + '.mp4')
if not os.path.exists(filename):
f.writelines(url + '\n')
try:
ydl.download(url_list)
    except Exception as err:
        print('Errors occurred during download: %s' % err)
print("Download finished!")
all_videos = sorted(glob.glob(output_dir + '*.mp4'))
print("Number of videos: ", len(all_videos))
# if num_jobs == 1:
# status_list = []
# for index in youtube_ids:
# status_list.append(download_wrapper(index, output_dir))
# else:
# status_list = Parallel(n_jobs=num_jobs)(
# delayed(download_wrapper)(index, output_dir)
# for index in youtube_ids)
# # Save download report.
# mmcv.dump(status_list, 'download_report.json')
if __name__ == '__main__':
description = 'Helper script for downloading GYM videos.'
p = argparse.ArgumentParser(description=description)
p.add_argument('input', type=str, help='The gym annotation file')
p.add_argument(
'output_dir', type=str, help='Output directory to save videos.')
p.add_argument('-n', '--num-jobs', type=int, default=24)
main(**vars(p.parse_args()))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/gym/download_ytdl.py |
# This scripts is copied from
# https://github.com/activitynet/ActivityNet/blob/master/Crawler/Kinetics/download.py # noqa: E501
# The code is licensed under the MIT licence.
import argparse
import os
import ssl
import subprocess
import mmcv
from joblib import Parallel, delayed
ssl._create_default_https_context = ssl._create_unverified_context
def download(video_identifier,
output_filename,
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
"""Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
"""
# Defensive argument checking.
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
if not os.path.exists(output_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',
'-f', 'mp4', '-o',
'"%s"' % output_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
attempts += 1
if attempts == num_attempts:
return status, 'Fail'
else:
break
# Check if the video was successfully saved.
status = os.path.exists(output_filename)
return status, 'Downloaded'
def download_wrapper(youtube_id, output_dir):
"""Wrapper for parallel processing purposes."""
# we do this to align with names in annotations
output_filename = os.path.join(output_dir, youtube_id + '.mp4')
if os.path.exists(output_filename):
status = tuple([youtube_id, True, 'Exists'])
return status
downloaded, log = download(youtube_id, output_filename)
status = tuple([youtube_id, downloaded, log])
return status
def main(input, output_dir, num_jobs=24):
    # Read and parse the GYM annotation file.
youtube_ids = mmcv.load(input).keys()
# Creates folders where videos will be saved later.
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Download all clips.
if num_jobs == 1:
status_list = []
for index in youtube_ids:
status_list.append(download_wrapper(index, output_dir))
else:
status_list = Parallel(n_jobs=num_jobs)(
delayed(download_wrapper)(index, output_dir)
for index in youtube_ids)
# Save download report.
mmcv.dump(status_list, 'download_report.json')
if __name__ == '__main__':
description = 'Helper script for downloading GYM videos.'
p = argparse.ArgumentParser(description=description)
p.add_argument('input', type=str, help='The gym annotation file')
p.add_argument(
'output_dir', type=str, help='Output directory to save videos.')
p.add_argument('-n', '--num-jobs', type=int, default=24)
main(**vars(p.parse_args()))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/gym/download.py |
import os
import os.path as osp
import subprocess
import mmcv
data_root = '../../../data/gym'
anno_root = f'{data_root}/annotations'
event_anno_file = f'{anno_root}/event_annotation.json'
event_root = f'{data_root}/events'
subaction_root = f'{data_root}/subactions'
events = os.listdir(event_root)
events = set(events)
annotation = mmcv.load(event_anno_file)
mmcv.mkdir_or_exist(subaction_root)
for k, v in annotation.items():
if k + '.mp4' not in events:
print(f'video {k[:11]} has not been downloaded '
f'or the event clip {k} not generated')
continue
video_path = osp.join(event_root, k + '.mp4')
for subaction_id, subaction_anno in v.items():
timestamps = subaction_anno['timestamps']
start_time, end_time = timestamps[0][0], timestamps[-1][1]
subaction_name = k + '_' + subaction_id
output_filename = subaction_name + '.mp4'
command = [
'ffmpeg', '-i',
'"%s"' % video_path, '-ss',
str(start_time), '-t',
str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy',
'-threads', '8', '-loglevel', 'panic',
'"%s"' % osp.join(subaction_root, output_filename)
]
command = ' '.join(command)
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
print(
f'Trimming of the Subaction {subaction_name} of Event '
f'{k} Failed',
flush=True)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/gym/trim_subaction.py |
import os
import os.path as osp
import subprocess
import mmcv
data_root = '../../../data/gym'
video_root = f'{data_root}/videos'
anno_root = f'{data_root}/annotations'
anno_file = f'{anno_root}/annotation.json'
event_anno_file = f'{anno_root}/event_annotation.json'
event_root = f'{data_root}/events'
videos = os.listdir(video_root)
videos = set(videos)
annotation = mmcv.load(anno_file)
event_annotation = {}
mmcv.mkdir_or_exist(event_root)
for k, v in annotation.items():
if k + '.mp4' not in videos:
print(f'video {k} has not been downloaded')
continue
video_path = osp.join(video_root, k + '.mp4')
for event_id, event_anno in v.items():
timestamps = event_anno['timestamps'][0]
start_time, end_time = timestamps
event_name = k + '_' + event_id
output_filename = event_name + '.mp4'
if not os.path.exists(osp.join(event_root, output_filename)):
command = [
'ffmpeg', '-i',
'"%s"' % video_path, '-ss',
str(start_time), '-t',
str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy',
'-threads', '8', '-loglevel', 'panic',
'"%s"' % osp.join(event_root, output_filename)
]
command = ' '.join(command)
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
print(
f'Trimming of the Event {event_name} of Video {k} Failed',
flush=True)
else:
print("Result event file exists, ignored!")
segments = event_anno['segments']
if segments is not None:
event_annotation[event_name] = segments
mmcv.dump(event_annotation, event_anno_file)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/gym/trim_event.py |
import os
import os.path as osp
annotation_root = '../../../data/gym/annotations'
data_root = '../../../data/gym/subactions'
frame_data_root = '../../../data/gym/subaction_frames'
videos = os.listdir(data_root)
videos = set(videos)
train_file_org = osp.join(annotation_root, 'gym99_train_org.txt')
val_file_org = osp.join(annotation_root, 'gym99_val_org.txt')
train_file = osp.join(annotation_root, 'gym99_train.txt')
val_file = osp.join(annotation_root, 'gym99_val.txt')
train_frame_file = osp.join(annotation_root, 'gym99_train_frame.txt')
val_frame_file = osp.join(annotation_root, 'gym99_val_frame.txt')
train_org = open(train_file_org).readlines()
train_org = [x.strip().split() for x in train_org]
train = [x for x in train_org if x[0] + '.mp4' in videos]
if osp.exists(frame_data_root):
train_frames = []
for line in train:
length = len(os.listdir(osp.join(frame_data_root, line[0])))
train_frames.append([line[0], str(length // 3), line[1]])
train_frames = [' '.join(x) for x in train_frames]
with open(train_frame_file, 'w') as fout:
fout.write('\n'.join(train_frames))
train = [x[0] + '.mp4 ' + x[1] for x in train]
with open(train_file, 'w') as fout:
fout.write('\n'.join(train))
val_org = open(val_file_org).readlines()
val_org = [x.strip().split() for x in val_org]
val = [x for x in val_org if x[0] + '.mp4' in videos]
if osp.exists(frame_data_root):
val_frames = []
for line in val:
length = len(os.listdir(osp.join(frame_data_root, line[0])))
val_frames.append([line[0], str(length // 3), line[1]])
val_frames = [' '.join(x) for x in val_frames]
with open(val_frame_file, 'w') as fout:
fout.write('\n'.join(val_frames))
val = [x[0] + '.mp4 ' + x[1] for x in val]
with open(val_file, 'w') as fout:
fout.write('\n'.join(val))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/gym/generate_file_list.py |
import os
from tqdm import tqdm
def fix_listfile(file_split, phase):
assert os.path.exists(file_split), 'File does not exist! %s'%(file_split)
filename = file_split.split('/')[-1]
file_split_new = os.path.join(dataset_path, 'temp_' + filename)
with open(file_split_new, 'w') as fw:
with open(file_split, 'r') as fr:
for line in tqdm(fr.readlines(), desc=phase):
foldername = line.split('/')[0]
                if foldername == phase:
                    # already prefixed with the phase folder; keep the line unchanged
                    fw.write(line)
                else:
                    fw.write(phase + '/' + line)
os.remove(file_split)
os.rename(file_split_new, file_split)
if __name__ == '__main__':
dataset_path = '../../../data/mit'
file_split = os.path.join(dataset_path, 'mit_train_list_videos.txt')
fix_listfile(file_split, 'training')
file_split = os.path.join(dataset_path, 'mit_val_list_videos.txt')
fix_listfile(file_split, 'validation') | InternVideo-main | Downstream/Open-Set-Action-Recognition/tools/data/mit/fix_video_filelist.py |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.lines import Line2D
def draw_curves():
fig = plt.figure(figsize=(8,5))
plt.rcParams["font.family"] = "Arial"
fontsize = 15
markersize = 80
# I3D
I3D_DNN_HMDB = [94.69, 75.07] # (closed-set ACC, open-set AUC)
I3D_DNN_MiT = [94.69, 79.14]
I3D_DEAR_HMDB = [94.34, 77.08]
I3D_DEAR_MiT = [94.34, 81.54]
# TSM
TSM_DNN_HMDB = [95.11, 73.85]
TSM_DNN_MiT = [95.11, 78.35]
TSM_DEAR_HMDB = [94.45, 78.65]
TSM_DEAR_MiT = [94.45, 83.92]
# TPN
TPN_DNN_HMDB = [95.41, 74.13]
TPN_DNN_MiT = [95.41, 77.76]
TPN_DEAR_HMDB = [96.42, 79.23]
TPN_DEAR_MiT = [96.42, 81.80]
# SlowFast
SlowFast_DNN_HMDB = [96.72, 75.41]
SlowFast_DNN_MiT = [96.72, 78.49]
SlowFast_DEAR_HMDB = [96.66, 82.94]
SlowFast_DEAR_MiT = [96.66, 86.99]
# Line: DNN for HMDB
plt.plot([I3D_DNN_HMDB[0], TSM_DNN_HMDB[0], TPN_DNN_HMDB[0], SlowFast_DNN_HMDB[0]],
[I3D_DNN_HMDB[1], TSM_DNN_HMDB[1], TPN_DNN_HMDB[1], SlowFast_DNN_HMDB[1]], 'r-', linewidth=2, label='HMDB')
# Line: DEAR for HMDB
plt.plot([I3D_DEAR_HMDB[0], TSM_DEAR_HMDB[0], TPN_DEAR_HMDB[0], SlowFast_DEAR_HMDB[0]],
[I3D_DEAR_HMDB[1], TSM_DEAR_HMDB[1], TPN_DEAR_HMDB[1], SlowFast_DEAR_HMDB[1]], 'r-', linewidth=2)
# Line: DNN for MiT
plt.plot([I3D_DNN_MiT[0], TSM_DNN_MiT[0], TPN_DNN_MiT[0], SlowFast_DNN_MiT[0]],
[I3D_DNN_MiT[1], TSM_DNN_MiT[1], TPN_DNN_MiT[1], SlowFast_DNN_MiT[1]], 'b-', linewidth=2, label='MiT')
# Line: DEAR for MiT
plt.plot([I3D_DEAR_MiT[0], TSM_DEAR_MiT[0], TPN_DEAR_MiT[0], SlowFast_DEAR_MiT[0]],
[I3D_DEAR_MiT[1], TSM_DEAR_MiT[1], TPN_DEAR_MiT[1], SlowFast_DEAR_MiT[1]], 'b-', linewidth=2)
# Draw all I3D points
# HMDB
plt.scatter(I3D_DNN_HMDB[0], I3D_DNN_HMDB[1], marker='^', s=markersize, color='r', label='Dropout BALD')
plt.text(I3D_DNN_HMDB[0], I3D_DNN_HMDB[1], 'I3D', fontsize=fontsize)
plt.scatter(I3D_DEAR_HMDB[0], I3D_DEAR_HMDB[1], marker='*', s=markersize, color='r', label='DEAR EU')
plt.text(I3D_DEAR_HMDB[0], I3D_DEAR_HMDB[1], 'I3D', fontsize=fontsize)
plt.plot([I3D_DNN_HMDB[0], I3D_DEAR_HMDB[0]], [I3D_DNN_HMDB[1], I3D_DEAR_HMDB[1]], 'r--', linewidth=0.5)
# plt.arrow(I3D_DNN_HMDB[0]+1, I3D_DNN_HMDB[1], I3D_DEAR_HMDB[0]-I3D_DNN_HMDB[0]-2, I3D_DEAR_HMDB[1]-I3D_DNN_HMDB[1]-1,head_width=0.8, fc='skyblue',ec='skyblue', head_length=0.8)
# # MiT
plt.scatter(I3D_DNN_MiT[0], I3D_DNN_MiT[1], marker='^', s=markersize, color='b')
plt.text(I3D_DNN_MiT[0], I3D_DNN_MiT[1], 'I3D', fontsize=fontsize)
plt.scatter(I3D_DEAR_MiT[0], I3D_DEAR_MiT[1], marker='*', s=markersize, color='b')
plt.text(I3D_DEAR_MiT[0], I3D_DEAR_MiT[1], 'I3D', fontsize=fontsize)
plt.plot([I3D_DNN_MiT[0], I3D_DEAR_MiT[0]], [I3D_DNN_MiT[1], I3D_DEAR_MiT[1]], 'b--', linewidth=0.5)
# plt.arrow(I3D_DNN_MiT[0]+1, I3D_DNN_MiT[1], I3D_DEAR_MiT[0]-I3D_DNN_MiT[0]-3, I3D_DEAR_MiT[1]-I3D_DNN_MiT[1]-2,head_width=0.8, fc='grey',ec='grey', head_length=0.8)
# Draw all TSM points
# HMDB
plt.scatter(TSM_DNN_HMDB[0], TSM_DNN_HMDB[1], marker='^', s=markersize, color='r')
plt.text(TSM_DNN_HMDB[0], TSM_DNN_HMDB[1], 'TSM', fontsize=fontsize)
plt.scatter(TSM_DEAR_HMDB[0], TSM_DEAR_HMDB[1], marker='*', s=markersize, color='r')
plt.text(TSM_DEAR_HMDB[0], TSM_DEAR_HMDB[1], 'TSM', fontsize=fontsize)
plt.plot([TSM_DNN_HMDB[0], TSM_DEAR_HMDB[0]], [TSM_DNN_HMDB[1], TSM_DEAR_HMDB[1]], 'r--', linewidth=0.5)
# plt.arrow(TSM_DNN_HMDB[0]+1, TSM_DNN_HMDB[1], TSM_DEAR_HMDB[0]-TSM_DNN_HMDB[0]-2, TSM_DEAR_HMDB[1]-TSM_DNN_HMDB[1]-1,head_width=0.8, fc='skyblue',ec='skyblue', head_length=0.8)
# # MiT
plt.scatter(TSM_DNN_MiT[0], TSM_DNN_MiT[1], marker='^', s=markersize, color='b')
plt.text(TSM_DNN_MiT[0], TSM_DNN_MiT[1], 'TSM', fontsize=fontsize)
plt.scatter(TSM_DEAR_MiT[0], TSM_DEAR_MiT[1], marker='*', s=markersize, color='b')
plt.text(TSM_DEAR_MiT[0], TSM_DEAR_MiT[1], 'TSM', fontsize=fontsize)
plt.plot([TSM_DNN_MiT[0], TSM_DEAR_MiT[0]], [TSM_DNN_MiT[1], TSM_DEAR_MiT[1]], 'b--', linewidth=0.5)
# Draw all TPN points
# HMDB
plt.scatter(TPN_DNN_HMDB[0], TPN_DNN_HMDB[1], marker='^', s=markersize, color='r')
plt.text(TPN_DNN_HMDB[0], TPN_DNN_HMDB[1], 'TPN', fontsize=fontsize)
plt.scatter(TPN_DEAR_HMDB[0], TPN_DEAR_HMDB[1], marker='*', s=markersize, color='r')
plt.text(TPN_DEAR_HMDB[0], TPN_DEAR_HMDB[1], 'TPN', fontsize=fontsize)
plt.plot([TPN_DNN_HMDB[0], TPN_DEAR_HMDB[0]], [TPN_DNN_HMDB[1], TPN_DEAR_HMDB[1]], 'r--', linewidth=0.5)
# plt.arrow(TPN_DNN_HMDB[0]+1, TPN_DNN_HMDB[1], TPN_DEAR_HMDB[0]-TPN_DNN_HMDB[0]-2, TPN_DEAR_HMDB[1]-TPN_DNN_HMDB[1]-1,head_width=0.8, fc='skyblue',ec='skyblue', head_length=0.8)
plt.scatter(TPN_DNN_MiT[0], TPN_DNN_MiT[1], marker='^', s=markersize, color='b')
plt.text(TPN_DNN_MiT[0], TPN_DNN_MiT[1], 'TPN', fontsize=fontsize)
plt.scatter(TPN_DEAR_MiT[0], TPN_DEAR_MiT[1], marker='*', s=markersize, color='b')
plt.text(TPN_DEAR_MiT[0], TPN_DEAR_MiT[1], 'TPN', fontsize=fontsize)
plt.plot([TPN_DNN_MiT[0], TPN_DEAR_MiT[0]], [TPN_DNN_MiT[1], TPN_DEAR_MiT[1]], 'b--', linewidth=0.5)
# Draw all SlowFast points
# HMDB
plt.scatter(SlowFast_DNN_HMDB[0], SlowFast_DNN_HMDB[1], marker='^', s=markersize, color='r')
plt.text(SlowFast_DNN_HMDB[0], SlowFast_DNN_HMDB[1], 'SlowFast', fontsize=fontsize)
plt.scatter(SlowFast_DEAR_HMDB[0], SlowFast_DEAR_HMDB[1], marker='*', s=markersize, color='r')
plt.text(SlowFast_DEAR_HMDB[0], SlowFast_DEAR_HMDB[1], 'SlowFast', fontsize=fontsize)
plt.plot([SlowFast_DNN_HMDB[0], SlowFast_DEAR_HMDB[0]], [SlowFast_DNN_HMDB[1], SlowFast_DEAR_HMDB[1]], 'r--', linewidth=0.5)
# MiT
plt.scatter(SlowFast_DNN_MiT[0], SlowFast_DNN_MiT[1], marker='^', s=markersize, color='b')
plt.text(SlowFast_DNN_MiT[0], SlowFast_DNN_MiT[1], 'SlowFast', fontsize=fontsize)
plt.scatter(SlowFast_DEAR_MiT[0], SlowFast_DEAR_MiT[1], marker='*', s=markersize, color='b')
plt.text(SlowFast_DEAR_MiT[0], SlowFast_DEAR_MiT[1], 'SlowFast', fontsize=fontsize)
plt.plot([SlowFast_DNN_MiT[0], SlowFast_DEAR_MiT[0]], [SlowFast_DNN_MiT[1], SlowFast_DEAR_MiT[1]], 'b--', linewidth=0.5)
plt.xlim(94, 97.5)
plt.ylim(65, 90)
plt.xlabel('Closed-Set Accuracy (%)', fontsize=fontsize)
plt.ylabel('Open-Set AUC Score (%)', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.legend(loc='lower left', fontsize=fontsize)
plt.grid('on', linestyle='--')
plt.tight_layout()
plt.savefig('../temp/compare_gain.png')
plt.savefig('../temp/compare_gain.pdf')
def draw_one_curve(data_dict, markers, markercolor='g', markersize=80, fontsize=10, label='I3D', linestyle='g-', add_marker_text=False, text_offset=[0,0]):
sorted_dict = dict(sorted(data_dict.items(),key=lambda x:x[1][1]))
x_data, y_data = [], []
for k, v in sorted_dict.items():
x_data.append(v[1])
y_data.append(v[0])
# Marker: OpenMax
if k == 'DEAR (Ours)':
plt.scatter(v[1], v[0], marker=markers[k], s=markersize*4, color=markercolor)
else:
plt.scatter(v[1], v[0], marker=markers[k], s=markersize, color=markercolor)
if add_marker_text:
# plt.text(v[1] + text_offset[0], v[0]+ text_offset[1], k, fontsize=fontsize)
pass
# Line: I3D for MiT
line_hd, = plt.plot(x_data, y_data, linestyle, linewidth=2, label=label, markersize=1)
return line_hd
def draw_mit_curves():
fig, ax = plt.subplots(figsize=(8,6))
plt.rcParams["font.family"] = "Arial"
fontsize = 25
markersize = 80
# (open maF1, open-set AUC)
# I3D
I3D_OpenMax = [66.22, 77.76]
I3D_Dropout = [68.11, 79.14]
I3D_BNNSVI = [68.65, 79.50]
I3D_SoftMax = [68.84, 79.94]
I3D_RPL = [68.11, 79.16]
I3D_DEAR = [69.98, 81.54]
# TSM
TSM_OpenMax = [71.81, 83.05]
TSM_Dropout = [65.32, 78.35]
TSM_BNNSVI = [64.28, 77.39]
TSM_SoftMax = [71.68, 82.38]
TSM_RPL = [63.92, 77.28]
TSM_DEAR = [70.15, 83.92]
# TPN
TPN_OpenMax = [64.80, 76.26]
TPN_Dropout = [65.77, 77.76]
TPN_BNNSVI = [61.40, 75.32]
TPN_SoftMax = [70.82, 81.35]
TPN_RPL = [66.21, 78.21]
TPN_DEAR = [71.18, 81.80]
# SlowFast
SlowFast_OpenMax = [72.48, 80.62]
SlowFast_Dropout = [67.53, 78.49]
SlowFast_BNNSVI = [65.22, 77.39]
SlowFast_SoftMax = [74.42, 82.88]
SlowFast_RPL = [66.33, 77.42]
SlowFast_DEAR = [77.28, 86.99]
markers = {'DEAR (Ours)': '*', 'SoftMax': 'o', 'OpenMax': '^', 'RPL': 'd', 'MC Dropout': 's', 'BNN SVI': 'P'}
# Line: I3D for MiT
data_dict = {'OpenMax': I3D_OpenMax, 'MC Dropout': I3D_Dropout, 'BNN SVI': I3D_BNNSVI, 'SoftMax': I3D_SoftMax, 'RPL': I3D_RPL, 'DEAR (Ours)': I3D_DEAR}
line1_hd = draw_one_curve(data_dict, markers=markers, markercolor='g', markersize=markersize, fontsize=fontsize, label='I3D', linestyle='g-')
data_dict = {'OpenMax': TSM_OpenMax, 'MC Dropout': TSM_Dropout, 'BNN SVI': TSM_BNNSVI, 'SoftMax': TSM_SoftMax, 'RPL': TSM_RPL, 'DEAR (Ours)': TSM_DEAR}
line2_hd = draw_one_curve(data_dict, markers=markers, markercolor='k', markersize=markersize, fontsize=fontsize, label='TSM', linestyle='k-')
data_dict = {'OpenMax': TPN_OpenMax, 'MC Dropout': TPN_Dropout, 'BNN SVI': TPN_BNNSVI, 'SoftMax': TPN_SoftMax, 'RPL': TPN_RPL, 'DEAR (Ours)': TPN_DEAR}
line3_hd = draw_one_curve(data_dict, markers=markers, markercolor='b', markersize=markersize, fontsize=fontsize, label='TPN', linestyle='b-')
data_dict = {'OpenMax': SlowFast_OpenMax, 'MC Dropout': SlowFast_Dropout, 'BNN SVI': SlowFast_BNNSVI, 'SoftMax': SlowFast_SoftMax, 'RPL': SlowFast_RPL, 'DEAR (Ours)': SlowFast_DEAR}
line4_hd = draw_one_curve(data_dict, markers=markers, markercolor='r', markersize=markersize, fontsize=fontsize, label='SlowFast', linestyle='r-',
add_marker_text=True, text_offset=[-2.2, -0.2])
marker_elements = []
for k, v in markers.items():
msize = 18 if k == 'DEAR (Ours)' else 12
elem = Line2D([0], [0], marker=v, label=k, markersize=msize, linestyle="None")
marker_elements.append(elem)
marker_legend = ax.legend(handles=marker_elements, fontsize=fontsize-3, loc='lower right', ncol=1, handletextpad=0.05, columnspacing=0.05, borderaxespad=0.1)
ax.add_artist(marker_legend)
plt.ylim(60, 78)
plt.xlim(75, 88)
plt.ylabel('Open maF1 (%)', fontsize=fontsize)
plt.xlabel('Open-Set AUC Score (%)', fontsize=fontsize)
plt.xticks(np.arange(75, 89, 4), fontsize=fontsize)
plt.yticks(np.arange(60, 79, 4), fontsize=fontsize)
plt.legend(handles=[line1_hd, line2_hd, line3_hd, line4_hd], loc='upper left', fontsize=fontsize-3, handletextpad=0.5, borderaxespad=0.1)
plt.title('MiT-v2 as Unknown', fontsize=fontsize)
plt.grid('on', linestyle='--')
plt.tight_layout()
plt.savefig('../temp/compare_gain_mit.png', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
plt.savefig('../temp/compare_gain_mit.pdf', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
def draw_hmdb_curves():
fig, ax = plt.subplots(figsize=(8,6))
plt.rcParams["font.family"] = "Arial"
fontsize = 25
markersize = 80
# (open maF1, open-set AUC)
# I3D
I3D_OpenMax = [67.85, 74.34]
I3D_Dropout = [71.13, 75.07]
I3D_BNNSVI = [71.57, 74.66]
I3D_SoftMax = [73.19, 75.68]
I3D_RPL = [71.48, 75.20]
I3D_DEAR = [77.24, 77.08]
# TSM
TSM_OpenMax = [74.17, 77.07]
TSM_Dropout = [71.52, 73.85]
TSM_BNNSVI = [69.11, 73.42]
TSM_SoftMax = [78.27, 77.99]
TSM_RPL = [69.34, 73.62]
TSM_DEAR = [84.69, 78.65]
# TPN
TPN_OpenMax = [65.27, 74.12]
TPN_Dropout = [68.45, 74.13]
TPN_BNNSVI = [63.81, 72.68]
TPN_SoftMax = [76.23, 77.97]
TPN_RPL = [70.31, 75.32]
TPN_DEAR = [81.79, 79.23]
# SlowFast
SlowFast_OpenMax = [73.57, 78.76]
SlowFast_Dropout = [70.55, 75.41]
SlowFast_BNNSVI = [69.19, 74.78]
SlowFast_SoftMax = [78.04, 79.16]
SlowFast_RPL = [68.32, 74.23]
SlowFast_DEAR = [85.48, 82.94]
markers = {'DEAR (Ours)': '*', 'SoftMax': 'o', 'OpenMax': '^', 'RPL': 'd', 'MC Dropout': 's', 'BNN SVI': 'P'}
# Line: I3D for HMDB
data_dict = {'OpenMax': I3D_OpenMax, 'MC Dropout': I3D_Dropout, 'BNN SVI': I3D_BNNSVI, 'SoftMax': I3D_SoftMax, 'RPL': I3D_RPL, 'DEAR (Ours)': I3D_DEAR}
line1_hd = draw_one_curve(data_dict, markers=markers, markercolor='g', markersize=markersize, fontsize=fontsize, label='I3D', linestyle='g-')
data_dict = {'OpenMax': TSM_OpenMax, 'MC Dropout': TSM_Dropout, 'BNN SVI': TSM_BNNSVI, 'SoftMax': TSM_SoftMax, 'RPL': TSM_RPL, 'DEAR (Ours)': TSM_DEAR}
line2_hd = draw_one_curve(data_dict, markers=markers, markercolor='k', markersize=markersize, fontsize=fontsize, label='TSM', linestyle='k-')
data_dict = {'OpenMax': TPN_OpenMax, 'MC Dropout': TPN_Dropout, 'BNN SVI': TPN_BNNSVI, 'SoftMax': TPN_SoftMax, 'RPL': TPN_RPL, 'DEAR (Ours)': TPN_DEAR}
line3_hd = draw_one_curve(data_dict, markers=markers, markercolor='b', markersize=markersize, fontsize=fontsize, label='TPN', linestyle='b-')
data_dict = {'OpenMax': SlowFast_OpenMax, 'MC Dropout': SlowFast_Dropout, 'BNN SVI': SlowFast_BNNSVI, 'SoftMax': SlowFast_SoftMax, 'RPL': SlowFast_RPL, 'DEAR (Ours)': SlowFast_DEAR}
line4_hd = draw_one_curve(data_dict, markers=markers, markercolor='r', markersize=markersize, fontsize=fontsize, label='SlowFast', linestyle='r-',
add_marker_text=True, text_offset=[0.2, -1.5])
marker_elements = []
for k, v in markers.items():
msize = 18 if k == 'DEAR (Ours)' else 12
elem = Line2D([0], [0], marker=v, label=k, markersize=msize, linestyle="None")
marker_elements.append(elem)
marker_legend = ax.legend(handles=marker_elements, fontsize=fontsize-3, loc='lower right', ncol=1, handletextpad=0.3, columnspacing=0.2, borderaxespad=0.1)
ax.add_artist(marker_legend)
plt.ylim(60, 88)
plt.xlim(72, 85)
plt.ylabel('Open maF1 (%)', fontsize=fontsize)
plt.xlabel('Open-Set AUC Score (%)', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.legend(handles=[line1_hd, line2_hd, line3_hd, line4_hd], loc='upper left', fontsize=fontsize-3, handletextpad=0.5, borderaxespad=0.1)
plt.grid('on', linestyle='--')
plt.title('HMDB-51 as Unknown', fontsize=fontsize)
plt.tight_layout()
plt.savefig('../temp/compare_gain_hmdb.png', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
plt.savefig('../temp/compare_gain_hmdb.pdf', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
if __name__ == '__main__':
models = ['I3D', 'TPN', 'TSM', 'SlowFast']
# draw_curves()
draw_mit_curves()
draw_hmdb_curves()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/draw_performance_gain.py |
import numpy as np
import argparse
import os
import matplotlib.pyplot as plt
# from mmaction.core.evaluation import confusion_matrix
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.ticker as ticker
from matplotlib.colors import LogNorm
def confusion_maxtrix(ind_labels, ind_results, ind_uncertainties,
ood_labels, ood_results, ood_uncertainties,
threshold, know_ood_labels=False, normalize=True):
num_indcls = max(ind_labels) + 1
num_oodcls = max(ood_labels) + 1
confmat = np.zeros((num_indcls + num_oodcls, num_indcls + num_oodcls), dtype=np.float32)
for rlabel, plabel, uncertain in zip(ind_labels, ind_results, ind_uncertainties):
if uncertain > threshold:
# known --> unknown (bottom-left)
confmat[num_indcls:num_indcls+num_oodcls, rlabel] += 1.0 * num_oodcls
else:
# known --> known (top-left)
confmat[plabel, rlabel] += 1.0 * num_indcls
if know_ood_labels:
        for rlabel, plabel, uncertain in zip(ood_labels, ood_results, ood_uncertainties):
            if uncertain > threshold:
                # unknown --> unknown (bottom-right)
                confmat[num_indcls:num_indcls+num_oodcls, num_indcls+rlabel] += 1.0
            else:
                # unknown --> known (top-right)
                confmat[plabel, num_indcls+rlabel] += 1.0 * num_oodcls
else:
for plabel, uncertain in zip(ood_results, ood_uncertainties):
if uncertain > threshold:
# unknown --> unknown (bottom-right)
confmat[num_indcls:num_indcls+num_oodcls, num_indcls:num_indcls+num_oodcls] += 1.0
else:
# unknown --> known (top-right)
confmat[plabel, num_indcls:num_indcls+num_oodcls] += 1 * num_oodcls
if normalize:
minval = np.min(confmat[np.nonzero(confmat)])
maxval = np.max(confmat)
confmat = (confmat - minval) / (maxval - minval + 1e-6)
# confmat = np.nan_to_num(confmat)
return confmat
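# Reading the matrix above (rows = predicted label, columns = ground-truth label):
# the top-left KxK block counts known->known decisions, the bottom-left block
# known samples rejected as unknown, the top-right block unknown samples accepted
# as some known class, and the bottom-right block unknown samples correctly
# rejected; the per-block scaling factors only balance the two label spaces
# before normalization.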
def confusion_maxtrix_top(ind_labels, ind_results, ind_uncertainties,
ood_labels, ood_results, ood_uncertainties,
threshold, normalize=True):
num_indcls = max(ind_labels) + 1
num_oodcls = max(ood_labels) + 1
confmat = np.ones((num_indcls, num_indcls + num_oodcls), dtype=np.float32) # (K, K+C) white
# process in-distribution results
for rlabel, plabel, uncertain in zip(ind_labels, ind_results, ind_uncertainties):
if uncertain < threshold:
# known --> known (top-left)
confmat[plabel, rlabel] -= 1.0 / len(ind_results) # make it darker
# process out-of-distribution results
    for rlabel, plabel, uncertain in zip(ood_labels, ood_results, ood_uncertainties):
        if uncertain < threshold:
            # unknown --> known (top-right)
            confmat[plabel, num_indcls+rlabel] -= 1.0 / len(ood_results)  # make it darker
if normalize:
minval = np.min(confmat)
maxval = np.max(confmat)
confmat = (confmat - minval) / (maxval - minval + 1e-6) # normalize to [0, 1]
return confmat
def plot_confmat(confmat, know_ood_labels=False):
fig = plt.figure(figsize=(4,4))
plt.rcParams["font.family"] = "Arial" # Times New Roman
fontsize = 20
ax = plt.gca()
confmat_vis = confmat.copy()
im = ax.imshow(confmat_vis, cmap='hot')
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax)
# cbar.locator = ticker.MaxNLocator(nbins=5)
# # barticks = np.linspace(np.min(confmat) * 1000, np.max(confmat) * 1000, 5).tolist()
# # cbar.set_ticks(barticks)
# cbar.ax.tick_params(labelsize=fontsize)
cbar.set_ticks([])
cbar.update_ticks()
plt.tight_layout()
save_file = args.save_file[:-4] + '_knownOOD.png' if know_ood_labels else args.save_file
plt.savefig(save_file, bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
plt.savefig(save_file[:-4] + '.pdf', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
plt.close()
def get_topk_2d(arr, topk=5):
col_indices = np.argmax(arr, axis=1) # column indices
vals = [arr[r, c] for r, c in enumerate(col_indices)]
row_indices = np.argsort(vals)[::-1] # decreasing sort
result_inds = np.zeros((topk, 2), dtype=np.int32)
result_vals = []
for k in range(topk):
result_inds[k, 0] = row_indices[k]
result_inds[k, 1] = col_indices[row_indices[k]]
result_vals.append(vals[row_indices[k]])
return result_inds, result_vals
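# Example (sketch): get_topk_2d(np.array([[.1, .9], [.8, .2], [.5, .4]]), topk=2)
# returns indices [[0, 1], [1, 0]] and values [0.9, 0.8]: each row's best column
# is taken first, then rows are ranked by that best value in decreasing order.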
def read_classnames(list_file):
names = []
with open(list_file, 'r') as f:
for line in f.readlines():
names.append(line.strip().split(' ')[-1])
return names
def plot_top_confmat(confmat):
ind_mappings = 'data/ucf101/annotations/classInd.txt'
ind_cls_names = read_classnames(ind_mappings)
ood_mappings = 'data/hmdb51/annotations/classInd.txt'
ood_cls_names = read_classnames(ood_mappings)
fig = plt.figure(figsize=(8,4))
plt.rcParams["font.family"] = "Arial" # Times New Roman
fontsize = 15
ax = plt.gca()
confmat_vis = confmat.copy()
im = ax.imshow(confmat_vis, cmap='hot')
plt.axvline(num_indcls-1, 0, num_indcls-1, linestyle='--')
# find the top-K mis-classification for unknown
result_inds, result_vals = get_topk_2d(1 - confmat_vis[:, num_indcls+1:])
ood_ids = np.argmin(confmat_vis[:, num_indcls+1:], axis=1)
text_offset = [[-25, 1], [-25, -3], [-61, 1], [-28, 2], [-32, 1]]
for i, (r, c) in enumerate(result_inds):
hd = plt.Circle((c + num_indcls, r), 5, fill=False)
ax.set_aspect(1)
        ax.add_artist(hd)
off_c, off_r = 6, 1
if i == 1:
off_c, off_r = -4, -6
plt.text(c + num_indcls + off_c, r + off_r, ood_cls_names[c], color='blue', fontsize=fontsize)
plt.plot([num_indcls, num_indcls + c], [r, r], 'r--')
plt.text(num_indcls + text_offset[i][0], r+text_offset[i][1], ind_cls_names[r], color='red', fontsize=fontsize)
plt.ylabel('Predicted Classes', fontsize=fontsize)
plt.xlabel('UCF-101 (known) + HMDB-51 (unknown)', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.tight_layout()
plt.savefig(args.save_file[:-4] + '_top.png', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
plt.savefig(args.save_file[:-4] + '_top.pdf', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='MMAction2 test')
# model config
parser.add_argument('--ood_result', help='the result file of ood detection')
parser.add_argument('--uncertain_thresh', type=float, default=0.0001, help='the threshold value for prediction')
parser.add_argument('--top_part', action='store_true', help='Whether to show the top part of confmat separately.')
parser.add_argument('--save_file', help='the image file path of generated confusion matrix')
args = parser.parse_args()
results = np.load(args.ood_result, allow_pickle=True)
ind_uncertainties = results['ind_unctt'] # (N1,)
ood_uncertainties = results['ood_unctt'] # (N2,)
ind_results = results['ind_pred'] # (N1,)
ood_results = results['ood_pred'] # (N2,)
ind_labels = results['ind_label']
ood_labels = results['ood_label']
# result path
result_path = os.path.dirname(args.save_file)
if not os.path.exists(result_path):
os.makedirs(result_path)
# OOD classes are unknown
confmat1 = confusion_maxtrix(ind_labels, ind_results, ind_uncertainties,
ood_labels, ood_results, ood_uncertainties,
args.uncertain_thresh, know_ood_labels=False)
plot_confmat(confmat1, know_ood_labels=False)
num_indcls = max(ind_labels) + 1
num_oodcls = max(ood_labels) + 1
UKC_value = np.mean(confmat1[:num_indcls, num_indcls:]) # unknown --> known (top-right)
UUC_value = np.mean(confmat1[num_indcls:, num_indcls:]) # unknown --> unknown (bottom-right)
KUC_value = np.mean(confmat1[num_indcls:, :num_indcls]) # known --> unknown (bottom-left)
KKC_value = np.mean(np.diag(confmat1[:num_indcls, :num_indcls])) # known --> known (top-left)
print("The average UUC=: %.6lf, UKC=%.6lf, KUC=%.6lf, KKC=%.6lf"%(UUC_value, UKC_value, KUC_value, KKC_value))
# # OOD classes are known
# confmat2 = confusion_maxtrix(ind_labels, ind_results, ind_uncertainties,
# ood_labels, ood_results, ood_uncertainties,
# args.uncertain_thresh, know_ood_labels=True)
# plot_confmat(confmat2, know_ood_labels=True)
# # save the confusion matrix for further analysis
# np.savez(args.save_file[:-4], confmat_unknown_ood=confmat1, confmat_known_ood=confmat2)
# votes_ind = np.sum(confmat1[:101, 101:], axis=1)
# print("Top-10 false positive IND classes: ", np.argsort(votes_ind)[-10:])
# votes_ood = np.sum(confmat1[101:, :101], axis=1)
# print("Top-10 false negative IND classes: ", np.argsort(votes_ood)[-10:])
if args.top_part:
top_confmat = confusion_maxtrix_top(ind_labels, ind_results, ind_uncertainties,
ood_labels, ood_results, ood_uncertainties,
args.uncertain_thresh)
plot_top_confmat(top_confmat) | InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/draw_confusion_matrix.py |
import argparse
import os
import os.path as osp
import torch
import mmcv
from mmaction.apis import init_recognizer
from mmcv.parallel import collate, scatter
from operator import itemgetter
from mmaction.datasets.pipelines import Compose
from mmaction.datasets import build_dataloader, build_dataset
from mmcv.parallel import MMDataParallel
import numpy as np
from scipy.special import xlogy
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from tqdm import tqdm
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 test')
# model config
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file/url')
parser.add_argument('--uncertainty', choices=['BALD', 'Entropy', 'EDL'], help='the uncertainty estimation method')
parser.add_argument('--forward_pass', type=int, default=10, help='the number of forward passes')
# data config
parser.add_argument('--ind_data', help='the split file of in-distribution testing data')
parser.add_argument('--ood_data', help='the split file of out-of-distribution testing data')
# env config
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument('--result_prefix', help='result file prefix')
args = parser.parse_args()
return args
def apply_dropout(m):
if type(m) == torch.nn.Dropout:
m.train()
def update_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def compute_uncertainty(predictions, method='BALD'):
    """Compute per-sample uncertainty scores (Entropy or BALD).
    predictions: (B x C x T), T stochastic forward passes of C-class scores.
    """
    expected_p = np.mean(predictions, axis=-1)  # mean over forward passes, shape (B, C)
entropy_expected_p = - np.sum(xlogy(expected_p, expected_p), axis=1) # the entropy of expect_p (across classes)
if method == 'Entropy':
uncertain_score = entropy_expected_p
elif method == 'BALD':
        expected_entropy = - np.mean(np.sum(xlogy(predictions, predictions), axis=1), axis=-1)  # mean of per-pass entropies (across classes), shape (B,)
uncertain_score = entropy_expected_p - expected_entropy
else:
raise NotImplementedError
if not np.all(np.isfinite(uncertain_score)):
        uncertain_score[~np.isfinite(uncertain_score)] = 9999
return uncertain_score
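# Numerical intuition (sketch): with two forward passes predicting [0.9, 0.1] and
# [0.1, 0.9] for one sample, the mean prediction is [0.5, 0.5], so the Entropy
# score is ln(2) ~ 0.69, while BALD subtracts the mean per-pass entropy (~0.33),
# leaving ~0.37 -- disagreement between passes, not just flat confidence, is what
# drives BALD up.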
def run_stochastic_inference(model, data_loader, npass=10):
# run inference
model = MMDataParallel(model, device_ids=[0])
all_confidences, all_uncertainties, all_results, all_gts = [], [], [], []
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
all_scores = []
with torch.no_grad():
for n in range(npass):
# set new random seed
update_seed(n * 1234)
scores = model(return_loss=False, **data)
# gather results
all_scores.append(np.expand_dims(scores, axis=-1))
all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T)
# compute the uncertainty
uncertainty = compute_uncertainty(all_scores, method=args.uncertainty)
all_uncertainties.append(uncertainty)
# compute the predictions and save labels
mean_scores = np.mean(all_scores, axis=-1)
preds = np.argmax(mean_scores, axis=1)
all_results.append(preds)
conf = np.max(mean_scores, axis=1)
all_confidences.append(conf)
labels = data['label'].numpy()
all_gts.append(labels)
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size):
prog_bar.update()
all_confidences = np.concatenate(all_confidences, axis=0)
all_uncertainties = np.concatenate(all_uncertainties, axis=0)
all_results = np.concatenate(all_results, axis=0)
all_gts = np.concatenate(all_gts, axis=0)
return all_confidences, all_uncertainties, all_results, all_gts
def run_evidence_inference(model, data_loader, evidence='exp'):
# set new random seed
update_seed(1234)
# get the evidence function
if evidence == 'relu':
from mmaction.models.losses.edl_loss import relu_evidence as get_evidence
elif evidence == 'exp':
from mmaction.models.losses.edl_loss import exp_evidence as get_evidence
elif evidence == 'softplus':
from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence
else:
raise NotImplementedError
num_classes = model.cls_head.num_classes
# run inference
model = MMDataParallel(model, device_ids=[0])
all_confidences, all_uncertainties, all_results, all_gts = [], [], [], []
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
output = model(return_loss=False, **data)
evidence = get_evidence(torch.from_numpy(output))
alpha = evidence + 1
uncertainty = num_classes / torch.sum(alpha, dim=1)
scores = alpha / torch.sum(alpha, dim=1, keepdim=True)
all_uncertainties.append(uncertainty.numpy())
# compute the predictions and save labels
preds = np.argmax(scores.numpy(), axis=1)
all_results.append(preds)
conf = np.max(scores.numpy(), axis=1)
all_confidences.append(conf)
labels = data['label'].numpy()
all_gts.append(labels)
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size):
prog_bar.update()
all_confidences = np.concatenate(all_confidences, axis=0)
all_uncertainties = np.concatenate(all_uncertainties, axis=0)
all_results = np.concatenate(all_results, axis=0)
all_gts = np.concatenate(all_gts, axis=0)
return all_confidences, all_uncertainties, all_results, all_gts
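# EDL note (sketch): with K classes and per-class evidence e, alpha = e + 1 and the
# predictive uncertainty is u = K / sum(alpha). Zero total evidence gives u = 1
# (maximally uncertain); e.g. with K = 101 and total evidence 899, sum(alpha) = 1000
# and u ~ 0.1, while the class scores are alpha / sum(alpha).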
def run_inference(model, datalist_file, npass=10):
# switch config for different dataset
cfg = model.cfg
cfg.data.test.ann_file = datalist_file
cfg.data.test.data_prefix = os.path.join(os.path.dirname(datalist_file), 'videos')
evidence = cfg.get('evidence', 'exp')
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
dist=False,
shuffle=False,
pin_memory=False)
dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
if not args.uncertainty == 'EDL':
all_confidences, all_uncertainties, all_results, all_gts = run_stochastic_inference(model, data_loader, npass)
else:
all_confidences, all_uncertainties, all_results, all_gts = run_evidence_inference(model, data_loader, evidence)
return all_confidences, all_uncertainties, all_results, all_gts
def main():
# build the recognizer from a config file and checkpoint file/url
model = init_recognizer(
args.config,
args.checkpoint,
device=device,
use_frames=False)
cfg = model.cfg
if not args.uncertainty == 'EDL':
# use dropout in testing stage
if 'dnn' in args.config:
model.apply(apply_dropout)
if 'bnn' in args.config:
model.test_cfg.npass = 1
# set cudnn benchmark
torch.backends.cudnn.benchmark = True
cfg.data.test.test_mode = True
result_file = os.path.join(args.result_prefix + '_result.npz')
if not os.path.exists(result_file):
# prepare result path
result_dir = os.path.dirname(result_file)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# run inference (OOD)
ood_confidences, ood_uncertainties, ood_results, ood_labels = run_inference(model, args.ood_data, npass=args.forward_pass)
# run inference (IND)
ind_confidences, ind_uncertainties, ind_results, ind_labels = run_inference(model, args.ind_data, npass=args.forward_pass)
# save
np.savez(result_file[:-4], ind_conf=ind_confidences, ood_conf=ood_confidences,
ind_unctt=ind_uncertainties, ood_unctt=ood_uncertainties,
ind_pred=ind_results, ood_pred=ood_results,
ind_label=ind_labels, ood_label=ood_labels)
else:
results = np.load(result_file, allow_pickle=True)
ind_confidences = results['ind_conf']
ood_confidences = results['ood_conf']
ind_uncertainties = results['ind_unctt'] # (N1,)
ood_uncertainties = results['ood_unctt'] # (N2,)
ind_results = results['ind_pred'] # (N1,)
ood_results = results['ood_pred'] # (N2,)
ind_labels = results['ind_label']
ood_labels = results['ood_label']
# visualize
ind_uncertainties = np.array(ind_uncertainties)
ind_uncertainties = (ind_uncertainties-np.min(ind_uncertainties)) / (np.max(ind_uncertainties) - np.min(ind_uncertainties)) # normalize
ood_uncertainties = np.array(ood_uncertainties)
ood_uncertainties = (ood_uncertainties-np.min(ood_uncertainties)) / (np.max(ood_uncertainties) - np.min(ood_uncertainties)) # normalize
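    # note: IND and OOD uncertainties are min-max normalized independently here,
    # purely so the two histograms below share a common [0, 1] range for visualization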
dataName_ind = args.ind_data.split('/')[-2].upper()
dataName_ood = args.ood_data.split('/')[-2].upper()
if dataName_ind == 'UCF101':
dataName_ind = 'UCF-101'
if dataName_ood == 'MIT':
dataName_ood = 'MiT-v2'
if dataName_ood == 'HMDB51':
dataName_ood = 'HMDB-51'
plt.figure(figsize=(5,4)) # (w, h)
plt.rcParams["font.family"] = "Arial" # Times New Roman
fontsize = 15
plt.hist([ind_uncertainties, ood_uncertainties], 50,
density=True, histtype='bar', color=['blue', 'red'],
label=['in-distribution (%s)'%(dataName_ind), 'out-of-distribution (%s)'%(dataName_ood)])
plt.legend(fontsize=fontsize)
plt.xlabel('%s uncertainty'%(args.uncertainty), fontsize=fontsize)
plt.ylabel('density', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.xlim(0, 1.01)
plt.ylim(0, 10.01)
plt.tight_layout()
plt.savefig(os.path.join(args.result_prefix + '_distribution.png'))
plt.savefig(os.path.join(args.result_prefix + '_distribution.pdf'))
if __name__ == '__main__':
args = parse_args()
# assign the desired device.
device = torch.device(args.device)
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/ood_detection.py |
import argparse
import os
import torch
import mmcv
from mmaction.apis import init_recognizer
from mmaction.datasets import build_dataloader, build_dataset
from mmaction.core.evaluation import top_k_accuracy
from mmcv.parallel import MMDataParallel
import numpy as np
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 test')
# model config
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file/url')
parser.add_argument('--split_file', help='the split file for evaluation')
parser.add_argument('--video_path', help='the video path for evaluation')
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument('--result_prefix', help='result file prefix')
args = parser.parse_args()
return args
def set_deterministic(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
np.random.seed(seed) # Numpy module.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
def init_inference():
# build the recognizer from a config file and checkpoint file/url
model = init_recognizer(args.config, args.checkpoint, device=device, use_frames=False)
cfg = model.cfg
cfg.data.test.test_mode = True
cfg.data.test.ann_file = args.split_file
cfg.data.test.data_prefix = args.video_path
evidence = cfg.get('evidence', 'exp')
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
dist=False,
shuffle=False,
pin_memory=False)
dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
return model, data_loader, evidence
def run_evidence_inference(model, data_loader, evidence='exp'):
# get the evidence function
if evidence == 'relu':
from mmaction.models.losses.edl_loss import relu_evidence as get_evidence
elif evidence == 'exp':
from mmaction.models.losses.edl_loss import exp_evidence as get_evidence
elif evidence == 'softplus':
from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence
else:
raise NotImplementedError
num_classes = model.cls_head.num_classes
# run inference
model = MMDataParallel(model, device_ids=[0])
all_scores, all_uncertainties, all_labels = [], [], []
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
output = model(return_loss=False, **data)
evidence = get_evidence(torch.from_numpy(output))
alpha = evidence + 1
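            # alpha parameterizes a Dirichlet; the two lines below compute the vacuity
            # uncertainty (num_classes / strength) and the expected class probabilities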
uncertainty = num_classes / torch.sum(alpha, dim=1)
scores = alpha / torch.sum(alpha, dim=1, keepdim=True)
all_uncertainties.append(uncertainty.numpy())
all_scores.append(scores.numpy())
labels = data['label'].numpy()
all_labels.append(labels)
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size):
prog_bar.update()
all_scores = np.concatenate(all_scores, axis=0)
all_uncertainties = np.concatenate(all_uncertainties, axis=0)
all_labels = np.concatenate(all_labels, axis=0)
return all_scores, all_uncertainties, all_labels
def main():
result_file = os.path.join(args.result_prefix + '_result.npz')
if not os.path.exists(result_file):
# prepare result path
result_dir = os.path.dirname(result_file)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# init
model, data_loader, evidence = init_inference()
# run inference
all_scores, all_uncertainties, all_labels = run_evidence_inference(model, data_loader, evidence)
# save
np.savez(result_file[:-4], score=all_scores, uncertainty=all_uncertainties, label=all_labels)
else:
results = np.load(result_file, allow_pickle=True)
all_scores = results['score']
all_uncertainties = results['uncertainty']
all_labels = results['label'] # (N2,)
    # closed-set evaluation (on the biased or unbiased test split)
top_k_acc = top_k_accuracy(all_scores, all_labels, (1, 5))
print('Evaluation Results:\ntop1_acc: %.5lf, top5_acc: %.5lf\n'%(top_k_acc[0], top_k_acc[1]))
if __name__ == '__main__':
args = parse_args()
# assign the desired device.
device = torch.device(args.device)
set_deterministic(1234)
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/eval_debias.py |
import argparse, os, sys
import torch
import mmcv
import numpy as np
import torch.nn.functional as F
from mmcv.parallel import collate, scatter
from mmaction.datasets.pipelines import Compose
from mmaction.apis import init_recognizer
from mmaction.datasets import build_dataloader, build_dataset
from mmcv.parallel import MMDataParallel
from tqdm import tqdm
import scipy.spatial.distance as spd
try:
import libmr
except ImportError:
print("LibMR not installed or libmr.so not found")
print("Install libmr: cd libMR/; ./compile.sh")
sys.exit()
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score
import matplotlib.pyplot as plt
def set_deterministic(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
np.random.seed(seed) # Numpy module.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
def parse_args():
""" Example shell script:
$ cd experiments
$ source activate mmaction
    $ nohup python baseline_openmax.py --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py
"""
parser = argparse.ArgumentParser(description='MMAction2 test')
# model config
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file/url')
parser.add_argument('--trainset_split', default='data/ucf101/ucf101_train_split_1_videos.txt', help='the split file path of the training set')
parser.add_argument('--num_cls', type=int, default=101, help='The number of classes in training set.')
parser.add_argument('--cache_mav_dist', help='the result path to cache the mav and distances for each class.')
# test data config
parser.add_argument('--ind_data', help='the split file of in-distribution testing data')
parser.add_argument('--ood_data', help='the split file of out-of-distribution testing data')
    parser.add_argument('--ood_ncls', type=int, help='the number of classes in unknown dataset')
parser.add_argument('--num_rand', type=int, default=10, help='the number of random selection for ood classes')
# device
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument('--result_prefix', help='result file prefix')
args = parser.parse_args()
return args
def get_datalist(split_file):
assert os.path.exists(split_file), 'split file does not exist! %s'%(split_file)
video_dir = os.path.join(os.path.dirname(split_file), 'videos')
filelist, labels = [], []
with open(split_file, 'r') as f:
for line in f.readlines():
videofile = os.path.join(video_dir, line.strip().split(' ')[0])
clsid = int(line.strip().split(' ')[-1])
filelist.append(videofile)
labels.append(clsid)
return filelist, labels
def spatial_temporal_pooling(feat_blob):
if isinstance(feat_blob, tuple): # slowfast model returns a tuple of features
assert len(feat_blob) == 2, "invalid feature tuple!"
avg_pool3d = torch.nn.AdaptiveAvgPool3d((1, 1, 1))
x_fast, x_slow = feat_blob
x_fast = avg_pool3d(x_fast)
x_slow = avg_pool3d(x_slow)
# [N, channel_fast + channel_slow, 1, 1, 1]
feat_clips = torch.cat((x_slow, x_fast), dim=1).squeeze(-1).squeeze(-1).squeeze(-1)
else:
if len(feat_blob.size()) == 5: # 3D Network
# spatial temporal average pooling
kernel_size = (feat_blob.size(-3), feat_blob.size(-2), feat_blob.size(-1))
avg_pool3d = torch.nn.AvgPool3d(kernel_size, stride=1, padding=0)
feat_clips = avg_pool3d(feat_blob).view(feat_blob.size(0), feat_blob.size(1)) # (c, D)
elif len(feat_blob.size()) == 4: # 2D Network
# spatial temporal average pooling
kernel_size = (feat_blob.size(-2), feat_blob.size(-1))
avg_pool2d = torch.nn.AvgPool2d(kernel_size, stride=1, padding=0)
feat_clips = avg_pool2d(feat_blob).view(feat_blob.size(0), feat_blob.size(1)) # (c, D)
else:
            raise NotImplementedError('Unsupported feature dimension: {}'.format(feat_blob.size()))
# get the mean features of all clips and crops
feat_final = torch.mean(feat_clips, dim=0, keepdim=True) # (c=1, D)
return feat_final
def inference_recognizer(model, video_path):
"""Inference a video with the detector.
Args:
model (nn.Module): The loaded recognizer.
video_path (str): The video file path/url or the rawframes directory
path. If ``use_frames`` is set to True, it should be rawframes
directory path. Otherwise, it should be video file path.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = cfg.data.test.pipeline
test_pipeline = Compose(test_pipeline)
# prepare data (by default, we use videodata)
start_index = cfg.data.test.get('start_index', 0)
data = dict(filename=video_path, label=-1, start_index=start_index, modality='RGB')
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
# forward the model
with torch.no_grad():
feat_blob, score = model(return_loss=False, return_score=True, get_feat=True, **data) # (c, D, t, h, w)
feat_blob = spatial_temporal_pooling(feat_blob)
feat_final = feat_blob.cpu().numpy()
score = score.cpu().numpy()
return feat_final, score
def extract_class_features(videolist, model, cls_gt):
features = []
for videofile in tqdm(videolist, total=len(videolist), desc='Extract Class %d Features'%(cls_gt)):
feat, score = inference_recognizer(model, videofile) # (c, D)
cls_pred = np.argmax(score, axis=1)
if cls_gt in cls_pred:
features.append(feat)
features = np.array(features) # (N, c, D)
return features
def compute_distance(mav, features):
# extract features and compute distances for each class
num_channels = mav.shape[0]
eucos_dist, eu_dist, cos_dist = [], [], []
for feat in features:
# compute distance of each channel
eu_channel, cos_channel, eu_cos_channel = [], [], []
for c in range(num_channels):
eu_channel += [spd.euclidean(mav[c, :], feat[c, :])/200.]
cos_channel += [spd.cosine(mav[c, :], feat[c, :])]
eu_cos_channel += [spd.euclidean(mav[c, :], feat[c, :]) / 200.
+ spd.cosine(mav[c, :], feat[c, :])] # Here, 200 is from the official OpenMax code
eu_dist += [eu_channel]
cos_dist += [cos_channel]
eucos_dist += [eu_cos_channel]
return np.array(eucos_dist), np.array(eu_dist), np.array(cos_dist)
def compute_channel_distance(mav_channel, feat_channel, distance_type='eucos'):
if distance_type == 'eucos':
query_distance = spd.euclidean(mav_channel, feat_channel)/200. + spd.cosine(mav_channel, feat_channel)
elif distance_type == 'eu':
query_distance = spd.euclidean(mav_channel, feat_channel)/200.
elif distance_type == 'cos':
query_distance = spd.cosine(mav_channel, feat_channel)
    else:
        raise ValueError("distance type not known: enter one of 'eucos', 'eu' or 'cos'")
return query_distance
def compute_mav_dist(videolist, labels, model, mav_dist_cachedir):
num_cls = model.cls_head.num_classes
mav_dist_list = []
for cls_gt in range(num_cls):
mav_dist_file = os.path.join(mav_dist_cachedir, 'mav_dist_cls%03d.npz'%(cls_gt))
mav_dist_list.append(mav_dist_file)
if os.path.exists(mav_dist_file):
continue
# data for the current class
inds = np.where(np.array(labels) == cls_gt)[0]
videos_cls = [videolist[i] for i in inds]
# extract MAV features
features = extract_class_features(videos_cls, model, cls_gt)
mav_train = np.mean(features, axis=0)
# compute distance
eucos_dist, eu_dist, cos_dist = compute_distance(mav_train, features)
# save MAV and distances
np.savez(mav_dist_file[:-4], mav=mav_train, eucos=eucos_dist, eu=eu_dist, cos=cos_dist)
return mav_dist_list
def weibull_fitting(mav_dist_list, distance_type='eucos', tailsize=20):
weibull_model = {}
for cls_gt in range(len(mav_dist_list)):
# load the mav_dist file
cache = np.load(mav_dist_list[cls_gt], allow_pickle=True)
mav_train = cache['mav']
distances = cache[distance_type]
weibull_model[cls_gt] = {}
weibull_model[cls_gt]['mean_vec'] = mav_train
# weibull fitting for each channel
weibull_model[cls_gt]['weibull_model'] = []
num_channels = mav_train.shape[0]
for c in range(num_channels):
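            # EVT fitting: model the `tailsize` largest class-to-MAV distances of this
            # channel with a Weibull distribution, so that w_score() later estimates how
            # extreme a query distance is relative to this class's training samples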
mr = libmr.MR()
tailtofit = sorted(distances[:, c])[-tailsize:]
mr.fit_high(tailtofit, len(tailtofit))
weibull_model[cls_gt]['weibull_model'] += [mr]
return weibull_model
def compute_openmax_prob(openmax_score, openmax_score_u):
num_channels, num_cls = openmax_score.shape
prob_scores, prob_unknowns = [], []
for c in range(num_channels):
channel_scores, channel_unknowns = [], []
for gt_cls in range(num_cls):
channel_scores += [np.exp(openmax_score[c, gt_cls])]
total_denominator = np.sum(np.exp(openmax_score[c, :])) + np.exp(np.sum(openmax_score_u[c, :]))
prob_scores += [channel_scores/total_denominator ]
prob_unknowns += [np.exp(np.sum(openmax_score_u[c, :]))/total_denominator]
prob_scores = np.array(prob_scores)
prob_unknowns = np.array(prob_unknowns)
scores = np.mean(prob_scores, axis=0)
unknowns = np.mean(prob_unknowns, axis=0)
modified_scores = scores.tolist() + [unknowns]
assert len(modified_scores) == num_cls + 1
modified_scores = np.expand_dims(np.array(modified_scores), axis=0)
return modified_scores
def openmax_recalibrate(weibull_model, feature, score, rank=1, distance_type='eucos'):
num_channels, num_cls = score.shape
# get the ranked alpha
alpharank = min(num_cls, rank)
ranked_list = np.mean(score, axis=0).argsort().ravel()[::-1]
alpha_weights = [((alpharank+1) - i)/float(alpharank) for i in range(1, alpharank+1)]
ranked_alpha = np.zeros((num_cls,))
for i in range(len(alpha_weights)):
ranked_alpha[ranked_list[i]] = alpha_weights[i]
# calibrate
openmax_score, openmax_score_u = [], []
for c in range(num_channels):
channel_scores = score[c, :]
openmax_channel = []
openmax_unknown = []
for cls_gt in range(num_cls):
# get distance between current channel and mean vector
mav_train = weibull_model[cls_gt]['mean_vec']
category_weibull = weibull_model[cls_gt]['weibull_model']
channel_distance = compute_channel_distance(mav_train[c, :], feature[c, :], distance_type=distance_type)
# obtain w_score for the distance and compute probability of the distance
wscore = category_weibull[c].w_score(channel_distance)
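            # OpenMax recalibration: damp this class activation by the Weibull CDF value
            # (wscore), weighted by its rank, and route the removed mass to the unknown class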
modified_score = channel_scores[cls_gt] * ( 1 - wscore*ranked_alpha[cls_gt] )
openmax_channel += [modified_score]
openmax_unknown += [channel_scores[cls_gt] - modified_score]
# gather modified scores for each channel
openmax_score += [openmax_channel]
openmax_score_u += [openmax_unknown]
openmax_score = np.array(openmax_score)
openmax_score_u = np.array(openmax_score_u)
# Pass the recalibrated scores into openmax
openmax_prob = compute_openmax_prob(openmax_score, openmax_score_u)
return openmax_prob
def run_inference(model, weibull_model, datalist_file):
# switch config for different dataset
cfg = model.cfg
cfg.data.test.ann_file = datalist_file
cfg.data.test.data_prefix = os.path.join(os.path.dirname(datalist_file), 'videos')
cfg.test_cfg.average_clips = 'score' # we only need scores before softmax layer
model.cfg.data.videos_per_gpu = 1
model.cfg.data.workers_per_gpu = 0
num_cls = model.cls_head.num_classes
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
dist=False,
shuffle=False,
pin_memory=False)
dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
model = MMDataParallel(model, device_ids=[0])
all_softmax, all_openmax, all_gts = [], [], []
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
feat_blob, score = model(return_loss=False, return_score=True, get_feat=True, **data)
softmax_prob = F.softmax(score, dim=1).cpu().numpy()
# aggregate features
feat_blob = spatial_temporal_pooling(feat_blob)
feat_final = feat_blob.cpu().numpy()
# Re-calibrate score before softmax with OpenMax
openmax_prob = openmax_recalibrate(weibull_model, feat_final, score.cpu().numpy())
# gather preds
all_openmax.append(openmax_prob)
all_softmax.append(softmax_prob)
# gather label
labels = data['label'].numpy()
all_gts.append(labels)
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size):
prog_bar.update()
all_softmax = np.concatenate(all_softmax, axis=0)
all_openmax = np.concatenate(all_openmax, axis=0)
all_gts = np.concatenate(all_gts, axis=0)
return all_openmax, all_softmax, all_gts
def evaluate_openmax(ind_openmax, ood_openmax, ind_labels, ood_labels, ood_ncls, num_rand=10):
ind_ncls = model.cls_head.num_classes
ind_results = np.argmax(ind_openmax, axis=1)
ood_results = np.argmax(ood_openmax, axis=1)
# close-set accuracy (multi-class)
acc = accuracy_score(ind_labels, ind_results)
# open-set auc-roc (binary class)
preds = np.concatenate((ind_results, ood_results), axis=0)
    # compute the unknown mask before overwriting, so that predictions of known class
    # index 1 are not accidentally relabeled as unknown
    idx_unknown = (preds == ind_ncls)
    preds[idx_unknown] = 1 # unknown class
    preds[~idx_unknown] = 0 # known class
labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels)))
auc = roc_auc_score(labels, preds)
print('OpenMax: ClosedSet Accuracy (multi-class): %.3lf, OpenSet AUC (bin-class): %.3lf'%(acc * 100, auc * 100))
# open set F1 score (multi-class)
macro_F1_list = [f1_score(ind_labels, ind_results, average='macro')]
std_list = [0]
openness_list = [0]
for n in range(ood_ncls):
ncls_novel = n + 1
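        # openness measure commonly used in open-set recognition (Scheirer et al.):
        # 1 - sqrt(2 * n_known / (2 * n_known + n_novel)), reported in percent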
openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
openness_list.append(openness)
        # randomly select the subset of ood samples
macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
for m in range(num_rand):
cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi)
std = np.std(macro_F1_multi)
macro_F1_list.append(macro_F1)
std_list.append(std)
# draw comparison curves
macro_F1_list = np.array(macro_F1_list)
std_list = np.array(std_list)
w_openness = np.array(openness_list) / 100.
open_maF1_mean = np.sum(w_openness * macro_F1_list) / np.sum(w_openness)
open_maF1_std = np.sum(w_openness * std_list) / np.sum(w_openness)
print('Open macro-F1 score: %.3f, std=%.3lf'%(open_maF1_mean * 100, open_maF1_std * 100))
return openness_list, macro_F1_list, std_list
if __name__ == '__main__':
args = parse_args()
# assign the desired device.
device = torch.device(args.device)
set_deterministic(0)
# initialize recognition model
model = init_recognizer(args.config, args.checkpoint, device=device, use_frames=False)
torch.backends.cudnn.benchmark = True
model.cfg.data.test.test_mode = True
######## Compute the Mean Activation Vector (MAV) and Distances ########
if not os.path.exists(args.cache_mav_dist):
os.makedirs(args.cache_mav_dist)
# parse the video files list of training set
videolist, labels = get_datalist(args.trainset_split)
# compute mav and dist
mav_dist_list = compute_mav_dist(videolist, labels, model, args.cache_mav_dist)
######## OOD and IND detection ########
result_file = os.path.join(args.result_prefix + '_result.npz')
if not os.path.exists(result_file):
# prepare result path
result_dir = os.path.dirname(result_file)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# Weibull Model by EVT Fitting
print("Weibull fitting...")
weibull_model = weibull_fitting(mav_dist_list)
# run inference (OOD)
ood_openmax, ood_softmax, ood_labels = run_inference(model, weibull_model, args.ood_data)
    # run inference (IND)
ind_openmax, ind_softmax, ind_labels = run_inference(model, weibull_model, args.ind_data)
# save
np.savez(result_file[:-4], ind_openmax=ind_openmax, ood_openmax=ood_openmax,
ind_softmax=ind_softmax, ood_softmax=ood_softmax,
ind_label=ind_labels, ood_label=ood_labels)
else:
results = np.load(result_file, allow_pickle=True)
ind_openmax = results['ind_openmax'] # (N1, C+1)
ood_openmax = results['ood_openmax'] # (N2, C+1)
ind_softmax = results['ind_softmax'] # (N1, C)
ood_softmax = results['ood_softmax'] # (N2, C)
ind_labels = results['ind_label'] # (N1,)
ood_labels = results['ood_label'] # (N2,)
######## Evaluation ########
openness_list, macro_F1_list, std_list = evaluate_openmax(ind_openmax, ood_openmax, ind_labels, ood_labels, args.ood_ncls, num_rand=args.num_rand)
# draw F1 curve
plt.figure(figsize=(8,5)) # (w, h)
plt.plot(openness_list, macro_F1_list, 'r-', linewidth=2)
# plt.fill_between(openness_list, macro_F1_list - std_list, macro_F1_list + std_list, 'c')
plt.ylim(0.5, 1.0)
plt.xlabel('Openness (%)')
plt.ylabel('macro F1')
plt.grid('on')
    plt.legend(['OpenMax'])
plt.tight_layout()
dataset_name = args.result_prefix.split('_')[-1]
png_file = os.path.join(os.path.dirname(args.result_prefix), 'F1_openness_%s.png'%(dataset_name))
plt.savefig(png_file)
print('Openness curve figure is saved in: %s'%(png_file)) | InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/baseline_openmax.py |
import argparse
import os
import os.path as osp
import torch
import mmcv
from mmaction.apis import init_recognizer
from mmcv.parallel import collate, scatter
from operator import itemgetter
from mmaction.datasets.pipelines import Compose
from mmaction.datasets import build_dataloader, build_dataset
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
import numpy as np
from scipy.special import xlogy
from tqdm import tqdm
import pdb
from mmaction.apis import collect_results_cpu
from mmcv.runner import get_dist_info, init_dist, load_checkpoint, wrap_fp16_model
from mmcv.cnn import fuse_conv_bn
#from mmcv.runner import init_dist, set_random_seed
from mmcv import Config, DictAction
from mmaction.models import build_model
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 test')
# model and data config
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file/url')
parser.add_argument('--uncertainty', default='BALD', choices=['BALD', 'Entropy', 'EDL'], help='the uncertainty estimation method')
parser.add_argument('--train_data', help='the split file of in-distribution training data')
parser.add_argument('--forward_pass', type=int, default=10, help='the number of forward passes')
parser.add_argument('--batch_size', type=int, default=8, help='the testing batch size')
# env config
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument('--result_prefix', help='result file prefix')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--cfg-options', nargs='+', action=DictAction, default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
parser.add_argument('--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase''the inference speed')
args = parser.parse_args()
return args
def apply_dropout(m):
if type(m) == torch.nn.Dropout:
m.train()
def update_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def compute_uncertainty(predictions, method='BALD'):
"""Compute the entropy
scores: (B x C x T)
"""
expected_p = np.mean(predictions, axis=-1) # mean of all forward passes (C,)
entropy_expected_p = - np.sum(xlogy(expected_p, expected_p), axis=1) # the entropy of expect_p (across classes)
if method == 'Entropy':
uncertain_score = entropy_expected_p
elif method == 'BALD':
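        # BALD (Houlsby et al., 2011): mutual information between predictions and model
        # parameters, estimated over MC forward passes as H(E[p]) - E[H(p)]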
expected_entropy = - np.mean(np.sum(xlogy(predictions, predictions), axis=1), axis=-1) # mean of entropies (across classes), (scalar)
uncertain_score = entropy_expected_p - expected_entropy
else:
raise NotImplementedError
if not np.all(np.isfinite(uncertain_score)):
        uncertain_score[~np.isfinite(uncertain_score)] = 9999
return uncertain_score
def run_stochastic_inference(model, data_loader, forward_passes):
# model = MMDataParallel(model, device_ids=[0])
model.eval()
all_uncertainties = []
# prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
all_scores = []
with torch.no_grad():
for n in range(forward_passes):
# set new random seed
update_seed(n * 1234)
# original code 69-76
# scores = model(return_loss=False, **data)
# # gather results
# all_scores.append(np.expand_dims(scores, axis=-1))
# all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T)
# # compute the uncertainty
# uncertainty = compute_uncertainty(all_scores, method=args.uncertainty)
# all_uncertainties.append(uncertainty)
#---vae---------------------------------
# pdb.set_trace()
scores, recon = model(return_loss=False, **data)
uncertainty = recon
all_scores.append(np.expand_dims(scores, axis=-1))
all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T)
all_uncertainties.append(uncertainty)
#-----------------------------------------------
# use the first key as main key to calculate the batch size
# batch_size = len(next(iter(data.values())))
# for _ in range(batch_size):
# prog_bar.update()
if rank == 0:
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size * world_size):
prog_bar.update()
all_uncertainties = collect_results_cpu(all_uncertainties, len(data_loader.dataset), tmpdir=None)
rank, _ = get_dist_info()
if rank == 0:
all_uncertainties = np.concatenate(all_uncertainties, axis=0)
return all_uncertainties
def run_evidence_inference(model, data_loader, evidence):
# get the evidence function
if evidence == 'relu':
from mmaction.models.losses.edl_loss import relu_evidence as get_evidence
elif evidence == 'exp':
from mmaction.models.losses.edl_loss import exp_evidence as get_evidence
elif evidence == 'softplus':
from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence
else:
raise NotImplementedError
num_classes = 101
model.eval()
# model = MMDataParallel(model, device_ids=[0])
all_uncertainties = []
# prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
# output = model(return_loss=False, **data)
output = model(return_loss=False, **data)
evidence = get_evidence(torch.from_numpy(output))
alpha = evidence + 1
uncertainty = num_classes / torch.sum(alpha, dim=1)
scores = alpha / torch.sum(alpha, dim=1, keepdim=True)
all_uncertainties.append(uncertainty.numpy())
#---vae---------------------------------
# output, recon = model(return_loss=False, **data)
# uncertainty = recon
# all_uncertainties.append(uncertainty)
#------------------------------------------
# pdb.set_trace()
# use the first key as main key to calculate the batch size
# batch_size = len(next(iter(data.values())))
# for _ in range(batch_size):
# prog_bar.update()
if rank == 0:
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size * world_size):
prog_bar.update()
all_uncertainties = collect_results_cpu(all_uncertainties, len(data_loader.dataset), tmpdir=None)
rank, _ = get_dist_info()
if rank == 0:
all_uncertainties = np.concatenate(all_uncertainties, axis=0)
return all_uncertainties
def run_inference():
# build the recognizer from a config file and checkpoint file/url
# model = init_recognizer(
# args.config,
# args.checkpoint,
# device=device,
# use_frames=False)
# cfg = model.cfg
model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
load_checkpoint(model, args.checkpoint, map_location='cpu')
print('cpk is loaded:', args.checkpoint)
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
# if not args.uncertainty == 'EDL':
# # use dropout in testing stage
# if 'dnn' in args.config:
# model.apply(apply_dropout)
# if 'bnn' in args.config:
# model.test_cfg.npass = 1
# set cudnn benchmark
torch.backends.cudnn.benchmark = True
cfg.data.test.test_mode = True
cfg.data.videos_per_gpu = args.batch_size
evidence = cfg.get('evidence', 'exp')
# We use training data to obtain the threshold
#pdb.set_trace()
cfg.data.test.ann_file = args.train_data
cfg.data.test.data_prefix = os.path.join(os.path.dirname(args.train_data), 'videos')
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
dist=distributed,
shuffle=False,
pin_memory=False)
dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
#pdb.set_trace()
data_loader = build_dataloader(dataset, **dataloader_setting)
# run inference
if not args.uncertainty == 'EDL':
all_uncertainties = run_stochastic_inference(model, data_loader, args.forward_pass)
else:
all_uncertainties = run_evidence_inference(model, data_loader, evidence)
return all_uncertainties
if __name__ == '__main__':
# args = parse_args()
# # assign the desired device.
# device = torch.device(args.device)
# build the recognizer from a config file and checkpoint file/url
args = parse_args()
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# pdb.set_trace()
result_file = os.path.join(args.result_prefix + '_trainset_uncertainties.npz')
if not os.path.exists(result_file):
# run the inference on the entire training set (takes long time)
#pdb.set_trace()
all_uncertainties = run_inference()
np.savez(result_file[:-4], uncertainty=all_uncertainties)
else:
result = np.load(result_file)
all_uncertainties = result['uncertainty']
    # determine the uncertainty threshold from the training-set statistics
# pdb.set_trace()
rank, _ = get_dist_info()
if rank == 0:
# uncertain_sort = np.sort(all_uncertainties)[::-1] # sort the uncertainties with descending order
uncertain_sort = np.sort(all_uncertainties.squeeze())
#uncertain_sort = (uncertain_sort - np.min(uncertain_sort)) / (np.max(uncertain_sort) - np.min(uncertain_sort)) # normalize
N = all_uncertainties.shape[0]
topK = N - int(N * 0.95)
print(uncertain_sort)
print(uncertain_sort.min())
print(uncertain_sort.max())
print(len(uncertain_sort))
threshold = uncertain_sort[topK-1]
print('The model %s uncertainty threshold on UCF-101 train set: %lf'%(args.result_prefix.split('/')[-1], threshold))
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/get_threshold_dist.py |
import argparse
import os
import os.path as osp
import torch
import mmcv
from mmaction.apis import init_recognizer
from mmcv.parallel import collate, scatter
from operator import itemgetter
from mmaction.datasets.pipelines import Compose
from mmaction.datasets import build_dataloader, build_dataset
from mmcv.parallel import MMDataParallel
import numpy as np
from scipy.special import xlogy
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 test')
# model and data config
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file/url')
parser.add_argument('--uncertainty', default='BALD', choices=['BALD', 'Entropy', 'EDL'], help='the uncertainty estimation method')
parser.add_argument('--train_data', help='the split file of in-distribution training data')
parser.add_argument('--forward_pass', type=int, default=10, help='the number of forward passes')
parser.add_argument('--batch_size', type=int, default=8, help='the testing batch size')
# env config
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument('--result_prefix', help='result file prefix')
args = parser.parse_args()
return args
def apply_dropout(m):
if type(m) == torch.nn.Dropout:
m.train()
def update_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def compute_uncertainty(predictions, method='BALD'):
"""Compute the entropy
scores: (B x C x T)
"""
expected_p = np.mean(predictions, axis=-1) # mean of all forward passes (C,)
entropy_expected_p = - np.sum(xlogy(expected_p, expected_p), axis=1) # the entropy of expect_p (across classes)
if method == 'Entropy':
uncertain_score = entropy_expected_p
elif method == 'BALD':
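        # BALD (Houlsby et al., 2011): mutual information between predictions and model
        # parameters, estimated over the MC forward passes as H(E[p]) - E[H(p)]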
expected_entropy = - np.mean(np.sum(xlogy(predictions, predictions), axis=1), axis=-1) # mean of entropies (across classes), (scalar)
uncertain_score = entropy_expected_p - expected_entropy
else:
raise NotImplementedError
if not np.all(np.isfinite(uncertain_score)):
        uncertain_score[~np.isfinite(uncertain_score)] = 9999
return uncertain_score
def run_stochastic_inference(model, data_loader, forward_passes):
model = MMDataParallel(model, device_ids=[0])
all_uncertainties = []
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
all_scores = []
with torch.no_grad():
for n in range(forward_passes):
# set new random seed
update_seed(n * 1234)
scores = model(return_loss=False, **data)
# gather results
all_scores.append(np.expand_dims(scores, axis=-1))
all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T)
# compute the uncertainty
uncertainty = compute_uncertainty(all_scores, method=args.uncertainty)
all_uncertainties.append(uncertainty)
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size):
prog_bar.update()
all_uncertainties = np.concatenate(all_uncertainties, axis=0)
return all_uncertainties
def run_evidence_inference(model, data_loader, evidence):
# get the evidence function
if evidence == 'relu':
from mmaction.models.losses.edl_loss import relu_evidence as get_evidence
elif evidence == 'exp':
from mmaction.models.losses.edl_loss import exp_evidence as get_evidence
elif evidence == 'softplus':
from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence
else:
raise NotImplementedError
num_classes = model.cls_head.num_classes
model = MMDataParallel(model, device_ids=[0])
all_uncertainties = []
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
output = model(return_loss=False, **data)
evidence = get_evidence(torch.from_numpy(output))
alpha = evidence + 1
uncertainty = num_classes / torch.sum(alpha, dim=1)
scores = alpha / torch.sum(alpha, dim=1, keepdim=True)
all_uncertainties.append(uncertainty.numpy())
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size):
prog_bar.update()
all_uncertainties = np.concatenate(all_uncertainties, axis=0)
return all_uncertainties
def run_inference():
# build the recognizer from a config file and checkpoint file/url
model = init_recognizer(
args.config,
args.checkpoint,
device=device,
use_frames=False)
cfg = model.cfg
if not args.uncertainty == 'EDL':
# use dropout in testing stage
if 'dnn' in args.config:
model.apply(apply_dropout)
if 'bnn' in args.config:
model.test_cfg.npass = 1
# set cudnn benchmark
torch.backends.cudnn.benchmark = True
cfg.data.test.test_mode = True
cfg.data.videos_per_gpu = args.batch_size
evidence = cfg.get('evidence', 'exp')
# We use training data to obtain the threshold
cfg.data.test.ann_file = args.train_data
cfg.data.test.data_prefix = os.path.join(os.path.dirname(args.train_data), 'videos')
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
dist=False,
shuffle=False,
pin_memory=False)
dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
# run inference
if not args.uncertainty == 'EDL':
all_uncertainties = run_stochastic_inference(model, data_loader, args.forward_pass)
else:
all_uncertainties = run_evidence_inference(model, data_loader, evidence)
return all_uncertainties
if __name__ == '__main__':
args = parse_args()
# assign the desired device.
device = torch.device(args.device)
result_file = os.path.join(args.result_prefix + '_trainset_uncertainties.npz')
if not os.path.exists(result_file):
# run the inference on the entire training set (takes long time)
all_uncertainties = run_inference()
np.savez(result_file[:-4], uncertainty=all_uncertainties)
else:
result = np.load(result_file)
all_uncertainties = result['uncertainty']
    # determine the uncertainty threshold (95th percentile) on the training set
uncertain_sort = np.sort(all_uncertainties)[::-1] # sort the uncertainties with descending order
N = all_uncertainties.shape[0]
topK = N - int(N * 0.95)
threshold = uncertain_sort[topK-1]
print('The model %s uncertainty threshold on UCF-101 train set: %lf'%(args.result_prefix.split('/')[-1], threshold)) | InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/get_threshold.py |
import argparse, os, sys
import torch
import mmcv
import numpy as np
import torch.nn.functional as F
from mmcv.parallel import collate, scatter
from mmaction.datasets.pipelines import Compose
from mmaction.apis import init_recognizer
from mmaction.datasets import build_dataloader, build_dataset
from mmcv.parallel import MMDataParallel
from tqdm import tqdm
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score
import matplotlib.pyplot as plt
def parse_args():
""" Example shell script:
$ cd experiments
$ source activate mmaction
    $ nohup python baseline_softmax.py --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py
"""
parser = argparse.ArgumentParser(description='MMAction2 test')
# model config
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file/url')
parser.add_argument('--train_data', default='data/ucf101/ucf101_train_split_1_videos.txt', help='the split file of in-distribution training data')
parser.add_argument('--batch_size', type=int, default=8, help='the testing batch size')
    parser.add_argument('--ood_ncls', type=int, help='the number of classes in unknown dataset')
# device
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument('--result_prefix', help='result file prefix')
args = parser.parse_args()
return args
def set_deterministic(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
np.random.seed(seed) # Numpy module.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
def run_inference(config, checkpoint, train_data, batch_size, device):
# initialize recognition model
model = init_recognizer(config, checkpoint, device=device, use_frames=False)
torch.backends.cudnn.benchmark = True
model.cfg.data.test.test_mode = True
    model.cfg.test_cfg.average_clips = 'prob' # we need the probability score from the softmax layer
model.cfg.data.videos_per_gpu = batch_size # batch size of training set
# We use training data to obtain the threshold
model.cfg.data.test.ann_file = train_data
model.cfg.data.test.data_prefix = os.path.join(os.path.dirname(train_data), 'videos')
# build the dataloader
dataset = build_dataset(model.cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=model.cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=model.cfg.data.get('workers_per_gpu', 1),
dist=False,
shuffle=False,
pin_memory=False)
dataloader_setting = dict(dataloader_setting, **model.cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
# running the inference
model = MMDataParallel(model, device_ids=[0])
all_scores = []
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
scores = model(return_loss=False, **data) # (B, C)
all_scores.append(scores)
# use the first key as main key to calculate the batch size
bs = len(next(iter(data.values())))
for _ in range(bs):
prog_bar.update()
all_scores = np.concatenate(all_scores, axis=0)
return all_scores
def evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, ood_ncls, thresh, num_rand=10):
ind_ncls = ind_softmax.shape[1]
ind_results = np.argmax(ind_softmax, axis=1)
ood_results = np.argmax(ood_softmax, axis=1)
# close-set accuracy (multi-class)
acc = accuracy_score(ind_labels, ind_results)
# open-set auc-roc (binary class)
ind_conf = np.max(ind_softmax, axis=1)
ood_conf = np.max(ood_softmax, axis=1)
preds = np.concatenate((ind_results, ood_results), axis=0)
confs = np.concatenate((ind_conf, ood_conf), axis=0)
    preds[confs < thresh] = 1 # unknown class
    preds[confs >= thresh] = 0 # known class
labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels)))
auc = roc_auc_score(labels, preds)
print('SoftMax: ClosedSet Accuracy (multi-class): %.3lf, OpenSet AUC (bin-class): %.3lf'%(acc * 100, auc * 100))
ind_results[ind_conf < thresh] = ind_ncls # incorrect rejection
# open set F1 score (multi-class)
macro_F1_list = [f1_score(ind_labels, ind_results, average='macro')]
std_list = [0]
openness_list = [0]
for n in range(ood_ncls):
ncls_novel = n + 1
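        # openness measure commonly used in open-set recognition (Scheirer et al.):
        # 1 - sqrt(2 * n_known / (2 * n_known + n_novel)), in percent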
openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
openness_list.append(openness)
        # randomly select the subset of ood samples
macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
for m in range(num_rand):
cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
ood_sub_confs = np.concatenate([ood_conf[ood_labels == clsid] for clsid in cls_select])
ood_sub_results[ood_sub_confs < thresh] = ind_ncls # correct rejection
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi)
std = np.std(macro_F1_multi)
macro_F1_list.append(macro_F1)
std_list.append(std)
# draw comparison curves
macro_F1_list = np.array(macro_F1_list)
std_list = np.array(std_list)
w_openness = np.array(openness_list) / 100.
open_maF1_mean = np.sum(w_openness * macro_F1_list) / np.sum(w_openness)
open_maF1_std = np.sum(w_openness * std_list) / np.sum(w_openness)
print('Open macro-F1 score: %.3f, std=%.3lf'%(open_maF1_mean * 100, open_maF1_std * 100))
return openness_list, macro_F1_list, std_list
if __name__ == '__main__':
args = parse_args()
# assign the desired device.
device = torch.device(args.device)
set_deterministic(0)
modelname, methodname, dataname = args.result_prefix.split('/')[-1].split('_')
######## Compute threshold with training data ########
result_file = os.path.join(os.path.dirname(args.result_prefix), modelname +'_SoftMax_trainset_softmax.npz')
if not os.path.exists(result_file):
# prepare result path
result_dir = os.path.dirname(result_file)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# run the inference on training data
trainset_scores = run_inference(args.config, args.checkpoint, args.train_data, args.batch_size, device)
# save
np.savez(result_file[:-4], trainset_scores=trainset_scores)
else:
result = np.load(result_file)
trainset_scores = result['trainset_scores']
max_scores = np.max(trainset_scores, axis=1)
    scores_sort = np.sort(max_scores)[::-1] # sort the max softmax scores in descending order
N = max_scores.shape[0]
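    # threshold below which only the least-confident 5% of training samples fall;
    # test samples whose max softmax score is under this value are rejected as unknown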
threshold = scores_sort[int(N * 0.95)-1]
print('\nThe model %s softmax threshold on UCF-101 train set: %lf'%(args.result_prefix.split('/')[-1], threshold))
# load the softmax results on testing dataset (in OpenMax baseline)
######## OOD and IND detection ########
openmax_result = os.path.join(os.path.dirname(args.result_prefix), '../openmax', modelname +'_OpenMax_'+ dataname +'_result.npz')
if not os.path.exists(openmax_result):
print('File does not exist! %s'%(openmax_result))
print('Run baseline_openmax.py first to get softmax testing results!')
else:
results = np.load(openmax_result, allow_pickle=True)
ind_softmax = results['ind_softmax'] # (N1, C)
ood_softmax = results['ood_softmax'] # (N2, C)
ind_labels = results['ind_label'] # (N1,)
ood_labels = results['ood_label'] # (N2,)
openness_list, macro_F1_list, std_list = evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, args.ood_ncls, threshold) | InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/baseline_softmax.py |
import argparse
import os
import os.path as osp
import torch
import mmcv
from mmaction.apis import init_recognizer
from mmcv.parallel import collate, scatter
from mmaction.datasets.pipelines import Compose
from mmaction.datasets import build_dataloader, build_dataset
from mmcv.parallel import MMDataParallel
import numpy as np
from tqdm import tqdm
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
def parse_args():
"""
experiments/baseline_rpl.py --config configs/recognition/tsm/inference_tsm_rpl.py \
--checkpoint work_dirs/tsm/finetune_ucf101_tsm_rpl/latest.pth \
--train_data data/ucf101/ucf101_train_split_1_videos.txt \
--ind_data
--result_prefix experiments/tsm/results_baselines/rpl/RPL
"""
parser = argparse.ArgumentParser(description='MMAction2 test')
# model and data config
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file/url')
parser.add_argument('--train_data', help='the split file of in-distribution training data')
parser.add_argument('--batch_size', type=int, default=8, help='the testing batch size')
# test data config
parser.add_argument('--ind_data', help='the split file of in-distribution testing data')
parser.add_argument('--ood_data', help='the split file of out-of-distribution testing data')
    parser.add_argument('--ood_ncls', type=int, help='the number of classes in unknown dataset')
parser.add_argument('--ood_dataname', choices=['HMDB', 'MiT'], help='the name of out-of-distribution testing data')
# env config
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument('--result_prefix', help='result file prefix')
args = parser.parse_args()
return args
def set_deterministic(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
np.random.seed(seed) # Numpy module.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
def run_inference(config, checkpoint, data_split, batch_size, device):
# initialize recognition model
model = init_recognizer(config, checkpoint, device=device, use_frames=False)
torch.backends.cudnn.benchmark = True
model.cfg.data.test.test_mode = True
    model.cfg.test_cfg.average_clips = 'prob' # we need the probability score from the softmax layer
model.cfg.data.videos_per_gpu = batch_size # batch size
model.cfg.data.test.ann_file = data_split
model.cfg.data.test.data_prefix = os.path.join(os.path.dirname(data_split), 'videos')
# build the dataloader
dataset = build_dataset(model.cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=model.cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=model.cfg.data.get('workers_per_gpu', 1),
dist=False,
shuffle=False,
pin_memory=False)
dataloader_setting = dict(dataloader_setting, **model.cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
# running the inference
model = MMDataParallel(model, device_ids=[0])
all_scores, all_labels = [], []
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
scores = model(return_loss=False, **data) # (B, C)
all_scores.append(scores)
# gather labels
labels = data['label'].numpy()
all_labels.append(labels)
# use the first key as main key to calculate the batch size
bs = len(next(iter(data.values())))
for _ in range(bs):
prog_bar.update()
all_scores = np.concatenate(all_scores, axis=0)
all_labels = np.concatenate(all_labels, axis=0)
return all_scores, all_labels
def evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, ood_ncls, thresh, num_rand=10):
ind_ncls = ind_softmax.shape[1]
ind_results = np.argmax(ind_softmax, axis=1)
ood_results = np.argmax(ood_softmax, axis=1)
# close-set accuracy (multi-class)
acc = accuracy_score(ind_labels, ind_results)
# open-set auc-roc (binary class)
ind_conf = np.max(ind_softmax, axis=1)
ood_conf = np.max(ood_softmax, axis=1)
preds = np.concatenate((ind_results, ood_results), axis=0)
confs = np.concatenate((ind_conf, ood_conf), axis=0)
    preds[confs < thresh] = 1 # unknown class
    preds[confs >= thresh] = 0 # known class
labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels)))
auc = roc_auc_score(labels, preds)
print('\nClosedSet Accuracy (multi-class): %.3lf, OpenSet AUC (bin-class): %.3lf'%(acc * 100, auc * 100))
ind_results[ind_conf < thresh] = ind_ncls # incorrect rejection
# open set F1 score (multi-class)
macro_F1_list = [f1_score(ind_labels, ind_results, average='macro')]
std_list = [0]
openness_list = [0]
for n in range(ood_ncls):
ncls_novel = n + 1
openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
openness_list.append(openness)
        # randomly select the subset of ood samples
macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
for m in range(num_rand):
cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
ood_sub_confs = np.concatenate([ood_conf[ood_labels == clsid] for clsid in cls_select])
ood_sub_results[ood_sub_confs < thresh] = ind_ncls # correct rejection
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi)
std = np.std(macro_F1_multi)
macro_F1_list.append(macro_F1)
std_list.append(std)
# draw comparison curves
macro_F1_list = np.array(macro_F1_list)
std_list = np.array(std_list)
w_openness = np.array(openness_list) / 100.
open_maF1_mean = np.sum(w_openness * macro_F1_list) / np.sum(w_openness)
open_maF1_std = np.sum(w_openness * std_list) / np.sum(w_openness)
print('Open macro-F1 score: %.3f, std=%.3lf'%(open_maF1_mean * 100, open_maF1_std * 100))
return openness_list, macro_F1_list, std_list
if __name__ == '__main__':
args = parse_args()
# assign the desired device.
device = torch.device(args.device)
set_deterministic(0)
modelname = os.path.dirname(args.config).split('/')[-1].upper()
######## Compute threshold with training data ########
result_file = os.path.join(os.path.dirname(args.result_prefix), modelname + '_RPL_trainset_softmax.npz')
if not os.path.exists(result_file):
# prepare result path
result_dir = os.path.dirname(result_file)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# run the inference on training data
trainset_scores, _ = run_inference(args.config, args.checkpoint, args.train_data, args.batch_size, device)
# save
np.savez(result_file[:-4], trainset_scores=trainset_scores)
else:
result = np.load(result_file)
trainset_scores = result['trainset_scores']
max_scores = np.max(trainset_scores, axis=1)
    scores_sort = np.sort(max_scores)[::-1] # sort the max softmax scores in descending order
N = max_scores.shape[0]
    threshold = scores_sort[int(N * 0.95)-1] # 95th percentile
print('\nThe RPL softmax threshold on UCF-101 train set: %lf'%(threshold))
######## OOD and IND detection ########
testset_result = os.path.join(os.path.dirname(args.result_prefix), modelname +'_RPL_'+ args.ood_dataname +'_result.npz')
if not os.path.exists(testset_result):
# prepare result path
result_dir = os.path.dirname(testset_result)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# run the inference on OOD data
ood_softmax, ood_labels = run_inference(args.config, args.checkpoint, args.ood_data, args.batch_size, device)
# run the inference on IND data
ind_softmax, ind_labels = run_inference(args.config, args.checkpoint, args.ind_data, args.batch_size, device)
# save
np.savez(testset_result[:-4], ind_softmax=ind_softmax, ood_softmax=ood_softmax,
ind_label=ind_labels, ood_label=ood_labels)
else:
results = np.load(testset_result, allow_pickle=True)
ind_softmax = results['ind_softmax'] # (N1, C)
ood_softmax = results['ood_softmax'] # (N2, C)
ind_labels = results['ind_label'] # (N1,)
ood_labels = results['ood_label'] # (N2,)
openness_list, macro_F1_list, std_list = evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, args.ood_ncls, threshold) | InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/baseline_rpl.py |
import os
import argparse
from matplotlib.pyplot import axis
import numpy as np
from sklearn.metrics import roc_auc_score, accuracy_score, precision_recall_curve, auc, roc_curve
from terminaltables import AsciiTable
def parse_args():
'''Command instruction:
source activate mmaction
python experiments/compare_openness.py
'''
parser = argparse.ArgumentParser(description='Compare the performance of openness')
# model config
parser.add_argument('--base_model', default='i3d', help='the backbone model name')
parser.add_argument('--ood_data', default='HMDB', help='the name of OOD dataset.')
parser.add_argument('--thresholds', nargs='+', type=float, default=[-1,-1,-1,-1,-1,-1])
parser.add_argument('--baseline_results', nargs='+', help='the testing results files.')
args = parser.parse_args()
return args
def eval_osr(y_true, y_pred):
# open-set auc-roc (binary class)
auroc = roc_auc_score(y_true, y_pred)
# open-set auc-pr (binary class)
    # as an alternative, you may also use `ap = average_precision_score(labels, uncertains)`, which approximates AUPR.
precision, recall, _ = precision_recall_curve(y_true, y_pred)
aupr = auc(recall, precision)
# open-set fpr@95 (binary class)
fpr, tpr, _ = roc_curve(y_true, y_pred, pos_label=1)
operation_idx = np.abs(tpr - 0.95).argmin()
fpr95 = fpr[operation_idx] # FPR when TPR at 95%
return auroc, aupr, fpr95
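# Minimal usage sketch (hypothetical values, not from the original experiments):
# `y_true` marks OOD samples with 1 and IND samples with 0, and `y_pred` is any score
# expected to be higher for OOD samples (e.g. uncertainty or 1 - confidence).
#
#   y_true = np.array([0, 0, 0, 1, 1, 1])
#   y_pred = np.array([0.1, 0.2, 0.4, 0.7, 0.8, 0.9])
#   auroc, aupr, fpr95 = eval_osr(y_true, y_pred)  # auroc == 1.0 for this toy case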
def parse_results(result_file, method='softmax'):
# Softmax and OpenMax
assert os.path.exists(result_file), "File not found! Run baseline_openmax.py first to get softmax testing results!\n%s"%(result_file)
results = np.load(result_file, allow_pickle=True)
# parse results
ind_labels = results['ind_label'] # (N1,)
ood_labels = results['ood_label'] # (N2,)
if method == 'softmax':
ind_softmax = results['ind_softmax'] # (N1, C)
ood_softmax = results['ood_softmax'] # (N2, C)
return ind_softmax, ood_softmax, ind_labels, ood_labels
elif method == 'openmax':
ind_openmax = results['ind_openmax'] # (N1, C+1)
ood_openmax = results['ood_openmax'] # (N2, C+1)
return ind_openmax, ood_openmax, ind_labels, ood_labels
def eval_confidence_methods(ind_probs, ood_probs, ind_labels, ood_labels, score='max_prob', ind_ncls=101, threshold=-1):
# close-set accuracy (multi-class)
ind_results = np.argmax(ind_probs, axis=1)
ood_results = np.argmax(ood_probs, axis=1)
acc = accuracy_score(ind_labels, ind_results)
# open-set evaluation (binary class)
if score == 'binary':
preds = np.concatenate((ind_results, ood_results), axis=0)
idx_pos = preds == ind_ncls
idx_neg = preds != ind_ncls
preds[idx_pos] = 1 # unknown class
preds[idx_neg] = 0 # known class
elif score == 'max_prob':
ind_conf = np.max(ind_probs, axis=1)
ood_conf = np.max(ood_probs, axis=1)
confs = np.concatenate((ind_conf, ood_conf), axis=0)
if threshold > 0:
preds = np.concatenate((ind_results, ood_results), axis=0)
preds[confs < threshold] = 1 # unknown class
preds[confs >= threshold] = 0 # known class
else:
preds = 1 - confs
labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels)))
auroc, aupr, fpr95 = eval_osr(labels, preds)
return acc, auroc, aupr, fpr95
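# Minimal usage sketch (hypothetical arrays): with `score='max_prob'` and a positive
# threshold, samples whose maximum probability falls below the threshold are treated as
# unknown; with `threshold=-1` the raw score 1 - max_prob is evaluated directly.
#
#   acc, auroc, aupr, fpr95 = eval_confidence_methods(
#       ind_probs, ood_probs, ind_labels, ood_labels, score='max_prob', threshold=0.9)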
def eval_uncertainty_methods(result_file, threshold=-1):
assert os.path.exists(result_file), "File not found! Run ood_detection first!\n%s"%(result_file)
# load the testing results
results = np.load(result_file, allow_pickle=True)
ind_uncertainties = results['ind_unctt'] # (N1,)
ood_uncertainties = results['ood_unctt'] # (N2,)
ind_results = results['ind_pred'] # (N1,)
ood_results = results['ood_pred'] # (N2,)
ind_labels = results['ind_label']
ood_labels = results['ood_label']
# close-set accuracy (multi-class)
acc = accuracy_score(ind_labels, ind_results)
# open-set evaluation (binary class)
if threshold > 0:
preds = np.concatenate((ind_results, ood_results), axis=0)
uncertains = np.concatenate((ind_uncertainties, ood_uncertainties), axis=0)
preds[uncertains > threshold] = 1
preds[uncertains <= threshold] = 0
else:
preds = np.concatenate((ind_uncertainties, ood_uncertainties), axis=0)
labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels)))
auroc, aupr, fpr95 = eval_osr(labels, preds)
return acc, auroc, aupr, fpr95
def main():
print(f'\nNew Evaluation Results (open-set data: {args.ood_data}, backbone: {args.base_model})')
display_data = [["Methods", "AUROC (%)", "AUPR (%)", "FPR@95 (%)", "Closed-Set ACC (%)"],
["OpenMax"], ["MC Dropout"], ["BNN SVI"], ["SoftMax"], ["RPL"], ["DEAR (ours)"]] # table heads and rows
exp_dir = os.path.join('./experiments', args.base_model)
# OpenMax
result_path = os.path.join(exp_dir, args.baseline_results[0])
ind_openmax, ood_openmax, ind_labels, ood_labels = parse_results(result_path, method='openmax')
acc, auroc, aupr, fpr95 = eval_confidence_methods(ind_openmax, ood_openmax, ind_labels, ood_labels, score='binary')
display_data[1].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)])
# MC Dropout
result_path = os.path.join(exp_dir, args.baseline_results[1])
acc, auroc, aupr, fpr95 = eval_uncertainty_methods(result_path, threshold=args.thresholds[1])
display_data[2].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)])
# BNN SVI
result_path = os.path.join(exp_dir, args.baseline_results[2])
acc, auroc, aupr, fpr95 = eval_uncertainty_methods(result_path, threshold=args.thresholds[2])
display_data[3].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)])
# SoftMax
result_path = os.path.join(exp_dir, args.baseline_results[3])
ind_softmax, ood_softmax, ind_labels, ood_labels = parse_results(result_path, method='softmax')
acc, auroc, aupr, fpr95 = eval_confidence_methods(ind_softmax, ood_softmax, ind_labels, ood_labels, threshold=args.thresholds[3])
display_data[4].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)])
# RPL
result_path = os.path.join(exp_dir, args.baseline_results[4])
ind_softmax, ood_softmax, ind_labels, ood_labels = parse_results(result_path, method='softmax')
acc, auroc, aupr, fpr95 = eval_confidence_methods(ind_softmax, ood_softmax, ind_labels, ood_labels, threshold=args.thresholds[4])
display_data[5].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)])
# DEAR (ours)
result_path = os.path.join(exp_dir, args.baseline_results[5])
acc, auroc, aupr, fpr95 = eval_uncertainty_methods(result_path, threshold=args.thresholds[5])
display_data[6].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)])
table = AsciiTable(display_data)
table.inner_footing_row_border = True
table.justify_columns = {0: 'left', 1: 'center', 2: 'center', 3: 'center', 4: 'center'}
print(table.table)
print("\n")
if __name__ == "__main__":
np.random.seed(123)
args = parse_args()
main() | InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/compare_openness_new.py |
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def parse_args():
parser = argparse.ArgumentParser(description='Draw histogram')
parser.add_argument('--uncertainty', default='EDL', choices=['BALD', 'Entropy', 'EDL'], help='the uncertainty estimation method')
parser.add_argument('--ind_data', default='UCF-101', help='the split file of in-distribution testing data')
parser.add_argument('--ood_data', default='HMDB', choices=['HMDB', 'MiT'], help='the split file of out-of-distribution testing data')
parser.add_argument('--model', default='I3D', choices=['I3D', 'TSM', 'SlowFast', 'TPN'], help='the action recognition model.')
parser.add_argument('--result_prefix', default='temp/temp.png', help='result file prefix')
args = parser.parse_args()
return args
def plot_by_uncertainty(result_file, uncertainty='EDL', auc=80, fontsize=16):
assert os.path.exists(result_file), 'result file not exists! %s'%(result_file)
results = np.load(result_file, allow_pickle=True)
# ind_confidences = results['ind_conf']
# ood_confidences = results['ood_conf']
ind_uncertainties = results['ind_unctt'] # (N1,)
ood_uncertainties = results['ood_unctt'] # (N2,)
ind_results = results['ind_pred'] # (N1,)
ood_results = results['ood_pred'] # (N2,)
ind_labels = results['ind_label']
ood_labels = results['ood_label']
# visualize
ind_uncertainties = np.array(ind_uncertainties)
ind_uncertainties = (ind_uncertainties-np.min(ind_uncertainties)) / (np.max(ind_uncertainties) - np.min(ind_uncertainties)) # normalize
ood_uncertainties = np.array(ood_uncertainties)
ood_uncertainties = (ood_uncertainties-np.min(ood_uncertainties)) / (np.max(ood_uncertainties) - np.min(ood_uncertainties)) # normalize
fig = plt.figure(figsize=(5,4)) # (w, h)
plt.rcParams["font.family"] = "Arial" # Times New Roman
data_label = 'HMDB-51' if args.ood_data == 'HMDB' else 'MiT-v2'
plt.hist([ind_uncertainties, ood_uncertainties], 50,
density=True, histtype='bar', color=['blue', 'red'],
label=['in-distribution (%s)'%(args.ind_data), 'out-of-distribution (%s)'%(data_label)])
plt.legend(fontsize=fontsize-3)
plt.text(0.6, 6, 'AUC = %.2lf'%(auc), fontsize=fontsize-3)
plt.xlabel('%s uncertainty'%(uncertainty), fontsize=fontsize)
plt.ylabel('Density', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.xlim(0, 1.01)
plt.ylim(0, 10.01)
plt.tight_layout()
result_dir = os.path.dirname(args.result_prefix)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# save the figure
plt.savefig(os.path.join(args.result_prefix + '_distribution.png'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
plt.savefig(os.path.join(args.result_prefix + '_distribution.pdf'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
def get_confidence(result_file, conf='softmax'):
# only for SoftMax and OpenMax
assert os.path.exists(result_file), 'result file not exists! %s'%(result_file)
results = np.load(result_file, allow_pickle=True)
if conf == 'softmax':
ind_score = results['ind_softmax'] # (N1, C)
ood_score = results['ood_softmax'] # (N2, C)
else:
ind_score = results['ind_openmax'] # (N1, C+1)
ood_score = results['ood_openmax'] # (N2, C+1)
ind_conf = np.max(ind_score, axis=1)
ood_conf = np.max(ood_score, axis=1)
return ind_conf, ood_conf
def plot_by_confidence(ind_confidence, ood_confidence, auc=80, fontsize=16):
# visualize
ind_conf = ind_confidence.copy()
ind_conf = (ind_conf-np.min(ind_conf)) / (np.max(ind_conf) - np.min(ind_conf) + 1e-6) # normalize
ood_conf = ood_confidence.copy()
ood_conf = (ood_conf-np.min(ood_conf)) / (np.max(ood_conf) - np.min(ood_conf) + 1e-6) # normalize
fig = plt.figure(figsize=(5,4)) # (w, h)
plt.rcParams["font.family"] = "Arial" # Times New Roman
data_label = 'HMDB-51' if args.ood_data == 'HMDB' else 'MiT-v2'
plt.hist([ind_conf, ood_conf], 50,
density=True, histtype='bar', color=['blue', 'red'],
label=['in-distribution (%s)'%(args.ind_data), 'out-of-distribution (%s)'%(data_label)])
plt.legend(fontsize=fontsize-3)
plt.text(0.6, 6, 'AUC = %.2lf'%(auc), fontsize=fontsize-3)
plt.xlabel('Confidence', fontsize=fontsize)
plt.ylabel('Density', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.xlim(0, 1.01)
plt.ylim(0, 10.01)
plt.tight_layout()
result_dir = os.path.dirname(args.result_prefix)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# save the figure
plt.savefig(os.path.join(args.result_prefix + '_distribution.png'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
plt.savefig(os.path.join(args.result_prefix + '_distribution.pdf'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
def main_i3d():
# common settings
fontsize = 18 if args.ood_data == 'HMDB' else 20
# SoftMax
result_file = 'i3d/results_baselines/openmax/I3D_OpenMax_%s_result.npz'%(args.ood_data)
args.result_prefix = 'i3d/results_baselines/softmax/I3D_SoftMax_Conf_%s'%(args.ood_data)
auc = 75.68 if args.ood_data == 'HMDB' else 79.94
ind_conf, ood_conf = get_confidence(result_file, conf='softmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# OpenMax
result_file = 'i3d/results_baselines/openmax/I3D_OpenMax_%s_result.npz'%(args.ood_data)
args.result_prefix = 'i3d/results_baselines/openmax/I3D_OpenMax_Conf_%s'%(args.ood_data)
auc = 74.34 if args.ood_data == 'HMDB' else 77.76
ind_conf, ood_conf = get_confidence(result_file, conf='openmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# RPL
result_file = 'i3d/results_baselines/rpl/I3D_RPL_%s_result.npz'%(args.ood_data)
args.result_prefix = 'i3d/results_baselines/rpl/I3D_RPL_Conf_%s'%(args.ood_data)
auc = 75.20 if args.ood_data == 'HMDB' else 79.16
ind_conf, ood_conf = get_confidence(result_file, conf='softmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# MC Dropout
result_file = 'i3d/results/I3D_DNN_BALD_%s_result.npz'%(args.ood_data)
args.result_prefix = 'i3d/results/I3D_DNN_BALD_%s'%(args.ood_data)
auc = 75.07 if args.ood_data == 'HMDB' else 79.14
plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize)
# BNN SVI
result_file = 'i3d/results/I3D_BNN_BALD_%s_result.npz'%(args.ood_data)
args.result_prefix = 'i3d/results/I3D_BNN_BALD_%s'%(args.ood_data)
auc = 74.66 if args.ood_data == 'HMDB' else 79.50
plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize)
# DRIVE (vanilla)
result_file = 'i3d/results/I3D_EDLNoKL_EDL_%s_result.npz'%(args.ood_data)
args.result_prefix = 'i3d/results/I3D_EDLNoKL_EDL_%s'%(args.ood_data)
auc = 76.41 if args.ood_data == 'HMDB' else 81.43
plot_by_uncertainty(result_file, uncertainty='EDL', auc=auc, fontsize=fontsize)
# DRIVE (full)
result_file = 'i3d/results/I3D_EDLNoKLAvUCCED_EDL_%s_result.npz'%(args.ood_data)
args.result_prefix = 'i3d/results/I3D_EDLNoKLAvUCCED_EDL_%s'%(args.ood_data)
auc = 77.08 if args.ood_data == 'HMDB' else 81.54
plot_by_uncertainty(result_file, uncertainty='EDL', auc=auc, fontsize=fontsize)
def main_tsm():
# common settings
fontsize = 18 if args.ood_data == 'HMDB' else 20
# SoftMax
result_file = 'tsm/results_baselines/openmax/TSM_OpenMax_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tsm/results_baselines/softmax/TSM_SoftMax_Conf_%s'%(args.ood_data)
auc = 77.99 if args.ood_data == 'HMDB' else 82.38
ind_conf, ood_conf = get_confidence(result_file, conf='softmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# OpenMax
result_file = 'tsm/results_baselines/openmax/TSM_OpenMax_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tsm/results_baselines/openmax/TSM_OpenMax_Conf_%s'%(args.ood_data)
auc = 77.07 if args.ood_data == 'HMDB' else 83.05
ind_conf, ood_conf = get_confidence(result_file, conf='openmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# RPL
result_file = 'tsm/results_baselines/rpl/TSM_RPL_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tsm/results_baselines/rpl/TSM_RPL_Conf_%s'%(args.ood_data)
auc = 73.62 if args.ood_data == 'HMDB' else 77.28
ind_conf, ood_conf = get_confidence(result_file, conf='softmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# MC Dropout
result_file = 'tsm/results/TSM_DNN_BALD_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tsm/results/TSM_DNN_BALD_%s'%(args.ood_data)
auc = 73.85 if args.ood_data == 'HMDB' else 78.35
plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize)
# BNN SVI
result_file = 'tsm/results/TSM_BNN_BALD_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tsm/results/TSM_BNN_BALD_%s'%(args.ood_data)
auc = 73.42 if args.ood_data == 'HMDB' else 77.39
plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize)
# DRIVE (full)
result_file = 'tsm/results/TSM_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tsm/results/TSM_EDLNoKLAvUCDebias_EDL_%s'%(args.ood_data)
auc = 78.65 if args.ood_data == 'HMDB' else 83.92
plot_by_uncertainty(result_file, uncertainty='EDL', auc=auc, fontsize=fontsize)
def main_slowfast():
# common settings
fontsize = 18 if args.ood_data == 'HMDB' else 20
# SoftMax
result_file = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_%s_result.npz'%(args.ood_data)
args.result_prefix = 'slowfast/results_baselines/softmax/SlowFast_SoftMax_Conf_%s'%(args.ood_data)
auc = 79.16 if args.ood_data == 'HMDB' else 82.88
ind_conf, ood_conf = get_confidence(result_file, conf='softmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# OpenMax
result_file = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_%s_result.npz'%(args.ood_data)
args.result_prefix = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_Conf_%s'%(args.ood_data)
auc = 78.76 if args.ood_data == 'HMDB' else 80.62
ind_conf, ood_conf = get_confidence(result_file, conf='openmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# RPL
result_file = 'slowfast/results_baselines/rpl/SlowFast_RPL_%s_result.npz'%(args.ood_data)
args.result_prefix = 'slowfast/results_baselines/rpl/SlowFast_RPL_Conf_%s'%(args.ood_data)
auc = 74.23 if args.ood_data == 'HMDB' else 77.42
ind_conf, ood_conf = get_confidence(result_file, conf='softmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# MC Dropout
result_file = 'slowfast/results/SlowFast_DNN_BALD_%s_result.npz'%(args.ood_data)
args.result_prefix = 'slowfast/results/SlowFast_DNN_BALD_%s'%(args.ood_data)
auc = 75.41 if args.ood_data == 'HMDB' else 78.49
plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize)
# BNN SVI
result_file = 'slowfast/results/SlowFast_BNN_BALD_%s_result.npz'%(args.ood_data)
args.result_prefix = 'slowfast/results/SlowFast_BNN_BALD_%s'%(args.ood_data)
auc = 74.78 if args.ood_data == 'HMDB' else 77.39
plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize)
# DRIVE (full)
result_file = 'slowfast/results/SlowFast_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data)
args.result_prefix = 'slowfast/results/SlowFast_EDLNoKLAvUCDebias_EDL_%s'%(args.ood_data)
auc = 82.94 if args.ood_data == 'HMDB' else 86.99
plot_by_uncertainty(result_file, uncertainty='EDL', auc=auc, fontsize=fontsize)
def main_tpn():
# common settings
fontsize = 18 if args.ood_data == 'HMDB' else 20
# SoftMax
result_file = 'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tpn_slowonly/results_baselines/softmax/TPN_SoftMax_Conf_%s'%(args.ood_data)
auc = 77.97 if args.ood_data == 'HMDB' else 81.35
ind_conf, ood_conf = get_confidence(result_file, conf='softmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# OpenMax
result_file = 'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_Conf_%s'%(args.ood_data)
auc = 74.12 if args.ood_data == 'HMDB' else 76.26
ind_conf, ood_conf = get_confidence(result_file, conf='openmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# RPL
result_file = 'tpn_slowonly/results_baselines/rpl/TPN_RPL_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tpn_slowonly/results_baselines/rpl/TPN_RPL_Conf_%s'%(args.ood_data)
auc = 75.32 if args.ood_data == 'HMDB' else 78.21
ind_conf, ood_conf = get_confidence(result_file, conf='softmax')
plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize)
# MC Dropout
result_file = 'tpn_slowonly/results/TPN_SlowOnly_Dropout_BALD_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tpn_slowonly/results/TPN_SlowOnly_Dropout_BALD_%s'%(args.ood_data)
auc = 74.13 if args.ood_data == 'HMDB' else 77.76
plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize)
# BNN SVI
result_file = 'tpn_slowonly/results/TPN_SlowOnly_BNN_BALD_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tpn_slowonly/results/TPN_SlowOnly_BNN_BALD_%s'%(args.ood_data)
auc = 72.68 if args.ood_data == 'HMDB' else 75.32
plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize)
# DRIVE (full)
result_file = 'tpn_slowonly/results/TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data)
args.result_prefix = 'tpn_slowonly/results/TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL_%s'%(args.ood_data)
auc = 79.23 if args.ood_data == 'HMDB' else 81.80
plot_by_uncertainty(result_file, uncertainty='EDL', auc=auc, fontsize=fontsize)
if __name__ == '__main__':
args = parse_args()
if args.model == 'I3D':
# draw results on I3D
main_i3d()
elif args.model == 'TSM':
# draw results on TSM
main_tsm()
elif args.model == 'SlowFast':
# draw results on SlowFast
main_slowfast()
elif args.model == 'TPN':
# draw results on TPN
main_tpn()
else:
raise NotImplementedError
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/draw_ood_hist.py |
import argparse
import os
import os.path as osp
import torch
import mmcv
from mmcv import Config, DictAction
from mmaction.apis import init_recognizer
from mmcv.parallel import collate, scatter
from operator import itemgetter
from mmaction.datasets.pipelines import Compose
from mmaction.datasets import build_dataloader, build_dataset
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint, wrap_fp16_model
import numpy as np
from scipy.special import xlogy
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from tqdm import tqdm
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
import pdb
from mmaction.models import build_model
from mmcv.cnn import fuse_conv_bn
from mmaction.apis import collect_results_cpu
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 test')
# model config
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file/url')
parser.add_argument('--uncertainty', choices=['BALD', 'Entropy', 'EDL'], help='the uncertainty estimation method')
parser.add_argument('--forward_pass', type=int, default=10, help='the number of forward passes')
# data config
parser.add_argument('--ind_data', help='the split file of in-distribution testing data')
parser.add_argument('--ood_data', help='the split file of out-of-distribution testing data')
# env config
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument('--result_prefix', help='result file prefix')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase'
'the inference speed')
args = parser.parse_args()
return args
def apply_dropout(m):
if type(m) == torch.nn.Dropout:
m.train()
def update_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
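# Note: update_seed is called once per stochastic forward pass so that dropout masks
# (and any other sampled weights) differ across the `npass` passes used below.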
def compute_uncertainty(predictions, method='BALD'):
"""Compute the entropy
scores: (B x C x T)
"""
expected_p = np.mean(predictions, axis=-1) # mean of all forward passes (C,)
entropy_expected_p = - np.sum(xlogy(expected_p, expected_p), axis=1) # the entropy of expect_p (across classes)
if method == 'Entropy':
uncertain_score = entropy_expected_p
elif method == 'BALD':
expected_entropy = - np.mean(np.sum(xlogy(predictions, predictions), axis=1), axis=-1) # mean of entropies (across classes), (scalar)
uncertain_score = entropy_expected_p - expected_entropy
else:
raise NotImplementedError
    if not np.all(np.isfinite(uncertain_score)):
        uncertain_score[~np.isfinite(uncertain_score)] = 9999
return uncertain_score
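# Worked example (hypothetical numbers, for illustration only): with predictions of
# shape (1, 2, 2), i.e. two classes and two forward passes,
#
#   p = np.array([[[0.9, 0.6], [0.1, 0.4]]])       # pass 1: (0.9, 0.1), pass 2: (0.6, 0.4)
#   expected_p = p.mean(axis=-1)                    # (0.75, 0.25)
#   H[E(p)] = -sum(expected_p * log(expected_p))    # predictive entropy
#   E[H(p)] = mean over passes of per-pass entropy  # expected entropy
#   BALD    = H[E(p)] - E[H(p)]                     # mutual information; 0 if all passes agree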
def run_stochastic_inference(model, data_loader, npass=10):
# run inference
# model = MMDataParallel(model, device_ids=[0])
# model = MMDistributedDataParallel(
# model.cuda(),
# device_ids=[torch.cuda.current_device()],
# broadcast_buffers=False)
model.eval()
all_confidences, all_uncertainties, all_results, all_gts = [], [], [], []
# prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
all_scores = []
with torch.no_grad():
for n in range(npass):
# set new random seed
update_seed(n * 1234)
#-original--
# scores = model(return_loss=False, **data)
# # gather results
# all_scores.append(np.expand_dims(scores, axis=-1))
# all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T)
# # compute the uncertainty
# uncertainty = compute_uncertainty(all_scores, method=args.uncertainty)
# all_uncertainties.append(uncertainty)
#---vae---------------------------------
# # pdb.set_trace()
# scores, recon = model(return_loss=False, **data)
# uncertainty = recon
# all_scores.append(np.expand_dims(scores, axis=-1))
# all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T)
# all_uncertainties.append(uncertainty)
#-----------------------------------------------
#---FLOW---------------------------------
# pdb.set_trace()
scores, logpx = model(return_loss=False, **data)
uncertainty = logpx
all_scores.append(np.expand_dims(scores, axis=-1))
all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T)
all_uncertainties.append(uncertainty)
#-----------------------------------------------
# compute the predictions and save labels
mean_scores = np.mean(all_scores, axis=-1)
preds = np.argmax(mean_scores, axis=1)
all_results.append(preds)
conf = np.max(mean_scores, axis=1)
all_confidences.append(conf)
labels = data['label'].numpy()
all_gts.append(labels)
# # use the first key as main key to calculate the batch size
# batch_size = len(next(iter(data.values())))
# for _ in range(batch_size):
# prog_bar.update()
if rank == 0:
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size * world_size):
prog_bar.update()
# pdb.set_trace()
all_confidences = collect_results_cpu(all_confidences, len(data_loader.dataset), tmpdir=None)
all_uncertainties = collect_results_cpu(all_uncertainties, len(data_loader.dataset), tmpdir=None)
all_results = collect_results_cpu(all_results, len(data_loader.dataset), tmpdir=None)
all_gts = collect_results_cpu(all_gts, len(data_loader.dataset), tmpdir=None)
rank, _ = get_dist_info()
if rank == 0:
all_confidences = np.concatenate(all_confidences, axis=0)
all_uncertainties = np.concatenate(all_uncertainties, axis=0)
all_results = np.concatenate(all_results, axis=0)
all_gts = np.concatenate(all_gts, axis=0)
# pdb.set_trace()
return all_confidences, all_uncertainties, all_results, all_gts
def run_evidence_inference(model, data_loader, evidence='exp'):
# set new random seed
update_seed(1234)
# get the evidence function
if evidence == 'relu':
from mmaction.models.losses.edl_loss import relu_evidence as get_evidence
elif evidence == 'exp':
from mmaction.models.losses.edl_loss import exp_evidence as get_evidence
elif evidence == 'softplus':
from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence
else:
raise NotImplementedError
# pdb.set_trace()
num_classes = 101
model.eval()
# run inference
# model = MMDataParallel(model, device_ids=[0])
all_confidences, all_uncertainties, all_results, all_gts = [], [], [], []
# prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
output = model(return_loss=False, **data)
evidence = get_evidence(torch.from_numpy(output))
alpha = evidence + 1
uncertainty = num_classes / torch.sum(alpha, dim=1)
scores = alpha / torch.sum(alpha, dim=1, keepdim=True)
all_uncertainties.append(uncertainty.numpy())
#---vae---------------------------------
# output, recon = model(return_loss=False, **data)
# evidence = get_evidence(torch.from_numpy(output))
# alpha = evidence + 1
# scores = alpha / torch.sum(alpha, dim=1, keepdim=True)
# uncertainty = recon
# all_uncertainties.append(uncertainty)
#-------------------------------------------
# pdb.set_trace()
# compute the predictions and save labels
preds = np.argmax(scores.numpy(), axis=1)
all_results.append(preds)
conf = np.max(scores.numpy(), axis=1)
all_confidences.append(conf)
labels = data['label'].numpy()
all_gts.append(labels)
# use the first key as main key to calculate the batch size
# batch_size = len(next(iter(data.values())))
# for _ in range(batch_size):
# prog_bar.update()
if rank == 0:
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size * world_size):
prog_bar.update()
# pdb.set_trace()
all_confidences = collect_results_cpu(all_confidences, len(data_loader.dataset), tmpdir=None)
all_uncertainties = collect_results_cpu(all_uncertainties, len(data_loader.dataset), tmpdir=None)
all_results = collect_results_cpu(all_results, len(data_loader.dataset), tmpdir=None)
all_gts = collect_results_cpu(all_gts, len(data_loader.dataset), tmpdir=None)
rank, _ = get_dist_info()
if rank == 0:
# pdb.set_trace()
all_confidences = np.concatenate(all_confidences, axis=0)
all_uncertainties = np.concatenate(all_uncertainties, axis=0)
all_results = np.concatenate(all_results, axis=0)
all_gts = np.concatenate(all_gts, axis=0)
# pdb.set_trace()
return all_confidences, all_uncertainties, all_results, all_gts
def run_inference(model, distributed, cfg, datalist_file, npass=10):
# switch config for different dataset
# cfg = model.cfg
cfg.data.test.ann_file = datalist_file
cfg.data.test.data_prefix = os.path.join(os.path.dirname(datalist_file), 'videos')
evidence = cfg.get('evidence', 'exp')
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
dist=distributed,
shuffle=False,
pin_memory=False)
dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
# pdb.set_trace()
if not args.uncertainty == 'EDL':
all_confidences, all_uncertainties, all_results, all_gts = run_stochastic_inference(model, data_loader, npass)
else:
all_confidences, all_uncertainties, all_results, all_gts = run_evidence_inference(model, data_loader, evidence)
return all_confidences, all_uncertainties, all_results, all_gts
def main():
# pdb.set_trace()
# build the recognizer from a config file and checkpoint file/url
args = parse_args()
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
load_checkpoint(model, args.checkpoint, map_location='cpu')
print('cpk is loaded:', args.checkpoint)
# model = init_recognizer(
# args.config,
# args.checkpoint,
# device=device,
# use_frames=False)
# cfg = model.cfg
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
# if not args.uncertainty == 'EDL':
# # use dropout in testing stage
# if 'dnn' in args.config:
# model.apply(apply_dropout)
# if 'bnn' in args.config:
# model.test_cfg.npass = 1
# set cudnn benchmark
torch.backends.cudnn.benchmark = True
cfg.data.test.test_mode = True
result_file = os.path.join(args.result_prefix + '_result.npz')
if not os.path.exists(result_file):
# prepare result path
result_dir = os.path.dirname(result_file)
if not os.path.exists(result_dir):
# os.makedirs(result_dir)
os.makedirs(result_dir, exist_ok=True)
# run inference (OOD)
# pdb.set_trace()
ood_confidences, ood_uncertainties, ood_results, ood_labels = run_inference(model, distributed, cfg, args.ood_data, npass=args.forward_pass)
# run inference (IND)
# pdb.set_trace()
ind_confidences, ind_uncertainties, ind_results, ind_labels = run_inference(model, distributed, cfg, args.ind_data, npass=args.forward_pass)
# save
np.savez(result_file[:-4], ind_conf=ind_confidences, ood_conf=ood_confidences,
ind_unctt=ind_uncertainties, ood_unctt=ood_uncertainties,
ind_pred=ind_results, ood_pred=ood_results,
ind_label=ind_labels, ood_label=ood_labels)
else:
results = np.load(result_file, allow_pickle=True)
ind_confidences = results['ind_conf']
ood_confidences = results['ood_conf']
ind_uncertainties = results['ind_unctt'] # (N1,)
ood_uncertainties = results['ood_unctt'] # (N2,)
ind_results = results['ind_pred'] # (N1,)
ood_results = results['ood_pred'] # (N2,)
ind_labels = results['ind_label']
ood_labels = results['ood_label']
# visualize
# pdb.set_trace()
# torch.distributed.barrier()
# ood_confidences = np.concatenate(ood_confidences, axis=0)
# ood_uncertainties = np.concatenate(ood_uncertainties, axis=0)
# ood_results = np.concatenate(ood_results, axis=0)
# ood_labels = np.concatenate(ood_labels, axis=0)
# ind_confidences = np.concatenate(ind_confidences, axis=0)
# ind_uncertainties = np.concatenate(ind_uncertainties, axis=0)
# ind_results = np.concatenate(ind_results, axis=0)
# ind_labels = np.concatenate(ind_labels, axis=0)
# pdb.set_trace()
rank, _ = get_dist_info()
if rank == 0:
ind_uncertainties = np.array(ind_uncertainties.squeeze())
# ind_uncertainties = (ind_uncertainties-np.min(ind_uncertainties)) / (np.max(ind_uncertainties) - np.min(ind_uncertainties)) # normalize
ood_uncertainties = np.array(ood_uncertainties.squeeze())
# ood_uncertainties = (ood_uncertainties-np.min(ood_uncertainties)) / (np.max(ood_uncertainties) - np.min(ood_uncertainties)) # normalize
        # normalize the IND and OOD uncertainties together, using a shared min-max range
all_uncertainties = np.concatenate((ind_uncertainties, ood_uncertainties))
ind_uncertainties = (ind_uncertainties-np.min(all_uncertainties)) / (np.max(all_uncertainties) - np.min(all_uncertainties)) # normalize
ood_uncertainties = (ood_uncertainties-np.min(all_uncertainties)) / (np.max(all_uncertainties) - np.min(all_uncertainties)) # normalize
dataName_ind = args.ind_data.split('/')[-2].upper()
dataName_ood = args.ood_data.split('/')[-2].upper()
if dataName_ind == 'UCF101':
dataName_ind = 'UCF-101'
if dataName_ood == 'MIT':
dataName_ood = 'MiT-v2'
if dataName_ood == 'HMDB51':
dataName_ood = 'HMDB-51'
plt.figure(figsize=(5,4)) # (w, h)
plt.rcParams["font.family"] = "Arial" # Times New Roman
fontsize = 15
# pdb.set_trace()
plt.hist([ind_uncertainties, ood_uncertainties], 50,
density=True, histtype='bar', color=['blue', 'red'],
label=['in-distribution (%s)'%(dataName_ind), 'out-of-distribution (%s)'%(dataName_ood)])
plt.legend(fontsize=fontsize)
plt.xlabel('%s uncertainty'%(args.uncertainty), fontsize=fontsize)
plt.ylabel('density', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.xlim(0, 1.01)
plt.ylim(0, 10.01)
plt.tight_layout()
plt.savefig(os.path.join(args.result_prefix + '_distribution.png'))
plt.savefig(os.path.join(args.result_prefix + '_distribution.pdf'))
if __name__ == '__main__':
args = parse_args()
# assign the desired device.
device = torch.device(args.device)
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/ood_detection_dist.py |
import os
import numpy as np
import matplotlib.pyplot as plt
def plot_by_uncertainty(result_file, uncertainty='EDL', auc=80, fontsize=16, result_prefix=''):
assert os.path.exists(result_file), 'result file not exists! %s'%(result_file)
results = np.load(result_file, allow_pickle=True)
# ind_confidences = results['ind_conf']
# ood_confidences = results['ood_conf']
ind_uncertainties = results['ind_unctt'] # (N1,)
ood_uncertainties = results['ood_unctt'] # (N2,)
ind_results = results['ind_pred'] # (N1,)
ood_results = results['ood_pred'] # (N2,)
ind_labels = results['ind_label']
ood_labels = results['ood_label']
# visualize
ind_uncertainties = np.array(ind_uncertainties)
ind_uncertainties = (ind_uncertainties-np.min(ind_uncertainties)) / (np.max(ind_uncertainties) - np.min(ind_uncertainties)) # normalize
ood_uncertainties = np.array(ood_uncertainties)
ood_uncertainties = (ood_uncertainties-np.min(ood_uncertainties)) / (np.max(ood_uncertainties) - np.min(ood_uncertainties)) # normalize
fig = plt.figure(figsize=(5,4)) # (w, h)
plt.rcParams["font.family"] = "Arial" # Times New Roman
data_label = 'HMDB-51' if ood_data == 'HMDB' else 'MiT-v2'
counts, bins, bars = plt.hist([ind_uncertainties, ood_uncertainties], 50,
density=True, histtype='bar', color=['blue', 'red'],
label=['in-distribution (%s)'%(ind_data), 'out-of-distribution (%s)'%(data_label)])
plt.legend(fontsize=fontsize-3)
plt.text(0.6, 6, 'AUC = %.2lf'%(auc), fontsize=fontsize-3)
plt.xlabel('%s uncertainty'%(uncertainty), fontsize=fontsize)
plt.ylabel('Density', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.xlim(0, 1.01)
plt.ylim(0, 10.01)
plt.tight_layout()
result_dir = os.path.dirname(result_prefix)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# save the figure
plt.savefig(os.path.join(result_prefix + '_distribution.png'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
plt.savefig(os.path.join(result_prefix + '_distribution.pdf'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
return counts, bins, bars
if __name__ == '__main__':
fontsize = 20
ind_data = 'UCF-101'
ood_data = 'MiT'
# DRIVE (vanilla)
result_file = 'i3d/results/I3D_EDLNoKL_EDL_%s_result.npz'%(ood_data)
counts, bins, bars = plot_by_uncertainty(result_file, uncertainty='EDL', auc=81.43, fontsize=fontsize, result_prefix='temp_rebuttal/I3D_MiT_Vanilla')
counts = counts[:, 3:]
bins = bins[3:]
# mode = np.argsort(counts[1, :])[:5]
mode = np.argmax(counts[1, :])
print('the most frequent bin:(' + str(bins[mode]) + ',' + str(bins[mode+1]) + ')')
# DRIVE (full)
result_file = 'i3d/results/I3D_EDLNoKLAvUCCED_EDL_%s_result.npz'%(ood_data)
counts, bins, bars = plot_by_uncertainty(result_file, uncertainty='EDL', auc=81.54, fontsize=fontsize, result_prefix='temp_rebuttal/I3D_MiT_Full')
counts = counts[:, 3:]
bins = bins[3:]
# mode = np.argsort(counts[1, :])[:5]
mode = np.argmax(counts[1, :])
print('the most frequent bin:(' + str(bins[mode]) + ',' + str(bins[mode+1]) + ')') | InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/draw_fig7cd.py |
import os
import argparse
import numpy as np
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score
import matplotlib.pyplot as plt
def parse_args():
'''Command instruction:
source activate mmaction
python experiments/compare_openness.py --ind_ncls 101 --ood_ncls 51
'''
parser = argparse.ArgumentParser(description='Compare the performance of openness')
# model config
parser.add_argument('--base_model', default='i3d', help='the backbone model name')
parser.add_argument('--baselines', nargs='+', default=['I3D_Dropout_BALD', 'I3D_BNN_BALD', 'I3D_EDLlog_EDL', 'I3D_EDLlogAvUC_EDL'])
parser.add_argument('--thresholds', nargs='+', type=float, default=[0.000423, 0.000024, 0.495783, 0.495783])
parser.add_argument('--styles', nargs='+', default=['-b', '-k', '-r', '-g', '-m'])
parser.add_argument('--ind_ncls', type=int, default=101, help='the number of classes in known dataset')
    parser.add_argument('--ood_ncls', type=int, help='the number of classes in unknown dataset')
parser.add_argument('--ood_data', default='HMDB', help='the name of OOD dataset.')
parser.add_argument('--num_rand', type=int, default=10, help='the number of random selection for ood classes')
parser.add_argument('--result_png', default='F1_openness_compare_HMDB.png')
args = parser.parse_args()
return args
def main():
result_path = os.path.join('./experiments', args.base_model, 'results')
plt.figure(figsize=(8,5)) # (w, h)
plt.rcParams["font.family"] = "Arial" # Times New Roman
fontsize = 15
for style, thresh, baseline in zip(args.styles, args.thresholds, args.baselines):
result_file = os.path.join(result_path, baseline + '_%s'%(args.ood_data) + '_result.npz')
assert os.path.exists(result_file), "File not found! Run ood_detection first!"
# load the testing results
results = np.load(result_file, allow_pickle=True)
ind_uncertainties = results['ind_unctt'] # (N1,)
ood_uncertainties = results['ood_unctt'] # (N2,)
ind_results = results['ind_pred'] # (N1,)
ood_results = results['ood_pred'] # (N2,)
ind_labels = results['ind_label']
ood_labels = results['ood_label']
# close-set accuracy (multi-class)
acc = accuracy_score(ind_labels, ind_results)
# open-set auc-roc (binary class)
preds = np.concatenate((ind_results, ood_results), axis=0)
uncertains = np.concatenate((ind_uncertainties, ood_uncertainties), axis=0)
preds[uncertains > thresh] = 1
preds[uncertains <= thresh] = 0
labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels)))
        auroc = roc_auc_score(labels, preds)
        print('Model: %s, ClosedSet Accuracy (multi-class): %.3lf, OpenSet AUC (bin-class): %.3lf'%(baseline, acc * 100, auroc * 100))
# open set F1 score (multi-class)
        ind_results[ind_uncertainties > thresh] = args.ind_ncls  # false rejection: treat high-uncertainty IND samples as unknown
macro_F1_list = [f1_score(ind_labels, ind_results, average='macro')]
std_list = [0]
openness_list = [0]
for n in range(args.ood_ncls):
ncls_novel = n + 1
openness = (1 - np.sqrt((2 * args.ind_ncls) / (2 * args.ind_ncls + ncls_novel))) * 100
openness_list.append(openness)
            # randomly select a subset of OOD classes
macro_F1_multi = np.zeros((args.num_rand), dtype=np.float32)
for m in range(args.num_rand):
cls_select = np.random.choice(args.ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_uncertainties = np.concatenate([ood_uncertainties[ood_labels == clsid] for clsid in cls_select])
                ood_sub_results[ood_sub_uncertainties > thresh] = args.ind_ncls  # correct rejection: treat high-uncertainty OOD samples as unknown
ood_sub_labels = np.ones_like(ood_sub_results) * args.ind_ncls
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi)
std = np.std(macro_F1_multi)
macro_F1_list.append(macro_F1)
std_list.append(std)
# draw comparison curves
macro_F1_list = np.array(macro_F1_list)
std_list = np.array(std_list)
plt.plot(openness_list, macro_F1_list * 100, style, linewidth=2)
# plt.fill_between(openness_list, macro_F1_list - std_list, macro_F1_list + std_list, style)
w_openness = np.array(openness_list) / 100.
open_maF1_mean = np.sum(w_openness * macro_F1_list) / np.sum(w_openness)
open_maF1_std = np.sum(w_openness * std_list) / np.sum(w_openness)
print('Open macro-F1 score: %.3f, std=%.3lf'%(open_maF1_mean * 100, open_maF1_std * 100))
plt.xlim(0, max(openness_list))
plt.ylim(60, 80)
plt.xlabel('Openness (%)', fontsize=fontsize)
plt.ylabel('macro F1 (%)', fontsize=fontsize)
plt.grid('on')
# plt.legend(args.baselines)
plt.legend(['MC Dropout BALD', 'BNN SVI BALD', 'DEAR (vanilla)', 'DEAR (alter)', 'DEAR (joint)'], loc='lower left', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.tight_layout()
png_file = os.path.join(result_path, args.result_png)
plt.savefig(png_file)
plt.savefig(png_file[:-4] + '.pdf')
print('Openness curve figure is saved in: %s'%(png_file))
if __name__ == "__main__":
np.random.seed(123)
args = parse_args()
main() | InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/compare_openness.py |
import argparse
import os
import torch
from mmaction.apis import init_recognizer
from mmcv.parallel import collate, scatter
from mmaction.datasets.pipelines import Compose
import numpy as np
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 test')
# model config
parser.add_argument('config', help='test config file path')
parser.add_argument('--ckpt_dear', help='checkpoint file/url')
parser.add_argument('--ckpt_nodebias', help='checkpoint file/url')
parser.add_argument('--split_file', help='the split file for evaluation')
parser.add_argument('--video_path', help='the video path for evaluation')
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument('--result_list', help='result file prefix')
args = parser.parse_args()
return args
def set_deterministic(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
np.random.seed(seed) # Numpy module.
    torch.backends.cudnn.benchmark = False  # disable cudnn auto-tuning so runs stay deterministic
torch.backends.cudnn.deterministic = True
def init_inference(config, checkpoint):
# build the recognizer from a config file and checkpoint file/url
model = init_recognizer(config, checkpoint, device=device, use_frames=False)
cfg = model.cfg
cfg.data.test.test_mode = True
cfg.data.test.ann_file = args.split_file
cfg.data.test.data_prefix = args.video_path
evidence = cfg.get('evidence', 'exp')
return model, evidence
def parse_listfile(list_file, videos_path):
assert os.path.exists(list_file), 'split file does not exist! %s'%(list_file)
assert os.path.exists(videos_path), 'video path does not exist! %s'%(videos_path)
# parse file list
filelist, labels = [], []
with open(list_file, 'r') as f:
for line in f.readlines():
videofile = line.strip().split(' ')[0]
label = int(line.strip().split(' ')[1])
videofile_full = os.path.join(videos_path, videofile)
assert os.path.exists(videofile_full), 'video file does not exist! %s'%(videofile_full)
filelist.append(videofile_full)
labels.append(label)
return filelist, labels
def run_evidence_inference(model, video_path, evidence='exp'):
"""Inference a video with the detector.
Args:
model (nn.Module): The loaded recognizer.
video_path (str): The video file path/url or the rawframes directory
path. If ``use_frames`` is set to True, it should be rawframes
directory path. Otherwise, it should be video file path.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = cfg.data.test.pipeline
test_pipeline = Compose(test_pipeline)
# prepare data (by default, we use videodata)
start_index = cfg.data.test.get('start_index', 0)
data = dict(filename=video_path, label=-1, start_index=start_index, modality='RGB')
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
# get the evidence function
if evidence == 'relu':
from mmaction.models.losses.edl_loss import relu_evidence as get_evidence
elif evidence == 'exp':
from mmaction.models.losses.edl_loss import exp_evidence as get_evidence
elif evidence == 'softplus':
from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence
else:
raise NotImplementedError
num_classes = model.cls_head.num_classes
# forward the model
with torch.no_grad():
output = model(return_loss=False, **data)[0] # batchsize = 1
evidence = get_evidence(torch.from_numpy(output))
alpha = evidence + 1
uncertainty = num_classes / torch.sum(alpha, dim=0)
scores = alpha / torch.sum(alpha, dim=0, keepdim=True)
return scores.cpu().numpy(), uncertainty.cpu().numpy()
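# Note on the evidential outputs above: with Dirichlet parameters alpha = evidence + 1,
# the predictive probability is alpha / S and the vacuity-style uncertainty is K / S,
# where S = sum(alpha) and K is the number of classes. For example (hypothetical values),
# alpha = [10, 1, 1] over K = 3 classes gives S = 12, scores ~ [0.83, 0.08, 0.08] and
# uncertainty = 3 / 12 = 0.25.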
def main():
model_dear, evidence_dear = init_inference(args.config, args.ckpt_dear)
model_nodebias, evidence_nodebias = init_inference(args.config, args.ckpt_nodebias)
# result file
result_path = os.path.dirname(args.result_list)
if not os.path.exists(result_path):
os.makedirs(result_path)
fid = open(args.result_list, 'w')
# run inference
videofiles, labels = parse_listfile(args.split_file, args.video_path)
for i, (videofile, label) in tqdm(enumerate(zip(videofiles, labels)), total=len(videofiles)):
scores_dear, uncertainty_dear = run_evidence_inference(model_dear, videofile, evidence_dear) # (101,)
scores_nodebias, uncertainty_nodebias = run_evidence_inference(model_nodebias, videofile, evidence_nodebias) # (101,)
# save
pred_dear = int(np.argmax(scores_dear))
pred_nodebias = int(np.argmax(scores_nodebias))
if pred_dear == label and pred_nodebias != label:
fid.writelines('%s %d %d %.6lf %d %.6lf\n'%(videofile, label, pred_dear, float(uncertainty_dear), pred_nodebias, float(uncertainty_nodebias)))
fid.close()
if __name__ == '__main__':
args = parse_args()
# assign the desired device.
device = torch.device(args.device)
set_deterministic(1234)
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/demo.py |
import argparse, os
import numpy as np
import matplotlib.pyplot as plt
def eval_calibration(predictions, confidences, labels, M=15):
"""
M: number of bins for confidence scores
"""
num_Bm = np.zeros((M,), dtype=np.int32)
accs = np.zeros((M,), dtype=np.float32)
confs = np.zeros((M,), dtype=np.float32)
for m in range(M):
interval = [m / M, (m+1) / M]
Bm = np.where((confidences > interval[0]) & (confidences <= interval[1]))[0]
if len(Bm) > 0:
acc_bin = np.sum(predictions[Bm] == labels[Bm]) / len(Bm)
conf_bin = np.mean(confidences[Bm])
# gather results
num_Bm[m] = len(Bm)
accs[m] = acc_bin
confs[m] = conf_bin
conf_intervals = np.arange(0, 1, 1/M)
return accs, confs, num_Bm, conf_intervals
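# Minimal sketch of how the returned bins are typically combined into ECE
# (this mirrors the callers below; the inputs are hypothetical):
#
#   accs, confs, num_Bm, conf_intervals = eval_calibration(preds, confidences, labels, M=15)
#   ece = np.sum(np.abs(accs - confs) * num_Bm / np.sum(num_Bm))  # weighted |accuracy - confidence| gap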
def add_identity(axes, *line_args, **line_kwargs):
identity, = axes.plot([], [], *line_args, **line_kwargs)
def callback(axes):
low_x, high_x = axes.get_xlim()
low_y, high_y = axes.get_ylim()
low = max(low_x, low_y)
high = min(high_x, high_y)
identity.set_data([low, high], [low, high])
callback(axes)
axes.callbacks.connect('xlim_changed', callback)
axes.callbacks.connect('ylim_changed', callback)
return axes
def compute_eavuc(preds, labels, confs, uncertainties):
eavuc = 0
inds_accurate = np.where(preds == labels)[0]
eavuc += -np.sum(confs[inds_accurate] * np.log(1 - uncertainties[inds_accurate]))
inds_inaccurate = np.where(preds != labels)[0]
eavuc += -np.sum((1 - confs[inds_inaccurate]) * np.log(uncertainties[inds_inaccurate]))
return eavuc
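# Note: compute_eavuc accumulates an (un-normalized) accuracy-vs-uncertainty cost:
# accurate predictions contribute -conf * log(1 - u), which grows when their uncertainty u
# is high, and inaccurate predictions contribute -(1 - conf) * log(u), which grows when
# their uncertainty is low, so lower values indicate better accuracy/uncertainty alignment.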
def closedset_multicls(ind_results, ind_labels, ind_confidences, ind_uncertainties):
ind_preds = ind_results.copy()
accs, confs, num_Bm, conf_intervals = eval_calibration(ind_preds, 1-ind_uncertainties, ind_labels, M=args.M)
# compute the EAvUC
eavuc = compute_eavuc(ind_preds, ind_labels, ind_confidences, ind_uncertainties)
# compute ECE
ece = np.sum(np.abs(accs - confs) * num_Bm / np.sum(num_Bm))
return ece, eavuc, conf_intervals, accs
def openset_multicls(ind_results, ood_results, ind_labels, ood_labels, ind_confidences, ood_confidences, ind_uncertainties, ood_uncertainties):
ind_preds = ind_results.copy()
ood_preds = ood_results.copy()
ind_preds[ind_uncertainties > args.threshold] = args.ind_ncls
ood_preds[ood_uncertainties > args.threshold] = args.ind_ncls
preds = np.concatenate((ind_preds, ood_preds), axis=0)
labels = np.concatenate((ind_labels, np.ones_like(ood_labels) * args.ind_ncls), axis=0)
confs = np.concatenate((ind_confidences, ood_confidences), axis=0)
unctns = np.concatenate((ind_uncertainties, ood_uncertainties), axis=0)
# compute the EAvUC
eavuc = compute_eavuc(preds, labels, confs, unctns)
# compute ECE
accs, confs, num_Bm, conf_intervals = eval_calibration(preds, 1-unctns, labels, M=args.M)
ece = np.sum(np.abs(accs - confs) * num_Bm / np.sum(num_Bm))
return ece, eavuc, conf_intervals, accs
def openset_bincls(ind_results, ood_results, ind_labels, ood_labels, ind_confidences, ood_confidences, ind_uncertainties, ood_uncertainties):
ind_preds = ind_results.copy()
ood_preds = ood_results.copy()
ind_preds[ind_uncertainties > args.threshold] = 1
ind_preds[ind_uncertainties <= args.threshold] = 0
ood_preds[ood_uncertainties > args.threshold] = 1
    ood_preds[ood_uncertainties <= args.threshold] = 0
preds = np.concatenate((ind_preds, ood_preds), axis=0)
labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels)), axis=0)
confs = np.concatenate((ind_confidences, ood_confidences), axis=0)
unctns = np.concatenate((ind_uncertainties, ood_uncertainties), axis=0)
# compute the EAvUC
eavuc = compute_eavuc(preds, labels, confs, unctns)
# compute ECE
accs, confs, num_Bm, conf_intervals = eval_calibration(preds, 1-unctns, labels, M=args.M)
ece = np.sum(np.abs(accs - confs) * num_Bm / np.sum(num_Bm))
return ece, eavuc, conf_intervals, accs
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='MMAction2 test')
# model config
parser.add_argument('--ood_result', help='the result file of ood detection')
parser.add_argument('--M', type=int, default=15, help='The number of bins')
parser.add_argument('--ind_ncls', type=int, default=101, help='the number classes for in-distribution data')
parser.add_argument('--threshold', type=float, help='the threshold to decide if it is an OOD')
parser.add_argument('--save_prefix', help='the image file path of generated calibration figure')
parser.add_argument('--draw_diagram', action='store_true', help='if to draw reliability diagram.')
args = parser.parse_args()
results = np.load(args.ood_result, allow_pickle=True)
ind_uncertainties = results['ind_unctt'] # (N1,)
ood_uncertainties = results['ood_unctt'] # (N2,)
ind_results = results['ind_pred'] # (N1,)
ood_results = results['ood_pred'] # (N2,)
ind_labels = results['ind_label']
ood_labels = results['ood_label']
if 'ind_conf' not in results:
ind_confidences = 1 - ind_uncertainties
ood_confidences = 1 - ood_uncertainties
else:
ind_confidences = results['ind_conf']
ood_confidences = results['ood_conf']
# result path
result_path = os.path.dirname(args.save_prefix)
if not os.path.exists(result_path):
os.makedirs(result_path)
# Closed Set: (K class)
ece, eavuc, conf_intervals, accs = closedset_multicls(ind_results, ind_labels, ind_confidences, ind_uncertainties)
print('The Closed Set (K class) ECE=%.3lf, EAvUC=%.3lf'%(ece, eavuc))
# Open Set: K+1 class
ece, eavuc, conf_intervals, accs = openset_multicls(ind_results, ood_results, ind_labels, ood_labels, ind_confidences, ood_confidences, ind_uncertainties, ood_uncertainties)
print('The Open Set (K+1 class) ECE=%.3lf, EAvUC=%.3lf'%(ece, eavuc))
# Open Set: 2 class
ece, eavuc, conf_intervals, accs = openset_bincls(ind_results, ood_results, ind_labels, ood_labels, ind_confidences, ood_confidences, ind_uncertainties, ood_uncertainties)
print('The Open Set (2-class) ECE=%.3lf, EAvUC=%.3lf'%(ece, eavuc))
if args.draw_diagram:
# plot the ECE figure
fig, ax = plt.subplots(figsize=(4,4))
plt.rcParams["font.family"] = "Arial" # Times New Roman
fontsize = 15
plt.bar(conf_intervals, accs, width=1/args.M, linewidth=1, edgecolor='k', align='edge', label='Outputs')
plt.bar(conf_intervals, np.maximum(0, conf_intervals - accs), bottom=accs, color='y', width=1/args.M, linewidth=1, edgecolor='k', align='edge', label='Gap')
plt.text(0.1, 0.6, 'ECE=%.4f'%(ece), fontsize=fontsize)
add_identity(ax, color='r', ls='--')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.xlabel('confidence', fontsize=fontsize)
plt.ylabel('accuracy', fontsize=fontsize)
plt.legend(fontsize=fontsize)
ax.set_aspect('equal', 'box')
plt.tight_layout()
plt.savefig(args.save_prefix + '_ind.png')
plt.savefig(args.save_prefix + '_ind.pdf')
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/evaluate_calibration.py |
import argparse
import os
import torch
from mmcv.parallel import collate, scatter
from mmaction.datasets.pipelines import Compose
from mmaction.apis import init_recognizer
from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 test')
# model config
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file/url')
parser.add_argument('--known_split', help='the split file path of the knowns')
parser.add_argument('--unknown_split', help='the split file path of the unknowns')
parser.add_argument('--result_file', help='the result file path')
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
args = parser.parse_args()
return args
def get_data(known_split, known_classes):
known_data = []
labels = []
video_dir = os.path.join(os.path.dirname(known_split), 'videos')
with open(known_split, 'r') as f:
for line in f.readlines():
clsname, videoname = line.strip().split(' ')[0].split('/')
if clsname in known_classes.keys():
videofile = os.path.join(video_dir, clsname, videoname)
known_data.append(videofile)
labels.append(known_classes[clsname])
return known_data, labels
def inference_recognizer(model, video_path):
"""Inference a video with the detector.
Args:
model (nn.Module): The loaded recognizer.
video_path (str): The video file path/url or the rawframes directory
path. If ``use_frames`` is set to True, it should be rawframes
directory path. Otherwise, it should be video file path.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = cfg.data.test.pipeline
test_pipeline = Compose(test_pipeline)
# prepare data (by default, we use videodata)
start_index = cfg.data.test.get('start_index', 0)
data = dict(filename=video_path, label=-1, start_index=start_index, modality='RGB')
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
# forward the model
with torch.no_grad():
feat_blob = model(return_loss=False, get_feat=True, **data) # (num_clips * num_crops, 2048, 1, 8, 8)
# spatial average pooling
kernel_size = (1, feat_blob.size(-2), feat_blob.size(-1))
avg_pool2d = torch.nn.AvgPool3d(kernel_size, stride=1, padding=0)
feat_clips = avg_pool2d(feat_blob).view(feat_blob.size(0), feat_blob.size(1)) # (num_clips * num_crops, 2048)
# get the mean features of all clips and crops
feat_final = torch.mean(feat_clips, dim=0).cpu().numpy() # (2048,)
return feat_final
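# A minimal usage sketch for the helper above (hypothetical config/checkpoint
# paths, for illustration only -- the real ones come from the CLI arguments):
#   model = init_recognizer('i3d_cfg.py', 'i3d_ckpt.pth', device='cuda:0', use_frames=False)
#   feat = inference_recognizer(model, 'some_video.mp4')  # np.ndarray, e.g. shape (2048,)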
def extract_feature(video_files):
model = init_recognizer(
args.config,
args.checkpoint,
device=device,
use_frames=False)
cfg = model.cfg
torch.backends.cudnn.benchmark = True
cfg.data.test.test_mode = True
if 'bnn' in args.config:
model.test_cfg.npass = 1
X = []
for videofile in tqdm(video_files, total=len(video_files), desc='Extract Feature'):
feature = inference_recognizer(model, videofile) # (2048,)
X.append(feature)
X = np.vstack(X)
return X
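# Note: with the ResNet-50 based backbones assumed in the comments above, the
# stacked feature matrix X has shape (num_videos, 2048); other backbones may
# yield a different channel dimension.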
def set_deterministic(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
np.random.seed(seed) # Numpy module.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
args = parse_args()
# assign the desired device.
device = torch.device(args.device)
set_deterministic(0)
ind_clsID = [2, 10, 16, 69, 71, 21, 32, 41, 73, 29] # UCF-101 73 21 41 32 29 10 16 69 71 2
ind_classes = {'Archery': 0, 'Biking': 1, 'BoxingPunchingBag': 2, 'PullUps': 3, 'PushUps': 4,
'CliffDiving': 5, 'GolfSwing': 6, 'HorseRiding': 7, 'RockClimbingIndoor': 8, 'FloorGymnastics': 9}
ood_clsID = [12, 20, 21, 22, 50, 15, 16, 17, 18, 19] # HMDB-51 15 16 17 18 19 20 21 22 12 50
ood_classes = {'fall_floor': 10, 'kick': 10, 'kick_ball': 10, 'kiss': 10, 'wave': 10,
'golf': 10, 'handstand': 10, 'hit': 10, 'hug': 10, 'jump': 10}
feature_file = args.result_file[:-4] + '_feature_10p1.npz'
# get the data of known classes
known_data, known_labels = get_data(args.known_split, ind_classes)
num_knowns = len(known_data)
# get the data of unknown classes
unknown_data, unknown_labels = get_data(args.unknown_split, ood_classes)
num_unknowns = len(unknown_data)
if not os.path.exists(feature_file):
        # make sure the result directory exists
result_path = os.path.dirname(args.result_file)
if not os.path.exists(result_path):
os.makedirs(result_path)
# extracting the feature
X = extract_feature(known_data + unknown_data)
# save results
np.savez(feature_file[:-4], feature=X)
else:
results = np.load(feature_file, allow_pickle=True)
X = results['feature']
open_classes = {**ind_classes, 'Unknowns': len(ind_classes)}
open_labels = np.array(known_labels + [len(ind_classes)] * num_unknowns)
# run tSNE
print('running tSNE...')
Y = TSNE(n_components=2, random_state=0).fit_transform(X)
plt.figure(figsize=(5,4))
plt.rcParams["font.family"] = "Arial" # Times New Roman
fontsize = 10
for k, v in open_classes.items():
inds = np.where(open_labels == v)[0]
if k == 'Unknowns':
plt.scatter(Y[inds, 0], Y[inds, 1], s=10, c='k', marker='^', label=k)
else:
plt.scatter(Y[inds, 0], Y[inds, 1], s=3)
plt.text(np.mean(Y[inds, 0])-5, np.mean(Y[inds, 1])+5, k, fontsize=fontsize)
xmin, xmax, ymin, ymax = np.min(Y[:, 0]), np.max(Y[:, 0]), np.min(Y[:, 1]), np.max(Y[:, 1])
plt.xlim(xmin-5, xmax + 15)
plt.ylim(ymin-5, ymax + 10)
plt.legend(loc='lower right', fontsize=fontsize)
plt.xticks([])
plt.yticks([])
plt.savefig(args.result_file)
plt.savefig(args.result_file[:-4] + '.pdf')
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/analyze_features.py |
import os, argparse
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
def softmax_curvepoints(result_file, thresh, ood_ncls, num_rand):
assert os.path.exists(result_file), "File not found! Run baseline_i3d_softmax.py first!"
# load the testing results
results = np.load(result_file, allow_pickle=True)
ind_softmax = results['ind_softmax'] # (N1, C)
ood_softmax = results['ood_softmax'] # (N2, C)
ind_labels = results['ind_label'] # (N1,)
ood_labels = results['ood_label'] # (N2,)
ind_ncls = ind_softmax.shape[1]
ind_results = np.argmax(ind_softmax, axis=1)
ood_results = np.argmax(ood_softmax, axis=1)
ind_conf = np.max(ind_softmax, axis=1)
ood_conf = np.max(ood_softmax, axis=1)
ind_results[ind_conf < thresh] = ind_ncls # incorrect rejection
# open set F1 score (multi-class)
macro_F1 = f1_score(ind_labels, ind_results, average='macro')
macro_F1_list = [macro_F1 * 100]
openness_list = [0]
for n in range(ood_ncls):
ncls_novel = n + 1
openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
openness_list.append(openness)
        # randomly select the subset of ood samples
macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
for m in range(num_rand):
cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
ood_sub_confs = np.concatenate([ood_conf[ood_labels == clsid] for clsid in cls_select])
ood_sub_results[ood_sub_confs < thresh] = ind_ncls # correct rejection
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi) * 100
macro_F1_list.append(macro_F1)
return openness_list, macro_F1_list
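# A small worked example of the openness measure used above (assumed values,
# for illustration only): with ind_ncls = 101 known classes and
# ncls_novel = 51 novel classes,
#   openness = (1 - np.sqrt((2 * 101) / (2 * 101 + 51))) * 100  # ~10.65
# so openness grows monotonically (but slowly) as more novel classes are added.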
def openmax_curvepoints(result_file, ood_ncls, num_rand):
assert os.path.exists(result_file), "File not found! Run baseline_i3d_openmax.py first!"
results = np.load(result_file, allow_pickle=True)
ind_openmax = results['ind_openmax'] # (N1, C+1)
ood_openmax = results['ood_openmax'] # (N2, C+1)
ind_labels = results['ind_label'] # (N1,)
ood_labels = results['ood_label'] # (N2,)
ind_results = np.argmax(ind_openmax, axis=1)
ood_results = np.argmax(ood_openmax, axis=1)
ind_ncls = ind_openmax.shape[1] - 1 # (C+1)-1
# open set F1 score (multi-class)
macro_F1 = f1_score(ind_labels, ind_results, average='macro')
macro_F1_list = [macro_F1 * 100]
openness_list = [0]
for n in range(ood_ncls):
ncls_novel = n + 1
openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
openness_list.append(openness)
        # randomly select the subset of ood samples
macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
for m in range(num_rand):
cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi) * 100
macro_F1_list.append(macro_F1)
return openness_list, macro_F1_list
def uncertainty_curvepoints(result_file, thresh, ind_ncls, ood_ncls, num_rand):
assert os.path.exists(result_file), "File not found! Run ood_detection first!"
# load the testing results
results = np.load(result_file, allow_pickle=True)
ind_uncertainties = results['ind_unctt'] # (N1,)
ood_uncertainties = results['ood_unctt'] # (N2,)
ind_results = results['ind_pred'] # (N1,)
ood_results = results['ood_pred'] # (N2,)
ind_labels = results['ind_label']
ood_labels = results['ood_label']
# open set F1 score (multi-class)
    ind_results[ind_uncertainties > thresh] = ind_ncls # false rejection
macro_F1 = f1_score(ind_labels, ind_results, average='macro')
macro_F1_list = [macro_F1 * 100]
openness_list = [0]
for n in range(ood_ncls):
ncls_novel = n + 1
openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
openness_list.append(openness)
        # randomly select the subset of ood samples
macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
for m in range(num_rand):
cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_uncertainties = np.concatenate([ood_uncertainties[ood_labels == clsid] for clsid in cls_select])
            ood_sub_results[ood_sub_uncertainties > thresh] = ind_ncls # correct rejection
ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi) * 100
macro_F1_list.append(macro_F1)
return openness_list, macro_F1_list
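# A minimal sketch of the uncertainty-based rejection rule applied above
# (hypothetical values, for illustration only):
#   u = np.array([0.002, 0.008]); preds = np.array([3, 7])
#   preds[u > 0.0045] = ind_ncls  # the second sample is re-assigned to the unknown class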
def plot_all_curves(openness, values, line_styles, result_prefix, ylim=[60, 80], fontsize=18):
fig = plt.figure(figsize=(8,6)) # (w, h)
plt.rcParams["font.family"] = "Arial"
for k, v in values.items():
plt.plot(openness, v, line_styles[k], linewidth=2, label=k)
plt.xlim(0, max(openness))
plt.ylim(ylim)
plt.xlabel('Openness (%)', fontsize=fontsize)
plt.ylabel('Open maF1 (%)', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(np.arange(ylim[0], ylim[1]+1, 5), fontsize=fontsize)
plt.grid('on')
plt.legend(fontsize=fontsize-10, loc='lower center', ncol=3, handletextpad=0.3, columnspacing=0.5)
plt.tight_layout()
result_path = os.path.dirname(result_prefix)
if not os.path.exists(result_path):
os.makedirs(result_path)
plt.savefig(result_prefix + '_%s.png'%(args.ood_data), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
plt.savefig(result_prefix + '_%s.pdf'%(args.ood_data), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
def main_i3d():
# SoftMax
print('Compute Open maF1 for SoftMax...')
result_file = 'i3d/results_baselines/openmax/I3D_OpenMax_%s_result.npz'%(args.ood_data)
openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.996825, args.ood_ncls, args.num_rand)
# OpenMax
print('Compute Open maF1 for OpenMax...')
result_file = 'i3d/results_baselines/openmax/I3D_OpenMax_%s_result.npz'%(args.ood_data)
openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand)
# RPL
print('Compute Open maF1 for RPL...')
result_file = 'i3d/results_baselines/rpl/I3D_RPL_%s_result.npz'%(args.ood_data)
openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.995178, args.ood_ncls, args.num_rand)
# MCDropout BALD
print('Compute Open maF1 for MC Dropout BALD...')
result_file = 'i3d/results/I3D_DNN_BALD_%s_result.npz'%(args.ood_data)
openness_dnn, maF1_dnn = uncertainty_curvepoints(result_file, 0.000433, args.ind_ncls, args.ood_ncls, args.num_rand)
# BNN SVI BALD
print('Compute Open maF1 for BNN SVI BALD...')
result_file = 'i3d/results/I3D_BNN_BALD_%s_result.npz'%(args.ood_data)
openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000004, args.ind_ncls, args.ood_ncls, args.num_rand)
# DEAR (full)
print('Compute Open maF1 for DEAR (full)...')
result_file = 'i3d/results/I3D_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data)
openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004550, args.ind_ncls, args.ood_ncls, args.num_rand)
# draw F1 curve
line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'}
values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax}
result_prefix = args.result_prefix + '_I3D'
plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=[60,80], fontsize=30)
def main_tsm():
# SoftMax
print('Compute Open maF1 for SoftMax...')
result_file = 'tsm/results_baselines/openmax/TSM_OpenMax_%s_result.npz'%(args.ood_data)
openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.999683, args.ood_ncls, args.num_rand)
# OpenMax
print('Compute Open maF1 for OpenMax...')
result_file = 'tsm/results_baselines/openmax/TSM_OpenMax_%s_result.npz'%(args.ood_data)
openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand)
# RPL
print('Compute Open maF1 for RPL...')
result_file = 'tsm/results_baselines/rpl/TSM_RPL_%s_result.npz'%(args.ood_data)
openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.999167, args.ood_ncls, args.num_rand)
# MCDropout BALD
print('Compute Open maF1 for MC Dropout BALD...')
result_file = 'tsm/results/TSM_DNN_BALD_%s_result.npz'%(args.ood_data)
openness_dnn, maF1_dnn = uncertainty_curvepoints(result_file, 0.000022, args.ind_ncls, args.ood_ncls, args.num_rand)
# BNN SVI BALD
print('Compute Open maF1 for BNN SVI BALD...')
result_file = 'tsm/results/TSM_BNN_BALD_%s_result.npz'%(args.ood_data)
openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000003, args.ind_ncls, args.ood_ncls, args.num_rand)
# DEAR (full)
print('Compute Open maF1 for DEAR (full)...')
result_file = 'tsm/results/TSM_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data)
openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004549, args.ind_ncls, args.ood_ncls, args.num_rand)
# draw F1 curve
line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'}
values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax}
result_prefix = args.result_prefix + '_TSM'
ylim = [60, 90] if args.ood_data == 'HMDB' else [55, 90]
plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=ylim, fontsize=30)
def main_slowfast():
# SoftMax
print('Compute Open maF1 for SoftMax...')
result_file = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_%s_result.npz'%(args.ood_data)
openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.997915, args.ood_ncls, args.num_rand)
# OpenMax
print('Compute Open maF1 for OpenMax...')
result_file = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_%s_result.npz'%(args.ood_data)
openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand)
# RPL
print('Compute Open maF1 for RPL...')
result_file = 'slowfast/results_baselines/rpl/SlowFast_RPL_%s_result.npz'%(args.ood_data)
openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.997780, args.ood_ncls, args.num_rand)
# MCDropout BALD
print('Compute Open maF1 for MC Dropout BALD...')
result_file = 'slowfast/results/SlowFast_DNN_BALD_%s_result.npz'%(args.ood_data)
openness_dnn, maF1_dnn = uncertainty_curvepoints(result_file, 0.000065, args.ind_ncls, args.ood_ncls, args.num_rand)
# BNN SVI BALD
print('Compute Open maF1 for BNN SVI BALD...')
result_file = 'slowfast/results/SlowFast_BNN_BALD_%s_result.npz'%(args.ood_data)
openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000004, args.ind_ncls, args.ood_ncls, args.num_rand)
# DEAR (full)
print('Compute Open maF1 for DEAR (full)...')
result_file = 'slowfast/results/SlowFast_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data)
openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004552, args.ind_ncls, args.ood_ncls, args.num_rand)
# draw F1 curve
line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'}
values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax}
result_prefix = args.result_prefix + '_SlowFast'
plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=[60,90], fontsize=30)
def main_tpn():
# SoftMax
print('Compute Open maF1 for SoftMax...')
result_file = 'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_%s_result.npz'%(args.ood_data)
openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.997623, args.ood_ncls, args.num_rand)
# OpenMax
print('Compute Open maF1 for OpenMax...')
result_file = 'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_%s_result.npz'%(args.ood_data)
openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand)
# RPL
print('Compute Open maF1 for RPL...')
result_file = 'tpn_slowonly/results_baselines/rpl/TPN_RPL_%s_result.npz'%(args.ood_data)
openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.996931, args.ood_ncls, args.num_rand)
# MCDropout BALD
print('Compute Open maF1 for MC Dropout BALD...')
result_file = 'tpn_slowonly/results/TPN_SlowOnly_Dropout_BALD_%s_result.npz'%(args.ood_data)
openness_dnn, maF1_dnn = uncertainty_curvepoints(result_file, 0.000096, args.ind_ncls, args.ood_ncls, args.num_rand)
# BNN SVI BALD
print('Compute Open maF1 for BNN SVI BALD...')
result_file = 'tpn_slowonly/results/TPN_SlowOnly_BNN_BALD_%s_result.npz'%(args.ood_data)
openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000007, args.ind_ncls, args.ood_ncls, args.num_rand)
# DEAR (full)
print('Compute Open maF1 for DEAR (full)...')
result_file = 'tpn_slowonly/results/TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data)
openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004555, args.ind_ncls, args.ood_ncls, args.num_rand)
# draw F1 curve
line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'}
values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax}
result_prefix = args.result_prefix + '_TPN'
ylim = [50, 85] if args.ood_data == 'HMDB' else [50, 85]
plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=ylim, fontsize=30)
def parse_args():
parser = argparse.ArgumentParser(description='Compare the performance of Open macroF1 against openness')
# model config
parser.add_argument('--ind_ncls', type=int, default=101, help='the number of classes in known dataset')
parser.add_argument('--ood_ncls', type=int, default=51, choices=[51, 305], help='the number of classes in unknwon dataset')
parser.add_argument('--ood_data', default='HMDB', choices=['HMDB', 'MiT'], help='the name of OOD dataset.')
parser.add_argument('--model', default='I3D', choices=['I3D', 'TSM', 'SlowFast', 'TPN'], help='the action recognition model.')
parser.add_argument('--num_rand', type=int, default=10, help='the number of random selection for ood classes')
parser.add_argument('--result_prefix', default='../temp/F1_openness')
args = parser.parse_args()
return args
if __name__ == '__main__':
""" Example script:
python draw_openness_curves.py --model I3D --ood_data MiT --ood_ncls 305
"""
np.random.seed(123)
args = parse_args()
if args.model == 'I3D':
# draw results on I3D
main_i3d()
elif args.model == 'TSM':
# draw results on TSM
main_tsm()
elif args.model == 'SlowFast':
# draw results on SlowFast
main_slowfast()
elif args.model == 'TPN':
# draw results on TPN
main_tpn()
else:
raise NotImplementedError
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/draw_openness_curves.py |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import sys
import numpy
#ext_modules = [Extension("libmr", ["libmr.pyx", "MetaRecognition.cpp"])]
setup(
ext_modules = cythonize(Extension('libmr',
["libmr.pyx",
"MetaRecognition.cpp",
"weibull.c"
],
include_dirs = [".", numpy.get_include()],
language="c++",
)),
data_files = [('.', ['MetaRecognition.h', 'weibull.h'])],
)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/libMR/setup.py |
import os, sys
import numpy as np
import libmr
def main():
    posscores = np.asarray([0.245, 0.2632, 0.3233, 0.3573, 0.4014, 0.4055, 0.4212, 0.5677])
    test_distances = np.asarray([0.05, 0.1, 0.25, 0.4, 0.75, 1., 1.5, 2.])
mr = libmr.MR()
# since higher is worse and we want to fit the higher tail,
# use fit_high()
mr.fit_high(posscores, posscores.shape[0])
wscores = mr.w_score_vector(test_distances)
    for i in range(wscores.shape[0]):
        print("%.2f %.2f %.2f" % (test_distances[i], wscores[i], mr.inv(wscores[i])))
    # wscores are the ones to be used in the equation
    # s_i * (1 - rho_i)
    print("Low wscore --> Low probability that the score is an outlier, i.e. the sample IS NOT an outlier")
    print("High wscore --> High probability that the score is an outlier, i.e. the sample IS an outlier")
    print("posscores: ", posscores)
    print("test_distances: ", test_distances)
    print("wscores: ", wscores)
if __name__ == "__main__":
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/libMR/estimate_wscores.py |
import numpy as np
import sys, os
try:
import libmr
print("Imported libmr succesfully")
except ImportError:
print("Cannot import libmr")
sys.exit()
import pickle
svm_data = {}
svm_data["labels"] = [1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1 , -1, -1, -1, -1, -1,
1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1 , -1, -1, -1, -1, -1,
1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1 , -1, -1, -1, -1, -1,
1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1 , -1, -1, -1, -1, -1,
1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1 , -1, -1, -1, -1, -1]
svm_data["scores"] = sp.randn(100).tolist()
fit_data = sp.rand(3)
def main():
mr = libmr.MR()
datasize = len(svm_data["scores"])
mr.fit_svm(svm_data, datasize, 1, 1, 1, 10)
print(fit_data)
print(mr.w_score_vector(fit_data))
mr.mr_save("meta_rec.model")
datadump = {}
datadump = {"data": fit_data}
f = open("data.dump", "w")
pickle.dump(datadump, f)
f.close()
print(dir(mr))
if __name__ == "__main__":
main()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/experiments/libMR/test_libmr.py |
import os.path as osp
import random
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.core import (ActivityNetDetection,
average_recall_at_avg_proposals, confusion_matrix,
get_weighted_score, mean_average_precision,
mean_class_accuracy, mmit_mean_average_precision,
pairwise_temporal_iou, top_k_accuracy)
def gt_confusion_matrix(gt_labels, pred_labels, normalize=None):
"""Calculate the ground truth confusion matrix."""
max_index = max(max(gt_labels), max(pred_labels))
confusion_mat = np.zeros((max_index + 1, max_index + 1), dtype=np.int64)
for gt, pred in zip(gt_labels, pred_labels):
confusion_mat[gt][pred] += 1
del_index = []
for i in range(max_index):
if sum(confusion_mat[i]) == 0 and sum(confusion_mat[:, i]) == 0:
del_index.append(i)
confusion_mat = np.delete(confusion_mat, del_index, axis=0)
confusion_mat = np.delete(confusion_mat, del_index, axis=1)
if normalize is not None:
        confusion_mat = np.array(confusion_mat, dtype=np.float64)
m, n = confusion_mat.shape
if normalize == 'true':
for i in range(m):
s = np.sum(confusion_mat[i], dtype=float)
if s == 0:
continue
confusion_mat[i, :] = confusion_mat[i, :] / s
print(confusion_mat[i, :])
elif normalize == 'pred':
for i in range(n):
s = sum(confusion_mat[:, i])
if s == 0:
continue
confusion_mat[:, i] = confusion_mat[:, i] / s
elif normalize == 'all':
s = np.sum(confusion_mat)
if s != 0:
confusion_mat /= s
return confusion_mat
def test_activitynet_detection():
data_prefix = osp.join(osp.dirname(__file__), 'data/test_eval_detection')
gt_path = osp.join(data_prefix, 'gt.json')
result_path = osp.join(data_prefix, 'result.json')
detection = ActivityNetDetection(gt_path, result_path)
results = detection.evaluate()
mAP = np.array([
0.71428571, 0.71428571, 0.71428571, 0.6875, 0.6875, 0.59722222,
0.52083333, 0.52083333, 0.52083333, 0.5
])
average_mAP = 0.6177579365079365
assert_array_almost_equal(results[0], mAP)
assert_array_almost_equal(results[1], average_mAP)
def test_confusion_matrix():
# custom confusion_matrix
gt_labels = [np.int64(random.randint(0, 9)) for _ in range(100)]
pred_labels = np.random.randint(10, size=100, dtype=np.int64)
for normalize in [None, 'true', 'pred', 'all']:
cf_mat = confusion_matrix(pred_labels, gt_labels, normalize)
gt_cf_mat = gt_confusion_matrix(gt_labels, pred_labels, normalize)
assert_array_equal(cf_mat, gt_cf_mat)
with pytest.raises(ValueError):
# normalize must be in ['true', 'pred', 'all', None]
confusion_matrix([1], [1], 'unsupport')
with pytest.raises(TypeError):
# y_pred must be list or np.ndarray
confusion_matrix(0.5, [1])
with pytest.raises(TypeError):
# y_real must be list or np.ndarray
confusion_matrix([1], 0.5)
with pytest.raises(TypeError):
# y_pred dtype must be np.int64
confusion_matrix([0.5], [1])
with pytest.raises(TypeError):
# y_real dtype must be np.int64
confusion_matrix([1], [0.5])
def test_topk():
scores = [
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
]
# top1 acc
k = (1, )
top1_labels_0 = [3, 1, 1, 1]
top1_labels_25 = [2, 0, 4, 3]
top1_labels_50 = [2, 2, 3, 1]
top1_labels_75 = [2, 2, 2, 3]
top1_labels_100 = [2, 2, 2, 4]
res = top_k_accuracy(scores, top1_labels_0, k)
assert res == [0]
res = top_k_accuracy(scores, top1_labels_25, k)
assert res == [0.25]
res = top_k_accuracy(scores, top1_labels_50, k)
assert res == [0.5]
res = top_k_accuracy(scores, top1_labels_75, k)
assert res == [0.75]
res = top_k_accuracy(scores, top1_labels_100, k)
assert res == [1.0]
# top1 acc, top2 acc
k = (1, 2)
top2_labels_0_100 = [3, 1, 1, 1]
top2_labels_25_75 = [3, 1, 2, 3]
res = top_k_accuracy(scores, top2_labels_0_100, k)
assert res == [0, 1.0]
res = top_k_accuracy(scores, top2_labels_25_75, k)
assert res == [0.25, 0.75]
# top1 acc, top3 acc, top5 acc
k = (1, 3, 5)
top5_labels_0_0_100 = [1, 0, 3, 2]
top5_labels_0_50_100 = [1, 3, 4, 0]
top5_labels_25_75_100 = [2, 3, 0, 2]
res = top_k_accuracy(scores, top5_labels_0_0_100, k)
assert res == [0, 0, 1.0]
res = top_k_accuracy(scores, top5_labels_0_50_100, k)
assert res == [0, 0.5, 1.0]
res = top_k_accuracy(scores, top5_labels_25_75_100, k)
assert res == [0.25, 0.75, 1.0]
def test_mean_class_accuracy():
scores = [
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
]
# test mean class accuracy in [0, 0.25, 1/3, 0.75, 1.0]
mean_cls_acc_0 = np.int64([1, 4, 0, 2])
mean_cls_acc_25 = np.int64([2, 0, 4, 3])
mean_cls_acc_33 = np.int64([2, 2, 2, 3])
mean_cls_acc_75 = np.int64([4, 2, 2, 4])
mean_cls_acc_100 = np.int64([2, 2, 2, 4])
assert mean_class_accuracy(scores, mean_cls_acc_0) == 0
assert mean_class_accuracy(scores, mean_cls_acc_25) == 0.25
assert mean_class_accuracy(scores, mean_cls_acc_33) == 1 / 3
assert mean_class_accuracy(scores, mean_cls_acc_75) == 0.75
assert mean_class_accuracy(scores, mean_cls_acc_100) == 1.0
def test_mmit_mean_average_precision():
# One sample
y_true = [np.array([0, 0, 1, 1])]
y_scores = [np.array([0.1, 0.4, 0.35, 0.8])]
map = mmit_mean_average_precision(y_scores, y_true)
precision = [2.0 / 3.0, 0.5, 1., 1.]
recall = [1., 0.5, 0.5, 0.]
target = -np.sum(np.diff(recall) * np.array(precision)[:-1])
assert target == map
def test_pairwise_temporal_iou():
target_segments = np.array([])
candidate_segments = np.array([])
with pytest.raises(ValueError):
pairwise_temporal_iou(target_segments, candidate_segments)
# test temporal iou
target_segments = np.array([[1, 2], [2, 3]])
candidate_segments = np.array([[2, 3], [2.5, 3]])
temporal_iou = pairwise_temporal_iou(candidate_segments, target_segments)
assert_array_equal(temporal_iou, [[0, 0], [1, 0.5]])
# test temporal overlap_self
target_segments = np.array([[1, 2], [2, 3]])
candidate_segments = np.array([[2, 3], [2.5, 3]])
temporal_iou, temporal_overlap_self = pairwise_temporal_iou(
candidate_segments, target_segments, calculate_overlap_self=True)
assert_array_equal(temporal_overlap_self, [[0, 0], [1, 1]])
# test temporal overlap_self when candidate_segments is 1d
target_segments = np.array([[1, 2], [2, 3]])
candidate_segments = np.array([2.5, 3])
temporal_iou, temporal_overlap_self = pairwise_temporal_iou(
candidate_segments, target_segments, calculate_overlap_self=True)
assert_array_equal(temporal_overlap_self, [0, 1])
def test_average_recall_at_avg_proposals():
ground_truth1 = {
'v_test1': np.array([[0, 1], [1, 2]]),
'v_test2': np.array([[0, 1], [1, 2]])
}
ground_truth2 = {'v_test1': np.array([[0, 1]])}
proposals1 = {
'v_test1': np.array([[0, 1, 1], [1, 2, 1]]),
'v_test2': np.array([[0, 1, 1], [1, 2, 1]])
}
proposals2 = {
'v_test1': np.array([[10, 11, 0.6], [11, 12, 0.4]]),
'v_test2': np.array([[10, 11, 0.6], [11, 12, 0.4]])
}
proposals3 = {
'v_test1': np.array([[i, i + 1, 1 / (i + 1)] for i in range(100)])
}
recall, avg_recall, proposals_per_video, auc = (
average_recall_at_avg_proposals(ground_truth1, proposals1, 4))
assert_array_equal(recall, [[0.] * 49 + [0.5] * 50 + [1.]] * 10)
assert_array_equal(avg_recall, [0.] * 49 + [0.5] * 50 + [1.])
assert_array_almost_equal(
proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10)
assert auc == 25.5
recall, avg_recall, proposals_per_video, auc = (
average_recall_at_avg_proposals(ground_truth1, proposals2, 4))
assert_array_equal(recall, [[0.] * 100] * 10)
assert_array_equal(avg_recall, [0.] * 100)
assert_array_almost_equal(
proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10)
assert auc == 0
recall, avg_recall, proposals_per_video, auc = (
average_recall_at_avg_proposals(ground_truth2, proposals3, 100))
assert_array_equal(recall, [[1.] * 100] * 10)
assert_array_equal(avg_recall, ([1.] * 100))
assert_array_almost_equal(
proposals_per_video, np.arange(1, 101, 1), decimal=10)
assert auc == 99.0
def test_get_weighted_score():
score_a = [
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
]
score_b = [
np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]),
np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526])
]
weighted_score = get_weighted_score([score_a], [1])
assert np.all(np.isclose(np.array(score_a), np.array(weighted_score)))
coeff_a, coeff_b = 2., 1.
weighted_score = get_weighted_score([score_a, score_b], [coeff_a, coeff_b])
ground_truth = [
x * coeff_a + y * coeff_b for x, y in zip(score_a, score_b)
]
assert np.all(np.isclose(np.array(ground_truth), np.array(weighted_score)))
def test_mean_average_precision():
def content_for_unittest(scores, labels, result):
gt = mean_average_precision(scores, labels)
assert gt == result
scores = [
np.array([0.1, 0.2, 0.3, 0.4]),
np.array([0.2, 0.3, 0.4, 0.1]),
np.array([0.3, 0.4, 0.1, 0.2]),
np.array([0.4, 0.1, 0.2, 0.3])
]
label1 = np.array([[1, 1, 0, 0], [1, 0, 1, 1], [1, 0, 1, 0], [1, 1, 0, 1]])
result1 = 2 / 3
label2 = np.array([[0, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]])
result2 = np.mean([0.5, 0.5833333333333333, 0.8055555555555556, 1.0])
content_for_unittest(scores, label1, result1)
content_for_unittest(scores, label2, result2)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_accuracy.py |
import os.path as osp
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.localization import (generate_bsp_feature,
generate_candidate_proposals, soft_nms,
temporal_iop, temporal_iou)
def test_temporal_iou():
anchors_min = np.array([0.0, 0.5])
anchors_max = np.array([1.0, 1.5])
box_min = 0.5
box_max = 1.0
iou = temporal_iou(anchors_min, anchors_max, box_min, box_max)
assert_array_equal(iou, np.array([0.5, 0.5]))
def test_temporal_iop():
anchors_min = np.array([0.0, 0.5])
anchors_max = np.array([1.0, 1.5])
box_min = 0.4
box_max = 1.1
ioa = temporal_iop(anchors_min, anchors_max, box_min, box_max)
assert_array_almost_equal(ioa, np.array([0.6, 0.6]))
def test_soft_nms():
proposals = np.array([[0., 1., 1., 1., 0.5, 0.5],
[0., 0.4, 1., 1., 0.4, 0.4],
[0., 0.95, 1., 1., 0.6, 0.6]])
proposal_list = soft_nms(proposals, 0.75, 0.65, 0.9, 1)
assert_array_equal(proposal_list, [[0., 0.95, 0.6], [0., 0.4, 0.4]])
def test_generate_candidate_proposals():
video_list = [0, 1]
video_infos = [
dict(
video_name='v_test1',
duration_second=100,
duration_frame=1000,
annotations=[{
'segment': [30.0, 60.0],
'label': 'Rock climbing'
}],
feature_frame=900),
dict(
video_name='v_test2',
duration_second=100,
duration_frame=1000,
annotations=[{
'segment': [6.0, 8.0],
'label': 'Drinking beer'
}],
feature_frame=900)
]
tem_results_dir = osp.join(osp.dirname(__file__), 'data/test_tem_results')
# test when tem_result_ext is not valid
with pytest.raises(NotImplementedError):
result_dict = generate_candidate_proposals(
video_list,
video_infos,
tem_results_dir,
5,
0.5,
tem_results_ext='unsupport_ext')
# test without result_dict
assert_result1 = np.array([
[0.1, 0.7, 0.58390868, 0.35708317, 0.20850396, 0.55555556, 0.55555556],
[0.1, 0.5, 0.58390868, 0.32605207, 0.19038463, 0.29411765, 0.41666667],
[0.1, 0.3, 0.58390868, 0.26221931, 0.15311213, 0., 0.],
[0.3, 0.7, 0.30626667, 0.35708317, 0.10936267, 0.83333333, 0.83333333],
[0.3, 0.5, 0.30626667, 0.32605207, 0.09985888, 0.45454545, 0.83333333]
])
assert_result2 = np.array(
[[0.1, 0.3, 0.78390867, 0.3622193, 0.28394685, 0., 0.],
[0.1, 0.7, 0.78390867, 0.35708317, 0.27992059, 0., 0.],
[0.1, 0.5, 0.78390867, 0.32605207, 0.25559504, 0., 0.]])
result_dict = generate_candidate_proposals(video_list, video_infos,
tem_results_dir, 5, 0.5)
assert_array_almost_equal(result_dict['v_test1'], assert_result1)
assert_array_almost_equal(result_dict['v_test2'], assert_result2)
# test with result_dict
result_dict = {}
generate_candidate_proposals(
video_list,
video_infos,
tem_results_dir,
5,
0.5,
result_dict=result_dict)
assert_array_almost_equal(result_dict['v_test1'], assert_result1)
assert_array_almost_equal(result_dict['v_test2'], assert_result2)
def test_generate_bsp_feature():
video_list = [0, 1]
video_infos = [
dict(
video_name='v_test1',
duration_second=100,
duration_frame=1000,
annotations=[{
'segment': [30.0, 60.0],
'label': 'Rock climbing'
}],
feature_frame=900),
dict(
video_name='v_test2',
duration_second=100,
duration_frame=1000,
annotations=[{
'segment': [6.0, 8.0],
'label': 'Drinking beer'
}],
feature_frame=900)
]
tem_results_dir = osp.join(osp.dirname(__file__), 'data/test_tem_results')
pgm_proposals_dir = osp.join(osp.dirname(__file__), 'data/test_proposals')
# test when extension is not valid
with pytest.raises(NotImplementedError):
result_dict = generate_bsp_feature(
video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
tem_results_ext='unsupport_ext')
with pytest.raises(NotImplementedError):
result_dict = generate_bsp_feature(
video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
pgm_proposal_ext='unsupport_ext')
# test without result_dict
result_dict = generate_bsp_feature(
video_list, video_infos, tem_results_dir, pgm_proposals_dir, top_k=2)
assert_result1 = np.array(
[[
0.02633105, 0.02489364, 0.02345622, 0.0220188, 0.02058138,
0.01914396, 0.01770654, 0.01626912, 0.01541432, 0.01514214,
0.01486995, 0.01459776, 0.01432558, 0.01405339, 0.01378121,
0.01350902, 0.03064331, 0.02941124, 0.02817916, 0.02694709,
0.02571502, 0.02448295, 0.02325087, 0.0220188, 0.01432558,
0.01409228, 0.01385897, 0.01362567, 0.01339237, 0.01315907,
0.01292577, 0.01269246
],
[
0.01350902, 0.01323684, 0.01296465, 0.01269246, 0.01242028,
0.01214809, 0.01187591, 0.01160372, 0.01154264, 0.01169266,
0.01184269, 0.01199271, 0.01214273, 0.01229275, 0.01244278,
0.0125928, 0.01432558, 0.01409228, 0.01385897, 0.01362567,
0.01339237, 0.01315907, 0.01292577, 0.01269246, 0.01214273,
0.01227132, 0.01239991, 0.0125285, 0.0126571, 0.01278569,
0.01291428, 0.01304287
]])
assert_result2 = np.array(
[[
0.04133105, 0.03922697, 0.03712288, 0.0350188, 0.03291471,
0.03081063, 0.02870654, 0.02660246, 0.02541432, 0.02514214,
0.02486995, 0.02459776, 0.02432558, 0.02405339, 0.02378121,
0.02350902, 0.04764331, 0.04583981, 0.04403631, 0.04223281,
0.0404293, 0.0386258, 0.0368223, 0.0350188, 0.02432558, 0.02409228,
0.02385897, 0.02362567, 0.02339237, 0.02315907, 0.02292577,
0.02269246
],
[
0.02350902, 0.02323684, 0.02296465, 0.02269246, 0.02242028,
0.02214809, 0.02187591, 0.02160372, 0.02120931, 0.02069266,
0.02017602, 0.01965937, 0.01914273, 0.01862609, 0.01810944,
0.0175928, 0.02432558, 0.02409228, 0.02385897, 0.02362567,
0.02339237, 0.02315907, 0.02292577, 0.02269246, 0.01914273,
0.01869989, 0.01825706, 0.01781422, 0.01737138, 0.01692854,
0.0164857, 0.01604287
]])
assert_array_almost_equal(result_dict['v_test1'], assert_result1)
assert_array_almost_equal(result_dict['v_test2'], assert_result2)
# test with result_dict
result_dict = {}
generate_bsp_feature(
video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
top_k=2,
result_dict=result_dict)
assert_array_almost_equal(result_dict['v_test1'], assert_result1)
assert_array_almost_equal(result_dict['v_test2'], assert_result2)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_localization_utils.py |
import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv import ConfigDict
from numpy.testing import assert_array_almost_equal
from torch.autograd import Variable
from mmaction.models import (BCELossWithLogits, BinaryLogisticRegressionLoss,
BMNLoss, CrossEntropyLoss, HVULoss, NLLLoss,
OHEMHingeLoss, SSNLoss)
def test_hvu_loss():
pred = torch.tensor([[-1.0525, -0.7085, 0.1819, -0.8011],
[0.1555, -1.5550, 0.5586, 1.9746]])
gt = torch.tensor([[1., 0., 0., 0.], [0., 0., 1., 1.]])
mask = torch.tensor([[1., 1., 0., 0.], [0., 0., 1., 1.]])
category_mask = torch.tensor([[1., 0.], [0., 1.]])
categories = ['action', 'scene']
category_nums = (2, 2)
category_loss_weights = (1, 1)
loss_all_nomask_sum = HVULoss(
categories=categories,
category_nums=category_nums,
category_loss_weights=category_loss_weights,
loss_type='all',
with_mask=False,
reduction='sum')
loss = loss_all_nomask_sum(pred, gt, mask, category_mask)
loss1 = F.binary_cross_entropy_with_logits(pred, gt, reduction='none')
loss1 = torch.sum(loss1, dim=1)
assert torch.eq(loss['loss_cls'], torch.mean(loss1))
loss_all_mask = HVULoss(
categories=categories,
category_nums=category_nums,
category_loss_weights=category_loss_weights,
loss_type='all',
with_mask=True)
loss = loss_all_mask(pred, gt, mask, category_mask)
loss1 = F.binary_cross_entropy_with_logits(pred, gt, reduction='none')
loss1 = torch.sum(loss1 * mask, dim=1) / torch.sum(mask, dim=1)
loss1 = torch.mean(loss1)
assert torch.eq(loss['loss_cls'], loss1)
loss_ind_mask = HVULoss(
categories=categories,
category_nums=category_nums,
category_loss_weights=category_loss_weights,
loss_type='individual',
with_mask=True)
loss = loss_ind_mask(pred, gt, mask, category_mask)
action_loss = F.binary_cross_entropy_with_logits(pred[:1, :2], gt[:1, :2])
scene_loss = F.binary_cross_entropy_with_logits(pred[1:, 2:], gt[1:, 2:])
loss1 = (action_loss + scene_loss) / 2
assert torch.eq(loss['loss_cls'], loss1)
loss_ind_nomask_sum = HVULoss(
categories=categories,
category_nums=category_nums,
category_loss_weights=category_loss_weights,
loss_type='individual',
with_mask=False,
reduction='sum')
loss = loss_ind_nomask_sum(pred, gt, mask, category_mask)
action_loss = F.binary_cross_entropy_with_logits(
pred[:, :2], gt[:, :2], reduction='none')
action_loss = torch.sum(action_loss, dim=1)
action_loss = torch.mean(action_loss)
scene_loss = F.binary_cross_entropy_with_logits(
pred[:, 2:], gt[:, 2:], reduction='none')
scene_loss = torch.sum(scene_loss, dim=1)
scene_loss = torch.mean(scene_loss)
loss1 = (action_loss + scene_loss) / 2
assert torch.eq(loss['loss_cls'], loss1)
def test_cross_entropy_loss():
cls_scores = torch.rand((3, 4))
gt_labels = torch.LongTensor([2] * 3).squeeze()
cross_entropy_loss = CrossEntropyLoss()
output_loss = cross_entropy_loss(cls_scores, gt_labels)
assert torch.equal(output_loss, F.cross_entropy(cls_scores, gt_labels))
def test_bce_loss_with_logits():
cls_scores = torch.rand((3, 4))
gt_labels = torch.rand((3, 4))
bce_loss_with_logits = BCELossWithLogits()
output_loss = bce_loss_with_logits(cls_scores, gt_labels)
assert torch.equal(
output_loss, F.binary_cross_entropy_with_logits(cls_scores, gt_labels))
def test_nll_loss():
cls_scores = torch.randn(3, 3)
gt_labels = torch.tensor([0, 2, 1]).squeeze()
sm = nn.Softmax(dim=0)
nll_loss = NLLLoss()
cls_score_log = torch.log(sm(cls_scores))
output_loss = nll_loss(cls_score_log, gt_labels)
assert torch.equal(output_loss, F.nll_loss(cls_score_log, gt_labels))
def test_binary_logistic_loss():
binary_logistic_regression_loss = BinaryLogisticRegressionLoss()
reg_score = torch.tensor([0., 1.])
label = torch.tensor([0., 1.])
output_loss = binary_logistic_regression_loss(reg_score, label, 0.5)
assert_array_almost_equal(output_loss.numpy(), np.array([0.]), decimal=4)
reg_score = torch.tensor([0.3, 0.9])
label = torch.tensor([0., 1.])
output_loss = binary_logistic_regression_loss(reg_score, label, 0.5)
assert_array_almost_equal(
output_loss.numpy(), np.array([0.231]), decimal=4)
def test_bmn_loss():
bmn_loss = BMNLoss()
# test tem_loss
pred_start = torch.tensor([0.9, 0.1])
pred_end = torch.tensor([0.1, 0.9])
gt_start = torch.tensor([1., 0.])
gt_end = torch.tensor([0., 1.])
output_tem_loss = bmn_loss.tem_loss(pred_start, pred_end, gt_start, gt_end)
binary_logistic_regression_loss = BinaryLogisticRegressionLoss()
assert_loss = (
binary_logistic_regression_loss(pred_start, gt_start) +
binary_logistic_regression_loss(pred_end, gt_end))
assert_array_almost_equal(
output_tem_loss.numpy(), assert_loss.numpy(), decimal=4)
# test pem_reg_loss
seed = 1
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
pred_bm_reg = torch.tensor([[0.1, 0.99], [0.5, 0.4]])
gt_iou_map = torch.tensor([[0, 1.], [0, 1.]])
mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
output_pem_reg_loss = bmn_loss.pem_reg_loss(pred_bm_reg, gt_iou_map, mask)
assert_array_almost_equal(
output_pem_reg_loss.numpy(), np.array([0.2140]), decimal=4)
# test pem_cls_loss
pred_bm_cls = torch.tensor([[0.1, 0.99], [0.95, 0.2]])
gt_iou_map = torch.tensor([[0., 1.], [0., 1.]])
mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
output_pem_cls_loss = bmn_loss.pem_cls_loss(pred_bm_cls, gt_iou_map, mask)
assert_array_almost_equal(
output_pem_cls_loss.numpy(), np.array([1.6137]), decimal=4)
# test bmn_loss
pred_bm = torch.tensor([[[[0.1, 0.99], [0.5, 0.4]],
[[0.1, 0.99], [0.95, 0.2]]]])
pred_start = torch.tensor([[0.9, 0.1]])
pred_end = torch.tensor([[0.1, 0.9]])
gt_iou_map = torch.tensor([[[0., 2.5], [0., 10.]]])
gt_start = torch.tensor([[1., 0.]])
gt_end = torch.tensor([[0., 1.]])
mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
output_loss = bmn_loss(pred_bm, pred_start, pred_end, gt_iou_map, gt_start,
gt_end, mask)
assert_array_almost_equal(
output_loss[0].numpy(),
output_tem_loss + 10 * output_pem_reg_loss + output_pem_cls_loss)
assert_array_almost_equal(output_loss[1].numpy(), output_tem_loss)
assert_array_almost_equal(output_loss[2].numpy(), output_pem_reg_loss)
assert_array_almost_equal(output_loss[3].numpy(), output_pem_cls_loss)
def test_ohem_hinge_loss():
# test normal case
pred = torch.tensor([[
0.5161, 0.5228, 0.7748, 0.0573, 0.1113, 0.8862, 0.1752, 0.9448, 0.0253,
0.1009, 0.4371, 0.2232, 0.0412, 0.3487, 0.3350, 0.9294, 0.7122, 0.3072,
0.2942, 0.7679
]],
requires_grad=True)
gt = torch.tensor([8])
num_video = 1
loss = OHEMHingeLoss.apply(pred, gt, 1, 1.0, num_video)
assert_array_almost_equal(
loss.detach().numpy(), np.array([0.0552]), decimal=4)
loss.backward(Variable(torch.ones([1])))
assert_array_almost_equal(
np.array(pred.grad),
np.array([[
0., 0., 0., 0., 0., 0., 0., -1., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0.
]]),
decimal=4)
# test error case
with pytest.raises(ValueError):
gt = torch.tensor([8, 10])
loss = OHEMHingeLoss.apply(pred, gt, 1, 1.0, num_video)
def test_ssn_loss():
ssn_loss = SSNLoss()
# test activity_loss
activity_score = torch.rand((8, 21))
labels = torch.LongTensor([8] * 8).squeeze()
activity_indexer = torch.tensor([0, 7])
output_activity_loss = ssn_loss.activity_loss(activity_score, labels,
activity_indexer)
assert torch.equal(
output_activity_loss,
F.cross_entropy(activity_score[activity_indexer, :],
labels[activity_indexer]))
# test completeness_loss
completeness_score = torch.rand((8, 20), requires_grad=True)
labels = torch.LongTensor([8] * 8).squeeze()
completeness_indexer = torch.tensor([0, 1, 2, 3, 4, 5, 6])
positive_per_video = 1
incomplete_per_video = 6
output_completeness_loss = ssn_loss.completeness_loss(
completeness_score, labels, completeness_indexer, positive_per_video,
incomplete_per_video)
pred = completeness_score[completeness_indexer, :]
gt = labels[completeness_indexer]
pred_dim = pred.size(1)
pred = pred.view(-1, positive_per_video + incomplete_per_video, pred_dim)
gt = gt.view(-1, positive_per_video + incomplete_per_video)
# yapf:disable
positive_pred = pred[:, :positive_per_video, :].contiguous().view(-1, pred_dim) # noqa:E501
incomplete_pred = pred[:, positive_per_video:, :].contiguous().view(-1, pred_dim) # noqa:E501
# yapf:enable
ohem_ratio = 0.17
positive_loss = OHEMHingeLoss.apply(
positive_pred, gt[:, :positive_per_video].contiguous().view(-1), 1,
1.0, positive_per_video)
incomplete_loss = OHEMHingeLoss.apply(
incomplete_pred, gt[:, positive_per_video:].contiguous().view(-1), -1,
ohem_ratio, incomplete_per_video)
num_positives = positive_pred.size(0)
num_incompletes = int(incomplete_pred.size(0) * ohem_ratio)
assert_loss = ((positive_loss + incomplete_loss) /
float(num_positives + num_incompletes))
assert torch.equal(output_completeness_loss, assert_loss)
# test reg_loss
bbox_pred = torch.rand((8, 20, 2))
labels = torch.LongTensor([8] * 8).squeeze()
bbox_targets = torch.rand((8, 2))
regression_indexer = torch.tensor([0])
output_reg_loss = ssn_loss.classwise_regression_loss(
bbox_pred, labels, bbox_targets, regression_indexer)
pred = bbox_pred[regression_indexer, :, :]
gt = labels[regression_indexer]
reg_target = bbox_targets[regression_indexer, :]
class_idx = gt.data - 1
classwise_pred = pred[:, class_idx, :]
classwise_reg_pred = torch.cat((torch.diag(classwise_pred[:, :, 0]).view(
-1, 1), torch.diag(classwise_pred[:, :, 1]).view(-1, 1)),
dim=1)
assert torch.equal(
output_reg_loss,
F.smooth_l1_loss(classwise_reg_pred.view(-1), reg_target.view(-1)) * 2)
# test ssn_loss
proposal_type = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 2]])
train_cfg = ConfigDict(
dict(
ssn=dict(
sampler=dict(
num_per_video=8,
positive_ratio=1,
background_ratio=1,
incomplete_ratio=6,
add_gt_as_proposals=True),
loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1))))
output_loss = ssn_loss(activity_score, completeness_score, bbox_pred,
proposal_type, labels, bbox_targets, train_cfg)
assert torch.equal(output_loss['loss_activity'], output_activity_loss)
assert torch.equal(output_loss['loss_completeness'],
output_completeness_loss * 0.1)
assert torch.equal(output_loss['loss_reg'], output_reg_loss * 0.1)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_loss.py |
import os.path as osp
import mmcv
import numpy as np
import pytest
import torch
from mmaction.models import build_recognizer
from mmaction.utils.gradcam_utils import GradCAM
def _get_cfg(fname):
"""Grab configs necessary to create a recognizer.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
repo_dpath = osp.dirname(osp.dirname(__file__))
config_dpath = osp.join(repo_dpath, 'configs/recognition')
config_fpath = osp.join(config_dpath, fname)
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config = mmcv.Config.fromfile(config_fpath)
return config
def _get_target_shapes(input_shape, num_classes=400, model_type='2D'):
if model_type not in ['2D', '3D']:
raise ValueError(f'Data type {model_type} is not available')
preds_target_shape = (input_shape[0], num_classes)
if model_type == '3D':
# input shape (batch_size, num_crops*num_clips, C, clip_len, H, W)
# target shape (batch_size*num_crops*num_clips, clip_len, H, W, C)
blended_imgs_target_shape = (input_shape[0] * input_shape[1],
input_shape[3], input_shape[4],
input_shape[5], input_shape[2])
else:
# input shape (batch_size, num_segments, C, H, W)
# target shape (batch_size, num_segments, H, W, C)
blended_imgs_target_shape = (input_shape[0], input_shape[1],
input_shape[3], input_shape[4],
input_shape[2])
return blended_imgs_target_shape, preds_target_shape
def _generate_gradcam_inputs(input_shape=(1, 3, 3, 224, 224), model_type='2D'):
"""Create a superset of inputs needed to run gradcam.
Args:
input_shape (tuple[int]): input batch dimensions.
Default: (1, 3, 3, 224, 224).
model_type (str): Model type for data generation, from {'2D', '3D'}.
Default:'2D'
return:
dict: model inputs, including two keys, ``imgs`` and ``label``.
"""
imgs = np.random.random(input_shape)
if model_type in ['2D', '3D']:
gt_labels = torch.LongTensor([2] * input_shape[0])
else:
raise ValueError(f'Data type {model_type} is not available')
inputs = {
'imgs': torch.FloatTensor(imgs),
'label': gt_labels,
}
return inputs
def _do_test_2D_models(recognizer,
target_layer_name,
input_shape,
num_classes=400,
device='cpu'):
demo_inputs = _generate_gradcam_inputs(input_shape)
demo_inputs['imgs'] = demo_inputs['imgs'].to(device)
demo_inputs['label'] = demo_inputs['label'].to(device)
recognizer = recognizer.to(device)
gradcam = GradCAM(recognizer, target_layer_name)
blended_imgs_target_shape, preds_target_shape = _get_target_shapes(
input_shape, num_classes=num_classes, model_type='2D')
blended_imgs, preds = gradcam(demo_inputs)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
blended_imgs, preds = gradcam(demo_inputs, True)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
def _do_test_3D_models(recognizer,
target_layer_name,
input_shape,
num_classes=400):
blended_imgs_target_shape, preds_target_shape = _get_target_shapes(
input_shape, num_classes=num_classes, model_type='3D')
demo_inputs = _generate_gradcam_inputs(input_shape, '3D')
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
demo_inputs['imgs'] = demo_inputs['imgs'].cuda()
demo_inputs['label'] = demo_inputs['label'].cuda()
gradcam = GradCAM(recognizer, target_layer_name)
blended_imgs, preds = gradcam(demo_inputs)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
blended_imgs, preds = gradcam(demo_inputs, True)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
else:
gradcam = GradCAM(recognizer, target_layer_name)
blended_imgs, preds = gradcam(demo_inputs)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
blended_imgs, preds = gradcam(demo_inputs, True)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
def test_tsn():
config = _get_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 25, 3, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_2D_models(recognizer, target_layer_name, input_shape)
def test_i3d():
config = _get_cfg('i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = [1, 1, 3, 32, 32, 32]
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_r2plus1d():
config = _get_cfg('r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
config.model['backbone']['norm_cfg'] = dict(type='BN3d')
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 3, 3, 8, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_slowfast():
config = _get_cfg('slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py')
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 1, 3, 32, 32, 32)
target_layer_name = 'backbone/slow_path/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_tsm():
config = _get_cfg('tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
target_layer_name = 'backbone/layer4/1/relu'
# base config
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 8, 3, 32, 32)
_do_test_2D_models(recognizer, target_layer_name, input_shape)
# test twice sample + 3 crops, 2*3*8=48
test_cfg = dict(average_clips='prob')
recognizer = build_recognizer(config.model, test_cfg=test_cfg)
recognizer.cfg = config
input_shape = (1, 48, 3, 32, 32)
_do_test_2D_models(recognizer, target_layer_name, input_shape)
def test_csn():
config = _get_cfg(
'csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 1, 3, 32, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_tpn():
target_layer_name = 'backbone/layer4/1/relu'
config = _get_cfg('tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 8, 3, 32, 32)
_do_test_2D_models(recognizer, target_layer_name, input_shape, 174)
config = _get_cfg('tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 3, 3, 8, 32, 32)
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_c3d():
config = _get_cfg('c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 1, 3, 16, 112, 112)
target_layer_name = 'backbone/conv5a/activate'
_do_test_3D_models(recognizer, target_layer_name, input_shape, 101)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_tin():
config = _get_cfg('tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
target_layer_name = 'backbone/layer4/1/relu'
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 8, 3, 64, 64)
_do_test_2D_models(
recognizer, target_layer_name, input_shape, device='cuda:0')
def test_x3d():
config = _get_cfg('x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 1, 3, 13, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_gradcam.py |
import copy
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.utils import _BatchNorm
from mmaction.models import (C3D, X3D, ResNet, ResNet2Plus1d, ResNet3d,
ResNet3dCSN, ResNet3dSlowFast, ResNet3dSlowOnly,
ResNetAudio, ResNetTIN, ResNetTSM)
from mmaction.models.backbones.resnet_tsm import NL3DWrapper
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
def test_resnet_backbone():
"""Test resnet backbone."""
with pytest.raises(KeyError):
# ResNet depth should be in [18, 34, 50, 101, 152]
ResNet(20)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=0)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=5)
with pytest.raises(AssertionError):
# len(strides) == len(dilations) == num_stages
ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)
with pytest.raises(TypeError):
# pretrain must be a str
resnet50 = ResNet(50, pretrained=0)
resnet50.init_weights()
with pytest.raises(AssertionError):
# style must be in ['pytorch', 'caffe']
ResNet(18, style='tensorflow')
with pytest.raises(AssertionError):
# assert not with_cp
ResNet(18, with_cp=True)
# resnet with depth 18, norm_eval False, initial weights
resnet18 = ResNet(18)
resnet18.init_weights()
# resnet with depth 50, norm_eval True
resnet50 = ResNet(50, norm_eval=True)
resnet50.init_weights()
resnet50.train()
assert check_norm_state(resnet50.modules(), False)
# resnet with depth 50, norm_eval True, pretrained
resnet50_pretrain = ResNet(
pretrained='torchvision://resnet50', depth=50, norm_eval=True)
resnet50_pretrain.init_weights()
resnet50_pretrain.train()
assert check_norm_state(resnet50_pretrain.modules(), False)
# resnet with depth 50, norm_eval True, frozen_stages 1
frozen_stages = 1
resnet50_frozen = ResNet(50, frozen_stages=frozen_stages)
resnet50_frozen.init_weights()
resnet50_frozen.train()
assert resnet50_frozen.conv1.bn.training is False
for layer in resnet50_frozen.conv1.modules():
for param in layer.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(resnet50_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# resnet with depth 50, partial batchnorm
resnet_pbn = ResNet(50, partial_bn=True)
resnet_pbn.train()
count_bn = 0
for m in resnet_pbn.modules():
if isinstance(m, nn.BatchNorm2d):
count_bn += 1
if count_bn >= 2:
assert m.weight.requires_grad is False
assert m.bias.requires_grad is False
assert m.training is False
else:
assert m.weight.requires_grad is True
assert m.bias.requires_grad is True
assert m.training is True
input_shape = (1, 3, 64, 64)
imgs = _demo_inputs(input_shape)
# resnet with depth 18 inference
resnet18 = ResNet(18, norm_eval=False)
resnet18.init_weights()
resnet18.train()
feat = resnet18(imgs)
assert feat.shape == torch.Size([1, 512, 2, 2])
# resnet with depth 50 inference
resnet50 = ResNet(50, norm_eval=False)
resnet50.init_weights()
resnet50.train()
feat = resnet50(imgs)
assert feat.shape == torch.Size([1, 2048, 2, 2])
# resnet with depth 50 in caffe style inference
resnet50_caffe = ResNet(50, style='caffe', norm_eval=False)
resnet50_caffe.init_weights()
resnet50_caffe.train()
feat = resnet50_caffe(imgs)
assert feat.shape == torch.Size([1, 2048, 2, 2])
def test_x3d_backbone():
"""Test resnet3d backbone."""
with pytest.raises(AssertionError):
# In X3D: 1 <= num_stages <= 4
X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, num_stages=0)
with pytest.raises(AssertionError):
# In X3D: 1 <= num_stages <= 4
X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, num_stages=5)
with pytest.raises(AssertionError):
# len(spatial_strides) == num_stages
X3D(gamma_w=1.0,
gamma_b=2.25,
gamma_d=2.2,
spatial_strides=(1, 2),
num_stages=4)
with pytest.raises(AssertionError):
# se_style in ['half', 'all']
X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, se_style=None)
with pytest.raises(AssertionError):
# se_ratio should be None or > 0
X3D(gamma_w=1.0,
gamma_b=2.25,
gamma_d=2.2,
se_style='half',
se_ratio=0)
# x3d_s, no pretrained, norm_eval True
x3d_s = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, norm_eval=True)
x3d_s.init_weights()
x3d_s.train()
assert check_norm_state(x3d_s.modules(), False)
# x3d_l, no pretrained, norm_eval True
x3d_l = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=5.0, norm_eval=True)
x3d_l.init_weights()
x3d_l.train()
assert check_norm_state(x3d_l.modules(), False)
# x3d_s, no pretrained, norm_eval False
x3d_s = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, norm_eval=False)
x3d_s.init_weights()
x3d_s.train()
assert check_norm_state(x3d_s.modules(), True)
# x3d_l, no pretrained, norm_eval False
x3d_l = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=5.0, norm_eval=False)
x3d_l.init_weights()
x3d_l.train()
assert check_norm_state(x3d_l.modules(), True)
# x3d_s, no pretrained, frozen_stages, norm_eval False
frozen_stages = 1
x3d_s_frozen = X3D(
gamma_w=1.0,
gamma_b=2.25,
gamma_d=2.2,
norm_eval=False,
frozen_stages=frozen_stages)
x3d_s_frozen.init_weights()
x3d_s_frozen.train()
assert x3d_s_frozen.conv1_t.bn.training is False
for param in x3d_s_frozen.conv1_s.parameters():
assert param.requires_grad is False
for param in x3d_s_frozen.conv1_t.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(x3d_s_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# test zero_init_residual, zero_init_residual is True by default
for m in x3d_s_frozen.modules():
if hasattr(m, 'conv3'):
assert torch.equal(m.conv3.bn.weight,
torch.zeros_like(m.conv3.bn.weight))
assert torch.equal(m.conv3.bn.bias,
torch.zeros_like(m.conv3.bn.bias))
# x3d_s inference
input_shape = (1, 3, 13, 160, 160)
imgs = _demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
x3d_s_frozen = x3d_s_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = x3d_s_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 432, 13, 5, 5])
else:
feat = x3d_s_frozen(imgs)
assert feat.shape == torch.Size([1, 432, 13, 5, 5])
# x3d_m inference
input_shape = (1, 3, 16, 224, 224)
imgs = _demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
x3d_s_frozen = x3d_s_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = x3d_s_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 432, 16, 7, 7])
else:
feat = x3d_s_frozen(imgs)
assert feat.shape == torch.Size([1, 432, 16, 7, 7])
def test_resnet3d_backbone():
"""Test resnet3d backbone."""
with pytest.raises(AssertionError):
# In ResNet3d: 1 <= num_stages <= 4
ResNet3d(34, None, num_stages=0)
with pytest.raises(AssertionError):
# In ResNet3d: 1 <= num_stages <= 4
ResNet3d(34, None, num_stages=5)
with pytest.raises(AssertionError):
# In ResNet3d: 1 <= num_stages <= 4
ResNet3d(50, None, num_stages=0)
with pytest.raises(AssertionError):
# In ResNet3d: 1 <= num_stages <= 4
ResNet3d(50, None, num_stages=5)
with pytest.raises(AssertionError):
# len(spatial_strides) == len(temporal_strides)
# == len(dilations) == num_stages
ResNet3d(
50,
None,
spatial_strides=(1, ),
temporal_strides=(1, 1),
dilations=(1, 1, 1),
num_stages=4)
with pytest.raises(AssertionError):
# len(spatial_strides) == len(temporal_strides)
# == len(dilations) == num_stages
ResNet3d(
34,
None,
spatial_strides=(1, ),
temporal_strides=(1, 1),
dilations=(1, 1, 1),
num_stages=4)
with pytest.raises(TypeError):
# pretrain must be str or None.
resnet3d_34 = ResNet3d(34, ['resnet', 'bninception'])
resnet3d_34.init_weights()
with pytest.raises(TypeError):
# pretrain must be str or None.
resnet3d_50 = ResNet3d(50, ['resnet', 'bninception'])
resnet3d_50.init_weights()
# resnet3d with depth 34, no pretrained, norm_eval True
resnet3d_34 = ResNet3d(34, None, pretrained2d=False, norm_eval=True)
resnet3d_34.init_weights()
resnet3d_34.train()
assert check_norm_state(resnet3d_34.modules(), False)
# resnet3d with depth 50, no pretrained, norm_eval True
resnet3d_50 = ResNet3d(50, None, pretrained2d=False, norm_eval=True)
resnet3d_50.init_weights()
resnet3d_50.train()
assert check_norm_state(resnet3d_50.modules(), False)
# resnet3d with depth 50, pretrained2d, norm_eval True
resnet3d_50_pretrain = ResNet3d(
50, 'torchvision://resnet50', norm_eval=True)
resnet3d_50_pretrain.init_weights()
resnet3d_50_pretrain.train()
assert check_norm_state(resnet3d_50_pretrain.modules(), False)
from mmcv.runner import _load_checkpoint
chkp_2d = _load_checkpoint('torchvision://resnet50')
for name, module in resnet3d_50_pretrain.named_modules():
if len(name.split('.')) == 4:
# layer.block.module.submodule
prefix = name.split('.')[:2]
module_type = name.split('.')[2]
submodule_type = name.split('.')[3]
if module_type == 'downsample':
name2d = name.replace('conv', '0').replace('bn', '1')
else:
layer_id = name.split('.')[2][-1]
name2d = prefix[0] + '.' + prefix[1] + '.' + \
submodule_type + layer_id
if isinstance(module, nn.Conv3d):
conv2d_weight = chkp_2d[name2d + '.weight']
conv3d_weight = getattr(module, 'weight').data
assert torch.equal(
conv3d_weight,
conv2d_weight.data.unsqueeze(2).expand_as(conv3d_weight) /
conv3d_weight.shape[2])
if getattr(module, 'bias') is not None:
conv2d_bias = chkp_2d[name2d + '.bias']
conv3d_bias = getattr(module, 'bias').data
assert torch.equal(conv2d_bias, conv3d_bias)
elif isinstance(module, nn.BatchNorm3d):
for pname in ['weight', 'bias', 'running_mean', 'running_var']:
param_2d = chkp_2d[name2d + '.' + pname]
param_3d = getattr(module, pname).data
assert torch.equal(param_2d, param_3d)
conv3d = resnet3d_50_pretrain.conv1.conv
assert torch.equal(
conv3d.weight,
chkp_2d['conv1.weight'].unsqueeze(2).expand_as(conv3d.weight) /
conv3d.weight.shape[2])
conv3d = resnet3d_50_pretrain.layer3[2].conv2.conv
assert torch.equal(
conv3d.weight, chkp_2d['layer3.2.conv2.weight'].unsqueeze(2).expand_as(
conv3d.weight) / conv3d.weight.shape[2])
# resnet3d with depth 34, no pretrained, norm_eval False
resnet3d_34_no_bn_eval = ResNet3d(
34, None, pretrained2d=False, norm_eval=False)
resnet3d_34_no_bn_eval.init_weights()
resnet3d_34_no_bn_eval.train()
assert check_norm_state(resnet3d_34_no_bn_eval.modules(), True)
# resnet3d with depth 50, no pretrained, norm_eval False
resnet3d_50_no_bn_eval = ResNet3d(
50, None, pretrained2d=False, norm_eval=False)
resnet3d_50_no_bn_eval.init_weights()
resnet3d_50_no_bn_eval.train()
assert check_norm_state(resnet3d_50_no_bn_eval.modules(), True)
# resnet3d with depth 34, no pretrained, frozen_stages, norm_eval False
frozen_stages = 1
resnet3d_34_frozen = ResNet3d(
34, None, pretrained2d=False, frozen_stages=frozen_stages)
resnet3d_34_frozen.init_weights()
resnet3d_34_frozen.train()
assert resnet3d_34_frozen.conv1.bn.training is False
for param in resnet3d_34_frozen.conv1.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(resnet3d_34_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# test zero_init_residual
for m in resnet3d_34_frozen.modules():
if hasattr(m, 'conv2'):
assert torch.equal(m.conv2.bn.weight,
torch.zeros_like(m.conv2.bn.weight))
assert torch.equal(m.conv2.bn.bias,
torch.zeros_like(m.conv2.bn.bias))
# resnet3d with depth 50, no pretrained, frozen_stages, norm_eval False
frozen_stages = 1
resnet3d_50_frozen = ResNet3d(
50, None, pretrained2d=False, frozen_stages=frozen_stages)
resnet3d_50_frozen.init_weights()
resnet3d_50_frozen.train()
assert resnet3d_50_frozen.conv1.bn.training is False
for param in resnet3d_50_frozen.conv1.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(resnet3d_50_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# test zero_init_residual
for m in resnet3d_50_frozen.modules():
if hasattr(m, 'conv3'):
assert torch.equal(m.conv3.bn.weight,
torch.zeros_like(m.conv3.bn.weight))
assert torch.equal(m.conv3.bn.bias,
torch.zeros_like(m.conv3.bn.bias))
# resnet3d frozen with depth 34 inference
input_shape = (1, 3, 6, 64, 64)
imgs = _demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_34_frozen = resnet3d_34_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_34_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 512, 1, 2, 2])
else:
feat = resnet3d_34_frozen(imgs)
assert feat.shape == torch.Size([1, 512, 1, 2, 2])
# resnet3d with depth 50 inference
input_shape = (1, 3, 6, 64, 64)
imgs = _demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_50_frozen = resnet3d_50_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_50_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
else:
feat = resnet3d_50_frozen(imgs)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
# resnet3d with depth 50 in caffe style inference
resnet3d_50_caffe = ResNet3d(50, None, pretrained2d=False, style='caffe')
resnet3d_50_caffe.init_weights()
resnet3d_50_caffe.train()
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_50_caffe = resnet3d_50_caffe.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_50_caffe(imgs_gpu)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
else:
feat = resnet3d_50_caffe(imgs)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
# resnet3d with depth 34 in caffe style inference
resnet3d_34_caffe = ResNet3d(34, None, pretrained2d=False, style='caffe')
resnet3d_34_caffe.init_weights()
resnet3d_34_caffe.train()
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_34_caffe = resnet3d_34_caffe.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_34_caffe(imgs_gpu)
assert feat.shape == torch.Size([1, 512, 1, 2, 2])
else:
feat = resnet3d_34_caffe(imgs)
assert feat.shape == torch.Size([1, 512, 1, 2, 2])
# resnet3d with depth with 3x3x3 inflate_style inference
resnet3d_50_1x1x1 = ResNet3d(
50, None, pretrained2d=False, inflate_style='3x3x3')
resnet3d_50_1x1x1.init_weights()
resnet3d_50_1x1x1.train()
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_50_1x1x1 = resnet3d_50_1x1x1.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_50_1x1x1(imgs_gpu)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
else:
feat = resnet3d_50_1x1x1(imgs)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
resnet3d_34_1x1x1 = ResNet3d(
34, None, pretrained2d=False, inflate_style='3x3x3')
resnet3d_34_1x1x1.init_weights()
resnet3d_34_1x1x1.train()
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_34_1x1x1 = resnet3d_34_1x1x1.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_34_1x1x1(imgs_gpu)
assert feat.shape == torch.Size([1, 512, 1, 2, 2])
else:
feat = resnet3d_34_1x1x1(imgs)
assert feat.shape == torch.Size([1, 512, 1, 2, 2])
# resnet3d with non-local module
non_local_cfg = dict(
sub_sample=True,
use_scale=False,
norm_cfg=dict(type='BN3d', requires_grad=True),
mode='embedded_gaussian')
non_local = ((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0))
resnet3d_nonlocal = ResNet3d(
50,
None,
pretrained2d=False,
non_local=non_local,
non_local_cfg=non_local_cfg)
resnet3d_nonlocal.init_weights()
for layer_name in ['layer2', 'layer3']:
layer = getattr(resnet3d_nonlocal, layer_name)
for i, _ in enumerate(layer):
if i % 2 == 0:
assert hasattr(layer[i], 'non_local_block')
feat = resnet3d_nonlocal(imgs)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
def test_resnet2plus1d_backbone():
# Test r2+1d backbone
with pytest.raises(AssertionError):
# r2+1d does not support inflation
ResNet2Plus1d(50, None, pretrained2d=True)
with pytest.raises(AssertionError):
# r2+1d requires conv(2+1)d module
ResNet2Plus1d(
50, None, pretrained2d=False, conv_cfg=dict(type='Conv3d'))
frozen_stages = 1
r2plus1d_34_frozen = ResNet2Plus1d(
34,
None,
conv_cfg=dict(type='Conv2plus1d'),
pretrained2d=False,
frozen_stages=frozen_stages,
conv1_kernel=(3, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(1, 1, 1, 1),
spatial_strides=(1, 2, 2, 2),
temporal_strides=(1, 2, 2, 2))
r2plus1d_34_frozen.init_weights()
r2plus1d_34_frozen.train()
assert r2plus1d_34_frozen.conv1.conv.bn_s.training is False
assert r2plus1d_34_frozen.conv1.bn.training is False
for param in r2plus1d_34_frozen.conv1.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(r2plus1d_34_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
input_shape = (1, 3, 8, 64, 64)
imgs = _demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
r2plus1d_34_frozen = r2plus1d_34_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = r2plus1d_34_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 512, 1, 2, 2])
else:
feat = r2plus1d_34_frozen(imgs)
assert feat.shape == torch.Size([1, 512, 1, 2, 2])
r2plus1d_50_frozen = ResNet2Plus1d(
50,
None,
conv_cfg=dict(type='Conv2plus1d'),
pretrained2d=False,
conv1_kernel=(3, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(1, 1, 1, 1),
spatial_strides=(1, 2, 2, 2),
temporal_strides=(1, 2, 2, 2),
frozen_stages=frozen_stages)
r2plus1d_50_frozen.init_weights()
r2plus1d_50_frozen.train()
assert r2plus1d_50_frozen.conv1.conv.bn_s.training is False
assert r2plus1d_50_frozen.conv1.bn.training is False
for param in r2plus1d_50_frozen.conv1.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(r2plus1d_50_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
input_shape = (1, 3, 8, 64, 64)
imgs = _demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
r2plus1d_50_frozen = r2plus1d_50_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = r2plus1d_50_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
else:
feat = r2plus1d_50_frozen(imgs)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
def test_resnet_tsm_backbone():
"""Test resnet_tsm backbone."""
with pytest.raises(NotImplementedError):
# shift_place must be block or blockres
resnet_tsm_50_block = ResNetTSM(50, shift_place='Block')
resnet_tsm_50_block.init_weights()
from mmaction.models.backbones.resnet import Bottleneck
from mmaction.models.backbones.resnet_tsm import TemporalShift
input_shape = (8, 3, 64, 64)
imgs = _demo_inputs(input_shape)
# resnet_tsm with depth 50
resnet_tsm_50 = ResNetTSM(50)
resnet_tsm_50.init_weights()
for layer_name in resnet_tsm_50.res_layers:
layer = getattr(resnet_tsm_50, layer_name)
blocks = list(layer.children())
for block in blocks:
assert isinstance(block.conv1.conv, TemporalShift)
assert block.conv1.conv.num_segments == resnet_tsm_50.num_segments
assert block.conv1.conv.shift_div == resnet_tsm_50.shift_div
assert isinstance(block.conv1.conv.net, nn.Conv2d)
# resnet_tsm with depth 50, no pretrained, shift_place is block
resnet_tsm_50_block = ResNetTSM(50, shift_place='block')
resnet_tsm_50_block.init_weights()
for layer_name in resnet_tsm_50_block.res_layers:
layer = getattr(resnet_tsm_50_block, layer_name)
blocks = list(layer.children())
for block in blocks:
assert isinstance(block, TemporalShift)
assert block.num_segments == resnet_tsm_50_block.num_segments
assert block.shift_div == resnet_tsm_50_block.shift_div
assert isinstance(block.net, Bottleneck)
# resnet_tsm with depth 50, no pretrained, use temporal_pool
resnet_tsm_50_temporal_pool = ResNetTSM(50, temporal_pool=True)
resnet_tsm_50_temporal_pool.init_weights()
for layer_name in resnet_tsm_50_temporal_pool.res_layers:
layer = getattr(resnet_tsm_50_temporal_pool, layer_name)
blocks = list(layer.children())
if layer_name == 'layer2':
assert len(blocks) == 2
assert isinstance(blocks[1], nn.MaxPool3d)
blocks = copy.deepcopy(blocks[0])
for block in blocks:
assert isinstance(block.conv1.conv, TemporalShift)
if layer_name == 'layer1':
assert block.conv1.conv.num_segments == \
resnet_tsm_50_temporal_pool.num_segments
else:
assert block.conv1.conv.num_segments == \
resnet_tsm_50_temporal_pool.num_segments // 2
assert block.conv1.conv.shift_div == resnet_tsm_50_temporal_pool.shift_div # noqa: E501
assert isinstance(block.conv1.conv.net, nn.Conv2d)
# resnet_tsm with non-local module
non_local_cfg = dict(
sub_sample=True,
use_scale=False,
norm_cfg=dict(type='BN3d', requires_grad=True),
mode='embedded_gaussian')
non_local = ((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0))
resnet_tsm_nonlocal = ResNetTSM(
50, non_local=non_local, non_local_cfg=non_local_cfg)
resnet_tsm_nonlocal.init_weights()
for layer_name in ['layer2', 'layer3']:
layer = getattr(resnet_tsm_nonlocal, layer_name)
for i, _ in enumerate(layer):
if i % 2 == 0:
assert isinstance(layer[i], NL3DWrapper)
resnet_tsm_50_full = ResNetTSM(
50,
non_local=non_local,
non_local_cfg=non_local_cfg,
temporal_pool=True)
resnet_tsm_50_full.init_weights()
    # TSM forward
feat = resnet_tsm_50(imgs)
assert feat.shape == torch.Size([8, 2048, 2, 2])
# TSM with non-local forward
feat = resnet_tsm_nonlocal(imgs)
assert feat.shape == torch.Size([8, 2048, 2, 2])
# TSM with temporal pool forward
feat = resnet_tsm_50_temporal_pool(imgs)
assert feat.shape == torch.Size([4, 2048, 2, 2])
# TSM with temporal pool + non-local forward
input_shape = (16, 3, 32, 32)
imgs = _demo_inputs(input_shape)
feat = resnet_tsm_50_full(imgs)
assert feat.shape == torch.Size([8, 2048, 1, 1])
def test_slowfast_backbone():
"""Test SlowFast backbone."""
with pytest.raises(TypeError):
# cfg should be a dict
ResNet3dSlowFast(None, slow_pathway=list(['foo', 'bar']))
with pytest.raises(TypeError):
# pretrained should be a str
sf_50 = ResNet3dSlowFast(dict(foo='bar'))
sf_50.init_weights()
with pytest.raises(KeyError):
# pathway type should be implemented
ResNet3dSlowFast(None, slow_pathway=dict(type='resnext'))
# test slowfast with slow inflated
sf_50_inflate = ResNet3dSlowFast(
None,
slow_pathway=dict(
type='resnet3d',
depth=50,
pretrained='torchvision://resnet50',
pretrained2d=True,
lateral=True,
conv1_kernel=(1, 7, 7),
dilations=(1, 1, 1, 1),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1)))
sf_50_inflate.init_weights()
sf_50_inflate.train()
# test slowfast with no lateral connection
sf_50_wo_lateral = ResNet3dSlowFast(
None,
slow_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
lateral=False,
conv1_kernel=(1, 7, 7),
dilations=(1, 1, 1, 1),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1)))
sf_50_wo_lateral.init_weights()
sf_50_wo_lateral.train()
# slowfast w/o lateral connection inference test
input_shape = (1, 3, 8, 64, 64)
imgs = _demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
sf_50_wo_lateral = sf_50_wo_lateral.cuda()
imgs_gpu = imgs.cuda()
feat = sf_50_wo_lateral(imgs_gpu)
else:
feat = sf_50_wo_lateral(imgs)
assert isinstance(feat, tuple)
assert feat[0].shape == torch.Size([1, 2048, 1, 2, 2])
assert feat[1].shape == torch.Size([1, 256, 8, 2, 2])
# test slowfast with frozen stages config
frozen_slow = 3
sf_50 = ResNet3dSlowFast(
None,
slow_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
pretrained2d=True,
lateral=True,
conv1_kernel=(1, 7, 7),
dilations=(1, 1, 1, 1),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1),
frozen_stages=frozen_slow))
sf_50.init_weights()
sf_50.train()
for stage in range(1, sf_50.slow_path.num_stages):
lateral_name = sf_50.slow_path.lateral_connections[stage - 1]
conv_lateral = getattr(sf_50.slow_path, lateral_name)
for mod in conv_lateral.modules():
if isinstance(mod, _BatchNorm):
if stage <= frozen_slow:
assert mod.training is False
else:
assert mod.training is True
for param in conv_lateral.parameters():
if stage <= frozen_slow:
assert param.requires_grad is False
else:
assert param.requires_grad is True
# test slowfast with normal config
sf_50 = ResNet3dSlowFast(None)
sf_50.init_weights()
sf_50.train()
# slowfast inference test
input_shape = (1, 3, 8, 64, 64)
imgs = _demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
sf_50 = sf_50.cuda()
imgs_gpu = imgs.cuda()
feat = sf_50(imgs_gpu)
else:
feat = sf_50(imgs)
assert isinstance(feat, tuple)
assert feat[0].shape == torch.Size([1, 2048, 1, 2, 2])
assert feat[1].shape == torch.Size([1, 256, 8, 2, 2])
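# Illustrative note (based on the assertions above): the SlowFast backbone
# returns a (slow, fast) feature tuple; the slow pathway is temporally
# downsampled to a single step while the fast pathway keeps all 8 input
# frames, hence the [1, 2048, 1, 2, 2] and [1, 256, 8, 2, 2] shapes.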
def test_slowonly_backbone():
"""Test SlowOnly backbone."""
with pytest.raises(AssertionError):
# SlowOnly should contain no lateral connection
ResNet3dSlowOnly(50, None, lateral=True)
# test SlowOnly with normal config
so_50 = ResNet3dSlowOnly(50, None)
so_50.init_weights()
so_50.train()
# SlowOnly inference test
input_shape = (1, 3, 8, 64, 64)
imgs = _demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
so_50 = so_50.cuda()
imgs_gpu = imgs.cuda()
feat = so_50(imgs_gpu)
else:
feat = so_50(imgs)
assert feat.shape == torch.Size([1, 2048, 8, 2, 2])
def test_resnet_csn_backbone():
"""Test resnet_csn backbone."""
with pytest.raises(ValueError):
# Bottleneck mode must be "ip" or "ir"
ResNet3dCSN(152, None, bottleneck_mode='id')
input_shape = (2, 3, 6, 64, 64)
imgs = _demo_inputs(input_shape)
resnet3d_csn_frozen = ResNet3dCSN(
152, None, bn_frozen=True, norm_eval=True)
resnet3d_csn_frozen.train()
for m in resnet3d_csn_frozen.modules():
if isinstance(m, _BatchNorm):
for param in m.parameters():
assert param.requires_grad is False
# Interaction-preserved channel-separated bottleneck block
resnet3d_csn_ip = ResNet3dCSN(152, None, bottleneck_mode='ip')
resnet3d_csn_ip.init_weights()
resnet3d_csn_ip.train()
for i, layer_name in enumerate(resnet3d_csn_ip.res_layers):
layers = getattr(resnet3d_csn_ip, layer_name)
num_blocks = resnet3d_csn_ip.stage_blocks[i]
assert len(layers) == num_blocks
for layer in layers:
assert isinstance(layer.conv2, nn.Sequential)
assert len(layer.conv2) == 2
assert layer.conv2[1].groups == layer.planes
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_csn_ip = resnet3d_csn_ip.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_csn_ip(imgs_gpu)
assert feat.shape == torch.Size([2, 2048, 1, 2, 2])
else:
feat = resnet3d_csn_ip(imgs)
assert feat.shape == torch.Size([2, 2048, 1, 2, 2])
# Interaction-reduced channel-separated bottleneck block
resnet3d_csn_ir = ResNet3dCSN(152, None, bottleneck_mode='ir')
resnet3d_csn_ir.init_weights()
resnet3d_csn_ir.train()
for i, layer_name in enumerate(resnet3d_csn_ir.res_layers):
layers = getattr(resnet3d_csn_ir, layer_name)
num_blocks = resnet3d_csn_ir.stage_blocks[i]
assert len(layers) == num_blocks
for layer in layers:
assert isinstance(layer.conv2, nn.Sequential)
assert len(layer.conv2) == 1
assert layer.conv2[0].groups == layer.planes
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_csn_ir = resnet3d_csn_ir.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_csn_ir(imgs_gpu)
assert feat.shape == torch.Size([2, 2048, 1, 2, 2])
else:
feat = resnet3d_csn_ir(imgs)
assert feat.shape == torch.Size([2, 2048, 1, 2, 2])
# Set training status = False
resnet3d_csn_ip = ResNet3dCSN(152, None, bottleneck_mode='ip')
resnet3d_csn_ip.init_weights()
resnet3d_csn_ip.train(False)
for module in resnet3d_csn_ip.children():
assert module.training is False
def test_c3d_backbone():
"""Test c3d backbone."""
input_shape = (1, 3, 16, 112, 112)
imgs = _demo_inputs(input_shape)
# c3d inference test
c3d = C3D()
c3d.init_weights()
c3d.train()
feat = c3d(imgs)
assert feat.shape == torch.Size([1, 4096])
# c3d with bn inference test
c3d_bn = C3D(norm_cfg=dict(type='BN3d'))
c3d_bn.init_weights()
c3d_bn.train()
feat = c3d_bn(imgs)
assert feat.shape == torch.Size([1, 4096])
def test_resnet_audio_backbone():
"""Test ResNetAudio backbone."""
input_shape = (1, 1, 16, 16)
spec = _demo_inputs(input_shape)
# inference
audioonly = ResNetAudio(50, None)
audioonly.init_weights()
audioonly.train()
feat = audioonly(spec)
assert feat.shape == torch.Size([1, 1024, 2, 2])
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_resnet_tin_backbone():
"""Test resnet_tin backbone."""
with pytest.raises(AssertionError):
# num_segments should be positive
resnet_tin = ResNetTIN(50, num_segments=-1)
resnet_tin.init_weights()
from mmaction.models.backbones.resnet_tin import (CombineNet,
TemporalInterlace)
# resnet_tin with normal config
resnet_tin = ResNetTIN(50)
resnet_tin.init_weights()
for layer_name in resnet_tin.res_layers:
layer = getattr(resnet_tin, layer_name)
blocks = list(layer.children())
for block in blocks:
assert isinstance(block.conv1.conv, CombineNet)
assert isinstance(block.conv1.conv.net1, TemporalInterlace)
assert (
block.conv1.conv.net1.num_segments == resnet_tin.num_segments)
assert block.conv1.conv.net1.shift_div == resnet_tin.shift_div
# resnet_tin with partial batchnorm
resnet_tin_pbn = ResNetTIN(50, partial_bn=True)
resnet_tin_pbn.train()
count_bn = 0
for m in resnet_tin_pbn.modules():
if isinstance(m, nn.BatchNorm2d):
count_bn += 1
if count_bn >= 2:
assert m.training is False
assert m.weight.requires_grad is False
assert m.bias.requires_grad is False
else:
assert m.training is True
assert m.weight.requires_grad is True
assert m.bias.requires_grad is True
input_shape = (8, 3, 64, 64)
imgs = _demo_inputs(input_shape).cuda()
resnet_tin = resnet_tin.cuda()
# resnet_tin with normal cfg inference
feat = resnet_tin(imgs)
assert feat.shape == torch.Size([8, 2048, 2, 2])
def _demo_inputs(input_shape=(1, 3, 64, 64)):
"""Create a superset of inputs needed to run backbone.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 3, 64, 64).
"""
imgs = np.random.random(input_shape)
imgs = torch.FloatTensor(imgs)
return imgs
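# Illustrative usage sketch (assumption, not part of the original file): every
# backbone test above builds its dummy batch through _demo_inputs, e.g.
#   imgs = _demo_inputs((1, 3, 8, 64, 64))    # NCTHW clip for 3D backbones
#   feat = ResNet3dSlowOnly(50, None)(imgs)   # -> torch.Size([1, 2048, 8, 2, 2])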
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_models/test_backbone.py |
import mmcv
import pytest
import torch
import torch.nn as nn
from mmaction.apis import inference_recognizer, init_recognizer
video_config_file = 'configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py' # noqa: E501
frame_config_file = 'configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py' # noqa: E501
label_path = 'demo/label_map.txt'
video_path = 'demo/demo.mp4'
def test_init_recognizer():
with pytest.raises(TypeError):
# config must be a filename or Config object
init_recognizer(dict(config_file=None))
with pytest.raises(RuntimeError):
        # input data type should be consistent with the dataset type
init_recognizer(frame_config_file)
with pytest.raises(RuntimeError):
        # input data type should be consistent with the dataset type
init_recognizer(video_config_file, use_frames=True)
if torch.cuda.is_available():
device = 'cuda:0'
else:
device = 'cpu'
model = init_recognizer(video_config_file, None, device)
config = mmcv.Config.fromfile(video_config_file)
config.model.backbone.pretrained = None
isinstance(model, nn.Module)
if torch.cuda.is_available():
assert next(model.parameters()).is_cuda is True
else:
assert next(model.parameters()).is_cuda is False
assert model.cfg.model.backbone.pretrained is None
def test_inference_recognizer():
if torch.cuda.is_available():
device = 'cuda:0'
else:
device = 'cpu'
model = init_recognizer(video_config_file, None, device)
with pytest.raises(RuntimeError):
# video path doesn't exist
inference_recognizer(model, 'missing.mp4', label_path)
with pytest.raises(RuntimeError):
        # ``video_path`` should be consistent with the ``use_frames`` flag
inference_recognizer(model, video_path, label_path, use_frames=True)
with pytest.raises(RuntimeError):
        # ``video_path`` should be consistent with the ``use_frames`` flag
inference_recognizer(model, 'demo/', label_path)
for ops in model.cfg.data.test.pipeline:
if ops['type'] in ('TenCrop', 'ThreeCrop'):
# Use CenterCrop to reduce memory in order to pass CI
ops['type'] = 'CenterCrop'
top5_label = inference_recognizer(model, video_path, label_path)
scores = [item[1] for item in top5_label]
assert len(top5_label) == 5
assert scores == sorted(scores, reverse=True)
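# Illustrative note (inferred from the assertions above, not part of the
# original file): inference_recognizer is expected to return the top-5
# predictions as (label, score) pairs already sorted by descending score,
# which is what the final two asserts check.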
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_models/test_inference.py |
import os.path as osp
import mmcv
import numpy as np
import pytest
import torch
import torch.nn.functional as F
from mmaction.models import BaseRecognizer, build_recognizer
class ExampleRecognizer(BaseRecognizer):
def __init__(self, train_cfg, test_cfg):
super(BaseRecognizer, self).__init__()
        # reconstruct the `__init__()` method of BaseRecognizer to avoid
        # building a backbone and head, which are useless to
        # ExampleRecognizer since it is only used to test model-unrelated
        # methods (like `average_clip`).
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def forward_train(self, imgs, labels):
pass
def forward_test(self, imgs):
pass
def forward_gradcam(self, imgs):
pass
def _get_recognizer_cfg(fname):
"""Grab configs necessary to create a recognizer.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dpath, 'configs/recognition')
config_fpath = osp.join(config_dpath, fname)
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config = mmcv.Config.fromfile(config_fpath)
return config.model, config.train_cfg, config.test_cfg
def _get_audio_recognizer_cfg(fname):
"""Grab configs necessary to create a audio recognizer.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dpath, 'configs/recognition_audio/')
config_fpath = osp.join(config_dpath, fname)
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config = mmcv.Config.fromfile(config_fpath)
return config.model, config.train_cfg, config.test_cfg
def test_base_recognizer():
cls_score = torch.rand(5, 400)
with pytest.raises(KeyError):
# "average_clips" must defined in test_cfg keys
wrong_test_cfg = dict(clip='score')
recognizer = ExampleRecognizer(None, wrong_test_cfg)
recognizer.average_clip(cls_score)
with pytest.raises(ValueError):
# unsupported average clips type
wrong_test_cfg = dict(average_clips='softmax')
recognizer = ExampleRecognizer(None, wrong_test_cfg)
recognizer.average_clip(cls_score)
with pytest.raises(ValueError):
# Label should not be None
recognizer = ExampleRecognizer(None, None)
recognizer(torch.tensor(0))
# average_clips='score'
test_cfg = dict(average_clips='score')
recognizer = ExampleRecognizer(None, test_cfg)
score = recognizer.average_clip(cls_score, num_segs=5)
assert torch.equal(score, cls_score.mean(dim=0, keepdim=True))
# average_clips='prob'
test_cfg = dict(average_clips='prob')
recognizer = ExampleRecognizer(None, test_cfg)
score = recognizer.average_clip(cls_score, num_segs=5)
assert torch.equal(score,
F.softmax(cls_score, dim=1).mean(dim=0, keepdim=True))
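# Illustrative note (assumption, restating the asserts above): with
# average_clips='score' the raw clip logits are averaged directly, while
# average_clips='prob' applies a per-clip softmax first and then averages,
# i.e. F.softmax(cls_score, dim=1).mean(dim=0, keepdim=True).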
def test_tsn():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 3, 32, 32)
demo_inputs = generate_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_i3d():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py')
model['backbone']['pretrained2d'] = False
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 3, 8, 32, 32)
demo_inputs = generate_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_r2plus1d():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py')
model['backbone']['pretrained2d'] = False
model['backbone']['pretrained'] = None
model['backbone']['norm_cfg'] = dict(type='BN3d')
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 3, 8, 32, 32)
demo_inputs = generate_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_slowfast():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py')
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 3, 8, 32, 32)
demo_inputs = generate_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_tsm():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 8, 3, 32, 32)
demo_inputs = generate_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# test twice sample + 3 crops
input_shape = (2, 48, 3, 32, 32)
demo_inputs = generate_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
test_cfg = dict(average_clips='prob')
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_csn():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py')
model['backbone']['pretrained2d'] = False
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 3, 8, 32, 32)
demo_inputs = generate_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_tpn():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py')
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 8, 3, 224, 224)
demo_inputs = generate_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
model, train_cfg, test_cfg = _get_recognizer_cfg(
'tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py')
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 8, 3, 1, 224, 224)
demo_inputs = generate_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_audio_recognizer():
model, train_cfg, test_cfg = _get_audio_recognizer_cfg(
'resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py')
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 1, 128, 80)
demo_inputs = generate_demo_inputs(input_shape, model_type='audio')
audios = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(audios, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
audio_list = [audio[None, :] for audio in audios]
for one_spectro in audio_list:
recognizer(one_spectro, None, return_loss=False)
def test_c3d():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py')
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 3, 16, 112, 112)
demo_inputs = generate_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def generate_demo_inputs(input_shape=(1, 3, 3, 224, 224), model_type='2D'):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple): input batch dimensions.
            Default: (1, 3, 3, 224, 224).
        model_type (str): Model type for data generation,
            from {'2D', '3D', 'audio'}. Default: '2D'.
"""
if len(input_shape) == 5:
(N, L, C, H, W) = input_shape
elif len(input_shape) == 6:
(N, M, C, L, H, W) = input_shape
imgs = np.random.random(input_shape)
if model_type == '2D':
gt_labels = torch.LongTensor([2] * N)
elif model_type == '3D':
gt_labels = torch.LongTensor([2] * M)
elif model_type == 'audio':
gt_labels = torch.LongTensor([2] * L)
else:
raise ValueError(f'Data type {model_type} is not available')
inputs = {'imgs': torch.FloatTensor(imgs), 'gt_labels': gt_labels}
return inputs
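# Illustrative note (assumption, not part of the original file): the length of
# `input_shape` selects the layout above, e.g.
#   generate_demo_inputs((1, 3, 3, 32, 32))           # 2D: (N, L, C, H, W)
#   generate_demo_inputs((1, 3, 3, 8, 32, 32), '3D')  # 3D: (N, M, C, L, H, W)
# and the number of generated gt_labels follows N, M or L accordingly.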
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_models/test_recognizers.py |
import pytest
import torch
from mmaction.models import Conv2plus1d, ConvAudio
def test_conv2plus1d():
with pytest.raises(AssertionError):
# Length of kernel size, stride and padding must be the same
Conv2plus1d(3, 8, (2, 2))
conv_2plus1d = Conv2plus1d(3, 8, 2)
conv_2plus1d.init_weights()
assert torch.equal(conv_2plus1d.bn_s.weight,
torch.ones_like(conv_2plus1d.bn_s.weight))
assert torch.equal(conv_2plus1d.bn_s.bias,
torch.zeros_like(conv_2plus1d.bn_s.bias))
x = torch.rand(1, 3, 8, 256, 256)
output = conv_2plus1d(x)
assert output.shape == torch.Size([1, 8, 7, 255, 255])
def test_conv_audio():
conv_audio = ConvAudio(3, 8, 3)
conv_audio.init_weights()
x = torch.rand(1, 3, 8, 8)
output = conv_audio(x)
assert output.shape == torch.Size([1, 16, 8, 8])
conv_audio_sum = ConvAudio(3, 8, 3, op='sum')
output = conv_audio_sum(x)
assert output.shape == torch.Size([1, 8, 8, 8])
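# Illustrative note (assumption, based on the expected shapes above): with
# kernel_size 2, the default stride 1 and no padding, Conv2plus1d shrinks each
# of T, H and W by one, so (8, 256, 256) maps to (7, 255, 255). ConvAudio with
# the default op='concat' stacks two conv branches and doubles the output
# channels (8 -> 16), whereas op='sum' keeps them at 8.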
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_models/test_common_modules.py |
import torch
import torch.nn as nn
from mmaction.models import (AudioTSNHead, BaseHead, I3DHead, SlowFastHead,
TPNHead, TSMHead, TSNHead, X3DHead)
class ExampleHead(BaseHead):
    # use an ExampleHead to test the abstract BaseHead
def init_weights(self):
pass
def forward(self, x):
pass
def test_base_head():
head = ExampleHead(3, 400, dict(type='CrossEntropyLoss'))
cls_scores = torch.rand((3, 4))
# When truth is non-empty then cls loss should be nonzero for random inputs
gt_labels = torch.LongTensor([2] * 3).squeeze()
losses = head.loss(cls_scores, gt_labels)
assert 'loss_cls' in losses.keys()
assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'
def test_i3d_head():
"""Test loss method, layer construction, attributes and forward function in
i3d head."""
i3d_head = I3DHead(num_classes=4, in_channels=2048)
i3d_head.init_weights()
assert i3d_head.num_classes == 4
assert i3d_head.dropout_ratio == 0.5
assert i3d_head.in_channels == 2048
assert i3d_head.init_std == 0.01
assert isinstance(i3d_head.dropout, nn.Dropout)
assert i3d_head.dropout.p == i3d_head.dropout_ratio
assert isinstance(i3d_head.fc_cls, nn.Linear)
assert i3d_head.fc_cls.in_features == i3d_head.in_channels
assert i3d_head.fc_cls.out_features == i3d_head.num_classes
assert isinstance(i3d_head.avg_pool, nn.AdaptiveAvgPool3d)
assert i3d_head.avg_pool.output_size == (1, 1, 1)
input_shape = (3, 2048, 4, 7, 7)
feat = torch.rand(input_shape)
# i3d head inference
cls_scores = i3d_head(feat)
assert cls_scores.shape == torch.Size([3, 4])
def test_x3d_head():
"""Test loss method, layer construction, attributes and forward function in
x3d head."""
x3d_head = X3DHead(in_channels=432, num_classes=4, fc1_bias=False)
x3d_head.init_weights()
assert x3d_head.num_classes == 4
assert x3d_head.dropout_ratio == 0.5
assert x3d_head.in_channels == 432
assert x3d_head.init_std == 0.01
assert isinstance(x3d_head.dropout, nn.Dropout)
assert x3d_head.dropout.p == x3d_head.dropout_ratio
assert isinstance(x3d_head.fc1, nn.Linear)
assert x3d_head.fc1.in_features == x3d_head.in_channels
assert x3d_head.fc1.out_features == x3d_head.mid_channels
assert x3d_head.fc1.bias is None
assert isinstance(x3d_head.fc2, nn.Linear)
assert x3d_head.fc2.in_features == x3d_head.mid_channels
assert x3d_head.fc2.out_features == x3d_head.num_classes
assert isinstance(x3d_head.pool, nn.AdaptiveAvgPool3d)
assert x3d_head.pool.output_size == (1, 1, 1)
input_shape = (3, 432, 4, 7, 7)
feat = torch.rand(input_shape)
    # x3d head inference
cls_scores = x3d_head(feat)
assert cls_scores.shape == torch.Size([3, 4])
def test_slowfast_head():
"""Test loss method, layer construction, attributes and forward function in
slowfast head."""
sf_head = SlowFastHead(num_classes=4, in_channels=2304)
sf_head.init_weights()
assert sf_head.num_classes == 4
assert sf_head.dropout_ratio == 0.8
assert sf_head.in_channels == 2304
assert sf_head.init_std == 0.01
assert isinstance(sf_head.dropout, nn.Dropout)
assert sf_head.dropout.p == sf_head.dropout_ratio
assert isinstance(sf_head.fc_cls, nn.Linear)
assert sf_head.fc_cls.in_features == sf_head.in_channels
assert sf_head.fc_cls.out_features == sf_head.num_classes
assert isinstance(sf_head.avg_pool, nn.AdaptiveAvgPool3d)
assert sf_head.avg_pool.output_size == (1, 1, 1)
input_shape = (3, 2048, 32, 7, 7)
feat_slow = torch.rand(input_shape)
input_shape = (3, 256, 4, 7, 7)
feat_fast = torch.rand(input_shape)
sf_head = SlowFastHead(num_classes=4, in_channels=2304)
cls_scores = sf_head((feat_slow, feat_fast))
assert cls_scores.shape == torch.Size([3, 4])
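# Illustrative note (assumption): the 2304 input channels of SlowFastHead match
# the concatenation of the pooled slow (2048) and fast (256) pathway features
# passed in as a tuple above.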
def test_tsn_head():
"""Test loss method, layer construction, attributes and forward function in
tsn head."""
tsn_head = TSNHead(num_classes=4, in_channels=2048)
tsn_head.init_weights()
assert tsn_head.num_classes == 4
assert tsn_head.dropout_ratio == 0.4
assert tsn_head.in_channels == 2048
assert tsn_head.init_std == 0.01
assert tsn_head.consensus.dim == 1
assert tsn_head.spatial_type == 'avg'
assert isinstance(tsn_head.dropout, nn.Dropout)
assert tsn_head.dropout.p == tsn_head.dropout_ratio
assert isinstance(tsn_head.fc_cls, nn.Linear)
assert tsn_head.fc_cls.in_features == tsn_head.in_channels
assert tsn_head.fc_cls.out_features == tsn_head.num_classes
assert isinstance(tsn_head.avg_pool, nn.AdaptiveAvgPool2d)
assert tsn_head.avg_pool.output_size == (1, 1)
input_shape = (8, 2048, 7, 7)
feat = torch.rand(input_shape)
# tsn head inference
num_segs = input_shape[0]
cls_scores = tsn_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
# Test multi-class recognition
multi_tsn_head = TSNHead(
num_classes=4,
in_channels=2048,
loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0),
multi_class=True,
label_smooth_eps=0.01)
multi_tsn_head.init_weights()
assert multi_tsn_head.num_classes == 4
assert multi_tsn_head.dropout_ratio == 0.4
assert multi_tsn_head.in_channels == 2048
assert multi_tsn_head.init_std == 0.01
assert multi_tsn_head.consensus.dim == 1
assert isinstance(multi_tsn_head.dropout, nn.Dropout)
assert multi_tsn_head.dropout.p == multi_tsn_head.dropout_ratio
assert isinstance(multi_tsn_head.fc_cls, nn.Linear)
assert multi_tsn_head.fc_cls.in_features == multi_tsn_head.in_channels
assert multi_tsn_head.fc_cls.out_features == multi_tsn_head.num_classes
assert isinstance(multi_tsn_head.avg_pool, nn.AdaptiveAvgPool2d)
assert multi_tsn_head.avg_pool.output_size == (1, 1)
input_shape = (8, 2048, 7, 7)
feat = torch.rand(input_shape)
# multi-class tsn head inference
num_segs = input_shape[0]
    cls_scores = multi_tsn_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
def test_tsn_head_audio():
"""Test loss method, layer construction, attributes and forward function in
tsn head."""
tsn_head_audio = AudioTSNHead(num_classes=4, in_channels=5)
tsn_head_audio.init_weights()
assert tsn_head_audio.num_classes == 4
assert tsn_head_audio.dropout_ratio == 0.4
assert tsn_head_audio.in_channels == 5
assert tsn_head_audio.init_std == 0.01
assert tsn_head_audio.spatial_type == 'avg'
assert isinstance(tsn_head_audio.dropout, nn.Dropout)
assert tsn_head_audio.dropout.p == tsn_head_audio.dropout_ratio
assert isinstance(tsn_head_audio.fc_cls, nn.Linear)
assert tsn_head_audio.fc_cls.in_features == tsn_head_audio.in_channels
assert tsn_head_audio.fc_cls.out_features == tsn_head_audio.num_classes
assert isinstance(tsn_head_audio.avg_pool, nn.AdaptiveAvgPool2d)
assert tsn_head_audio.avg_pool.output_size == (1, 1)
input_shape = (8, 5, 7, 7)
feat = torch.rand(input_shape)
# tsn head inference
cls_scores = tsn_head_audio(feat)
assert cls_scores.shape == torch.Size([8, 4])
def test_tsm_head():
"""Test loss method, layer construction, attributes and forward function in
tsm head."""
tsm_head = TSMHead(num_classes=4, in_channels=2048)
tsm_head.init_weights()
assert tsm_head.num_classes == 4
assert tsm_head.dropout_ratio == 0.8
assert tsm_head.in_channels == 2048
assert tsm_head.init_std == 0.001
assert tsm_head.consensus.dim == 1
assert tsm_head.spatial_type == 'avg'
assert isinstance(tsm_head.dropout, nn.Dropout)
assert tsm_head.dropout.p == tsm_head.dropout_ratio
assert isinstance(tsm_head.fc_cls, nn.Linear)
assert tsm_head.fc_cls.in_features == tsm_head.in_channels
assert tsm_head.fc_cls.out_features == tsm_head.num_classes
assert isinstance(tsm_head.avg_pool, nn.AdaptiveAvgPool2d)
assert tsm_head.avg_pool.output_size == 1
input_shape = (8, 2048, 7, 7)
feat = torch.rand(input_shape)
    # tsm head inference without temporal pool
num_segs = input_shape[0]
cls_scores = tsm_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
    # tsm head inference with temporal pool
tsm_head = TSMHead(num_classes=4, in_channels=2048, temporal_pool=True)
tsm_head.init_weights()
cls_scores = tsm_head(feat, num_segs)
assert cls_scores.shape == torch.Size([2, 4])
def test_tpn_head():
"""Test loss method, layer construction, attributes and forward function in
tpn head."""
tpn_head = TPNHead(num_classes=4, in_channels=2048)
tpn_head.init_weights()
assert hasattr(tpn_head, 'avg_pool2d')
assert hasattr(tpn_head, 'avg_pool3d')
assert isinstance(tpn_head.avg_pool3d, nn.AdaptiveAvgPool3d)
assert tpn_head.avg_pool3d.output_size == (1, 1, 1)
assert tpn_head.avg_pool2d is None
input_shape = (4, 2048, 7, 7)
feat = torch.rand(input_shape)
# tpn head inference with num_segs
num_segs = 2
cls_scores = tpn_head(feat, num_segs)
assert isinstance(tpn_head.avg_pool2d, nn.AvgPool3d)
assert tpn_head.avg_pool2d.kernel_size == (1, 7, 7)
assert cls_scores.shape == torch.Size([2, 4])
# tpn head inference with no num_segs
input_shape = (2, 2048, 3, 7, 7)
feat = torch.rand(input_shape)
cls_scores = tpn_head(feat)
assert isinstance(tpn_head.avg_pool2d, nn.AvgPool3d)
assert tpn_head.avg_pool2d.kernel_size == (1, 7, 7)
assert cls_scores.shape == torch.Size([2, 4])
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_models/test_head.py |
import copy
import mmcv
import numpy as np
import pytest
import torch
from mmaction.models import build_localizer
from mmaction.models.localizers.utils import post_processing
def test_tem():
model_cfg = dict(
type='TEM',
temporal_dim=100,
boundary_ratio=0.1,
tem_feat_dim=400,
tem_hidden_dim=512,
tem_match_threshold=0.5)
localizer_tem = build_localizer(model_cfg)
raw_feature = torch.rand(8, 400, 100)
gt_bbox = torch.Tensor([[[1.0, 3.0], [3.0, 5.0]]] * 8)
losses = localizer_tem(raw_feature, gt_bbox)
assert isinstance(losses, dict)
# Test forward test
video_meta = [{'video_name': 'v_test'}]
with torch.no_grad():
for one_raw_feature in raw_feature:
one_raw_feature = one_raw_feature.reshape(1, 400, 100)
localizer_tem(
one_raw_feature, video_meta=video_meta, return_loss=False)
def test_pem():
model_cfg = dict(
type='PEM',
pem_feat_dim=32,
pem_hidden_dim=256,
pem_u_ratio_m=1,
pem_u_ratio_l=2,
pem_high_temporal_iou_threshold=0.6,
pem_low_temporal_iou_threshold=2.2,
soft_nms_alpha=0.75,
soft_nms_low_threshold=0.65,
soft_nms_high_threshold=0.9,
post_process_top_k=100)
localizer_pem = build_localizer(model_cfg)
bsp_feature = torch.rand(8, 100, 32)
reference_temporal_iou = torch.rand(8, 100)
losses = localizer_pem(bsp_feature, reference_temporal_iou)
assert isinstance(losses, dict)
    # Test forward in test mode
tmin = torch.rand(100)
tmax = torch.rand(100)
tmin_score = torch.rand(100)
tmax_score = torch.rand(100)
video_meta = [
dict(
video_name='v_test',
duration_second=100,
duration_frame=1000,
annotations=[{
'segment': [0.3, 0.6],
'label': 'Rock climbing'
}],
feature_frame=900)
]
with torch.no_grad():
for one_bsp_feature in bsp_feature:
one_bsp_feature = one_bsp_feature.reshape(1, 100, 32)
localizer_pem(
one_bsp_feature,
tmin=tmin,
tmax=tmax,
tmin_score=tmin_score,
tmax_score=tmax_score,
video_meta=video_meta,
return_loss=False)
def test_bmn():
model_cfg = dict(
type='BMN',
temporal_dim=100,
boundary_ratio=0.5,
num_samples=32,
num_samples_per_bin=3,
feat_dim=400,
soft_nms_alpha=0.4,
soft_nms_low_threshold=0.5,
soft_nms_high_threshold=0.9,
post_process_top_k=100)
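    # ground-truth segments are given in the normalized [0, 1] temporal range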
if torch.cuda.is_available():
localizer_bmn = build_localizer(model_cfg).cuda()
raw_feature = torch.rand(8, 400, 100).cuda()
gt_bbox = np.array([[[0.1, 0.3], [0.375, 0.625]]] * 8)
losses = localizer_bmn(raw_feature, gt_bbox)
assert isinstance(losses, dict)
        # Test forward in test mode
video_meta = [
dict(
video_name='v_test',
duration_second=100,
duration_frame=960,
feature_frame=960)
]
with torch.no_grad():
one_raw_feature = torch.rand(1, 400, 100).cuda()
localizer_bmn(
one_raw_feature,
gt_bbox=None,
video_meta=video_meta,
return_loss=False)
else:
localizer_bmn = build_localizer(model_cfg)
raw_feature = torch.rand(8, 400, 100)
gt_bbox = torch.Tensor([[[0.1, 0.3], [0.375, 0.625]]] * 8)
losses = localizer_bmn(raw_feature, gt_bbox)
assert isinstance(losses, dict)
        # Test forward in test mode
video_meta = [
dict(
video_name='v_test',
duration_second=100,
duration_frame=960,
feature_frame=960)
]
with torch.no_grad():
one_raw_feature = torch.rand(1, 400, 100)
localizer_bmn(
one_raw_feature,
gt_bbox=None,
video_meta=video_meta,
return_loss=False)
def test_post_processing():
# test with multiple results
result = np.array([[0., 1., 1., 1., 0.5, 0.5], [0., 0.4, 1., 1., 0.4, 0.4],
[0., 0.95, 1., 1., 0.6, 0.6]])
video_info = dict(
video_name='v_test',
duration_second=100,
duration_frame=960,
feature_frame=960)
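    # positional args: soft_nms_alpha, low/high NMS thresholds, top-k proposals, feature extraction interval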
proposal_list = post_processing(result, video_info, 0.75, 0.65, 0.9, 2, 16)
assert isinstance(proposal_list[0], dict)
assert proposal_list[0]['score'] == 0.6
assert proposal_list[0]['segment'] == [0., 95.0]
assert isinstance(proposal_list[1], dict)
assert proposal_list[1]['score'] == 0.4
assert proposal_list[1]['segment'] == [0., 40.0]
# test with only result
result = np.array([[0., 1., 1., 1., 0.5, 0.5]])
video_info = dict(
video_name='v_test',
duration_second=100,
duration_frame=960,
feature_frame=960)
proposal_list = post_processing(result, video_info, 0.75, 0.65, 0.9, 1, 16)
assert isinstance(proposal_list[0], dict)
assert proposal_list[0]['score'] == 0.5
assert proposal_list[0]['segment'] == [0., 100.0]
def test_ssn_train():
train_cfg = mmcv.ConfigDict(
dict(
ssn=dict(
assigner=dict(
positive_iou_threshold=0.7,
background_iou_threshold=0.01,
incomplete_iou_threshold=0.3,
background_coverage_threshold=0.02,
incomplete_overlap_threshold=0.01),
sampler=dict(
num_per_video=8,
positive_ratio=1,
background_ratio=1,
incomplete_ratio=6,
add_gt_as_proposals=True),
loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1),
debug=False)))
base_model_cfg = dict(
type='SSN',
backbone=dict(
type='ResNet', pretrained=None, depth=18, norm_eval=True),
spatial_type='avg',
dropout_ratio=0.8,
loss_cls=dict(type='SSNLoss'),
cls_head=dict(
type='SSNHead',
dropout_ratio=0.,
in_channels=512,
num_classes=20,
consensus=dict(
type='STPPTrain',
stpp_stage=(1, 1, 1),
num_segments_list=(2, 5, 2)),
use_regression=True),
train_cfg=train_cfg)
dropout_cfg = copy.deepcopy(base_model_cfg)
dropout_cfg['dropout_ratio'] = 0
dropout_cfg['cls_head']['dropout_ratio'] = 0.5
non_regression_cfg = copy.deepcopy(base_model_cfg)
non_regression_cfg['cls_head']['use_regression'] = False
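    # imgs: (batch, proposals per video, 2 + 5 + 2 segments, C, H, W), matching num_segments_list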
imgs = torch.rand(1, 8, 9, 3, 224, 224)
proposal_scale_factor = torch.Tensor([[[1.0345, 1.0345], [1.0028, 0.0028],
[1.0013, 1.0013], [1.0008, 1.0008],
[0.3357, 1.0006], [1.0006, 1.0006],
[0.0818, 1.0005], [1.0030,
1.0030]]])
proposal_type = torch.Tensor([[0, 1, 1, 1, 1, 1, 1, 2]])
proposal_labels = torch.LongTensor([[8, 8, 8, 8, 8, 8, 8, 0]])
reg_targets = torch.Tensor([[[0.2929, 0.2694], [0.0000, 0.0000],
[0.0000, 0.0000], [0.0000, 0.0000],
[0.0000, 0.0000], [0.0000, 0.0000],
[0.0000, 0.0000], [0.0000, 0.0000]]])
localizer_ssn = build_localizer(base_model_cfg)
localizer_ssn_dropout = build_localizer(dropout_cfg)
localizer_ssn_non_regression = build_localizer(non_regression_cfg)
if torch.cuda.is_available():
localizer_ssn = localizer_ssn.cuda()
localizer_ssn_dropout = localizer_ssn_dropout.cuda()
localizer_ssn_non_regression = localizer_ssn_non_regression.cuda()
imgs = imgs.cuda()
proposal_scale_factor = proposal_scale_factor.cuda()
proposal_type = proposal_type.cuda()
proposal_labels = proposal_labels.cuda()
reg_targets = reg_targets.cuda()
# Train normal case
losses = localizer_ssn(
imgs,
proposal_scale_factor=proposal_scale_factor,
proposal_type=proposal_type,
proposal_labels=proposal_labels,
reg_targets=reg_targets)
assert isinstance(losses, dict)
# Train SSN without dropout in model, with dropout in head
losses = localizer_ssn_dropout(
imgs,
proposal_scale_factor=proposal_scale_factor,
proposal_type=proposal_type,
proposal_labels=proposal_labels,
reg_targets=reg_targets)
assert isinstance(losses, dict)
# Train SSN model without regression
losses = localizer_ssn_non_regression(
imgs,
proposal_scale_factor=proposal_scale_factor,
proposal_type=proposal_type,
proposal_labels=proposal_labels,
reg_targets=reg_targets)
assert isinstance(losses, dict)
def test_ssn_test():
test_cfg = mmcv.ConfigDict(
dict(
ssn=dict(
sampler=dict(test_interval=6, batch_size=16),
evaluater=dict(
top_k=2000,
nms=0.2,
softmax_before_filter=True,
cls_score_dict=None,
cls_top_k=2))))
base_model_cfg = dict(
type='SSN',
backbone=dict(
type='ResNet', pretrained=None, depth=18, norm_eval=True),
spatial_type='avg',
dropout_ratio=0.8,
cls_head=dict(
type='SSNHead',
dropout_ratio=0.,
in_channels=512,
num_classes=20,
consensus=dict(type='STPPTest', stpp_stage=(1, 1, 1)),
use_regression=True),
test_cfg=test_cfg)
maxpool_model_cfg = copy.deepcopy(base_model_cfg)
maxpool_model_cfg['spatial_type'] = 'max'
non_regression_cfg = copy.deepcopy(base_model_cfg)
non_regression_cfg['cls_head']['use_regression'] = False
non_regression_cfg['cls_head']['consensus']['use_regression'] = False
tuple_stage_cfg = copy.deepcopy(base_model_cfg)
tuple_stage_cfg['cls_head']['consensus']['stpp_stage'] = (1, (1, 2), 1)
str_stage_cfg = copy.deepcopy(base_model_cfg)
str_stage_cfg['cls_head']['consensus']['stpp_stage'] = ('error', )
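    # proposals are relative (start, end) pairs in the normalized [0, 1] range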
imgs = torch.rand(1, 8, 3, 224, 224)
relative_proposal_list = torch.Tensor([[[0.2500, 0.6250], [0.3750,
0.7500]]])
scale_factor_list = torch.Tensor([[[1.0000, 1.0000], [1.0000, 0.2661]]])
proposal_tick_list = torch.LongTensor([[[1, 2, 5, 7], [20, 30, 60, 80]]])
reg_norm_consts = torch.Tensor([[[-0.0603, 0.0325], [0.0752, 0.1596]]])
localizer_ssn = build_localizer(base_model_cfg)
localizer_ssn_maxpool = build_localizer(maxpool_model_cfg)
localizer_ssn_non_regression = build_localizer(non_regression_cfg)
localizer_ssn_tuple_stage_cfg = build_localizer(tuple_stage_cfg)
with pytest.raises(ValueError):
build_localizer(str_stage_cfg)
if torch.cuda.is_available():
localizer_ssn = localizer_ssn.cuda()
localizer_ssn_maxpool = localizer_ssn_maxpool.cuda()
localizer_ssn_non_regression = localizer_ssn_non_regression.cuda()
localizer_ssn_tuple_stage_cfg = localizer_ssn_tuple_stage_cfg.cuda()
imgs = imgs.cuda()
relative_proposal_list = relative_proposal_list.cuda()
scale_factor_list = scale_factor_list.cuda()
proposal_tick_list = proposal_tick_list.cuda()
reg_norm_consts = reg_norm_consts.cuda()
with torch.no_grad():
# Test normal case
localizer_ssn(
imgs,
relative_proposal_list=relative_proposal_list,
scale_factor_list=scale_factor_list,
proposal_tick_list=proposal_tick_list,
reg_norm_consts=reg_norm_consts,
return_loss=False)
# Test SSN model with max spatial pooling
localizer_ssn_maxpool(
imgs,
relative_proposal_list=relative_proposal_list,
scale_factor_list=scale_factor_list,
proposal_tick_list=proposal_tick_list,
reg_norm_consts=reg_norm_consts,
return_loss=False)
# Test SSN model without regression
localizer_ssn_non_regression(
imgs,
relative_proposal_list=relative_proposal_list,
scale_factor_list=scale_factor_list,
proposal_tick_list=proposal_tick_list,
reg_norm_consts=reg_norm_consts,
return_loss=False)
# Test SSN model with tuple stage cfg.
localizer_ssn_tuple_stage_cfg(
imgs,
relative_proposal_list=relative_proposal_list,
scale_factor_list=scale_factor_list,
proposal_tick_list=proposal_tick_list,
reg_norm_consts=reg_norm_consts,
return_loss=False)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_models/test_localizers.py |
import copy
import numpy as np
import pytest
import torch
from mmaction.models import TPN
def test_tpn():
"""Test TPN backbone."""
tpn_cfg = dict(
in_channels=(1024, 2048),
out_channels=1024,
spatial_modulation_cfg=dict(
in_channels=(1024, 2048), out_channels=2048),
temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
upsample_cfg=dict(scale_factor=(1, 1, 1)),
downsample_cfg=dict(downsample_scale=(1, 1, 1)),
level_fusion_cfg=dict(
in_channels=(1024, 1024),
mid_channels=(1024, 1024),
out_channels=2048,
downsample_scales=((1, 1, 1), (1, 1, 1))),
aux_head_cfg=dict(out_channels=400, loss_weight=0.5))
with pytest.raises(AssertionError):
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cfg_['in_channels'] = list(tpn_cfg_['in_channels'])
TPN(**tpn_cfg_)
with pytest.raises(AssertionError):
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cfg_['out_channels'] = float(tpn_cfg_['out_channels'])
TPN(**tpn_cfg_)
with pytest.raises(AssertionError):
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cfg_['downsample_cfg']['downsample_position'] = 'unsupport'
TPN(**tpn_cfg_)
for k in tpn_cfg:
if not k.endswith('_cfg'):
continue
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cfg_[k] = list()
with pytest.raises(AssertionError):
TPN(**tpn_cfg_)
with pytest.raises(ValueError):
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cfg_['flow_type'] = 'unsupport'
TPN(**tpn_cfg_)
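    # two backbone feature levels whose channels match in_channels=(1024, 2048); target drives the aux head loss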
target_shape = (32, 1)
target = _demo_inputs(target_shape).long().squeeze()
x0_shape = (32, 1024, 1, 4, 4)
x1_shape = (32, 2048, 1, 2, 2)
x0 = _demo_inputs(x0_shape)
x1 = _demo_inputs(x1_shape)
x = [x0, x1]
# ResNetTPN with 'cascade' flow_type
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cascade = TPN(**tpn_cfg_)
feat, loss_aux = tpn_cascade(x, target)
assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
assert len(loss_aux) == 1
# ResNetTPN with 'parallel' flow_type
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_parallel = TPN(flow_type='parallel', **tpn_cfg_)
feat, loss_aux = tpn_parallel(x, target)
assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
assert len(loss_aux) == 1
# ResNetTPN with 'cascade' flow_type and target is None
feat, loss_aux = tpn_cascade(x, None)
assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
assert len(loss_aux) == 0
# ResNetTPN with 'parallel' flow_type and target is None
feat, loss_aux = tpn_parallel(x, None)
assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
assert len(loss_aux) == 0
def _demo_inputs(input_shape=(1, 3, 64, 64)):
"""Create a superset of inputs needed to run backbone.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 3, 64, 64).
"""
imgs = np.random.random(input_shape)
imgs = torch.FloatTensor(imgs)
return imgs
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_models/test_neck.py |
import numpy as np
import pytest
from mmaction.datasets.pipelines import Compose, ImageToTensor
def check_keys_equal(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys) == set(result_keys)
def test_compose():
with pytest.raises(TypeError):
# transform must be callable or a dict
Compose('LoadImage')
target_keys = ['img', 'img_meta']
# test Compose given a data pipeline
img = np.random.randn(256, 256, 3)
results = dict(img=img, abandoned_key=None, img_name='test_image.png')
test_pipeline = [
dict(type='Collect', keys=['img'], meta_keys=['img_name']),
dict(type='ImageToTensor', keys=['img'])
]
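    # Collect keeps 'img' and packs 'img_name' into 'img_meta'; ImageToTensor converts the image to a CHW tensor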
compose = Compose(test_pipeline)
compose_results = compose(results)
assert check_keys_equal(compose_results.keys(), target_keys)
assert check_keys_equal(compose_results['img_meta'].data.keys(),
['img_name'])
# test Compose when forward data is None
results = None
image_to_tensor = ImageToTensor(keys=[])
test_pipeline = [image_to_tensor]
compose = Compose(test_pipeline)
compose_results = compose(results)
assert compose_results is None
assert repr(compose) == compose.__class__.__name__ + \
f'(\n {image_to_tensor}\n)'
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_data/test_compose.py |
import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
import torch
from numpy.testing import assert_array_almost_equal, assert_array_equal
# yapf: disable
from mmaction.datasets.pipelines import (AudioDecode, AudioDecodeInit,
AudioFeatureSelector, DecordDecode,
DecordInit, DenseSampleFrames,
FrameSelector,
GenerateLocalizationLabels,
LoadAudioFeature, LoadHVULabel,
LoadLocalizationFeature,
LoadProposals, OpenCVDecode,
OpenCVInit, PyAVDecode,
PyAVDecodeMotionVector, PyAVInit,
RawFrameDecode, SampleAVAFrames,
SampleFrames, SampleProposalFrames,
UntrimmedSampleFrames)
# yapf: enable
class ExampleSSNInstance:
def __init__(self,
start_frame,
end_frame,
num_frames,
label=None,
best_iou=None,
overlap_self=None):
self.start_frame = start_frame
self.end_frame = min(end_frame, num_frames)
self.label = label if label is not None else -1
self.coverage = (end_frame - start_frame) / num_frames
self.best_iou = best_iou
self.overlap_self = overlap_self
class TestLoading:
@staticmethod
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
@classmethod
def setup_class(cls):
cls.img_path = osp.join(
osp.dirname(osp.dirname(__file__)), 'data/test.jpg')
cls.video_path = osp.join(
osp.dirname(osp.dirname(__file__)), 'data/test.mp4')
cls.wav_path = osp.join(
osp.dirname(osp.dirname(__file__)), 'data/test.wav')
cls.audio_spec_path = osp.join(
osp.dirname(osp.dirname(__file__)), 'data/test.npy')
cls.img_dir = osp.join(
osp.dirname(osp.dirname(__file__)), 'data/test_imgs')
cls.raw_feature_dir = osp.join(
osp.dirname(osp.dirname(__file__)),
'data/test_activitynet_features')
cls.bsp_feature_dir = osp.join(
osp.dirname(osp.dirname(__file__)), 'data/test_bsp_features')
cls.proposals_dir = osp.join(
osp.dirname(osp.dirname(__file__)), 'data/test_proposals')
cls.total_frames = 5
cls.filename_tmpl = 'img_{:05}.jpg'
cls.flow_filename_tmpl = '{}_{:05d}.jpg'
video_total_frames = len(mmcv.VideoReader(cls.video_path))
cls.audio_total_frames = video_total_frames
cls.video_results = dict(
filename=cls.video_path,
label=1,
total_frames=video_total_frames,
start_index=0)
cls.audio_results = dict(
audios=np.random.randn(1280, ),
audio_path=cls.wav_path,
total_frames=cls.audio_total_frames,
label=1,
start_index=0)
cls.audio_feature_results = dict(
audios=np.random.randn(128, 80),
audio_path=cls.audio_spec_path,
total_frames=cls.audio_total_frames,
label=1,
start_index=0)
cls.frame_results = dict(
frame_dir=cls.img_dir,
total_frames=cls.total_frames,
filename_tmpl=cls.filename_tmpl,
start_index=1,
modality='RGB',
offset=0,
label=1)
cls.flow_frame_results = dict(
frame_dir=cls.img_dir,
total_frames=cls.total_frames,
filename_tmpl=cls.flow_filename_tmpl,
modality='Flow',
offset=0,
label=1)
cls.action_results = dict(
video_name='v_test1',
data_prefix=cls.raw_feature_dir,
temporal_scale=5,
boundary_ratio=0.1,
duration_second=10,
duration_frame=10,
feature_frame=8,
annotations=[{
'segment': [3.0, 5.0],
'label': 'Rock climbing'
}])
cls.proposal_results = dict(
frame_dir=cls.img_dir,
video_id='test_imgs',
total_frames=cls.total_frames,
filename_tmpl=cls.filename_tmpl,
start_index=1,
out_proposals=[[[
'test_imgs',
ExampleSSNInstance(1, 4, 10, 1, 1, 1)
], 0], [['test_imgs',
ExampleSSNInstance(2, 5, 10, 2, 1, 1)], 0]])
cls.ava_results = dict(
fps=30, timestamp=902, timestamp_start=840, shot_info=(0, 27000))
cls.hvu_label_example1 = dict(
categories=['action', 'object', 'scene', 'concept'],
category_nums=[2, 5, 3, 2],
label=dict(action=[0], object=[2, 3], scene=[0, 1]))
cls.hvu_label_example2 = dict(
categories=['action', 'object', 'scene', 'concept'],
category_nums=[2, 5, 3, 2],
label=dict(action=[1], scene=[1, 2], concept=[1]))
def test_load_hvu_label(self):
hvu_label_example1 = copy.deepcopy(self.hvu_label_example1)
hvu_label_example2 = copy.deepcopy(self.hvu_label_example2)
categories = hvu_label_example1['categories']
category_nums = hvu_label_example1['category_nums']
num_tags = sum(category_nums)
num_categories = len(categories)
loader = LoadHVULabel()
assert repr(loader) == (f'{loader.__class__.__name__}('
f'hvu_initialized={False})')
result1 = loader(hvu_label_example1)
label1 = torch.zeros(num_tags)
mask1 = torch.zeros(num_tags)
category_mask1 = torch.zeros(num_categories)
assert repr(loader) == (f'{loader.__class__.__name__}('
f'hvu_initialized={True})')
label1[[0, 4, 5, 7, 8]] = 1.
mask1[:10] = 1.
category_mask1[:3] = 1.
assert torch.all(torch.eq(label1, result1['label']))
assert torch.all(torch.eq(mask1, result1['mask']))
assert torch.all(torch.eq(category_mask1, result1['category_mask']))
result2 = loader(hvu_label_example2)
label2 = torch.zeros(num_tags)
mask2 = torch.zeros(num_tags)
category_mask2 = torch.zeros(num_categories)
label2[[1, 8, 9, 11]] = 1.
mask2[:2] = 1.
mask2[7:] = 1.
category_mask2[[0, 2, 3]] = 1.
assert torch.all(torch.eq(label2, result2['label']))
assert torch.all(torch.eq(mask2, result2['mask']))
assert torch.all(torch.eq(category_mask2, result2['category_mask']))
def test_sample_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames'
]
with pytest.warns(UserWarning):
# start_index has been deprecated
config = dict(
clip_len=3, frame_interval=1, num_clips=5, start_index=1)
SampleFrames(**config)
# Sample Frame with no temporal_jitter
# clip_len=3, frame_interval=1, num_clips=5
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=3, frame_interval=1, num_clips=5, temporal_jitter=False)
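        # 5 clips x 3 frames each = 15 sampled indices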
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 15
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 15
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={3}, '
f'frame_interval={1}, '
f'num_clips={5}, '
f'temporal_jitter={False}, '
f'twice_sample={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={False})')
# Sample Frame with no temporal_jitter
# clip_len=5, frame_interval=1, num_clips=5,
# out_of_bound_opt='repeat_last'
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=5,
frame_interval=1,
num_clips=5,
temporal_jitter=False,
out_of_bound_opt='repeat_last')
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={5}, '
f'frame_interval={1}, '
f'num_clips={5}, '
f'temporal_jitter={False}, '
f'twice_sample={False}, '
f'out_of_bound_opt=repeat_last, '
f'test_mode={False})')
def check_monotonous(arr):
length = arr.shape[0]
for i in range(length - 1):
if arr[i] > arr[i + 1]:
return False
return True
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 25
frame_inds = sample_frames_results['frame_inds'].reshape([5, 5])
for i in range(5):
assert check_monotonous(frame_inds[i])
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 25
frame_inds = sample_frames_results['frame_inds'].reshape([5, 5])
for i in range(5):
assert check_monotonous(frame_inds[i])
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
# Sample Frame with temporal_jitter
# clip_len=4, frame_interval=2, num_clips=5
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4, frame_interval=2, num_clips=5, temporal_jitter=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 20
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 20
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={2}, '
f'num_clips={5}, '
f'temporal_jitter={True}, '
f'twice_sample={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={False})')
# Sample Frame with no temporal_jitter in test mode
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 24
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 24
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'temporal_jitter={False}, '
f'twice_sample={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={True})')
# Sample Frame with no temporal_jitter in test mode
# clip_len=3, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=3,
frame_interval=1,
num_clips=6,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 18
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 18
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
# Sample Frame with no temporal_jitter to get clip_offsets
# clip_len=1, frame_interval=1, num_clips=8
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 6
config = dict(
clip_len=1,
frame_interval=1,
num_clips=8,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 8
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 8
assert_array_equal(sample_frames_results['frame_inds'],
np.array([1, 2, 2, 3, 4, 5, 5, 6]))
# Sample Frame with no temporal_jitter to get clip_offsets
# clip_len=1, frame_interval=1, num_clips=8
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 6
config = dict(
clip_len=1,
frame_interval=1,
num_clips=8,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 8
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 8
assert_array_equal(sample_frames_results['frame_inds'],
np.array([1, 2, 2, 3, 4, 5, 5, 6]))
# Sample Frame with no temporal_jitter to get clip_offsets zero
# clip_len=6, frame_interval=1, num_clips=1
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 5
config = dict(
clip_len=6,
frame_interval=1,
num_clips=1,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 6
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 6
assert_array_equal(sample_frames_results['frame_inds'],
[1, 2, 3, 4, 5, 1])
# Sample Frame with no temporal_jitter to get avg_interval <= 0
# clip_len=12, frame_interval=1, num_clips=20
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 30
config = dict(
clip_len=12,
frame_interval=1,
num_clips=20,
temporal_jitter=False,
test_mode=False)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 240
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 240
assert np.max(sample_frames_results['frame_inds']) <= 30
assert np.min(sample_frames_results['frame_inds']) >= 1
# Sample Frame with no temporal_jitter to get clip_offsets
# clip_len=1, frame_interval=1, num_clips=8
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 6
config = dict(
clip_len=1,
frame_interval=1,
num_clips=8,
temporal_jitter=False,
test_mode=False)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert sample_frames_results['start_index'] == 0
assert len(sample_frames_results['frame_inds']) == 8
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 8
assert_array_equal(sample_frames_results['frame_inds'],
np.array([1, 2, 3, 3, 4, 5, 5, 6]))
# Sample Frame with no temporal_jitter to get clip_offsets zero
# clip_len=12, frame_interval=1, num_clips=2
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 10
config = dict(
clip_len=12,
frame_interval=1,
num_clips=2,
temporal_jitter=False,
test_mode=False)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 24
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 24
assert np.max(sample_frames_results['frame_inds']) <= 10
assert np.min(sample_frames_results['frame_inds']) >= 1
        # Sample Frame using twice_sample
# clip_len=12, frame_interval=1, num_clips=2
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 40
config = dict(
clip_len=12,
frame_interval=1,
num_clips=2,
temporal_jitter=False,
twice_sample=True,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 48
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 48
assert np.max(sample_frames_results['frame_inds']) <= 40
assert np.min(sample_frames_results['frame_inds']) >= 1
def test_dense_sample_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames'
]
# Dense sample with no temporal_jitter in test mode
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
temporal_jitter=False,
test_mode=True)
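        # test mode densely samples 10 positions x 6 clips x 4 frames = 240 indices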
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert self.check_keys_contain(dense_sample_frames_results.keys(),
target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 240
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 240
assert repr(dense_sample_frames) == (
f'{dense_sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'sample_range={64}, '
f'num_sample_positions={10}, '
f'temporal_jitter={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={True})')
# Dense sample with no temporal_jitter
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4, frame_interval=1, num_clips=6, temporal_jitter=False)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert self.check_keys_contain(dense_sample_frames_results.keys(),
target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 24
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 24
# Dense sample with no temporal_jitter, sample_range=32 in test mode
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
sample_range=32,
temporal_jitter=False,
test_mode=True)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert self.check_keys_contain(dense_sample_frames_results.keys(),
target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 240
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 240
# Dense sample with no temporal_jitter, sample_range=32
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
sample_range=32,
temporal_jitter=False)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert self.check_keys_contain(dense_sample_frames_results.keys(),
target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 24
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 24
assert repr(dense_sample_frames) == (
f'{dense_sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'sample_range={32}, '
f'num_sample_positions={10}, '
f'temporal_jitter={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={False})')
# Dense sample with no temporal_jitter, sample_range=1000 to check mod
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
sample_range=1000,
temporal_jitter=False)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert self.check_keys_contain(dense_sample_frames_results.keys(),
target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 24
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 24
# Dense sample with no temporal_jitter in test mode
# sample_range=32, num_sample_positions=5
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
num_sample_positions=5,
sample_range=32,
temporal_jitter=False,
test_mode=True)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert self.check_keys_contain(dense_sample_frames_results.keys(),
target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 120
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 120
assert repr(dense_sample_frames) == (
f'{dense_sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'sample_range={32}, '
f'num_sample_positions={5}, '
f'temporal_jitter={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={True})')
def test_untrim_sample_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames'
]
frame_result = dict(
frame_dir=None,
total_frames=100,
filename_tmpl=None,
modality='RGB',
start_index=0,
label=1)
video_result = copy.deepcopy(self.video_results)
config = dict(clip_len=1, frame_interval=16, start_index=0)
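        # with 100 frames and interval 16, the clip centers fall at 8, 24, 40, 56, 72, 88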
sample_frames = UntrimmedSampleFrames(**config)
sample_frames_results = sample_frames(frame_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 6
assert_array_equal(sample_frames_results['frame_inds'],
np.array([8, 24, 40, 56, 72, 88]))
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'frame_interval={16})')
config = dict(clip_len=1, frame_interval=16, start_index=0)
sample_frames = UntrimmedSampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
frame_inds = np.array(list(range(8, 300, 16)))
assert len(sample_frames_results['frame_inds']) == frame_inds.shape[0]
assert_array_equal(sample_frames_results['frame_inds'], frame_inds)
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'frame_interval={16})')
config = dict(clip_len=1, frame_interval=16)
sample_frames = UntrimmedSampleFrames(**config)
frame_result_ = copy.deepcopy(frame_result)
frame_result_['start_index'] = 1
sample_frames_results = sample_frames(frame_result_)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 6
assert_array_equal(sample_frames_results['frame_inds'],
np.array([8, 24, 40, 56, 72, 88]) + 1)
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'frame_interval={16})')
config = dict(clip_len=3, frame_interval=16, start_index=0)
sample_frames = UntrimmedSampleFrames(**config)
sample_frames_results = sample_frames(frame_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 18
assert_array_equal(
sample_frames_results['frame_inds'],
np.array([
7, 8, 9, 23, 24, 25, 39, 40, 41, 55, 56, 57, 71, 72, 73, 87,
88, 89
]))
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={3}, '
f'frame_interval={16})')
def test_sample_ava_frames(self):
target_keys = [
'fps', 'timestamp', 'timestamp_start', 'shot_info', 'frame_inds',
'clip_len', 'frame_interval'
]
config = dict(clip_len=32, frame_interval=2)
sample_ava_dataset = SampleAVAFrames(**config)
ava_result = sample_ava_dataset(results=self.ava_results)
assert self.check_keys_contain(ava_result.keys(), target_keys)
assert ava_result['clip_len'] == 32
assert ava_result['frame_interval'] == 2
assert len(ava_result['frame_inds']) == 32
assert repr(sample_ava_dataset) == (
f'{sample_ava_dataset.__class__.__name__}('
f'clip_len={32}, '
f'frame_interval={2}, '
f'test_mode={False})')
# add test case in Issue #306
config = dict(clip_len=8, frame_interval=8)
sample_ava_dataset = SampleAVAFrames(**config)
ava_result = sample_ava_dataset(results=self.ava_results)
assert self.check_keys_contain(ava_result.keys(), target_keys)
assert ava_result['clip_len'] == 8
assert ava_result['frame_interval'] == 8
assert len(ava_result['frame_inds']) == 8
assert repr(sample_ava_dataset) == (
f'{sample_ava_dataset.__class__.__name__}('
f'clip_len={8}, '
f'frame_interval={8}, '
f'test_mode={False})')
def test_sample_proposal_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames', 'start_index'
]
# test error cases
with pytest.raises(TypeError):
proposal_result = copy.deepcopy(self.proposal_results)
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=('error', 'error'),
aug_ratio=0.5,
temporal_jitter=False)
sample_frames = SampleProposalFrames(**config)
sample_frames(proposal_result)
# test normal cases
# Sample Frame with no temporal_jitter
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 9
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=False)
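        # body_segments(2) + aug_segments(1 + 1) = 4 segments per proposal x 2 proposals = 8 indices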
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=train)')
# Sample Frame with temporal_jitter
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 9
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=True)
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={True}, '
f'mode=train)')
# Sample Frame with no temporal_jitter in val mode
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 9
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=False,
mode='val')
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=val)')
# Sample Frame with no temporal_jitter in test mode
# test_interval=2
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['out_proposals'] = None
proposal_result['total_frames'] = 10
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
test_interval=2,
temporal_jitter=False,
mode='test')
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 5
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={2}, '
f'temporal_jitter={False}, '
f'mode=test)')
# Sample Frame with no temporal_jitter to get clip_offsets zero
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 3
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=False)
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=train)')
# Sample Frame with no temporal_jitter to
# get clip_offsets zero in val mode
# clip_len=1, frame_interval=1
# body_segments=4, aug_segments=(2, 2)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 3
config = dict(
clip_len=1,
frame_interval=1,
body_segments=4,
aug_segments=(2, 2),
aug_ratio=0.5,
temporal_jitter=False,
mode='val')
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert self.check_keys_contain(sample_frames_results.keys(),
target_keys)
assert len(sample_frames_results['frame_inds']) == 16
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={4}, '
f'aug_segments={(2, 2)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=val)')
def test_pyav_init(self):
target_keys = ['video_reader', 'total_frames']
video_result = copy.deepcopy(self.video_results)
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
assert self.check_keys_contain(pyav_init_result.keys(), target_keys)
assert pyav_init_result['total_frames'] == 300
assert repr(
pyav_init) == f'{pyav_init.__class__.__name__}(io_backend=disk)'
def test_pyav_decode(self):
target_keys = ['frame_inds', 'imgs', 'original_shape']
# test PyAV with 2 dim input and start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames,
2)[:, np.newaxis]
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode()
pyav_decode_result = pyav_decode(video_result)
assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
assert repr(pyav_decode) == (f'{pyav_decode.__class__.__name__}('
f'multi_thread={False})')
# test PyAV with 1 dim input and start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames, 5)
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode()
pyav_decode_result = pyav_decode(video_result)
assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# PyAV with multi thread and start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames, 5)
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode(multi_thread=True)
pyav_decode_result = pyav_decode(video_result)
assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
assert repr(pyav_decode) == (f'{pyav_decode.__class__.__name__}('
f'multi_thread={True})')
# test PyAV with 2 dim input
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames,
2)[:, np.newaxis]
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode()
pyav_decode_result = pyav_decode(video_result)
assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test PyAV with 1 dim input
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames, 5)
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode()
pyav_decode_result = pyav_decode(video_result)
assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# PyAV with multi thread
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames, 5)
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode(multi_thread=True)
pyav_decode_result = pyav_decode(video_result)
assert self.check_keys_contain(pyav_decode_result.keys(), target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
assert repr(pyav_decode) == pyav_decode.__class__.__name__ + \
f'(multi_thread={True})'
def test_decord_init(self):
target_keys = ['video_reader', 'total_frames']
video_result = copy.deepcopy(self.video_results)
decord_init = DecordInit()
decord_init_result = decord_init(video_result)
assert self.check_keys_contain(decord_init_result.keys(), target_keys)
assert decord_init_result['total_frames'] == len(
decord_init_result['video_reader'])
assert repr(decord_init) == (f'{decord_init.__class__.__name__}('
f'io_backend=disk, '
f'num_threads={1})')
def test_decord_decode(self):
target_keys = ['frame_inds', 'imgs', 'original_shape']
# test Decord with 2 dim input and start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames,
3)[:, np.newaxis]
decord_init = DecordInit()
decord_init_result = decord_init(video_result)
video_result['video_reader'] = decord_init_result['video_reader']
decord_decode = DecordDecode()
decord_decode_result = decord_decode(video_result)
assert self.check_keys_contain(decord_decode_result.keys(),
target_keys)
assert decord_decode_result['original_shape'] == (256, 340)
assert np.shape(decord_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test Decord with 1 dim input and start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames, 3)
decord_init = DecordInit()
decord_init_result = decord_init(video_result)
video_result['video_reader'] = decord_init_result['video_reader']
decord_decode = DecordDecode()
decord_decode_result = decord_decode(video_result)
assert self.check_keys_contain(decord_decode_result.keys(),
target_keys)
assert decord_decode_result['original_shape'] == (256, 340)
assert np.shape(decord_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test Decord with 2 dim input and start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames,
3)[:, np.newaxis]
decord_init = DecordInit()
decord_init_result = decord_init(video_result)
video_result['video_reader'] = decord_init_result['video_reader']
decord_decode = DecordDecode()
decord_decode_result = decord_decode(video_result)
assert self.check_keys_contain(decord_decode_result.keys(),
target_keys)
assert decord_decode_result['original_shape'] == (256, 340)
assert np.shape(decord_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test Decord with 1 dim input
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames, 3)
decord_init = DecordInit()
decord_init_result = decord_init(video_result)
video_result['video_reader'] = decord_init_result['video_reader']
decord_decode = DecordDecode()
decord_decode_result = decord_decode(video_result)
assert self.check_keys_contain(decord_decode_result.keys(),
target_keys)
assert decord_decode_result['original_shape'] == (256, 340)
assert np.shape(decord_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
def test_opencv_init(self):
target_keys = ['new_path', 'video_reader', 'total_frames']
video_result = copy.deepcopy(self.video_results)
opencv_init = OpenCVInit()
opencv_init_result = opencv_init(video_result)
assert self.check_keys_contain(opencv_init_result.keys(), target_keys)
assert opencv_init_result['total_frames'] == len(
opencv_init_result['video_reader'])
assert repr(opencv_init) == (f'{opencv_init.__class__.__name__}('
f'io_backend=disk)')
def test_opencv_decode(self):
target_keys = ['frame_inds', 'imgs', 'original_shape']
# test OpenCV with 2 dim input when start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames,
2)[:, np.newaxis]
opencv_init = OpenCVInit()
opencv_init_result = opencv_init(video_result)
video_result['video_reader'] = opencv_init_result['video_reader']
opencv_decode = OpenCVDecode()
opencv_decode_result = opencv_decode(video_result)
assert self.check_keys_contain(opencv_decode_result.keys(),
target_keys)
assert opencv_decode_result['original_shape'] == (256, 340)
assert np.shape(opencv_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test OpenCV with 2 dim input
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames,
2)[:, np.newaxis]
opencv_init = OpenCVInit()
opencv_init_result = opencv_init(video_result)
video_result['video_reader'] = opencv_init_result['video_reader']
opencv_decode = OpenCVDecode()
opencv_decode_result = opencv_decode(video_result)
assert self.check_keys_contain(opencv_decode_result.keys(),
target_keys)
assert opencv_decode_result['original_shape'] == (256, 340)
assert np.shape(opencv_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
        # test OpenCV with 1 dim input when start_index = 0
        video_result = copy.deepcopy(self.video_results)
        video_result['frame_inds'] = np.arange(0, self.total_frames, 3)
        opencv_init = OpenCVInit()
        opencv_init_result = opencv_init(video_result)
        video_result['video_reader'] = opencv_init_result['video_reader']
        opencv_decode = OpenCVDecode()
        opencv_decode_result = opencv_decode(video_result)
        assert self.check_keys_contain(opencv_decode_result.keys(), target_keys)
        assert opencv_decode_result['original_shape'] == (256, 340)
        assert np.shape(opencv_decode_result['imgs']) == (
            len(video_result['frame_inds']), 256, 340, 3)
# test OpenCV with 1 dim input
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames, 3)
opencv_init = OpenCVInit()
opencv_init_result = opencv_init(video_result)
video_result['video_reader'] = opencv_init_result['video_reader']
opencv_decode = OpenCVDecode()
opencv_decode_result = opencv_decode(video_result)
assert self.check_keys_contain(opencv_decode_result.keys(),
target_keys)
assert opencv_decode_result['original_shape'] == (256, 340)
assert np.shape(opencv_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
def test_rawframe_selector(self):
with pytest.warns(UserWarning):
FrameSelector(io_backend='disk')
def test_rawframe_decode(self):
target_keys = ['frame_inds', 'imgs', 'original_shape', 'modality']
# test frame selector with 2 dim input
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(0, self.total_frames, 2)[:,
np.newaxis]
        # since the test images start with index 1, we add 1 to frame_inds
# in order to pass the CI
inputs['frame_inds'] = inputs['frame_inds'] + 1
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 2 dim input
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(1, self.total_frames, 2)[:,
np.newaxis]
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input when start_index = 0
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(0, self.total_frames, 5)
        # since the test images start with index 1, we add 1 to frame_inds
# in order to pass the CI
inputs['frame_inds'] = inputs['frame_inds'] + 1
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(1, self.total_frames, 5)
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(0, self.total_frames, 2)
        # since the test images start with index 1, we add 1 to frame_inds
# in order to pass the CI
inputs['frame_inds'] = inputs['frame_inds'] + 1
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(1, self.total_frames, 2)
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input for flow images
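        # flow frames are stored as separate x/y grayscale images, so twice as many frames are loaded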
inputs = copy.deepcopy(self.flow_frame_results)
inputs['frame_inds'] = np.arange(0, self.total_frames, 2)
        # since the test images start with index 1, we add 1 to frame_inds
# in order to pass the CI
inputs['frame_inds'] = inputs['frame_inds'] + 1
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']) * 2,
240, 320)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input for flow images
inputs = copy.deepcopy(self.flow_frame_results)
inputs['frame_inds'] = np.arange(1, self.total_frames, 2)
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']) * 2,
240, 320)
assert results['original_shape'] == (240, 320)
        # test frame selector with the turbojpeg decoding backend
# when start_index = 0
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(0, self.total_frames, 5)
        # since the test images start with index 1, we add 1 to frame_inds
# in order to pass the CI
inputs['frame_inds'] = inputs['frame_inds'] + 1
frame_selector = RawFrameDecode(
io_backend='disk', decoding_backend='turbojpeg')
results = frame_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
        # test frame selector with the turbojpeg decoding backend
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(1, self.total_frames, 5)
frame_selector = RawFrameDecode(
io_backend='disk', decoding_backend='turbojpeg')
results = frame_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
assert repr(frame_selector) == (f'{frame_selector.__class__.__name__}('
f'io_backend=disk, '
f'decoding_backend=turbojpeg)')
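    # Reader note (not part of the original tests): the assertions above pin
    # down the RawFrameDecode contract -- given 1-D or 2-D 'frame_inds' it
    # loads the listed frames, stacks RGB output as (num_inds, H, W, 3) and
    # flow output as (2 * num_inds, H, W), and records 'original_shape'.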
def test_load_localization_feature(self):
target_keys = ['raw_feature']
action_result = copy.deepcopy(self.action_results)
# test error cases
with pytest.raises(NotImplementedError):
load_localization_feature = LoadLocalizationFeature(
'unsupport_ext')
# test normal cases
load_localization_feature = LoadLocalizationFeature()
load_localization_feature_result = load_localization_feature(
action_result)
assert self.check_keys_contain(load_localization_feature_result.keys(),
target_keys)
assert load_localization_feature_result['raw_feature'].shape == (400,
5)
assert repr(load_localization_feature) == (
f'{load_localization_feature.__class__.__name__}('
f'raw_feature_ext=.csv)')
def test_generate_localization_label(self):
action_result = copy.deepcopy(self.action_results)
action_result['raw_feature'] = np.random.randn(400, 5)
# test default setting
target_keys = ['gt_bbox']
generate_localization_labels = GenerateLocalizationLabels()
generate_localization_labels_result = generate_localization_labels(
action_result)
assert self.check_keys_contain(
generate_localization_labels_result.keys(), target_keys)
assert_array_almost_equal(
generate_localization_labels_result['gt_bbox'], [[0.375, 0.625]],
decimal=4)
def test_load_proposals(self):
target_keys = [
'bsp_feature', 'tmin', 'tmax', 'tmin_score', 'tmax_score',
'reference_temporal_iou'
]
action_result = copy.deepcopy(self.action_results)
# test error cases
with pytest.raises(NotImplementedError):
load_proposals = LoadProposals(5, self.proposals_dir,
self.bsp_feature_dir,
'unsupport_ext')
with pytest.raises(NotImplementedError):
load_proposals = LoadProposals(5, self.proposals_dir,
self.bsp_feature_dir, '.csv',
'unsupport_ext')
# test normal cases
load_proposals = LoadProposals(5, self.proposals_dir,
self.bsp_feature_dir)
load_proposals_result = load_proposals(action_result)
assert self.check_keys_contain(load_proposals_result.keys(),
target_keys)
assert (load_proposals_result['bsp_feature'].shape[0] == 5)
assert load_proposals_result['tmin'].shape == (5, )
assert_array_almost_equal(
load_proposals_result['tmin'], np.arange(0.1, 0.6, 0.1), decimal=4)
assert load_proposals_result['tmax'].shape == (5, )
assert_array_almost_equal(
load_proposals_result['tmax'], np.arange(0.2, 0.7, 0.1), decimal=4)
assert load_proposals_result['tmin_score'].shape == (5, )
assert_array_almost_equal(
load_proposals_result['tmin_score'],
np.arange(0.95, 0.90, -0.01),
decimal=4)
assert load_proposals_result['tmax_score'].shape == (5, )
assert_array_almost_equal(
load_proposals_result['tmax_score'],
np.arange(0.96, 0.91, -0.01),
decimal=4)
assert load_proposals_result['reference_temporal_iou'].shape == (5, )
assert_array_almost_equal(
load_proposals_result['reference_temporal_iou'],
np.arange(0.85, 0.80, -0.01),
decimal=4)
assert repr(load_proposals) == (
f'{load_proposals.__class__.__name__}('
f'top_k={5}, '
f'pgm_proposals_dir={self.proposals_dir}, '
f'pgm_features_dir={self.bsp_feature_dir}, '
f'proposal_ext=.csv, '
f'feature_ext=.npy)')
def test_audio_decode_init(self):
target_keys = ['audios', 'length', 'sample_rate']
inputs = copy.deepcopy(self.audio_results)
audio_decode_init = AudioDecodeInit()
results = audio_decode_init(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
# test when no audio file exists
inputs = copy.deepcopy(self.audio_results)
inputs['audio_path'] = 'foo/foo/bar.wav'
audio_decode_init = AudioDecodeInit()
results = audio_decode_init(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert results['audios'].shape == (10.0 *
audio_decode_init.sample_rate, )
assert repr(audio_decode_init) == (
f'{audio_decode_init.__class__.__name__}('
f'io_backend=disk, '
f'sample_rate=16000, '
f'pad_method=zero)')
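    # Note: when the audio file is missing, AudioDecodeInit falls back to its
    # zero-padding mode, so 'audios' still comes back with
    # duration * sample_rate samples (10 s * 16000 Hz for this fixture,
    # judging from the shape assertion above).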
def test_audio_decode(self):
target_keys = ['frame_inds', 'audios']
inputs = copy.deepcopy(self.audio_results)
inputs['frame_inds'] = np.arange(0, self.audio_total_frames,
2)[:, np.newaxis]
inputs['num_clips'] = 1
inputs['length'] = 1280
audio_selector = AudioDecode()
results = audio_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
def test_load_audio_feature(self):
target_keys = ['audios']
inputs = copy.deepcopy(self.audio_feature_results)
load_audio_feature = LoadAudioFeature()
results = load_audio_feature(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
# test when no audio feature file exists
inputs = copy.deepcopy(self.audio_feature_results)
inputs['audio_path'] = 'foo/foo/bar.npy'
load_audio_feature = LoadAudioFeature()
results = load_audio_feature(inputs)
assert results['audios'].shape == (640, 80)
assert self.check_keys_contain(results.keys(), target_keys)
assert repr(load_audio_feature) == (
f'{load_audio_feature.__class__.__name__}('
f'pad_method=zero)')
def test_audio_feature_selector(self):
target_keys = ['audios']
# test frame selector with 2 dim input
inputs = copy.deepcopy(self.audio_feature_results)
inputs['frame_inds'] = np.arange(0, self.audio_total_frames,
2)[:, np.newaxis]
inputs['num_clips'] = 1
inputs['length'] = 1280
audio_feature_selector = AudioFeatureSelector()
results = audio_feature_selector(inputs)
assert self.check_keys_contain(results.keys(), target_keys)
assert repr(audio_feature_selector) == (
f'{audio_feature_selector.__class__.__name__}('
f'fix_length={128})')
def test_pyav_decode_motion_vector(self):
pyav_init = PyAVInit()
pyav = PyAVDecodeMotionVector()
# test pyav with 2-dim input
results = {
'filename': self.video_path,
'frame_inds': np.arange(0, 32, 1)[:, np.newaxis]
}
results = pyav_init(results)
results = pyav(results)
target_keys = ['motion_vectors']
assert self.check_keys_contain(results.keys(), target_keys)
# test pyav with 1 dim input
results = {
'filename': self.video_path,
'frame_inds': np.arange(0, 32, 1)
}
pyav_init = PyAVInit()
results = pyav_init(results)
pyav = PyAVDecodeMotionVector()
results = pyav(results)
assert self.check_keys_contain(results.keys(), target_keys)
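    # Rough usage sketch (illustration only, mirroring the test above): motion
    # vectors are extracted by chaining the two transforms by hand, e.g.
    #   results = dict(filename='video.mp4', frame_inds=np.arange(0, 32))
    #   results = PyAVInit()(results)
    #   results = PyAVDecodeMotionVector()(results)
    #   mv = results['motion_vectors']
    # The shape and content of 'motion_vectors' are codec-dependent and are not
    # asserted here beyond the key being present.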
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_data/test_loading.py |
import numpy as np
import pytest
import torch
from mmcv.parallel import DataContainer as DC
from mmaction.datasets.pipelines import (Collect, FormatAudioShape,
FormatShape, ImageToTensor,
ToDataContainer, ToTensor, Transpose)
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
def test_to_tensor():
to_tensor = ToTensor(['str'])
with pytest.raises(TypeError):
# str cannot be converted to tensor
results = dict(str='0')
to_tensor(results)
    # convert tensor, numpy, sequence, int, float to tensor
target_keys = ['tensor', 'numpy', 'sequence', 'int', 'float']
to_tensor = ToTensor(target_keys)
original_results = dict(
tensor=torch.randn(2, 3),
numpy=np.random.randn(2, 3),
sequence=list(range(10)),
int=1,
float=0.1)
results = to_tensor(original_results)
assert check_keys_contain(results.keys(), target_keys)
for key in target_keys:
assert isinstance(results[key], torch.Tensor)
assert torch.equal(results[key].data, original_results[key])
# Add an additional key which is not in keys.
original_results = dict(
tensor=torch.randn(2, 3),
numpy=np.random.randn(2, 3),
sequence=list(range(10)),
int=1,
float=0.1,
str='test')
results = to_tensor(original_results)
assert check_keys_contain(results.keys(), target_keys)
for key in target_keys:
assert isinstance(results[key], torch.Tensor)
assert torch.equal(results[key].data, original_results[key])
assert repr(to_tensor) == to_tensor.__class__.__name__ + \
f'(keys={target_keys})'
def test_to_data_container():
# check user-defined fields
fields = (dict(key='key1', stack=True), dict(key='key2'))
to_data_container = ToDataContainer(fields=fields)
target_keys = ['key1', 'key2']
original_results = dict(key1=np.random.randn(10, 20), key2=['a', 'b'])
results = to_data_container(original_results.copy())
assert check_keys_contain(results.keys(), target_keys)
for key in target_keys:
assert isinstance(results[key], DC)
assert np.all(results[key].data == original_results[key])
assert results['key1'].stack
assert not results['key2'].stack
# Add an additional key which is not in keys.
original_results = dict(
key1=np.random.randn(10, 20), key2=['a', 'b'], key3='value3')
results = to_data_container(original_results.copy())
assert check_keys_contain(results.keys(), target_keys)
for key in target_keys:
assert isinstance(results[key], DC)
assert np.all(results[key].data == original_results[key])
assert results['key1'].stack
assert not results['key2'].stack
assert repr(to_data_container) == (
to_data_container.__class__.__name__ + f'(fields={fields})')
def test_image_to_tensor():
original_results = dict(imgs=np.random.randn(256, 256, 3))
keys = ['imgs']
image_to_tensor = ImageToTensor(keys)
results = image_to_tensor(original_results)
assert results['imgs'].shape == torch.Size([3, 256, 256])
assert isinstance(results['imgs'], torch.Tensor)
assert torch.equal(results['imgs'].data, original_results['imgs'])
assert repr(image_to_tensor) == image_to_tensor.__class__.__name__ + \
f'(keys={keys})'
def test_transpose():
results = dict(imgs=np.random.randn(256, 256, 3))
keys = ['imgs']
order = [2, 0, 1]
transpose = Transpose(keys, order)
results = transpose(results)
assert results['imgs'].shape == (3, 256, 256)
assert repr(transpose) == transpose.__class__.__name__ + \
f'(keys={keys}, order={order})'
def test_collect():
inputs = dict(
imgs=np.random.randn(256, 256, 3),
label=[1],
filename='test.txt',
original_shape=(256, 256, 3),
img_shape=(256, 256, 3),
pad_shape=(256, 256, 3),
flip_direction='vertical',
img_norm_cfg=dict(to_bgr=False))
keys = ['imgs', 'label']
collect = Collect(keys)
results = collect(inputs)
assert sorted(list(results.keys())) == sorted(
['imgs', 'label', 'img_meta'])
inputs.pop('imgs')
assert set(results['img_meta'].data.keys()) == set(inputs.keys())
for key in results['img_meta'].data:
assert results['img_meta'].data[key] == inputs[key]
assert repr(collect) == collect.__class__.__name__ + \
f'(keys={keys}, meta_keys={collect.meta_keys})'
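# Reader note: Collect keeps only the listed keys ('imgs', 'label') at the top
# level and gathers the remaining meta fields (filename, shapes, flip and
# normalization info) into a DataContainer stored under 'img_meta', which is
# exactly what the key/value comparison above verifies.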
def test_format_shape():
with pytest.raises(ValueError):
# invalid input format
FormatShape('NHWC')
# 'NCHW' input format
results = dict(
imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3)
format_shape = FormatShape('NCHW')
assert format_shape(results)['input_shape'] == (3, 3, 224, 224)
# `NCTHW` input format with num_clips=1, clip_len=3
results = dict(
imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3)
format_shape = FormatShape('NCTHW')
assert format_shape(results)['input_shape'] == (1, 3, 3, 224, 224)
# `NCTHW` input format with num_clips=2, clip_len=3
results = dict(
imgs=np.random.randn(18, 224, 224, 3), num_clips=2, clip_len=3)
assert format_shape(results)['input_shape'] == (6, 3, 3, 224, 224)
target_keys = ['imgs', 'input_shape']
assert check_keys_contain(results.keys(), target_keys)
assert repr(format_shape) == format_shape.__class__.__name__ + \
"(input_format='NCTHW')"
# 'NPTCHW' input format
results = dict(
imgs=np.random.randn(72, 224, 224, 3),
num_clips=9,
clip_len=1,
num_proposals=8)
format_shape = FormatShape('NPTCHW')
assert format_shape(results)['input_shape'] == (8, 9, 3, 224, 224)
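# Shape bookkeeping behind the checks above: for 'NCTHW' the leading dimension
# becomes batch * num_clips, e.g. 18 frames with num_clips=2 and clip_len=3
# form 18 / (2 * 3) = 3 samples and yield (3 * 2, 3, 3, 224, 224) =
# (6, 3, 3, 224, 224); for 'NPTCHW' the proposals lead instead, giving
# (8, 9, 3, 224, 224) for 72 frames with num_proposals=8 and num_clips=9.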
def test_format_audio_shape():
with pytest.raises(ValueError):
# invalid input format
FormatAudioShape('XXXX')
# 'NCTF' input format
results = dict(audios=np.random.randn(3, 128, 8))
format_shape = FormatAudioShape('NCTF')
assert format_shape(results)['input_shape'] == (3, 1, 128, 8)
assert repr(format_shape) == format_shape.__class__.__name__ + \
"(input_format='NCTF')"
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_data/test_formating.py |
import os
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pytest
from mmcv import ConfigDict
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.datasets import (ActivityNetDataset, AudioDataset,
AudioFeatureDataset, AudioVisualDataset,
HVUDataset, RawframeDataset, RawVideoDataset,
RepeatDataset, SSNDataset, VideoDataset)
class TestDataset:
@staticmethod
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(osp.dirname(__file__)), 'data')
cls.frame_ann_file = osp.join(cls.data_prefix, 'frame_test_list.txt')
cls.frame_ann_file_with_offset = osp.join(
cls.data_prefix, 'frame_test_list_with_offset.txt')
cls.frame_ann_file_multi_label = osp.join(
cls.data_prefix, 'frame_test_list_multi_label.txt')
cls.video_ann_file = osp.join(cls.data_prefix, 'video_test_list.txt')
cls.hvu_video_ann_file = osp.join(cls.data_prefix,
'hvu_video_test_anno.json')
cls.hvu_video_eval_ann_file = osp.join(
cls.data_prefix, 'hvu_video_eval_test_anno.json')
cls.hvu_frame_ann_file = osp.join(cls.data_prefix,
'hvu_frame_test_anno.json')
cls.action_ann_file = osp.join(cls.data_prefix,
'action_test_anno.json')
cls.proposal_ann_file = osp.join(cls.data_prefix,
'proposal_test_list.txt')
cls.proposal_norm_ann_file = osp.join(cls.data_prefix,
'proposal_normalized_list.txt')
cls.audio_ann_file = osp.join(cls.data_prefix, 'audio_test_list.txt')
cls.audio_feature_ann_file = osp.join(cls.data_prefix,
'audio_feature_test_list.txt')
cls.rawvideo_test_anno_txt = osp.join(cls.data_prefix,
'rawvideo_test_anno.txt')
cls.rawvideo_test_anno_json = osp.join(cls.data_prefix,
'rawvideo_test_anno.json')
cls.rawvideo_pipeline = []
cls.frame_pipeline = [
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1),
dict(type='RawFrameDecode', io_backend='disk')
]
cls.audio_pipeline = [
dict(type='AudioDecodeInit'),
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1),
dict(type='AudioDecode')
]
cls.audio_feature_pipeline = [
dict(type='LoadAudioFeature'),
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1),
dict(type='AudioFeatureSelector')
]
cls.video_pipeline = [
dict(type='OpenCVInit'),
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1),
dict(type='OpenCVDecode')
]
cls.action_pipeline = []
cls.proposal_pipeline = [
dict(
type='SampleProposalFrames',
clip_len=1,
body_segments=5,
aug_segments=(2, 2),
aug_ratio=0.5),
dict(type='RawFrameDecode', io_backend='disk')
]
cls.proposal_test_pipeline = [
dict(
type='SampleProposalFrames',
clip_len=1,
body_segments=5,
aug_segments=(2, 2),
aug_ratio=0.5,
mode='test'),
dict(type='RawFrameDecode', io_backend='disk')
]
cls.proposal_train_cfg = ConfigDict(
dict(
ssn=dict(
assigner=dict(
positive_iou_threshold=0.7,
background_iou_threshold=0.01,
incomplete_iou_threshold=0.5,
background_coverage_threshold=0.02,
incomplete_overlap_threshold=0.01),
sampler=dict(
num_per_video=8,
positive_ratio=1,
background_ratio=1,
incomplete_ratio=6,
add_gt_as_proposals=True),
loss_weight=dict(
comp_loss_weight=0.1, reg_loss_weight=0.1),
debug=False)))
cls.proposal_test_cfg = ConfigDict(
dict(
ssn=dict(
sampler=dict(test_interval=6, batch_size=16),
evaluater=dict(
top_k=2000,
nms=0.2,
softmax_before_filter=True,
cls_top_k=2))))
cls.proposal_test_cfg_topall = ConfigDict(
dict(
ssn=dict(
sampler=dict(test_interval=6, batch_size=16),
evaluater=dict(
top_k=-1,
nms=0.2,
softmax_before_filter=True,
cls_top_k=2))))
cls.hvu_categories = [
'action', 'attribute', 'concept', 'event', 'object', 'scene'
]
cls.hvu_category_nums = [739, 117, 291, 69, 1679, 248]
cls.hvu_categories_for_eval = ['action', 'scene', 'object']
cls.hvu_category_nums_for_eval = [3, 3, 3]
cls.filename_tmpl = 'img_{:05d}.jpg'
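    # setup_class builds every fixture once per test class: the annotation
    # files under tests/data (frame, video, audio, HVU, ActivityNet and
    # proposal lists), the per-modality pipelines, and the SSN train/test
    # configs reused by the proposal tests further down.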
def test_rawvideo_dataset(self):
# Try to load txt file
rawvideo_dataset = RawVideoDataset(
ann_file=self.rawvideo_test_anno_txt,
pipeline=self.rawvideo_pipeline,
clipname_tmpl='part_{}.mp4',
sampling_strategy='positive',
data_prefix=self.data_prefix)
result = rawvideo_dataset[0]
clipname = osp.join(self.data_prefix, 'test_rawvideo_dataset',
'part_0.mp4')
assert result['filename'] == clipname
# Try to load json file
rawvideo_dataset = RawVideoDataset(
ann_file=self.rawvideo_test_anno_json,
pipeline=self.rawvideo_pipeline,
clipname_tmpl='part_{}.mp4',
sampling_strategy='random',
data_prefix=self.data_prefix,
test_mode=True)
result = rawvideo_dataset[0]
def test_hvu_dataset(self):
hvu_frame_dataset = HVUDataset(
ann_file=self.hvu_frame_ann_file,
pipeline=self.frame_pipeline,
tag_categories=self.hvu_categories,
tag_category_nums=self.hvu_category_nums,
filename_tmpl=self.filename_tmpl,
data_prefix=self.data_prefix,
start_index=1)
hvu_frame_infos = hvu_frame_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'test_imgs')
assert hvu_frame_infos == [
dict(
frame_dir=frame_dir,
total_frames=5,
label=dict(
concept=[250, 131, 42, 51, 57, 155, 122],
object=[1570, 508],
event=[16],
action=[180],
scene=[206]),
categories=self.hvu_categories,
category_nums=self.hvu_category_nums,
filename_tmpl=self.filename_tmpl,
start_index=1,
modality='RGB')
] * 2
hvu_video_dataset = HVUDataset(
ann_file=self.hvu_video_ann_file,
pipeline=self.video_pipeline,
tag_categories=self.hvu_categories,
tag_category_nums=self.hvu_category_nums,
data_prefix=self.data_prefix)
hvu_video_infos = hvu_video_dataset.video_infos
filename = osp.join(self.data_prefix, 'tmp.mp4')
assert hvu_video_infos == [
dict(
filename=filename,
label=dict(
concept=[250, 131, 42, 51, 57, 155, 122],
object=[1570, 508],
event=[16],
action=[180],
scene=[206]),
categories=self.hvu_categories,
category_nums=self.hvu_category_nums)
] * 2
hvu_video_eval_dataset = HVUDataset(
ann_file=self.hvu_video_eval_ann_file,
pipeline=self.video_pipeline,
tag_categories=self.hvu_categories_for_eval,
tag_category_nums=self.hvu_category_nums_for_eval,
data_prefix=self.data_prefix)
results = [
np.array([
-1.59812844, 0.24459082, 1.38486497, 0.28801252, 1.09813449,
-0.28696971, 0.0637848, 0.22877678, -1.82406999
]),
np.array([
0.87904563, 1.64264224, 0.46382051, 0.72865088, -2.13712525,
1.28571358, 1.01320328, 0.59292737, -0.05502892
])
]
mAP = hvu_video_eval_dataset.evaluate(results)
assert_array_almost_equal(mAP['action_mAP'], 1.0)
assert_array_almost_equal(mAP['scene_mAP'], 0.5)
assert_array_almost_equal(mAP['object_mAP'], 0.75)
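    # The HVU evaluation above feeds one 9-dim score vector per video; with
    # tag_category_nums_for_eval = [3, 3, 3] each vector is split into three
    # 3-dim chunks (action / scene / object) and a separate per-category mAP
    # is reported for each of them.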
def test_rawframe_dataset(self):
rawframe_dataset = RawframeDataset(self.frame_ann_file,
self.frame_pipeline,
self.data_prefix)
rawframe_infos = rawframe_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'test_imgs')
assert rawframe_infos == [
dict(frame_dir=frame_dir, total_frames=5, label=127)
] * 2
assert rawframe_dataset.start_index == 1
def test_audio_dataset(self):
audio_dataset = AudioDataset(
self.audio_ann_file,
self.audio_pipeline,
data_prefix=self.data_prefix)
audio_infos = audio_dataset.video_infos
wav_path = osp.join(self.data_prefix, 'test.wav')
assert audio_infos == [
dict(audio_path=wav_path, total_frames=100, label=127)
] * 2
def test_audio_feature_dataset(self):
audio_dataset = AudioFeatureDataset(
self.audio_feature_ann_file,
self.audio_feature_pipeline,
data_prefix=self.data_prefix)
audio_infos = audio_dataset.video_infos
feature_path = osp.join(self.data_prefix, 'test.npy')
assert audio_infos == [
dict(audio_path=feature_path, total_frames=100, label=127)
] * 2
def test_rawframe_dataset_with_offset(self):
rawframe_dataset = RawframeDataset(
self.frame_ann_file_with_offset,
self.frame_pipeline,
self.data_prefix,
with_offset=True)
rawframe_infos = rawframe_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'test_imgs')
assert rawframe_infos == [
dict(frame_dir=frame_dir, offset=2, total_frames=5, label=127)
] * 2
assert rawframe_dataset.start_index == 1
def test_rawframe_dataset_multi_label(self):
rawframe_dataset = RawframeDataset(
self.frame_ann_file_multi_label,
self.frame_pipeline,
self.data_prefix,
multi_class=True,
num_classes=100)
rawframe_infos = rawframe_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'test_imgs')
label0 = [1]
label1 = [3, 5]
labels = [label0, label1]
for info, label in zip(rawframe_infos, labels):
assert info['frame_dir'] == frame_dir
assert info['total_frames'] == 5
assert set(info['label']) == set(label)
assert rawframe_dataset.start_index == 1
def test_audio_visual_dataset(self):
test_dataset = AudioVisualDataset(
self.frame_ann_file,
self.frame_pipeline,
self.data_prefix,
video_prefix=self.data_prefix,
data_prefix=self.data_prefix)
video_infos = test_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'test_imgs')
audio_path = osp.join(self.data_prefix, 'test_imgs.npy')
filename = osp.join(self.data_prefix, 'test_imgs.mp4')
assert video_infos == [
dict(
frame_dir=frame_dir,
audio_path=audio_path,
filename=filename,
total_frames=5,
label=127)
] * 2
assert test_dataset.start_index == 1
def test_dataset_realpath(self):
dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline,
'.')
assert dataset.data_prefix == osp.realpath('.')
dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline,
's3://good')
assert dataset.data_prefix == 's3://good'
dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline)
assert dataset.data_prefix is None
assert dataset.video_infos[0]['frame_dir'] == 'test_imgs'
def test_video_dataset(self):
video_dataset = VideoDataset(
self.video_ann_file,
self.video_pipeline,
data_prefix=self.data_prefix)
video_infos = video_dataset.video_infos
video_filename = osp.join(self.data_prefix, 'test.mp4')
assert video_infos == [dict(filename=video_filename, label=0)] * 2
assert video_dataset.start_index == 0
def test_rawframe_pipeline(self):
target_keys = [
'frame_dir', 'total_frames', 'label', 'filename_tmpl',
'start_index', 'modality'
]
# RawframeDataset not in test mode
rawframe_dataset = RawframeDataset(
self.frame_ann_file,
self.frame_pipeline,
self.data_prefix,
test_mode=False)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
# RawframeDataset in multi-class tasks
rawframe_dataset = RawframeDataset(
self.frame_ann_file,
self.frame_pipeline,
self.data_prefix,
multi_class=True,
num_classes=400,
test_mode=False)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
# RawframeDataset with offset
rawframe_dataset = RawframeDataset(
self.frame_ann_file_with_offset,
self.frame_pipeline,
self.data_prefix,
with_offset=True,
num_classes=400,
test_mode=False)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys + ['offset'])
# RawframeDataset in test mode
rawframe_dataset = RawframeDataset(
self.frame_ann_file,
self.frame_pipeline,
self.data_prefix,
test_mode=True)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
# RawframeDataset in multi-class tasks in test mode
rawframe_dataset = RawframeDataset(
self.frame_ann_file,
self.frame_pipeline,
self.data_prefix,
multi_class=True,
num_classes=400,
test_mode=True)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
# RawframeDataset with offset
rawframe_dataset = RawframeDataset(
self.frame_ann_file_with_offset,
self.frame_pipeline,
self.data_prefix,
with_offset=True,
num_classes=400,
test_mode=True)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys + ['offset'])
def test_audio_pipeline(self):
target_keys = [
'audio_path', 'label', 'start_index', 'modality', 'audios_shape',
'length', 'sample_rate', 'total_frames'
]
# Audio dataset not in test mode
audio_dataset = AudioDataset(
self.audio_ann_file,
self.audio_pipeline,
data_prefix=self.data_prefix,
test_mode=False)
result = audio_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
# Audio dataset in test mode
audio_dataset = AudioDataset(
self.audio_ann_file,
self.audio_pipeline,
data_prefix=self.data_prefix,
test_mode=True)
result = audio_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
def test_audio_feature_pipeline(self):
target_keys = [
'audio_path', 'label', 'start_index', 'modality', 'audios',
'total_frames'
]
# Audio feature dataset not in test mode
audio_feature_dataset = AudioFeatureDataset(
self.audio_feature_ann_file,
self.audio_feature_pipeline,
data_prefix=self.data_prefix,
test_mode=False)
result = audio_feature_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
        # Audio feature dataset in test mode
audio_feature_dataset = AudioFeatureDataset(
self.audio_feature_ann_file,
self.audio_feature_pipeline,
data_prefix=self.data_prefix,
test_mode=True)
result = audio_feature_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
def test_video_pipeline(self):
target_keys = ['filename', 'label', 'start_index', 'modality']
# VideoDataset not in test mode
video_dataset = VideoDataset(
self.video_ann_file,
self.video_pipeline,
data_prefix=self.data_prefix,
test_mode=False)
result = video_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
# VideoDataset in test mode
video_dataset = VideoDataset(
self.video_ann_file,
self.video_pipeline,
data_prefix=self.data_prefix,
test_mode=True)
result = video_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
def test_action_pipeline(self):
target_keys = ['video_name', 'data_prefix']
# ActivityNet Dataset not in test mode
action_dataset = ActivityNetDataset(
self.action_ann_file,
self.action_pipeline,
self.data_prefix,
test_mode=False)
result = action_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
# ActivityNet Dataset in test mode
action_dataset = ActivityNetDataset(
self.action_ann_file,
self.action_pipeline,
self.data_prefix,
test_mode=True)
result = action_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
def test_proposal_pipeline(self):
target_keys = [
'frame_dir', 'video_id', 'total_frames', 'gts', 'proposals',
'filename_tmpl', 'modality', 'out_proposals', 'reg_targets',
'proposal_scale_factor', 'proposal_labels', 'proposal_type',
'start_index'
]
# SSN Dataset not in test mode
proposal_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix)
result = proposal_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
# SSN Dataset with random sampling proposals
proposal_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix,
video_centric=False)
result = proposal_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
target_keys = [
'frame_dir', 'video_id', 'total_frames', 'gts', 'proposals',
'filename_tmpl', 'modality', 'relative_proposal_list',
'scale_factor_list', 'proposal_tick_list', 'reg_norm_consts',
'start_index'
]
# SSN Dataset in test mode
proposal_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_test_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix,
test_mode=True)
result = proposal_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
def test_rawframe_evaluate(self):
rawframe_dataset = RawframeDataset(self.frame_ann_file,
self.frame_pipeline,
self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
rawframe_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
rawframe_dataset.evaluate([0] * 5)
with pytest.raises(TypeError):
# topk must be int or tuple of int
rawframe_dataset.evaluate(
[0] * len(rawframe_dataset),
metric_options=dict(top_k_accuracy=dict(topk=1.)))
with pytest.raises(KeyError):
# unsupported metric
rawframe_dataset.evaluate(
[0] * len(rawframe_dataset), metrics='iou')
# evaluate top_k_accuracy and mean_class_accuracy metric
results = [np.array([0.1, 0.5, 0.4])] * 2
eval_result = rawframe_dataset.evaluate(
results, metrics=['top_k_accuracy', 'mean_class_accuracy'])
assert set(eval_result.keys()) == set(
['top1_acc', 'top5_acc', 'mean_class_accuracy'])
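    # For reference, the recognition-style evaluate() calls in this file all
    # follow the same pattern (sketch only, mirroring the code above):
    #   results = [np.array([s_0, s_1, s_2]), ...]  # one score vector per clip
    #   dataset.evaluate(results,
    #                    metrics=['top_k_accuracy', 'mean_class_accuracy'])
    # and return a dict with 'top1_acc', 'top5_acc' and 'mean_class_accuracy'.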
def test_video_evaluate(self):
video_dataset = VideoDataset(
self.video_ann_file,
self.video_pipeline,
data_prefix=self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
video_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
video_dataset.evaluate([0] * 5)
with pytest.raises(TypeError):
# topk must be int or tuple of int
video_dataset.evaluate(
[0] * len(video_dataset),
metric_options=dict(top_k_accuracy=dict(topk=1.)))
with pytest.raises(KeyError):
# unsupported metric
video_dataset.evaluate([0] * len(video_dataset), metrics='iou')
# evaluate top_k_accuracy and mean_class_accuracy metric
results = [np.array([0.1, 0.5, 0.4])] * 2
eval_result = video_dataset.evaluate(
results, metrics=['top_k_accuracy', 'mean_class_accuracy'])
assert set(eval_result.keys()) == set(
['top1_acc', 'top5_acc', 'mean_class_accuracy'])
def test_base_dataset(self):
video_dataset = VideoDataset(
self.video_ann_file,
self.video_pipeline,
data_prefix=self.data_prefix,
start_index=3)
assert len(video_dataset) == 2
assert video_dataset.start_index == 3
def test_repeat_dataset(self):
rawframe_dataset = RawframeDataset(self.frame_ann_file,
self.frame_pipeline,
self.data_prefix)
repeat_dataset = RepeatDataset(rawframe_dataset, 5)
assert len(repeat_dataset) == 10
result_a = repeat_dataset[0]
result_b = repeat_dataset[2]
assert set(result_a.keys()) == set(result_b.keys())
for key in result_a:
if isinstance(result_a[key], np.ndarray):
assert np.equal(result_a[key], result_b[key]).all()
elif isinstance(result_a[key], list):
assert all(
np.array_equal(a, b)
for (a, b) in zip(result_a[key], result_b[key]))
else:
assert result_a[key] == result_b[key]
def test_activitynet_dataset(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file,
self.action_pipeline,
self.data_prefix)
activitynet_infos = activitynet_dataset.video_infos
assert activitynet_infos == [
dict(
video_name='v_test1',
duration_second=1,
duration_frame=30,
annotations=[dict(segment=[0.3, 0.6], label='Rock climbing')],
feature_frame=30,
fps=30.0,
rfps=30),
dict(
video_name='v_test2',
duration_second=2,
duration_frame=48,
annotations=[dict(segment=[1.0, 2.0], label='Drinking beer')],
feature_frame=48,
fps=24.0,
rfps=24.0)
]
def test_activitynet_proposals2json(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file,
self.action_pipeline,
self.data_prefix)
results = [
dict(
video_name='v_test1',
proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]),
dict(
video_name='v_test2',
proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])
]
result_dict = activitynet_dataset.proposals2json(results)
assert result_dict == dict(
test1=[{
'segment': [0.1, 0.9],
'score': 0.1
}],
test2=[{
'segment': [10.1, 20.9],
'score': 0.9
}])
result_dict = activitynet_dataset.proposals2json(results, True)
assert result_dict == dict(
test1=[{
'segment': [0.1, 0.9],
'score': 0.1
}],
test2=[{
'segment': [10.1, 20.9],
'score': 0.9
}])
def test_activitynet_evaluate(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file,
self.action_pipeline,
self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
activitynet_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
activitynet_dataset.evaluate([0] * 5)
with pytest.raises(KeyError):
# unsupported metric
activitynet_dataset.evaluate(
[0] * len(activitynet_dataset), metrics='iou')
# evaluate AR@AN metric
results = [
dict(
video_name='v_test1',
proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]),
dict(
video_name='v_test2',
proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])
]
eval_result = activitynet_dataset.evaluate(results, metrics=['AR@AN'])
assert set(eval_result) == set(
['auc', 'AR@1', 'AR@5', 'AR@10', 'AR@100'])
def test_activitynet_dump_results(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file,
self.action_pipeline,
self.data_prefix)
# test dumping json file
results = [
dict(
video_name='v_test1',
proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]),
dict(
video_name='v_test2',
proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])
]
dump_results = {
'version': 'VERSION 1.3',
'results': {
'test1': [{
'segment': [0.1, 0.9],
'score': 0.1
}],
'test2': [{
'segment': [10.1, 20.9],
'score': 0.9
}]
},
'external_data': {}
}
tmp_filename = osp.join(tempfile.gettempdir(), 'result.json')
activitynet_dataset.dump_results(results, tmp_filename, 'json')
assert osp.isfile(tmp_filename)
with open(tmp_filename, 'r+') as f:
load_obj = mmcv.load(f, file_format='json')
assert load_obj == dump_results
os.remove(tmp_filename)
# test dumping csv file
results = [('test_video', np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9,
10]]))]
with tempfile.TemporaryDirectory() as tmpdir:
activitynet_dataset.dump_results(results, tmpdir, 'csv')
load_obj = np.loadtxt(
osp.join(tmpdir, 'test_video.csv'),
dtype=np.float32,
delimiter=',',
skiprows=1)
assert_array_equal(
load_obj,
np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]],
dtype=np.float32))
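    # dump_results supports two formats, as exercised above: 'json' writes a
    # single ActivityNet-style file (version / results / external_data) with
    # the 'v_' prefix stripped from video names, while 'csv' writes one
    # '<video_name>.csv' per result with a header row (hence skiprows=1 when
    # reading it back).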
def test_ssn_dataset(self):
# test ssn dataset
ssn_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix)
ssn_infos = ssn_dataset.video_infos
assert ssn_infos[0]['video_id'] == 'test_imgs'
assert ssn_infos[0]['total_frames'] == 5
# test ssn dataset with verbose
ssn_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix,
verbose=True)
ssn_infos = ssn_dataset.video_infos
assert ssn_infos[0]['video_id'] == 'test_imgs'
assert ssn_infos[0]['total_frames'] == 5
        # test ssn dataset with a normalized proposal file
with pytest.raises(Exception):
ssn_dataset = SSNDataset(
self.proposal_norm_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix)
ssn_infos = ssn_dataset.video_infos
# test ssn dataset with reg_normalize_constants
ssn_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix,
reg_normalize_constants=[[[-0.0603, 0.0325], [0.0752, 0.1596]]])
ssn_infos = ssn_dataset.video_infos
assert ssn_infos[0]['video_id'] == 'test_imgs'
assert ssn_infos[0]['total_frames'] == 5
# test error case
with pytest.raises(TypeError):
ssn_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix,
aug_ratio=('error', 'error'))
ssn_infos = ssn_dataset.video_infos
def test_ssn_evaluate(self):
ssn_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix)
ssn_dataset_topall = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg_topall,
data_prefix=self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
ssn_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
ssn_dataset.evaluate([0] * 5)
with pytest.raises(KeyError):
# unsupported metric
ssn_dataset.evaluate([0] * len(ssn_dataset), metrics='iou')
# evaluate mAP metric
results_relative_proposal_list = np.random.randn(16, 2)
results_activity_scores = np.random.randn(16, 21)
results_completeness_scores = np.random.randn(16, 20)
results_bbox_preds = np.random.randn(16, 20, 2)
results = [
dict(
relative_proposal_list=results_relative_proposal_list,
activity_scores=results_activity_scores,
completeness_scores=results_completeness_scores,
bbox_preds=results_bbox_preds)
]
eval_result = ssn_dataset.evaluate(results, metrics=['mAP'])
assert set(eval_result) == set([
'[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]',
'[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'
])
# evaluate mAP metric without filtering topk
results_relative_proposal_list = np.random.randn(16, 2)
results_activity_scores = np.random.randn(16, 21)
results_completeness_scores = np.random.randn(16, 20)
results_bbox_preds = np.random.randn(16, 20, 2)
results = [
dict(
relative_proposal_list=results_relative_proposal_list,
activity_scores=results_activity_scores,
completeness_scores=results_completeness_scores,
bbox_preds=results_bbox_preds)
]
eval_result = ssn_dataset_topall.evaluate(results, metrics=['mAP'])
assert set(eval_result) == set([
'[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]',
'[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'
])
def test_audio_evaluate(self):
audio_dataset = AudioDataset(
self.audio_ann_file,
self.audio_pipeline,
data_prefix=self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
audio_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
audio_dataset.evaluate([0] * 5)
with pytest.raises(TypeError):
# topk must be int or tuple of int
audio_dataset.evaluate(
[0] * len(audio_dataset),
metric_options=dict(top_k_accuracy=dict(topk=1.)))
with pytest.raises(KeyError):
# unsupported metric
audio_dataset.evaluate([0] * len(audio_dataset), metrics='iou')
# evaluate top_k_accuracy and mean_class_accuracy metric
results = [np.array([0.1, 0.5, 0.4])] * 2
eval_result = audio_dataset.evaluate(
results, metrics=['top_k_accuracy', 'mean_class_accuracy'])
assert set(eval_result.keys()) == set(
['top1_acc', 'top5_acc', 'mean_class_accuracy'])
def test_audio_feature_evaluate(self):
audio_dataset = AudioFeatureDataset(
self.audio_feature_ann_file,
self.audio_feature_pipeline,
data_prefix=self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
audio_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
audio_dataset.evaluate([0] * 5)
with pytest.raises(TypeError):
# topk must be int or tuple of int
audio_dataset.evaluate(
[0] * len(audio_dataset),
metric_options=dict(top_k_accuracy=dict(topk=1.)))
with pytest.raises(KeyError):
# unsupported metric
audio_dataset.evaluate([0] * len(audio_dataset), metrics='iou')
# evaluate top_k_accuracy and mean_class_accuracy metric
results = [np.array([0.1, 0.5, 0.4])] * 2
eval_result = audio_dataset.evaluate(
results, metrics=['top_k_accuracy', 'mean_class_accuracy'])
assert set(eval_result) == set(
['top1_acc', 'top5_acc', 'mean_class_accuracy'])
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_data/test_dataset.py |
import os.path as osp
import mmcv
import numpy as np
from numpy.testing import assert_array_equal
from mmaction.datasets import AVADataset
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
class TestAVADataset(object):
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(
osp.dirname(osp.dirname(__file__)), 'data', 'test_ava_dataset')
cls.ann_file = osp.join(cls.data_prefix, 'ava_sample.csv')
cls.exclude_file = osp.join(cls.data_prefix,
'ava_excluded_timestamps_sample.csv')
cls.proposal_file = osp.join(cls.data_prefix,
'ava_proposals_sample.pkl')
cls.pipeline = [
dict(dict(type='SampleAVAFrames', clip_len=32, frame_interval=2))
]
cls.proposal = mmcv.load(cls.proposal_file)
def test_ava_dataset(self):
target_keys = [
'frame_dir', 'video_id', 'timestamp', 'img_key', 'shot_info',
'fps', 'ann'
]
ann_keys = ['labels', 'entity_boxes', 'entity_ids']
pkl_keys = ['0f39OWEqJ24,0902', '0f39OWEqJ24,0903', '_-Z6wFjXtGQ,0902']
ava_dataset = AVADataset(
self.ann_file,
self.exclude_file,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert check_keys_contain(ava_dataset.proposals.keys(), pkl_keys)
assert check_keys_contain(ava_infos[0].keys(), target_keys)
assert check_keys_contain(ava_infos[0]['ann'].keys(), ann_keys)
assert len(ava_infos) == 1
assert ava_infos[0]['frame_dir'] == osp.join(self.data_prefix,
'0f39OWEqJ24')
assert ava_infos[0]['video_id'] == '0f39OWEqJ24'
assert ava_infos[0]['timestamp'] == 902
assert ava_infos[0]['img_key'] == '0f39OWEqJ24,0902'
assert ava_infos[0]['shot_info'] == (0, 26880)
assert ava_infos[0]['fps'] == 30
assert len(ava_infos[0]['ann']) == 3
target_labels = np.array([12, 17, 79] + [
-1,
] * 78)
target_labels = target_labels[None, ...]
assert_array_equal(ava_infos[0]['ann']['labels'], target_labels)
assert_array_equal(ava_infos[0]['ann']['entity_boxes'],
np.array([[0.031, 0.162, 0.67, 0.995]]))
assert_array_equal(ava_infos[0]['ann']['entity_ids'], np.array([0]))
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert len(ava_infos) == 3
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
test_mode=True,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert len(ava_infos) == 3
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
test_mode=True,
data_prefix=self.data_prefix,
proposal_file=None)
assert ava_dataset.proposals is None
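    # Label layout checked above: each AVA annotation entry carries 81 label
    # slots, with the real label ids first ([12, 17, 79] here) and the unused
    # slots padded with -1, alongside one entity box and one entity id per
    # annotated person.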
def test_ava_pipeline(self):
target_keys = [
'frame_dir', 'video_id', 'timestamp', 'img_key', 'shot_info',
'fps', 'ann', 'filename_tmpl', 'modality', 'start_index',
'timestamp_start', 'timestamp_end', 'proposals', 'frame_inds',
'clip_len', 'frame_interval'
]
ann_keys = ['labels', 'entity_boxes', 'entity_ids']
ava_dataset = AVADataset(
self.ann_file,
self.exclude_file,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
result = ava_dataset[0]
assert check_keys_contain(result.keys(), target_keys)
assert check_keys_contain(result['ann'].keys(), ann_keys)
assert result['filename_tmpl'] == 'img_{:05}.jpg'
assert result['modality'] == 'RGB'
assert result['start_index'] == 1
assert result['timestamp_start'] == 902
assert result['timestamp_end'] == 1798
assert_array_equal(result['proposals'],
np.array([[0.011, 0.157, 0.655, 0.983, 0.998163]]))
assert result['clip_len'] == 32
assert result['frame_interval'] == 2
assert len(result['frame_inds']) == 32
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_data/test_ava_dataset.py |
import copy
import mmcv
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
# yapf: disable
from mmaction.datasets.pipelines import (AudioAmplify, CenterCrop, ColorJitter,
EntityBoxClip, EntityBoxCrop,
EntityBoxFlip, EntityBoxPad,
EntityBoxRescale, Flip, Fuse,
MelSpectrogram, MultiGroupCrop,
MultiScaleCrop, Normalize, RandomCrop,
RandomRescale, RandomResizedCrop,
RandomScale, Resize, TenCrop,
ThreeCrop)
# yapf: enable
class TestAugumentations:
@staticmethod
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
@staticmethod
def check_crop(origin_imgs, result_imgs, result_bbox, num_crops=1):
"""Check if the result_bbox is in correspond to result_imgs."""
def check_single_crop(origin_imgs, result_imgs, result_bbox):
result_img_shape = result_imgs[0].shape[:2]
crop_w = result_bbox[2] - result_bbox[0]
crop_h = result_bbox[3] - result_bbox[1]
crop_shape = (crop_h, crop_w)
if not crop_shape == result_img_shape:
return False
left, top, right, bottom = result_bbox
return np.array_equal(
np.array(origin_imgs)[:, top:bottom, left:right, :],
np.array(result_imgs))
if result_bbox.ndim == 1:
return check_single_crop(origin_imgs, result_imgs, result_bbox)
elif result_bbox.ndim == 2:
num_batch = len(origin_imgs)
for i, bbox in enumerate(result_bbox):
if num_crops == 10:
if (i // num_batch) % 2 == 0:
flag = check_single_crop([origin_imgs[i % num_batch]],
[result_imgs[i]], bbox)
else:
flag = check_single_crop(
[origin_imgs[i % num_batch]],
[np.flip(result_imgs[i], axis=1)], bbox)
else:
flag = check_single_crop([origin_imgs[i % num_batch]],
[result_imgs[i]], bbox)
if not flag:
return False
return True
else:
# bbox has a wrong dimension
return False
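    # check_crop validates crops in two modes: a single bbox (1-D) is compared
    # directly against the sliced originals, while a batch of bboxes (2-D) is
    # checked crop by crop; for ten-crop results every second group is
    # un-flipped with np.flip(..., axis=1) first, matching TenCrop's mirrored
    # copies.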
@staticmethod
def check_flip(origin_imgs, result_imgs, flip_type):
"""Check if the origin_imgs are flipped correctly into result_imgs in
different flip_types."""
n = len(origin_imgs)
h, w, c = origin_imgs[0].shape
if flip_type == 'horizontal':
# yapf: disable
for i in range(n):
for j in range(h):
for k in range(w):
for channel in range(c):
if result_imgs[i][j, k, channel] != origin_imgs[i][j, w - 1 - k, channel]: # noqa:E501
return False
# yapf: enable
else:
# yapf: disable
for i in range(n):
for j in range(h):
for k in range(w):
for channel in range(c):
if result_imgs[i][j, k, channel] != origin_imgs[i][h - 1 - j, k, channel]: # noqa:E501
return False
# yapf: enable
return True
@staticmethod
def check_normalize(origin_imgs, result_imgs, norm_cfg):
"""Check if the origin_imgs are normalized correctly into result_imgs
in a given norm_cfg."""
target_imgs = result_imgs.copy()
target_imgs *= norm_cfg['std']
target_imgs += norm_cfg['mean']
if norm_cfg['to_bgr']:
target_imgs = target_imgs[..., ::-1].copy()
assert_array_almost_equal(origin_imgs, target_imgs, decimal=4)
def test_init_lazy(self):
from mmaction.datasets.pipelines.augmentations import \
_init_lazy_if_proper # noqa: E501
with pytest.raises(AssertionError):
# use lazy operation but "lazy" not in results
result = dict(lazy=dict(), img_shape=[64, 64])
_init_lazy_if_proper(result, False)
lazy_keys = [
'original_shape', 'crop_bbox', 'flip', 'flip_direction',
'interpolation'
]
# 'img_shape' not in results
result = dict(imgs=list(np.random.randn(3, 64, 64, 3)))
_init_lazy_if_proper(result, True)
assert self.check_keys_contain(result, ['imgs', 'lazy', 'img_shape'])
assert self.check_keys_contain(result['lazy'], lazy_keys)
# 'img_shape' in results
result = dict(img_shape=[64, 64])
_init_lazy_if_proper(result, True)
assert self.check_keys_contain(result, ['lazy', 'img_shape'])
assert self.check_keys_contain(result['lazy'], lazy_keys)
# do not use lazy operation
result = dict(img_shape=[64, 64])
_init_lazy_if_proper(result, False)
assert self.check_keys_contain(result, ['img_shape'])
assert 'lazy' not in result
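    # Lazy-mode pattern exercised by the *_lazy tests below: with lazy=True a
    # transform only records its parameters in results['lazy'] (original_shape,
    # crop_bbox, flip, flip_direction, interpolation) and leaves 'imgs'
    # untouched; a later Fuse() call applies the accumulated operations in one
    # pass and removes the 'lazy' entry.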
def test_random_crop(self):
with pytest.raises(TypeError):
# size must be an int
RandomCrop(size=(112, 112))
with pytest.raises(AssertionError):
# "size > height" or "size > width" is not allowed
imgs = list(np.random.rand(2, 224, 341, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=320)
random_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
# General case
imgs = list(np.random.rand(2, 224, 341, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224)
random_crop_result = random_crop(results)
assert self.check_keys_contain(random_crop_result.keys(), target_keys)
assert self.check_crop(imgs, random_crop_result['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert h == w == 224
# Test the case that no need for cropping
imgs = list(np.random.rand(2, 224, 224, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224)
random_crop_result = random_crop(results)
assert self.check_keys_contain(random_crop_result.keys(), target_keys)
assert self.check_crop(imgs, random_crop_result['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert h == w == 224
# Test the one-side-equal case
imgs = list(np.random.rand(2, 224, 225, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224)
random_crop_result = random_crop(results)
assert self.check_keys_contain(random_crop_result.keys(), target_keys)
assert self.check_crop(imgs, random_crop_result['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert h == w == 224
assert repr(random_crop) == (f'{random_crop.__class__.__name__}'
f'(size={224}, lazy={False})')
def test_random_crop_lazy(self):
with pytest.raises(TypeError):
# size must be an int
RandomCrop(size=(112, 112), lazy=True)
with pytest.raises(AssertionError):
# "size > height" or "size > width" is not allowed
imgs = list(np.random.rand(2, 224, 341, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=320, lazy=True)
random_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape', 'lazy']
# General case
imgs = list(np.random.rand(2, 224, 341, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224, lazy=True)
random_crop_result = random_crop(results)
assert self.check_keys_contain(random_crop_result.keys(), target_keys)
assert id(imgs) == id(random_crop_result['imgs'])
random_crop_result_fuse = Fuse()(random_crop_result)
assert 'lazy' not in random_crop_result_fuse
assert self.check_crop(imgs, random_crop_result_fuse['imgs'],
results['crop_bbox'])
h, w = random_crop_result_fuse['img_shape']
assert h == w == 224
# Test the case that no need for cropping
imgs = list(np.random.rand(2, 224, 224, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224, lazy=True)
random_crop_result = random_crop(results)
assert self.check_keys_contain(random_crop_result.keys(), target_keys)
assert id(imgs) == id(random_crop_result['imgs'])
random_crop_result_fuse = Fuse()(random_crop_result)
assert 'lazy' not in random_crop_result_fuse
assert self.check_crop(imgs, random_crop_result_fuse['imgs'],
results['crop_bbox'])
h, w = random_crop_result_fuse['img_shape']
assert h == w == 224
# Test the one-side-equal case
imgs = list(np.random.rand(2, 224, 225, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224, lazy=True)
random_crop_result = random_crop(results)
assert self.check_keys_contain(random_crop_result.keys(), target_keys)
assert id(imgs) == id(random_crop_result['imgs'])
random_crop_result_fuse = Fuse()(random_crop_result)
assert 'lazy' not in random_crop_result_fuse
assert self.check_crop(imgs, random_crop_result_fuse['imgs'],
results['crop_bbox'])
h, w = random_crop_result_fuse['img_shape']
assert h == w == 224
assert repr(random_crop) == (f'{random_crop.__class__.__name__}'
f'(size={224}, lazy={True})')
def test_random_resized_crop(self):
with pytest.raises(TypeError):
# area_range must be a tuple of float
RandomResizedCrop(area_range=0.5)
with pytest.raises(TypeError):
# aspect_ratio_range must be a tuple of float
RandomResizedCrop(area_range=(0.08, 1.0), aspect_ratio_range=0.1)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
# There will be a slight difference because of rounding
eps = 0.01
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
with pytest.raises(AssertionError):
# area_range[0] > area_range[1], which is wrong
random_crop = RandomResizedCrop(area_range=(0.9, 0.7))
random_crop(results)
with pytest.raises(AssertionError):
# 0 > area_range[0] and area_range[1] > 1, which is wrong
random_crop = RandomResizedCrop(aspect_ratio_range=(-0.1, 2.0))
random_crop(results)
random_crop = RandomResizedCrop()
random_crop_result = random_crop(results)
assert self.check_keys_contain(random_crop_result.keys(), target_keys)
assert self.check_crop(imgs, random_crop_result['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert ((0.08 - eps <= h * w / 256 / 341)
and (h * w / 256 / 341 <= 1 + eps))
assert (3. / 4. - eps <= h / w) and (h / w - eps <= 4. / 3.)
assert repr(random_crop) == (f'{random_crop.__class__.__name__}'
f'(area_range={(0.08, 1.0)}, '
f'aspect_ratio_range={(3 / 4, 4 / 3)}, '
f'lazy={False})')
random_crop = RandomResizedCrop(
area_range=(0.9, 0.9), aspect_ratio_range=(10.0, 10.1))
        # Test the fallback case with an infeasible area / aspect ratio setting
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
random_crop_result = random_crop(results)
assert self.check_keys_contain(random_crop_result.keys(), target_keys)
assert self.check_crop(imgs, random_crop_result['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert h == w == 256
def test_random_rescale(self):
with pytest.raises(AssertionError):
# scale_range must be a tuple of int
RandomRescale(scale_range=224)
with pytest.raises(AssertionError):
# scale_range must be a tuple of int
RandomRescale(scale_range=(224.0, 256.0))
with pytest.raises(AssertionError):
# scale_range[0] > scale_range[1], which is wrong
RandomRescale(scale_range=(320, 256))
with pytest.raises(AssertionError):
# scale_range[0] <= 0, which is wrong
RandomRescale(scale_range=(0, 320))
target_keys = ['imgs', 'short_edge', 'img_shape']
# There will be a slight difference because of rounding
eps = 0.01
imgs = list(np.random.rand(2, 256, 340, 3))
results = dict(imgs=imgs, img_shape=(256, 340), modality='RGB')
random_rescale = RandomRescale(scale_range=(300, 400))
random_rescale_result = random_rescale(results)
assert self.check_keys_contain(random_rescale_result.keys(),
target_keys)
h, w = random_rescale_result['img_shape']
# check rescale
assert np.abs(h / 256 - w / 340) < eps
assert 300 / 256 - eps <= h / 256 <= 400 / 256 + eps
assert repr(random_rescale) == (f'{random_rescale.__class__.__name__}'
f'(scale_range={(300, 400)}, '
'interpolation=bilinear)')
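    # RandomRescale keeps the aspect ratio and resizes the short edge to a
    # value drawn from scale_range, which is what the h / 256 ~= w / 340 and
    # 300 <= short edge <= 400 checks above encode (up to rounding, hence eps).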
def test_random_resized_crop_lazy(self):
target_keys = ['imgs', 'crop_bbox', 'img_shape', 'lazy']
# There will be a slight difference because of rounding
eps = 0.01
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
with pytest.raises(AssertionError):
# area_range[0] > area_range[1], which is wrong
random_crop = RandomResizedCrop(area_range=(0.9, 0.7), lazy=True)
random_crop(results)
with pytest.raises(AssertionError):
# 0 > area_range[0] and area_range[1] > 1, which is wrong
random_crop = RandomResizedCrop(
aspect_ratio_range=(-0.1, 2.0), lazy=True)
random_crop(results)
random_crop = RandomResizedCrop(lazy=True)
random_crop_result = random_crop(results)
assert self.check_keys_contain(random_crop_result.keys(), target_keys)
assert id(imgs) == id(random_crop_result['imgs'])
random_crop_result_fuse = Fuse()(random_crop_result)
assert self.check_crop(imgs, random_crop_result_fuse['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert ((0.08 - eps <= h * w / 256 / 341)
and (h * w / 256 / 341 <= 1 + eps))
assert (3. / 4. - eps <= h / w) and (h / w - eps <= 4. / 3.)
assert repr(random_crop) == (f'{random_crop.__class__.__name__}'
f'(area_range={(0.08, 1.0)}, '
f'aspect_ratio_range={(3 / 4, 4 / 3)}, '
f'lazy={True})')
random_crop = RandomResizedCrop(
area_range=(0.9, 0.9), aspect_ratio_range=(10.0, 10.1), lazy=True)
        # Test the fallback case with an infeasible area / aspect ratio setting
imgs = np.random.rand(2, 256, 341, 3)
results = dict(imgs=imgs)
random_crop_result = random_crop(results)
assert self.check_keys_contain(random_crop_result.keys(), target_keys)
assert id(imgs) == id(random_crop_result['imgs'])
random_crop_result_fuse = Fuse()(random_crop_result)
assert self.check_crop(imgs, random_crop_result_fuse['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert h == w == 256
def test_multi_scale_crop(self):
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop(0.5)
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop('224')
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop([224, 224])
with pytest.raises(TypeError):
# scales must be tuple.
MultiScaleCrop(
224, scales=[
1,
])
with pytest.raises(ValueError):
            # num_fixed_crops must be in [5, 13]
MultiScaleCrop(224, num_fixed_crops=6)
target_keys = ['imgs', 'crop_bbox', 'img_shape', 'scales']
# MultiScaleCrop with normal crops.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=False,
max_wh_scale_gap=0)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_results = multi_scale_crop(results)
assert self.check_keys_contain(multi_scale_crop_results.keys(),
target_keys)
assert self.check_crop(imgs, multi_scale_crop_results['imgs'],
multi_scale_crop_results['crop_bbox'])
assert multi_scale_crop_results['img_shape'] in [(256, 256),
(204, 204)]
# MultiScaleCrop with more fixed crops.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=False,
max_wh_scale_gap=0,
num_fixed_crops=13)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_results = multi_scale_crop(results)
assert self.check_keys_contain(multi_scale_crop_results.keys(),
target_keys)
assert self.check_crop(imgs, multi_scale_crop_results['imgs'],
multi_scale_crop_results['crop_bbox'])
assert multi_scale_crop_results['img_shape'] in [(256, 256),
(204, 204)]
# MultiScaleCrop with random crop.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=True,
max_wh_scale_gap=0)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_results = multi_scale_crop(results)
assert self.check_keys_contain(multi_scale_crop_results.keys(),
target_keys)
assert self.check_crop(imgs, multi_scale_crop_results['imgs'],
multi_scale_crop_results['crop_bbox'])
assert (multi_scale_crop_results['img_shape'] in [(256, 256),
(204, 204)])
assert repr(multi_scale_crop) == (
f'{multi_scale_crop.__class__.__name__}'
f'(input_size={(224, 224)}, scales={(1, 0.8)}, '
f'max_wh_scale_gap={0}, random_crop={True}, '
f'num_fixed_crops=5, lazy={False})')
def test_multi_scale_crop_lazy(self):
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop(0.5, lazy=True)
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop('224', lazy=True)
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop([224, 224], lazy=True)
with pytest.raises(TypeError):
# scales must be tuple.
MultiScaleCrop(
224, scales=[
1,
], lazy=True)
with pytest.raises(ValueError):
            # num_fixed_crops must be in [5, 13]
MultiScaleCrop(224, num_fixed_crops=6, lazy=True)
target_keys = ['imgs', 'crop_bbox', 'img_shape', 'scales']
# MultiScaleCrop with normal crops.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=False,
max_wh_scale_gap=0,
lazy=True)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_result = multi_scale_crop(results)
assert id(imgs) == id(multi_scale_crop_result['imgs'])
assert self.check_keys_contain(multi_scale_crop_result.keys(),
target_keys)
multi_scale_crop_result_fuse = Fuse()(multi_scale_crop_result)
assert self.check_crop(imgs, multi_scale_crop_result_fuse['imgs'],
multi_scale_crop_result['crop_bbox'])
assert multi_scale_crop_result_fuse['img_shape'] in [(256, 256),
(204, 204)]
# MultiScaleCrop with more fixed crops.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=False,
max_wh_scale_gap=0,
num_fixed_crops=13,
lazy=True)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_result = multi_scale_crop(results)
assert id(imgs) == id(multi_scale_crop_result['imgs'])
assert self.check_keys_contain(multi_scale_crop_result.keys(),
target_keys)
multi_scale_crop_result_fuse = Fuse()(multi_scale_crop_result)
assert self.check_crop(imgs, multi_scale_crop_result_fuse['imgs'],
multi_scale_crop_result['crop_bbox'])
assert multi_scale_crop_result_fuse['img_shape'] in [(256, 256),
(204, 204)]
# MultiScaleCrop with random crop.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=True,
max_wh_scale_gap=0,
lazy=True)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_result = multi_scale_crop(results)
assert id(imgs) == id(multi_scale_crop_result['imgs'])
assert self.check_keys_contain(multi_scale_crop_result.keys(),
target_keys)
multi_scale_crop_result_fuse = Fuse()(multi_scale_crop_result)
assert self.check_crop(imgs, multi_scale_crop_result_fuse['imgs'],
multi_scale_crop_result['crop_bbox'])
assert (multi_scale_crop_result_fuse['img_shape'] in [(256, 256),
(204, 204)])
assert repr(multi_scale_crop) == (
f'{multi_scale_crop.__class__.__name__}'
f'(input_size={(224, 224)}, scales={(1, 0.8)}, '
f'max_wh_scale_gap={0}, random_crop={True}, '
f'num_fixed_crops={5}, lazy={True})')
def test_resize(self):
with pytest.raises(ValueError):
# scale must be positive
Resize(-0.5)
with pytest.raises(TypeError):
# scale must be tuple of int
Resize('224')
target_keys = [
'imgs', 'img_shape', 'keep_ratio', 'scale_factor', 'modality'
]
# test resize for flow images
imgs = list(np.random.rand(2, 240, 320))
results = dict(imgs=imgs, modality='Flow')
resize = Resize(scale=(160, 80), keep_ratio=False)
resize_results = resize(results)
assert self.check_keys_contain(resize_results.keys(), target_keys)
assert np.all(resize_results['scale_factor'] == np.array(
[.5, 1. / 3.], dtype=np.float32))
assert resize_results['img_shape'] == (80, 160)
# scale with -1 to indicate np.inf
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
resize = Resize(scale=(-1, 256), keep_ratio=True)
resize_results = resize(results)
assert self.check_keys_contain(resize_results.keys(), target_keys)
assert np.all(resize_results['scale_factor'] == np.array(
[341 / 320, 256 / 240], dtype=np.float32))
assert resize_results['img_shape'] == (256, 341)
        # scale with a normal tuple (320, 320) as the exact output size
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
resize = Resize(scale=(320, 320), keep_ratio=False)
resize_results = resize(results)
assert self.check_keys_contain(resize_results.keys(), target_keys)
assert np.all(resize_results['scale_factor'] == np.array(
[1, 320 / 240], dtype=np.float32))
assert resize_results['img_shape'] == (320, 320)
        # scale with a normal tuple (341, 256) as the exact output size
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
resize = Resize(scale=(341, 256), keep_ratio=False)
resize_results = resize(results)
assert self.check_keys_contain(resize_results.keys(), target_keys)
assert np.all(resize_results['scale_factor'] == np.array(
[341 / 320, 256 / 240], dtype=np.float32))
assert resize_results['img_shape'] == (256, 341)
assert repr(resize) == (
resize.__class__.__name__ +
f'(scale={(341, 256)}, keep_ratio={False}, ' +
f'interpolation=bilinear, lazy={False})')
def test_resize_lazy(self):
with pytest.raises(ValueError):
# scale must be positive
Resize(-0.5, lazy=True)
with pytest.raises(TypeError):
# scale must be tuple of int
Resize('224', lazy=True)
target_keys = [
'imgs', 'img_shape', 'keep_ratio', 'scale_factor', 'modality'
]
# scale with -1 to indicate np.inf
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
resize = Resize(scale=(-1, 256), keep_ratio=True, lazy=True)
resize_results = resize(results)
assert id(imgs) == id(resize_results['imgs'])
assert self.check_keys_contain(resize_results.keys(), target_keys)
resize_results_fuse = Fuse()(resize_results)
assert np.all(resize_results_fuse['scale_factor'] == np.array(
[341 / 320, 256 / 240], dtype=np.float32))
assert resize_results_fuse['img_shape'] == (256, 341)
        # scale with a normal tuple (320, 320) as the exact output size
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
resize = Resize(scale=(320, 320), keep_ratio=False, lazy=True)
resize_results = resize(results)
assert id(imgs) == id(resize_results['imgs'])
assert self.check_keys_contain(resize_results.keys(), target_keys)
resize_results_fuse = Fuse()(resize_results)
assert np.all(resize_results_fuse['scale_factor'] == np.array(
[1, 320 / 240], dtype=np.float32))
assert resize_results_fuse['img_shape'] == (320, 320)
        # scale with a normal tuple (341, 256) as the exact output size
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
resize = Resize(scale=(341, 256), keep_ratio=False, lazy=True)
resize_results = resize(results)
assert id(imgs) == id(resize_results['imgs'])
assert self.check_keys_contain(resize_results.keys(), target_keys)
resize_results_fuse = Fuse()(resize_results)
assert np.all(resize_results_fuse['scale_factor'] == np.array(
[341 / 320, 256 / 240], dtype=np.float32))
assert resize_results_fuse['img_shape'] == (256, 341)
assert repr(resize) == (f'{resize.__class__.__name__ }'
f'(scale={(341, 256)}, keep_ratio={False}, ' +
f'interpolation=bilinear, lazy={True})')
def test_flip(self):
with pytest.raises(ValueError):
# direction must be in ['horizontal', 'vertical']
Flip(direction='vertically')
target_keys = ['imgs', 'flip_direction', 'modality']
# do not flip imgs.
imgs = list(np.random.rand(2, 64, 64, 3))
results = dict(imgs=copy.deepcopy(imgs), modality='RGB')
flip = Flip(flip_ratio=0, direction='horizontal')
flip_results = flip(results)
assert self.check_keys_contain(flip_results.keys(), target_keys)
assert np.array_equal(imgs, results['imgs'])
assert id(flip_results['imgs']) == id(results['imgs'])
assert np.shape(flip_results['imgs']) == np.shape(imgs)
# always flip imgs horizontally.
imgs = list(np.random.rand(2, 64, 64, 3))
results = dict(imgs=copy.deepcopy(imgs), modality='RGB')
flip = Flip(flip_ratio=1, direction='horizontal')
flip_results = flip(results)
assert self.check_keys_contain(flip_results.keys(), target_keys)
if flip_results['flip'] is True:
assert self.check_flip(imgs, flip_results['imgs'],
flip_results['flip_direction'])
assert id(flip_results['imgs']) == id(results['imgs'])
assert np.shape(flip_results['imgs']) == np.shape(imgs)
# flip flow images horizontally
imgs = [
np.arange(16).reshape(4, 4).astype(np.float32),
np.arange(16, 32).reshape(4, 4).astype(np.float32)
]
results = dict(imgs=copy.deepcopy(imgs), modality='Flow')
flip = Flip(flip_ratio=1, direction='horizontal')
flip_results = flip(results)
assert self.check_keys_contain(flip_results.keys(), target_keys)
imgs = [x.reshape(4, 4, 1) for x in imgs]
flip_results['imgs'] = [
x.reshape(4, 4, 1) for x in flip_results['imgs']
]
if flip_results['flip'] is True:
assert self.check_flip([imgs[0]],
[mmcv.iminvert(flip_results['imgs'][0])],
flip_results['flip_direction'])
assert self.check_flip([imgs[1]], [flip_results['imgs'][1]],
flip_results['flip_direction'])
assert id(flip_results['imgs']) == id(results['imgs'])
assert np.shape(flip_results['imgs']) == np.shape(imgs)
        # always flip imgs vertically.
imgs = list(np.random.rand(2, 64, 64, 3))
results = dict(imgs=copy.deepcopy(imgs), modality='RGB')
flip = Flip(flip_ratio=1, direction='vertical')
flip_results = flip(results)
assert self.check_keys_contain(flip_results.keys(), target_keys)
if flip_results['flip'] is True:
assert self.check_flip(imgs, flip_results['imgs'],
flip_results['flip_direction'])
assert id(flip_results['imgs']) == id(results['imgs'])
assert np.shape(flip_results['imgs']) == np.shape(imgs)
assert repr(flip) == (f'{flip.__class__.__name__}'
f'(flip_ratio={1}, direction=vertical, '
f'lazy={False})')
def test_flip_lazy(self):
with pytest.raises(ValueError):
Flip(direction='vertically', lazy=True)
target_keys = ['imgs', 'flip_direction', 'modality']
# do not flip imgs.
imgs = list(np.random.rand(2, 64, 64, 3))
imgs_tmp = imgs.copy()
results = dict(imgs=imgs_tmp, modality='RGB')
flip = Flip(flip_ratio=0, direction='horizontal', lazy=True)
flip_results = flip(results)
assert id(imgs_tmp) == id(flip_results['imgs'])
assert self.check_keys_contain(flip_results.keys(), target_keys)
flip_results_fuse = Fuse()(flip_results)
assert np.equal(imgs, results['imgs']).all()
assert id(flip_results['imgs']) == id(results['imgs'])
assert flip_results_fuse['imgs'][0].shape == (64, 64, 3)
# always flip imgs horizontally.
imgs = list(np.random.rand(2, 64, 64, 3))
imgs_tmp = imgs.copy()
results = dict(imgs=imgs_tmp, modality='RGB')
flip = Flip(flip_ratio=1, direction='horizontal', lazy=True)
flip_results = flip(results)
assert id(imgs_tmp) == id(flip_results['imgs'])
assert self.check_keys_contain(flip_results.keys(), target_keys)
flip_results_fuse = Fuse()(flip_results)
assert self.check_flip(imgs, flip_results['imgs'],
flip_results['flip_direction'])
assert id(flip_results['imgs']) == id(results['imgs'])
assert flip_results_fuse['imgs'][0].shape == (64, 64, 3)
        # always flip imgs vertically.
imgs = list(np.random.rand(2, 64, 64, 3))
imgs_tmp = imgs.copy()
results = dict(imgs=imgs_tmp, modality='RGB')
flip = Flip(flip_ratio=1, direction='vertical', lazy=True)
flip_results = flip(results)
assert id(imgs_tmp) == id(flip_results['imgs'])
assert self.check_keys_contain(flip_results.keys(), target_keys)
flip_results_fuse = Fuse()(flip_results)
assert self.check_flip(imgs, flip_results['imgs'],
flip_results['flip_direction'])
assert id(flip_results['imgs']) == id(results['imgs'])
assert flip_results_fuse['imgs'][0].shape == (64, 64, 3)
assert repr(flip) == (f'{flip.__class__.__name__}'
f'(flip_ratio={1}, direction=vertical, '
f'lazy={True})')
def test_normalize(self):
with pytest.raises(TypeError):
# mean must be list, tuple or np.ndarray
Normalize(
dict(mean=[123.675, 116.28, 103.53]), [58.395, 57.12, 57.375])
with pytest.raises(TypeError):
# std must be list, tuple or np.ndarray
Normalize([123.675, 116.28, 103.53],
dict(std=[58.395, 57.12, 57.375]))
target_keys = ['imgs', 'img_norm_cfg', 'modality']
# normalize imgs in RGB format
imgs = list(np.random.rand(2, 240, 320, 3).astype(np.float32))
results = dict(imgs=imgs, modality='RGB')
config = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_bgr=False)
normalize = Normalize(**config)
normalize_results = normalize(results)
assert self.check_keys_contain(normalize_results.keys(), target_keys)
self.check_normalize(imgs, normalize_results['imgs'],
normalize_results['img_norm_cfg'])
# normalize flow imgs
imgs = list(np.random.rand(4, 240, 320).astype(np.float32))
results = dict(imgs=imgs, modality='Flow')
config = dict(mean=[128, 128], std=[128, 128])
normalize = Normalize(**config)
normalize_results = normalize(results)
assert self.check_keys_contain(normalize_results.keys(), target_keys)
assert normalize_results['imgs'].shape == (2, 240, 320, 2)
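        # flow frames are stored interleaved: even indices hold x components,
        # odd indices hold y components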
x_components = np.array(imgs[0::2])
y_components = np.array(imgs[1::2])
x_components = (x_components - config['mean'][0]) / config['std'][0]
y_components = (y_components - config['mean'][1]) / config['std'][1]
result_imgs = np.stack([x_components, y_components], axis=-1)
assert np.all(np.isclose(result_imgs, normalize_results['imgs']))
# normalize imgs in BGR format
imgs = list(np.random.rand(2, 240, 320, 3).astype(np.float32))
results = dict(imgs=imgs, modality='RGB')
config = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_bgr=True)
normalize = Normalize(**config)
normalize_results = normalize(results)
assert self.check_keys_contain(normalize_results.keys(), target_keys)
self.check_normalize(imgs, normalize_results['imgs'],
normalize_results['img_norm_cfg'])
assert normalize.__repr__() == (
normalize.__class__.__name__ +
f'(mean={np.array([123.675, 116.28, 103.53])}, ' +
f'std={np.array([58.395, 57.12, 57.375])}, to_bgr={True}, '
f'adjust_magnitude={False})')
def test_color_jitter(self):
imgs = list(
np.random.randint(0, 255, size=(3, 240, 320, 3), dtype=np.uint8))
results = dict(imgs=imgs)
eig_val = np.array([55.46, 4.794, 1.148], dtype=np.float32)
eig_vec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]],
dtype=np.float32)
color_jitter = ColorJitter()
assert_array_equal(color_jitter.eig_val, eig_val)
assert_array_equal(color_jitter.eig_vec, eig_vec)
assert color_jitter.alpha_std == 0.1
assert color_jitter.color_space_aug is False
color_jitter_results = color_jitter(results)
target_keys = [
'imgs', 'eig_val', 'eig_vec', 'alpha_std', 'color_space_aug'
]
assert self.check_keys_contain(color_jitter_results.keys(),
target_keys)
assert np.shape(color_jitter_results['imgs']) == (3, 240, 320, 3)
assert_array_equal(color_jitter_results['eig_val'], eig_val)
assert_array_equal(color_jitter_results['eig_vec'], eig_vec)
assert color_jitter_results['alpha_std'] == 0.1
assert color_jitter_results['color_space_aug'] is False
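        # ColorJitter with custom eigenvalues/eigenvectors, color_space_aug
        # enabled and a larger alpha_std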
custom_eig_val = np.ones(3, )
custom_eig_vec = np.ones((3, 3))
imgs = list(
np.random.randint(0, 255, size=(3, 240, 320, 3), dtype=np.uint8))
results = dict(imgs=imgs)
custom_color_jitter = ColorJitter(True, 0.5, custom_eig_val,
custom_eig_vec)
        assert_array_equal(custom_color_jitter.eig_val, custom_eig_val)
        assert_array_equal(custom_color_jitter.eig_vec, custom_eig_vec)
assert custom_color_jitter.alpha_std == 0.5
assert custom_color_jitter.color_space_aug is True
custom_color_jitter_results = custom_color_jitter(results)
assert np.shape(custom_color_jitter_results['imgs']) == (3, 240, 320,
3)
assert_array_equal(custom_color_jitter_results['eig_val'],
custom_eig_val)
assert_array_equal(custom_color_jitter_results['eig_vec'],
custom_eig_vec)
assert custom_color_jitter_results['alpha_std'] == 0.5
assert custom_color_jitter_results['color_space_aug'] is True
color_jitter = ColorJitter()
assert repr(color_jitter) == (f'{color_jitter.__class__.__name__}('
f'color_space_aug={False}, '
f'alpha_std={0.1}, '
f'eig_val={eig_val}, '
f'eig_vec={eig_vec})')
def test_center_crop(self):
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop(0.5)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop('224')
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop([224, 224])
# center crop with crop_size 224
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs)
center_crop = CenterCrop(crop_size=224)
center_crop_results = center_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
assert self.check_keys_contain(center_crop_results.keys(), target_keys)
assert self.check_crop(imgs, center_crop_results['imgs'],
center_crop_results['crop_bbox'])
assert np.all(
center_crop_results['crop_bbox'] == np.array([48, 8, 272, 232]))
assert center_crop_results['img_shape'] == (224, 224)
assert repr(center_crop) == (f'{center_crop.__class__.__name__}'
f'(crop_size={(224, 224)}, lazy={False})')
def test_center_crop_lazy(self):
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop(0.5)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop('224')
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop([224, 224])
# center crop with crop_size 224
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs)
center_crop = CenterCrop(crop_size=224, lazy=True)
center_crop_results = center_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
assert self.check_keys_contain(center_crop_results.keys(), target_keys)
center_crop_results_fuse = Fuse()(center_crop_results)
assert self.check_crop(imgs, center_crop_results_fuse['imgs'],
center_crop_results['crop_bbox'])
assert np.all(center_crop_results_fuse['crop_bbox'] == np.array(
[48, 8, 272, 232]))
assert center_crop_results_fuse['img_shape'] == (224, 224)
assert repr(center_crop) == (f'{center_crop.__class__.__name__}'
f'(crop_size={(224, 224)}, lazy={True})')
def test_three_crop(self):
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
ThreeCrop(0.5)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
ThreeCrop('224')
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
ThreeCrop([224, 224])
# three crop with crop_size 120
imgs = list(np.random.rand(2, 240, 120, 3))
results = dict(imgs=imgs)
three_crop = ThreeCrop(crop_size=120)
three_crop_results = three_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
assert self.check_keys_contain(three_crop_results.keys(), target_keys)
assert self.check_crop(imgs, three_crop_results['imgs'],
three_crop_results['crop_bbox'], 3)
assert three_crop_results['img_shape'] == (120, 120)
# three crop with crop_size 224
imgs = list(np.random.rand(2, 224, 224, 3))
results = dict(imgs=imgs)
three_crop = ThreeCrop(crop_size=224)
three_crop_results = three_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
assert self.check_keys_contain(three_crop_results.keys(), target_keys)
assert self.check_crop(imgs, three_crop_results['imgs'],
three_crop_results['crop_bbox'], 3)
assert three_crop_results['img_shape'] == (224, 224)
assert repr(three_crop) == (f'{three_crop.__class__.__name__}'
f'(crop_size={(224, 224)})')
def test_ten_crop(self):
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
TenCrop(0.5)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
TenCrop('224')
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
TenCrop([224, 224])
# ten crop with crop_size 256
imgs = list(np.random.rand(2, 256, 256, 3))
results = dict(imgs=imgs)
ten_crop = TenCrop(crop_size=224)
ten_crop_results = ten_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
assert self.check_keys_contain(ten_crop_results.keys(), target_keys)
assert self.check_crop(imgs, ten_crop_results['imgs'],
ten_crop_results['crop_bbox'], 10)
assert ten_crop_results['img_shape'] == (224, 224)
assert repr(ten_crop) == (f'{ten_crop.__class__.__name__}'
f'(crop_size={(224, 224)})')
def test_multi_group_crop(self):
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
MultiGroupCrop(0.5, 1)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
MultiGroupCrop('224', 1)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
MultiGroupCrop([224, 224], 1)
with pytest.raises(TypeError):
# groups must be int
MultiGroupCrop(224, '1')
with pytest.raises(ValueError):
# groups must be positive
MultiGroupCrop(224, 0)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
# multi_group_crop with crop_size 224, groups 3
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
multi_group_crop = MultiGroupCrop(224, 3)
multi_group_crop_result = multi_group_crop(results)
assert self.check_keys_contain(multi_group_crop_result.keys(),
target_keys)
assert self.check_crop(imgs, multi_group_crop_result['imgs'],
multi_group_crop_result['crop_bbox'],
multi_group_crop.groups)
assert multi_group_crop_result['img_shape'] == (224, 224)
assert repr(multi_group_crop) == (
f'{multi_group_crop.__class__.__name__}'
f'(crop_size={(224, 224)}, groups={3})')
def test_audio_amplify(self):
target_keys = ['audios', 'amplify_ratio']
with pytest.raises(TypeError):
# ratio should be float
AudioAmplify(1)
audio = (np.random.rand(8, ))
results = dict(audios=audio)
amplifier = AudioAmplify(1.5)
results = amplifier(results)
assert self.check_keys_contain(results.keys(), target_keys)
assert repr(amplifier) == (f'{amplifier.__class__.__name__}'
f'(ratio={amplifier.ratio})')
def test_melspectrogram(self):
target_keys = ['audios']
with pytest.raises(TypeError):
            # window_size should be int
MelSpectrogram(window_size=12.5)
audio = (np.random.rand(1, 160000))
# test padding
results = dict(audios=audio, sample_rate=16000)
results['num_clips'] = 1
results['sample_rate'] = 16000
mel = MelSpectrogram()
results = mel(results)
assert self.check_keys_contain(results.keys(), target_keys)
# test truncating
audio = (np.random.rand(1, 160000))
results = dict(audios=audio, sample_rate=16000)
results['num_clips'] = 1
results['sample_rate'] = 16000
mel = MelSpectrogram(fixed_length=1)
results = mel(results)
assert self.check_keys_contain(results.keys(), target_keys)
assert repr(mel) == (f'{mel.__class__.__name__}'
f'(window_size={mel.window_size}), '
f'step_size={mel.step_size}, '
f'n_mels={mel.n_mels}, '
f'fixed_length={mel.fixed_length})')
def test_random_scale(self):
scales = ((200, 64), (250, 80))
with pytest.raises(ValueError):
RandomScale(scales, 'unsupport')
with pytest.raises(ValueError):
random_scale = RandomScale([(800, 256), (1000, 320), (800, 320)])
random_scale({})
imgs = list(np.random.rand(2, 340, 256, 3))
results = dict(imgs=imgs, img_shape=(340, 256))
results_ = copy.deepcopy(results)
random_scale_range = RandomScale(scales)
results_ = random_scale_range(results_)
assert 200 <= results_['scale'][0] <= 250
assert 64 <= results_['scale'][1] <= 80
results_ = copy.deepcopy(results)
random_scale_value = RandomScale(scales, 'value')
results_ = random_scale_value(results_)
assert results_['scale'] in scales
random_scale_single = RandomScale([(200, 64)])
results_ = copy.deepcopy(results)
results_ = random_scale_single(results_)
assert results_['scale'] == (200, 64)
assert repr(random_scale_range) == (
f'{random_scale_range.__class__.__name__}'
f'(scales={((200, 64), (250, 80))}, '
'mode=range)')
def test_box_rescale(self):
target_keys = ['img_shape', 'scale_factor', 'ann', 'proposals']
results = dict(
img_shape=(520, 480),
scale_factor=(0.7, 0.8),
proposals=np.array([[0.011, 0.157, 0.655, 0.983, 0.998163]]),
ann=dict(entity_boxes=np.array([[0.031, 0.162, 0.67, 0.995]])))
with pytest.raises(AssertionError):
box_scale = EntityBoxRescale()
results_ = copy.deepcopy(results)
results_['proposals'] = np.array([[0.011, 0.157, 0.655]])
box_scale(results_)
box_scale = EntityBoxRescale()
results_ = copy.deepcopy(results)
results_ = box_scale(results_)
        assert self.check_keys_contain(results_.keys(),
                                       target_keys + ['scores'])
assert_array_almost_equal(
results_['proposals'],
np.array([[3.696000, 65.311999, 220.079995, 408.928002]]))
assert_array_almost_equal(
results_['ann']['entity_boxes'],
np.array([[10.416000, 67.391998, 225.120004, 413.920019]]))
assert results_['scores'] == np.array([0.998163], dtype=np.float32)
results_ = copy.deepcopy(results)
results_['proposals'] = None
results_ = box_scale(results_)
        assert self.check_keys_contain(results_.keys(), target_keys)
assert results_['proposals'] is None
def test_box_crop(self):
target_keys = ['ann', 'proposals', 'crop_bbox']
results = dict(
proposals=np.array([[3.696000, 65.311999, 220.079995,
408.928002]]),
crop_bbox=[13, 75, 200, 380],
ann=dict(
entity_boxes=np.array(
[[10.416000, 67.391998, 225.120004, 413.920019]])))
box_crop = EntityBoxCrop()
results_ = copy.deepcopy(results)
results_ = box_crop(results_)
        assert self.check_keys_contain(results_.keys(), target_keys)
assert_array_almost_equal(
results_['ann']['entity_boxes'],
np.array([[-2.584, -7.608002, 212.120004, 338.920019]]))
assert_array_almost_equal(
results_['proposals'],
np.array([[-9.304, -9.688001, 207.079995, 333.928002]]))
results_ = copy.deepcopy(results)
results_['proposals'] = None
results_ = box_crop(results_)
assert results_['proposals'] is None
def test_box_flip(self):
target_keys = ['ann', 'proposals', 'img_shape']
results = dict(
proposals=np.array([[-9.304, -9.688001, 207.079995, 333.928002]]),
img_shape=(520, 480),
ann=dict(
entity_boxes=np.array(
[[-2.584, -7.608002, 212.120004, 338.920019]])))
with pytest.raises(ValueError):
EntityBoxFlip(0, 'unsupport')
box_flip = EntityBoxFlip(flip_ratio=1)
results_ = copy.deepcopy(results)
results_ = box_flip(results_)
        assert self.check_keys_contain(results_.keys(), target_keys)
assert_array_almost_equal(
results_['ann']['entity_boxes'],
np.array([[266.879996, -7.608002, 481.584, 338.920019]]))
assert_array_almost_equal(
results_['proposals'],
np.array([[271.920005, -9.688001, 488.304, 333.928002]]))
box_flip = EntityBoxFlip(flip_ratio=1, direction='vertical')
results_ = copy.deepcopy(results)
results_ = box_flip(results_)
        assert self.check_keys_contain(results_.keys(), target_keys)
assert_array_almost_equal(
results_['ann']['entity_boxes'],
np.array([[-2.584, 180.079981, 212.120004, 526.608002]]))
assert_array_almost_equal(
results_['proposals'],
np.array([[-9.304, 185.071998, 207.079995, 528.688001]]))
box_flip = EntityBoxFlip()
results_ = copy.deepcopy(results)
results_['proposals'] = None
results_ = box_flip(results_)
assert results_['proposals'] is None
assert repr(box_flip) == (f'{box_flip.__class__.__name__}'
'(flip_ratio=0.5, direction=horizontal)')
def test_box_clip(self):
target_keys = ['ann', 'proposals', 'img_shape']
results = dict(
proposals=np.array([[-9.304, -9.688001, 207.079995, 333.928002]]),
img_shape=(335, 210),
ann=dict(
entity_boxes=np.array(
[[-2.584, -7.608002, 212.120004, 338.920019]])))
box_clip = EntityBoxClip()
results_ = copy.deepcopy(results)
results_ = box_clip(results_)
        assert self.check_keys_contain(results_.keys(), target_keys)
assert_array_equal(results_['ann']['entity_boxes'],
np.array([[0., 0., 209., 334.]]))
assert_array_equal(results_['proposals'],
np.array([[0., 0., 207.079995, 333.928002]]))
results_ = copy.deepcopy(results)
results_['proposals'] = None
results_ = box_clip(results_)
assert results_['proposals'] is None
def test_box_pad(self):
target_keys = ['ann', 'proposals', 'img_shape']
results = dict(
proposals=np.array([[-9.304, -9.688001, 207.079995, 333.928002],
[-2.584, -7.608002, 212.120004, 338.920019]]),
img_shape=(335, 210),
ann=dict(
entity_boxes=np.array([[
-2.584, -7.608002, 212.120004, 338.920019
], [-9.304, -9.688001, 207.079995, 333.928002]])))
box_pad_none = EntityBoxPad()
results_ = copy.deepcopy(results)
results_ = box_pad_none(results_)
        assert self.check_keys_contain(results_.keys(), target_keys)
assert_array_equal(results_['proposals'], results['proposals'])
assert_array_equal(results_['ann']['entity_boxes'],
results['ann']['entity_boxes'])
box_pad = EntityBoxPad(3)
results_ = copy.deepcopy(results)
results_ = box_pad(results_)
        assert self.check_keys_contain(results_.keys(), target_keys)
assert_array_equal(
results_['proposals'],
np.array([[-9.304, -9.688001, 207.079995, 333.928002],
[-2.584, -7.608002, 212.120004, 338.920019],
[0., 0., 0., 0.]],
dtype=np.float32))
assert_array_equal(
results_['ann']['entity_boxes'],
np.array([[-2.584, -7.608002, 212.120004, 338.920019],
[-9.304, -9.688001, 207.079995, 333.928002],
[0., 0., 0., 0.]],
dtype=np.float32))
results_ = copy.deepcopy(results)
results_['proposals'] = None
results_ = box_pad(results_)
assert results_['proposals'] is None
assert repr(box_pad) == (f'{box_pad.__class__.__name__}'
'(max_num_gts=3)')
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_data/test_augmentations.py |
import os.path as osp
import tempfile
import unittest.mock as mock
from unittest.mock import MagicMock, patch
import mmcv
import pytest
import torch
import torch.nn as nn
from mmcv.runner import EpochBasedRunner, build_optimizer
from mmcv.utils import get_logger
from torch.utils.data import DataLoader, Dataset
from mmaction.core import DistEpochEvalHook, EpochEvalHook
class ExampleDataset(Dataset):
def __init__(self):
self.index = 0
self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6]
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1]))
return results
def __len__(self):
return 1
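    # `evaluate` is replaced by an autospec mock so tests can assert how the
    # eval hook calls it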
@mock.create_autospec
def evaluate(self, results, logger=None):
pass
class EvalDataset(ExampleDataset):
def evaluate(self, results, logger=None):
acc = self.eval_result[self.index]
output = dict(acc=acc, index=self.index, score=acc)
self.index += 1
return output
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Linear(1, 1)
self.test_cfg = None
def forward(self, imgs, return_loss=False):
return imgs
def train_step(self, data_batch, optimizer, **kwargs):
outputs = {
'loss': 0.5,
'log_vars': {
'accuracy': 0.98
},
'num_samples': 1
}
return outputs
def _build_demo_runner():
class Model(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, x):
return self.linear(x)
def train_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def val_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
model = Model()
tmp_dir = tempfile.mkdtemp()
runner = EpochBasedRunner(
model=model, work_dir=tmp_dir, logger=get_logger('demo'))
return runner
def test_eval_hook():
with pytest.raises(TypeError):
# `save_best` should be a boolean
        test_dataset = ExampleDataset()
data_loader = DataLoader(
test_dataset,
batch_size=1,
sampler=None,
num_workers=0,
shuffle=False)
EpochEvalHook(data_loader, save_best='True')
with pytest.raises(TypeError):
# dataloader must be a pytorch DataLoader
        test_dataset = ExampleDataset()
data_loader = [
DataLoader(
test_dataset,
batch_size=1,
sampler=None,
                num_workers=0,
shuffle=False)
]
EpochEvalHook(data_loader)
with pytest.raises(ValueError):
# when `save_best` is True, `key_indicator` should not be None
        test_dataset = ExampleDataset()
data_loader = DataLoader(
test_dataset,
batch_size=1,
sampler=None,
num_workers=0,
shuffle=False)
EpochEvalHook(data_loader, key_indicator=None)
with pytest.raises(KeyError):
# rule must be in keys of rule_map
        test_dataset = ExampleDataset()
data_loader = DataLoader(
test_dataset,
batch_size=1,
sampler=None,
num_workers=0,
shuffle=False)
EpochEvalHook(data_loader, save_best=False, rule='unsupport')
with pytest.raises(ValueError):
# key_indicator must be valid when rule_map is None
        test_dataset = ExampleDataset()
data_loader = DataLoader(
test_dataset,
batch_size=1,
sampler=None,
num_workers=0,
shuffle=False)
EpochEvalHook(data_loader, key_indicator='unsupport')
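    # save_best=False: training should finish without writing best.json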
optimizer_cfg = dict(
type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset, batch_size=1)
model = ExampleModel()
optimizer = build_optimizer(model, optimizer_cfg)
data_loader = DataLoader(test_dataset, batch_size=1)
eval_hook = EpochEvalHook(data_loader, save_best=False)
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 1)
test_dataset.evaluate.assert_called_with(
test_dataset, [torch.tensor([1])], logger=runner.logger)
best_json_path = osp.join(tmpdir, 'best.json')
assert not osp.exists(best_json_path)
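    # save_best with key_indicator='acc' (default rule 'greater'): the best
    # acc (7) is reached at epoch 4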
loader = DataLoader(EvalDataset(), batch_size=1)
model = ExampleModel()
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EpochEvalHook(
data_loader, interval=1, save_best=True, key_indicator='acc')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
best_json_path = osp.join(tmpdir, 'best.json')
best_json = mmcv.load(best_json_path)
real_path = osp.join(tmpdir, 'epoch_4.pth')
assert best_json['best_ckpt'] == osp.realpath(real_path)
assert best_json['best_score'] == 7
assert best_json['key_indicator'] == 'acc'
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EpochEvalHook(
data_loader,
interval=1,
save_best=True,
key_indicator='score',
rule='greater')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
best_json_path = osp.join(tmpdir, 'best.json')
best_json = mmcv.load(best_json_path)
real_path = osp.join(tmpdir, 'epoch_4.pth')
assert best_json['best_ckpt'] == osp.realpath(real_path)
assert best_json['best_score'] == 7
assert best_json['key_indicator'] == 'score'
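    # rule='less': the lowest acc (-3, reached at epoch 6) counts as the best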
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EpochEvalHook(data_loader, rule='less', key_indicator='acc')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
best_json_path = osp.join(tmpdir, 'best.json')
best_json = mmcv.load(best_json_path)
real_path = osp.join(tmpdir, 'epoch_6.pth')
assert best_json['best_ckpt'] == osp.realpath(real_path)
assert best_json['best_score'] == -3
assert best_json['key_indicator'] == 'acc'
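    # short 2-epoch run, then resume from latest.pth and train to epoch 8;
    # best.json is checked after each run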
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EpochEvalHook(data_loader, key_indicator='acc')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 2)
best_json_path = osp.join(tmpdir, 'best.json')
best_json = mmcv.load(best_json_path)
real_path = osp.join(tmpdir, 'epoch_2.pth')
assert best_json['best_ckpt'] == osp.realpath(real_path)
assert best_json['best_score'] == 4
assert best_json['key_indicator'] == 'acc'
resume_from = osp.join(tmpdir, 'latest.pth')
loader = DataLoader(ExampleDataset(), batch_size=1)
eval_hook = EpochEvalHook(data_loader, key_indicator='acc')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.resume(resume_from)
runner.run([loader], [('train', 1)], 8)
best_json_path = osp.join(tmpdir, 'best.json')
best_json = mmcv.load(best_json_path)
real_path = osp.join(tmpdir, 'epoch_4.pth')
assert best_json['best_ckpt'] == osp.realpath(real_path)
assert best_json['best_score'] == 7
assert best_json['key_indicator'] == 'acc'
@patch('mmaction.apis.single_gpu_test', MagicMock)
@patch('mmaction.apis.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EpochEvalHookParam',
(EpochEvalHook, DistEpochEvalHook))
def test_start_param(EpochEvalHookParam):
# create dummy data
dataloader = DataLoader(torch.ones((5, 2)))
# 0.1. dataloader is not a DataLoader object
with pytest.raises(TypeError):
EpochEvalHookParam(dataloader=MagicMock(), interval=-1)
# 0.2. negative interval
with pytest.raises(ValueError):
EpochEvalHookParam(dataloader, interval=-1)
# 1. start=None, interval=1: perform evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EpochEvalHookParam(dataloader, interval=1, save_best=False)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2
# 2. start=1, interval=1: perform evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EpochEvalHookParam(
dataloader, start=1, interval=1, save_best=False)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2
# 3. start=None, interval=2: perform evaluation after epoch 2, 4, 6, etc
runner = _build_demo_runner()
evalhook = EpochEvalHookParam(dataloader, interval=2, save_best=False)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 1 # after epoch 2
# 4. start=1, interval=2: perform evaluation after epoch 1, 3, 5, etc
runner = _build_demo_runner()
evalhook = EpochEvalHookParam(
dataloader, start=1, interval=2, save_best=False)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 3
# 5. start=0/negative, interval=1: perform evaluation after each epoch and
# before epoch 1.
runner = _build_demo_runner()
evalhook = EpochEvalHookParam(dataloader, start=0, save_best=False)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2
runner = _build_demo_runner()
with pytest.warns(UserWarning):
evalhook = EpochEvalHookParam(dataloader, start=-2, save_best=False)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2
# 6. resuming from epoch i, start = x (x<=i), interval =1: perform
# evaluation after each epoch and before the first epoch.
runner = _build_demo_runner()
evalhook = EpochEvalHookParam(dataloader, start=1, save_best=False)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner._epoch = 2
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # before & after epoch 3
# 7. resuming from epoch i, start = i+1/None, interval =1: perform
# evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EpochEvalHookParam(dataloader, start=2, save_best=False)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner._epoch = 1
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # after epoch 2 & 3
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_eval_hook.py |
import torch
import torch.nn as nn
from mmcv.runner import build_optimizer_constructor
class SubModel(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(2, 2, kernel_size=1, groups=2)
self.gn = nn.GroupNorm(2, 2)
self.fc = nn.Linear(2, 2)
self.param1 = nn.Parameter(torch.ones(1))
def forward(self, x):
return x
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.param1 = nn.Parameter(torch.ones(1))
self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(4, 2, kernel_size=1)
self.bn = nn.BatchNorm2d(2)
self.sub = SubModel()
self.fc = nn.Linear(2, 1)
def forward(self, x):
return x
class PseudoDataParallel(nn.Module):
def __init__(self):
super().__init__()
self.module = ExampleModel()
def forward(self, x):
return x
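# hyper-parameters shared by all optimizer-constructor checks below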
base_lr = 0.01
base_wd = 0.0001
momentum = 0.9
def check_optimizer(optimizer,
model,
prefix='',
bias_lr_mult=1,
bias_decay_mult=1,
norm_decay_mult=1,
dwconv_decay_mult=1):
param_groups = optimizer.param_groups
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
model_parameters = list(model.parameters())
assert len(param_groups) == len(model_parameters)
for i, param in enumerate(model_parameters):
param_group = param_groups[i]
assert torch.equal(param_group['params'][0], param)
assert param_group['momentum'] == momentum
# param1
param1 = param_groups[0]
assert param1['lr'] == base_lr
assert param1['weight_decay'] == base_wd
# conv1.weight
conv1_weight = param_groups[1]
assert conv1_weight['lr'] == base_lr
assert conv1_weight['weight_decay'] == base_wd
# conv2.weight
conv2_weight = param_groups[2]
assert conv2_weight['lr'] == base_lr
assert conv2_weight['weight_decay'] == base_wd
# conv2.bias
conv2_bias = param_groups[3]
assert conv2_bias['lr'] == base_lr * bias_lr_mult
assert conv2_bias['weight_decay'] == base_wd * bias_decay_mult
# bn.weight
bn_weight = param_groups[4]
assert bn_weight['lr'] == base_lr
assert bn_weight['weight_decay'] == base_wd * norm_decay_mult
# bn.bias
bn_bias = param_groups[5]
assert bn_bias['lr'] == base_lr
assert bn_bias['weight_decay'] == base_wd * norm_decay_mult
# sub.param1
sub_param1 = param_groups[6]
assert sub_param1['lr'] == base_lr
assert sub_param1['weight_decay'] == base_wd
# sub.conv1.weight
sub_conv1_weight = param_groups[7]
assert sub_conv1_weight['lr'] == base_lr
assert sub_conv1_weight['weight_decay'] == base_wd * dwconv_decay_mult
# sub.conv1.bias
sub_conv1_bias = param_groups[8]
assert sub_conv1_bias['lr'] == base_lr * bias_lr_mult
assert sub_conv1_bias['weight_decay'] == base_wd * dwconv_decay_mult
# sub.gn.weight
sub_gn_weight = param_groups[9]
assert sub_gn_weight['lr'] == base_lr
assert sub_gn_weight['weight_decay'] == base_wd * norm_decay_mult
# sub.gn.bias
sub_gn_bias = param_groups[10]
assert sub_gn_bias['lr'] == base_lr
assert sub_gn_bias['weight_decay'] == base_wd * norm_decay_mult
# sub.fc1.weight
sub_fc_weight = param_groups[11]
assert sub_fc_weight['lr'] == base_lr
assert sub_fc_weight['weight_decay'] == base_wd
# sub.fc1.bias
sub_fc_bias = param_groups[12]
assert sub_fc_bias['lr'] == base_lr * bias_lr_mult
assert sub_fc_bias['weight_decay'] == base_wd * bias_decay_mult
# fc1.weight
fc_weight = param_groups[13]
assert fc_weight['lr'] == base_lr
assert fc_weight['weight_decay'] == base_wd
# fc1.bias
fc_bias = param_groups[14]
assert fc_bias['lr'] == base_lr * bias_lr_mult
assert fc_bias['weight_decay'] == base_wd * bias_decay_mult
def check_tsm_optimizer(optimizer, model, fc_lr5=True):
param_groups = optimizer.param_groups
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
model_parameters = list(model.parameters())
# first_conv_weight
first_conv_weight = param_groups[0]
assert torch.equal(first_conv_weight['params'][0], model_parameters[1])
assert first_conv_weight['lr'] == base_lr
assert first_conv_weight['weight_decay'] == base_wd
# first_conv_bias
first_conv_bias = param_groups[1]
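    # conv1 is defined with bias=False, so this parameter group stays empty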
assert first_conv_bias['params'] == []
assert first_conv_bias['lr'] == base_lr * 2
assert first_conv_bias['weight_decay'] == 0
# normal_weight
normal_weight = param_groups[2]
assert torch.equal(normal_weight['params'][0], model_parameters[2])
assert torch.equal(normal_weight['params'][1], model_parameters[7])
assert normal_weight['lr'] == base_lr
assert normal_weight['weight_decay'] == base_wd
# normal_bias
normal_bias = param_groups[3]
assert torch.equal(normal_bias['params'][0], model_parameters[3])
assert torch.equal(normal_bias['params'][1], model_parameters[8])
assert normal_bias['lr'] == base_lr * 2
assert normal_bias['weight_decay'] == 0
# bn
bn = param_groups[4]
assert torch.equal(bn['params'][0], model_parameters[4])
assert torch.equal(bn['params'][1], model_parameters[5])
assert torch.equal(bn['params'][2], model_parameters[9])
assert torch.equal(bn['params'][3], model_parameters[10])
assert bn['lr'] == base_lr
assert bn['weight_decay'] == 0
# normal linear weight
assert torch.equal(normal_weight['params'][2], model_parameters[11])
# normal linear bias
assert torch.equal(normal_bias['params'][2], model_parameters[12])
# fc_lr5
lr5_weight = param_groups[5]
lr10_bias = param_groups[6]
assert lr5_weight['lr'] == base_lr * 5
assert lr5_weight['weight_decay'] == base_wd
assert lr10_bias['lr'] == base_lr * 10
assert lr10_bias['weight_decay'] == 0
if fc_lr5:
# lr5_weight
assert torch.equal(lr5_weight['params'][0], model_parameters[13])
# lr10_bias
assert torch.equal(lr10_bias['params'][0], model_parameters[14])
else:
# lr5_weight
assert lr5_weight['params'] == []
# lr10_bias
assert lr10_bias['params'] == []
assert torch.equal(normal_weight['params'][3], model_parameters[13])
assert torch.equal(normal_bias['params'][3], model_parameters[14])
def test_tsm_optimizer_constructor():
model = ExampleModel()
optimizer_cfg = dict(
type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
# fc_lr5 is True
paramwise_cfg = dict(fc_lr5=True)
optim_constructor_cfg = dict(
type='TSMOptimizerConstructor',
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg)
optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
optimizer = optim_constructor(model)
check_tsm_optimizer(optimizer, model, **paramwise_cfg)
# fc_lr5 is False
paramwise_cfg = dict(fc_lr5=False)
optim_constructor_cfg = dict(
type='TSMOptimizerConstructor',
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg)
optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
optimizer = optim_constructor(model)
check_tsm_optimizer(optimizer, model, **paramwise_cfg)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_optimizer.py |
import tempfile
import pytest
import torch
import torch.nn as nn
from mmcv import Config
from torch.utils.data import Dataset
from mmaction.apis import train_model
from mmaction.datasets.registry import DATASETS
@DATASETS.register_module()
class ExampleDataset(Dataset):
def __init__(self, test_mode=False):
self.test_mode = test_mode
def evaluate(self, results, logger=None):
eval_results = dict()
eval_results['acc'] = 1
return eval_results
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1]))
return results
def __len__(self):
return 1
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.test_cfg = None
self.conv1 = nn.Conv2d(3, 8, kernel_size=1)
self.norm1 = nn.BatchNorm1d(2)
def forward(self, imgs, return_loss=False):
self.norm1(torch.rand(3, 2).cuda())
losses = dict()
losses['test_loss'] = torch.tensor([0.5], requires_grad=True)
return losses
def train_step(self, data_batch, optimizer, **kwargs):
imgs = data_batch['imgs']
losses = self.forward(imgs, True)
loss = torch.tensor([0.5], requires_grad=True)
outputs = dict(loss=loss, log_vars=losses, num_samples=3)
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
imgs = data_batch['imgs']
self.forward(imgs, False)
outputs = dict(results=0.5)
return outputs
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_train_model():
model = ExampleModel()
dataset = ExampleDataset()
datasets = [ExampleDataset(), ExampleDataset()]
cfg = dict(
seed=0,
gpus=1,
gpu_ids=[0],
resume_from=None,
load_from=None,
workflow=[('train', 1)],
total_epochs=5,
evaluation=dict(interval=1, key_indicator='acc'),
data=dict(
videos_per_gpu=1,
workers_per_gpu=0,
val=dict(type='ExampleDataset')),
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001),
optimizer_config=dict(grad_clip=dict(max_norm=40, norm_type=2)),
lr_config=dict(policy='step', step=[40, 80]),
omnisource=False,
checkpoint_config=dict(interval=1),
log_level='INFO',
log_config=dict(interval=20, hooks=[dict(type='TextLoggerHook')]))
with tempfile.TemporaryDirectory() as tmpdir:
# normal train
cfg['work_dir'] = tmpdir
config = Config(cfg)
train_model(model, dataset, config)
with tempfile.TemporaryDirectory() as tmpdir:
# train with validation
cfg['work_dir'] = tmpdir
config = Config(cfg)
train_model(model, dataset, config, validate=True)
with tempfile.TemporaryDirectory() as tmpdir:
# train with Fp16OptimizerHook
cfg['work_dir'] = tmpdir
cfg['fp16'] = dict(loss_scale=512.)
config = Config(cfg)
model.fp16_enabled = None
train_model(model, dataset, config)
with tempfile.TemporaryDirectory() as tmpdir:
cfg['work_dir'] = tmpdir
cfg['omnisource'] = True
config = Config(cfg)
train_model(model, datasets, config)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_train.py |
import sys
from unittest.mock import MagicMock, Mock, patch
import pytest
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from mmaction.apis.test import (collect_results_cpu, multi_gpu_test,
single_gpu_test)
class OldStyleModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 3, 1)
self.cnt = 0
def forward(self, return_loss, **kwargs):
result = [self.cnt]
self.cnt += 1
return result
class Model(OldStyleModel):
def train_step(self):
pass
def val_step(self):
pass
class ExampleDataset(Dataset):
def __init__(self):
self.index = 0
self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6]
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1]))
return results
def __len__(self):
return len(self.eval_result)
def test_single_gpu_test():
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset, batch_size=1)
model = Model()
results = single_gpu_test(model, loader)
assert results == list(range(8))
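# Substitute for torch.tensor on CPU-only machines: the CUDA device requested
# inside collect_results_cpu is silently mapped to CPU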
def mock_tensor_without_cuda(*args, **kwargs):
if 'device' not in kwargs:
return torch.Tensor(*args)
return torch.IntTensor(*args, device='cpu')
@patch('mmaction.apis.test.collect_results_gpu',
Mock(return_value=list(range(8))))
@patch('mmaction.apis.test.collect_results_cpu',
Mock(return_value=list(range(8))))
def test_multi_gpu_test():
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset, batch_size=1)
model = Model()
results = multi_gpu_test(model, loader)
assert results == list(range(8))
results = multi_gpu_test(model, loader, gpu_collect=False)
assert results == list(range(8))
@patch('mmcv.runner.get_dist_info', Mock(return_value=(0, 1)))
@patch('torch.distributed.broadcast', MagicMock)
@patch('torch.distributed.barrier', Mock)
@pytest.mark.skipif(
sys.version_info[:2] == (3, 8), reason='Not for python 3.8')
def test_collect_results_cpu():
def content_for_unittest():
results_part = list(range(8))
size = 8
results = collect_results_cpu(results_part, size)
assert results == list(range(8))
results = collect_results_cpu(results_part, size, 'unittest')
assert results == list(range(8))
if not torch.cuda.is_available():
with patch(
'torch.full',
Mock(
return_value=torch.full(
(512, ), 32, dtype=torch.uint8, device='cpu'))):
with patch('torch.tensor', mock_tensor_without_cuda):
content_for_unittest()
else:
content_for_unittest()
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_apis_test.py |
import logging
import shutil
import sys
import tempfile
from unittest.mock import MagicMock, call
import torch
import torch.nn as nn
from mmcv.runner import IterTimerHook, PaviLoggerHook, build_runner
from torch.utils.data import DataLoader
def test_tin_lr_updater_hook():
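    # mock the optional `pavi` package so PaviLoggerHook can be used without
    # it being installed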
sys.modules['pavi'] = MagicMock()
loader = DataLoader(torch.ones((10, 2)))
runner = _build_demo_runner()
hook_cfg = dict(type='TINLrUpdaterHook', min_lr=0.1)
runner.register_hook_from_cfg(hook_cfg)
hook_cfg = dict(
type='TINLrUpdaterHook',
by_epoch=False,
min_lr=0.1,
warmup='exp',
warmup_iters=2,
warmup_ratio=0.9)
runner.register_hook_from_cfg(hook_cfg)
runner.register_hook_from_cfg(dict(type='IterTimerHook'))
runner.register_hook(IterTimerHook())
hook_cfg = dict(
type='TINLrUpdaterHook',
by_epoch=False,
min_lr=0.1,
warmup='constant',
warmup_iters=2,
warmup_ratio=0.9)
runner.register_hook_from_cfg(hook_cfg)
runner.register_hook_from_cfg(dict(type='IterTimerHook'))
runner.register_hook(IterTimerHook())
hook_cfg = dict(
type='TINLrUpdaterHook',
by_epoch=False,
min_lr=0.1,
warmup='linear',
warmup_iters=2,
warmup_ratio=0.9)
runner.register_hook_from_cfg(hook_cfg)
runner.register_hook_from_cfg(dict(type='IterTimerHook'))
runner.register_hook(IterTimerHook())
# add pavi hook
hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
runner.register_hook(hook)
runner.run([loader], [('train', 1)])
shutil.rmtree(runner.work_dir)
assert hasattr(hook, 'writer')
calls = [
call('train', {
'learning_rate': 0.028544155877284292,
'momentum': 0.95
}, 1),
call('train', {
'learning_rate': 0.04469266270539641,
'momentum': 0.95
}, 6),
call('train', {
'learning_rate': 0.09695518130045147,
'momentum': 0.95
}, 10)
]
hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
def _build_demo_runner(runner_type='EpochBasedRunner',
max_epochs=1,
max_iters=None):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, x):
return self.linear(x)
def train_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def val_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
model = Model()
optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.95)
log_config = dict(
interval=1, hooks=[
dict(type='TextLoggerHook'),
])
tmp_dir = tempfile.mkdtemp()
runner = build_runner(
dict(type=runner_type),
default_args=dict(
model=model,
work_dir=tmp_dir,
optimizer=optimizer,
logger=logging.getLogger(),
max_epochs=max_epochs,
max_iters=max_iters))
runner.register_checkpoint_hook(dict(interval=1))
runner.register_logger_hooks(log_config)
return runner
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_lr.py |
import os.path as osp
import tempfile
import torch.nn as nn
from tools.pytorch2onnx import _convert_batchnorm, pytorch2onnx
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv3d(1, 2, 1)
self.bn = nn.SyncBatchNorm(2)
def forward(self, x):
return self.bn(self.conv(x))
def forward_dummy(self, x):
return (self.forward(x), )
def test_onnx_exporting():
with tempfile.TemporaryDirectory() as tmpdir:
out_file = osp.join(tmpdir, 'tmp.onnx')
model = TestModel()
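        # SyncBatchNorm cannot be traced for ONNX export; convert it to a
        # regular BatchNorm first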
model = _convert_batchnorm(model)
# test exporting
pytorch2onnx(model, (1, 1, 1, 1, 1), output_file=out_file)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_onnx.py |