code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def __getitem__(self, img_key):
"""Sample long term features like `lfb['0f39OWEqJ24,0902']` where `lfb`
is an instance of class LFB."""
video_id, timestamp = img_key.split(',')
return self.sample_long_term_features(video_id, int(timestamp)) | Sample long term features like `lfb['0f39OWEqJ24,0902']` where `lfb`
is an instance of class LFB. | __getitem__ | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/shared_heads/lfb.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/lfb.py | Apache-2.0 |
def forward(self, x, rois, img_metas, **kwargs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The extracted RoI feature.
rois (torch.Tensor): The regions of interest.
img_metas (List[dict]): The meta information of the data.
Returns:
torch.Tensor: The RoI features that have interacted with context
"""
# [N, C, 1, 1, 1]
features = self.temporal_pool(x)
features = self.spatial_pool(features)
if self.use_half_precision:
features = features.half()
inds = rois[:, 0].type(torch.int64)
for ind in inds:
self.all_metadata.append(img_metas[ind]['img_key'])
self.all_features += list(features)
# Return the input directly; this head does not modify its input.
return x | Defines the computation performed at every call.
Args:
x (torch.Tensor): The extracted RoI feature.
rois (torch.Tensor): The regions of interest.
img_metas (List[dict]): The meta information of the data.
Returns:
torch.Tensor: The RoI features that have interacted with context
| forward | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/shared_heads/lfb_infer_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/lfb_infer_head.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type) | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/similarity/adapters.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/adapters.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/similarity/adapters.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/adapters.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
b, seq_length, c = x.size()
x_original = x
x = x + self.positional_embedding
x = x.transpose(0, 1) # NLD -> LND
x = self.transformer(x)
x = x.transpose(0, 1) # LND -> NLD
x = x.type(x_original.dtype) + x_original
return x.mean(dim=1) | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/similarity/adapters.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/adapters.py | Apache-2.0 |
def _freeze_stages(self) -> None:
"""Prevent all the parameters from being optimized before
``self.frozen_layers``."""
if self.frozen_layers >= 0:
top_layers = [
'ln_final', 'text_projection', 'logit_scale', 'visual.ln_post',
'visual.proj'
]
mid_layers = [
'visual.transformer.resblocks', 'transformer.resblocks'
]
for name, param in self.clip.named_parameters():
if any(name.find(n) == 0 for n in top_layers):
continue
elif any(name.find(n) == 0 for n in mid_layers):
layer_n = int(name.split('.resblocks.')[1].split('.')[0])
if layer_n >= self.frozen_layers:
continue
param.requires_grad = False | Prevent all the parameters from being optimized before
``self.frozen_layers``. | _freeze_stages | python | open-mmlab/mmaction2 | mmaction/models/similarity/clip_similarity.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/clip_similarity.py | Apache-2.0 |
def assign_wrt_overlaps(self, overlaps: Tensor,
gt_labels: Tensor) -> AssignResult:
"""Assign w.r.t. the overlaps of bboxes with gts.
Args:
overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
shape(k, n).
gt_labels (Tensor): Labels of k gt_bboxes, shape
(k, num_classes).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
# 1. assign -1 by default
assigned_gt_inds = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = overlaps.new_zeros((num_bboxes, ))
assigned_labels = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
return AssignResult(
num_gts=num_gts,
gt_inds=assigned_gt_inds,
max_overlaps=max_overlaps,
labels=assigned_labels)
# for each anchor, which gt best overlaps with it
# for each anchor, the max iou of all gts
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# for each gt, which anchor best overlaps with it
# for each gt, the max iou of all proposals
gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
# 2. assign negative: below
# the negative inds are set to be 0
if isinstance(self.neg_iou_thr, float):
assigned_gt_inds[(max_overlaps >= 0)
& (max_overlaps < self.neg_iou_thr)] = 0
elif isinstance(self.neg_iou_thr, tuple):
assert len(self.neg_iou_thr) == 2
assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
& (max_overlaps < self.neg_iou_thr[1])] = 0
# 3. assign positive: above positive IoU threshold
pos_inds = max_overlaps >= self.pos_iou_thr
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
if self.match_low_quality:
# Low-quality matching will overwrite the assigned_gt_inds
# assigned in Step 3. Thus, the assigned gt might not be the
# best one for prediction.
# For example, if bbox A has 0.9 and 0.8 iou with GT bbox
# 1 & 2, bbox 1 will be assigned as the best target for bbox A
# in step 3. However, if GT bbox 2's gt_argmax_overlaps = A,
# bbox A's assigned_gt_inds will be overwritten to be GT bbox 2.
# This might be the reason that it is not used in ROI Heads.
for i in range(num_gts):
if gt_max_overlaps[i] >= self.min_pos_iou:
if self.gt_max_assign_all:
max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
assigned_gt_inds[max_iou_inds] = i + 1
else:
assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
# consider multi-class case (AVA)
assert len(gt_labels[0]) > 1
assigned_labels = assigned_gt_inds.new_zeros(
(num_bboxes, len(gt_labels[0])), dtype=torch.float32)
# If not assigned, labels will be all 0
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] -
1]
return AssignResult(
num_gts=num_gts,
gt_inds=assigned_gt_inds,
max_overlaps=max_overlaps,
labels=assigned_labels) | Assign w.r.t. the overlaps of bboxes with gts.
Args:
overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
shape(k, n).
gt_labels (Tensor): Labels of k gt_bboxes, shape
(k, num_classes).
Returns:
:obj:`AssignResult`: The assign result.
| assign_wrt_overlaps | python | open-mmlab/mmaction2 | mmaction/models/task_modules/assigners/max_iou_assigner_ava.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/task_modules/assigners/max_iou_assigner_ava.py | Apache-2.0 |
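The positive/negative logic above can be checked on a tiny made-up example. The sketch below reproduces only steps 1-3 (no low-quality matching) with assumed thresholds ``pos_iou_thr=0.5`` and ``neg_iou_thr=0.4``; the overlaps matrix is invented for illustration.

```python
import torch

# IoU of 2 ground-truth boxes (rows) with 3 proposals (columns); made-up numbers.
overlaps = torch.tensor([[0.8, 0.3, 0.0],
                         [0.1, 0.6, 0.2]])
pos_iou_thr, neg_iou_thr = 0.5, 0.4

# 1. assign -1 (ignore) by default
assigned = overlaps.new_full((overlaps.size(1), ), -1, dtype=torch.long)
max_overlaps, argmax_overlaps = overlaps.max(dim=0)

# 2. negatives: best IoU below the negative threshold -> background (0)
assigned[(max_overlaps >= 0) & (max_overlaps < neg_iou_thr)] = 0
# 3. positives: best IoU above the positive threshold -> 1-based gt index
pos = max_overlaps >= pos_iou_thr
assigned[pos] = argmax_overlaps[pos] + 1

print(assigned)  # tensor([1, 2, 0]): proposal 0 -> gt 1, proposal 1 -> gt 2, proposal 2 -> background
```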
def __call__(self, imgs: torch.Tensor, batch_data_samples: SampleList,
**kwargs) -> Tuple:
"""Blending data in a mini-batch.
Images are float tensors with the shape of (B, N, C, H, W) for 2D
recognizers or (B, N, C, T, H, W) for 3D recognizers.
Besides, labels are converted from hard labels to soft labels.
Hard labels are integer tensors with the shape of (B, ) and all of the
elements are in the range [0, num_classes - 1].
Soft labels (probability distribution over classes) are float tensors
with the shape of (B, num_classes) and all of the elements are in
the range [0, 1].
Args:
imgs (torch.Tensor): Model input images, float tensor with the
shape of (B, N, C, H, W) or (B, N, C, T, H, W).
batch_data_samples (List[:obj:`ActionDataSample`]): The batch
data samples. It usually includes information such
as `gt_label`.
Returns:
mixed_imgs (torch.Tensor): Blending images, float tensor with the
same shape of the input imgs.
batch_data_samples (List[:obj:`ActionDataSample`]): The modified
batch data samples. ``gt_label`` in each data sample are
converted from a hard label to a blended soft label, float
tensor with the shape of (num_classes, ) and all elements are
in range [0, 1].
"""
label = [x.gt_label for x in batch_data_samples]
# single-label classification
if label[0].size(0) == 1:
label = torch.tensor(label, dtype=torch.long).to(imgs.device)
one_hot_label = F.one_hot(label, num_classes=self.num_classes)
# multi-label classification
else:
one_hot_label = torch.stack(label)
mixed_imgs, mixed_label = self.do_blending(imgs, one_hot_label,
**kwargs)
for label_item, sample in zip(mixed_label, batch_data_samples):
sample.set_gt_label(label_item)
return mixed_imgs, batch_data_samples | Blending data in a mini-batch.
Images are float tensors with the shape of (B, N, C, H, W) for 2D
recognizers or (B, N, C, T, H, W) for 3D recognizers.
Besides, labels are converted from hard labels to soft labels.
Hard labels are integer tensors with the shape of (B, ) and all of the
elements are in the range [0, num_classes - 1].
Soft labels (probability distribution over classes) are float tensors
with the shape of (B, num_classes) and all of the elements are in
the range [0, 1].
Args:
imgs (torch.Tensor): Model input images, float tensor with the
shape of (B, N, C, H, W) or (B, N, C, T, H, W).
batch_data_samples (List[:obj:`ActionDataSample`]): The batch
data samples. It usually includes information such
as `gt_label`.
Returns:
mixed_imgs (torch.Tensor): Blending images, float tensor with the
same shape of the input imgs.
batch_data_samples (List[:obj:`ActionDataSample`]): The modified
batch data samples. ``gt_label`` in each data sample are
converted from a hard label to a blended soft label, float
tensor with the shape of (num_classes, ) and all elements are
in range [0, 1].
| __call__ | python | open-mmlab/mmaction2 | mmaction/models/utils/blending_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/blending_utils.py | Apache-2.0 |
def do_blending(self, imgs: torch.Tensor, label: torch.Tensor,
**kwargs) -> Tuple:
"""Blending images with mixup.
Args:
imgs (torch.Tensor): Model input images, float tensor with the
shape of (B, N, C, H, W) or (B, N, C, T, H, W).
label (torch.Tensor): One hot labels, integer tensor with the shape
of (B, num_classes).
Returns:
tuple: A tuple of blended images and labels.
"""
assert len(kwargs) == 0, f'unexpected kwargs for mixup {kwargs}'
lam = self.beta.sample()
batch_size = imgs.size(0)
rand_index = torch.randperm(batch_size)
mixed_imgs = lam * imgs + (1 - lam) * imgs[rand_index, :]
mixed_label = lam * label + (1 - lam) * label[rand_index, :]
return mixed_imgs, mixed_label | Blending images with mixup.
Args:
imgs (torch.Tensor): Model input images, float tensor with the
shape of (B, N, C, H, W) or (B, N, C, T, H, W).
label (torch.Tensor): One hot labels, integer tensor with the shape
of (B, num_classes).
Returns:
tuple: A tuple of blended images and labels.
| do_blending | python | open-mmlab/mmaction2 | mmaction/models/utils/blending_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/blending_utils.py | Apache-2.0 |
def do_blending(self, imgs: torch.Tensor, label: torch.Tensor,
**kwargs) -> Tuple:
"""Blending images with cutmix.
Args:
imgs (torch.Tensor): Model input images, float tensor with the
shape of (B, N, C, H, W) or (B, N, C, T, H, W).
label (torch.Tensor): One hot labels, integer tensor with the shape
of (B, num_classes).
Returns:
tuple: A tuple of blended images and labels.
"""
assert len(kwargs) == 0, f'unexpected kwargs for cutmix {kwargs}'
batch_size = imgs.size(0)
rand_index = torch.randperm(batch_size)
lam = self.beta.sample()
bbx1, bby1, bbx2, bby2 = self.rand_bbox(imgs.size(), lam)
imgs[:, ..., bby1:bby2, bbx1:bbx2] = imgs[rand_index, ..., bby1:bby2,
bbx1:bbx2]
lam = 1 - (1.0 * (bbx2 - bbx1) * (bby2 - bby1) /
(imgs.size()[-1] * imgs.size()[-2]))
label = lam * label + (1 - lam) * label[rand_index, :]
return imgs, label | Blending images with cutmix.
Args:
imgs (torch.Tensor): Model input images, float tensor with the
shape of (B, N, C, H, W) or (B, N, C, T, H, W).
label (torch.Tensor): One hot labels, integer tensor with the shape
of (B, num_classes).
Returns:
tuple: A tuple of blended images and labels.
| do_blending | python | open-mmlab/mmaction2 | mmaction/models/utils/blending_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/blending_utils.py | Apache-2.0 |
def do_blending(self, imgs: torch.Tensor, label: torch.Tensor,
**kwargs) -> Tuple:
"""Randomly apply batch augmentations to the batch inputs and batch
data samples."""
aug_index = np.random.choice(len(self.augments), p=self.probs)
aug = self.augments[aug_index]
if aug is not None:
return aug.do_blending(imgs, label, **kwargs)
else:
return imgs, label | Randomly apply batch augmentations to the batch inputs and batch
data samples. | do_blending | python | open-mmlab/mmaction2 | mmaction/models/utils/blending_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/blending_utils.py | Apache-2.0 |
def get_pad_shape(self, input_shape):
"""Calculate the padding size of input.
Args:
input_shape (:obj:`torch.Size`): arrange as (T, H, W).
Returns:
Tuple[int]: The padding size along the
original T, H and W directions.
"""
input_t, input_h, input_w = input_shape
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.stride
output_d = math.ceil(input_t / stride_d)
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
pad_d = max((output_d - 1) * stride_d +
(kernel_d - 1) * self.dilation[0] + 1 - input_t, 0)
pad_h = max((output_h - 1) * stride_h +
(kernel_h - 1) * self.dilation[1] + 1 - input_h, 0)
pad_w = max((output_w - 1) * stride_w +
(kernel_w - 1) * self.dilation[2] + 1 - input_w, 0)
return pad_d, pad_h, pad_w | Calculate the padding size of input.
Args:
input_shape (:obj:`torch.Size`): arrange as (T, H, W).
Returns:
Tuple[int]: The padding size along the
original T, H and W directions.
| get_pad_shape | python | open-mmlab/mmaction2 | mmaction/models/utils/embed.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/embed.py | Apache-2.0 |
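A worked numeric example of the padding formula above, with made-up sizes (kernel = stride = (2, 4, 4), dilation = (1, 1, 1)): for an input of (T, H, W) = (17, 225, 225), each dimension is padded just enough to become divisible by its stride.

```python
import math

def pad_amount(size, kernel, stride, dilation=1):
    """Padding needed so the sliding window covers the whole input."""
    out = math.ceil(size / stride)
    return max((out - 1) * stride + (kernel - 1) * dilation + 1 - size, 0)

print(pad_amount(17, 2, 2))    # 1 -> padded length 18 = 9 * 2
print(pad_amount(225, 4, 4))   # 3 -> padded length 228 = 57 * 4
```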
def forward(self, x):
"""Add padding to `x`
Args:
x (Tensor): Input tensor has shape (B, C, T, H, W).
Returns:
Tensor: The tensor with adaptive padding
"""
pad_d, pad_h, pad_w = self.get_pad_shape(x.size()[-3:])
if pad_d > 0 or pad_h > 0 or pad_w > 0:
if self.padding == 'corner':
x = F.pad(x, [0, pad_w, 0, pad_h, 0, pad_d])
elif self.padding == 'same':
x = F.pad(x, [
pad_w // 2,
pad_w - pad_w // 2,
pad_h // 2,
pad_h - pad_h // 2,
pad_d // 2,
pad_d - pad_d // 2,
])
return x | Add padding to `x`
Args:
x (Tensor): Input tensor has shape (B, C, T, H, W).
Returns:
Tensor: The tensor with adaptive padding
| forward | python | open-mmlab/mmaction2 | mmaction/models/utils/embed.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/embed.py | Apache-2.0 |
def forward(self, x):
"""
Args:
x (Tensor): Has shape (B, C, T, H, W). In most case, C is 3.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, out_t * out_h * out_w, embed_dims)
- out_size (tuple[int]): Spatial shape of x, arrange as
(out_t, out_h, out_w).
"""
if self.adaptive_padding:
x = self.adaptive_padding(x)
x = self.projection(x)
out_size = (x.shape[2], x.shape[3], x.shape[4])
x = x.flatten(2).transpose(1, 2)
if self.norm is not None:
x = self.norm(x)
return x, out_size |
Args:
x (Tensor): Has shape (B, C, T, H, W). In most case, C is 3.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, out_t * out_h * out_w, embed_dims)
- out_size (tuple[int]): Spatial shape of x, arrange as
(out_t, out_h, out_w).
| forward | python | open-mmlab/mmaction2 | mmaction/models/utils/embed.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/embed.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
n, c, t, v = x.shape
res = self.down(x) if self.with_res else 0
A_switch = {None: self.A, 'init': self.A}
if hasattr(self, 'PA'):
A_switch.update({
'offset': self.A + self.PA,
'importance': self.A * self.PA
})
A = A_switch[self.adaptive]
if self.conv_pos == 'pre':
x = self.conv(x)
x = x.view(n, self.num_subsets, -1, t, v)
x = torch.einsum('nkctv,kvw->nctw', (x, A)).contiguous()
elif self.conv_pos == 'post':
x = torch.einsum('nctv,kvw->nkctw', (x, A)).contiguous()
x = x.view(n, -1, t, v)
x = self.conv(x)
return self.act(self.bn(x) + res) | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/utils/gcn_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/gcn_utils.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
N, C, T, V = x.size()
y = None
if self.adaptive:
for i in range(self.num_subset):
A1 = self.conv_a[i](x).permute(0, 3, 1, 2).contiguous().view(
N, V, self.inter_c * T)
A2 = self.conv_b[i](x).view(N, self.inter_c * T, V)
A1 = self.tan(torch.matmul(A1, A2) / A1.size(-1)) # N V V
A1 = self.A[i] + A1 * self.alpha
A2 = x.view(N, C * T, V)
z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
y = z + y if y is not None else z
else:
for i in range(self.num_subset):
A1 = self.A[i]
A2 = x.view(N, C * T, V)
z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
y = z + y if y is not None else z
y = self.relu(self.bn(y) + self.down(x))
if self.attention:
# spatial attention first
se = y.mean(-2) # N C V
se1 = self.sigmoid(self.conv_sa(se)) # N 1 V
y = y * se1.unsqueeze(-2) + y
# then temporal attention
se = y.mean(-1) # N C T
se1 = self.sigmoid(self.conv_ta(se)) # N 1 T
y = y * se1.unsqueeze(-1) + y
# finally channel attention over the spatial-temporal pooled features
se = y.mean(-1).mean(-1) # N C
se1 = self.relu(self.fc1c(se))
se2 = self.sigmoid(self.fc2c(se1)) # N C
y = y * se2.unsqueeze(-1).unsqueeze(-1) + y
# A little bit weird
return y | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/utils/gcn_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/gcn_utils.py | Apache-2.0 |
def inner_forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
N, C, T, V = x.shape
branch_outs = []
for tempconv in self.branches:
out = tempconv(x)
branch_outs.append(out)
feat = torch.cat(branch_outs, dim=1)
feat = self.transform(feat)
return feat | Defines the computation performed at every call. | inner_forward | python | open-mmlab/mmaction2 | mmaction/models/utils/gcn_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/gcn_utils.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
out = self.inner_forward(x)
out = self.bn(out)
return self.drop(out) | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/utils/gcn_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/gcn_utils.py | Apache-2.0 |
def k_adjacency(A: Union[torch.Tensor, np.ndarray],
k: int,
with_self: bool = False,
self_factor: float = 1) -> np.ndarray:
"""Construct k-adjacency matrix.
Args:
A (torch.Tensor or np.ndarray): The adjacency matrix.
k (int): The number of hops.
with_self (bool): Whether to add self-loops to the
k-adjacency matrix. The self-loops is critical
for learning the relationships between the current
joint and its k-hop neighbors. Defaults to False.
self_factor (float): The scale factor to the added
identity matrix. Defaults to 1.
Returns:
np.ndarray: The k-adjacency matrix.
"""
# A is a 2D square array
if isinstance(A, torch.Tensor):
A = A.data.cpu().numpy()
assert isinstance(A, np.ndarray)
Iden = np.eye(len(A), dtype=A.dtype)
if k == 0:
return Iden
Ak = np.minimum(np.linalg.matrix_power(A + Iden, k), 1) - np.minimum(
np.linalg.matrix_power(A + Iden, k - 1), 1)
if with_self:
Ak += (self_factor * Iden)
return Ak | Construct k-adjacency matrix.
Args:
A (torch.Tensor or np.ndarray): The adjacency matrix.
k (int): The number of hops.
with_self (bool): Whether to add self-loops to the
k-adjacency matrix. The self-loops is critical
for learning the relationships between the current
joint and its k-hop neighbors. Defaults to False.
self_factor (float): The scale factor to the added
identity matrix. Defaults to 1.
Returns:
np.ndarray: The k-adjacency matrix.
| k_adjacency | python | open-mmlab/mmaction2 | mmaction/models/utils/graph.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py | Apache-2.0 |
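A quick check of ``k_adjacency`` on a made-up 4-node chain graph 0-1-2-3 (assuming the function above is in scope): with ``k=2`` only pairs that are exactly two hops apart are set.

```python
import numpy as np

A = np.array([[0, 1, 0, 0],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [0, 0, 1, 0]], dtype=float)

A2 = k_adjacency(A, k=2)
# Only (0, 2), (2, 0), (1, 3) and (3, 1) are non-zero: nodes reachable in
# exactly two hops but not fewer.
print(A2)
```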
def edge2mat(edges: List[Tuple[int, int]], num_node: int) -> np.ndarray:
"""Get adjacency matrix from edges.
Args:
edges (list[tuple[int, int]]): The edges of the graph.
num_node (int): The number of nodes of the graph.
Returns:
np.ndarray: The adjacency matrix.
"""
A = np.zeros((num_node, num_node))
for i, j in edges:
A[j, i] = 1
return A | Get adjacency matrix from edges.
Args:
edges (list[tuple[int, int]]): The edges of the graph.
num_node (int): The number of nodes of the graph.
Returns:
np.ndarray: The adjacency matrix.
| edge2mat | python | open-mmlab/mmaction2 | mmaction/models/utils/graph.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py | Apache-2.0 |
def normalize_digraph(A: np.ndarray, dim: int = 0) -> np.ndarray:
"""Normalize the digraph according to the given dimension.
Args:
A (np.ndarray): The adjacency matrix.
dim (int): The dimension to perform normalization.
Defaults to 0.
Returns:
np.ndarray: The normalized adjacency matrix.
"""
# A is a 2D square array
Dl = np.sum(A, dim)
h, w = A.shape
Dn = np.zeros((w, w))
for i in range(w):
if Dl[i] > 0:
Dn[i, i] = Dl[i]**(-1)
AD = np.dot(A, Dn)
return AD | Normalize the digraph according to the given dimension.
Args:
A (np.ndarray): The adjacency matrix.
dim (int): The dimension to perform normalization.
Defaults to 0.
Returns:
np.ndarray: The normalized adjacency matrix.
| normalize_digraph | python | open-mmlab/mmaction2 | mmaction/models/utils/graph.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py | Apache-2.0 |
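A small example chaining ``edge2mat`` and ``normalize_digraph`` from the snippets above (assuming both are in scope) on a made-up 3-node graph; each column of the adjacency matrix is divided by its non-zero column sum.

```python
edges = [(0, 1), (1, 2), (0, 2)]     # made-up directed edges i -> j
A = edge2mat(edges, num_node=3)      # A[j, i] = 1 for every edge
# A = [[0, 0, 0],
#      [1, 0, 0],
#      [1, 1, 0]]
AD = normalize_digraph(A)
# Column 0 sums to 2, so its entries become 0.5; column 1 stays 1; the
# all-zero column 2 is left untouched.
```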
def get_hop_distance(num_node: int,
edges: List[Tuple[int, int]],
max_hop: int = 1) -> np.ndarray:
"""Get n-hop distance matrix by edges.
Args:
num_node (int): The number of nodes of the graph.
edges (list[tuple[int, int]]): The edges of the graph.
max_hop (int): The maximal distance between two connected nodes.
Defaults to 1.
Returns:
np.ndarray: The n-hop distance matrix.
"""
A = np.eye(num_node)
for i, j in edges:
A[i, j] = 1
A[j, i] = 1
# compute hop steps
hop_dis = np.zeros((num_node, num_node)) + np.inf
transfer_mat = [np.linalg.matrix_power(A, d) for d in range(max_hop + 1)]
arrive_mat = (np.stack(transfer_mat) > 0)
for d in range(max_hop, -1, -1):
hop_dis[arrive_mat[d]] = d
return hop_dis | Get n-hop distance matrix by edges.
Args:
num_node (int): The number of nodes of the graph.
edges (list[tuple[int, int]]): The edges of the graph.
max_hop (int): The maximal distance between two connected nodes.
Defaults to 1.
Returns:
np.ndarray: The n-hop distance matrix.
| get_hop_distance | python | open-mmlab/mmaction2 | mmaction/models/utils/graph.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/utils/graph.py | Apache-2.0 |
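For a made-up chain graph 0-1-2-3, ``get_hop_distance`` (assuming the function above is in scope) returns pairwise hop counts and leaves anything beyond ``max_hop`` at infinity.

```python
edges = [(0, 1), (1, 2), (2, 3)]
hop = get_hop_distance(num_node=4, edges=edges, max_hop=2)
# hop[0] == [0., 1., 2., inf]: node 3 is three hops from node 0, which
# exceeds max_hop=2, so the distance stays inf.
```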
def format_label(value: LABEL_TYPE) -> torch.Tensor:
"""Convert various python types to label-format tensor.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int`.
Args:
value (torch.Tensor | numpy.ndarray | Sequence | int): Label value.
Returns:
:obj:`torch.Tensor`: The formatted label tensor.
"""
# Handle single number
if isinstance(value, (torch.Tensor, np.ndarray)) and value.ndim == 0:
value = int(value.item())
if isinstance(value, np.ndarray):
value = torch.from_numpy(value).to(torch.long)
elif isinstance(value, Sequence) and not is_str(value):
value = torch.tensor(value).to(torch.long)
elif isinstance(value, int):
value = torch.LongTensor([value])
elif not isinstance(value, torch.Tensor):
raise TypeError(f'Type {type(value)} is not an available label type.')
return value | Convert various python types to label-format tensor.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int`.
Args:
value (torch.Tensor | numpy.ndarray | Sequence | int): Label value.
Returns:
:obj:`torch.Tensor`: The formatted label tensor.
| format_label | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
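Expected behaviour of ``format_label`` for the supported input types (assuming the function above is in scope); every result is a 1-D ``torch.long`` tensor.

```python
import numpy as np
import torch

format_label(3)                   # tensor([3])
format_label([0, 4, 2])           # tensor([0, 4, 2])
format_label(np.array([1, 2]))    # tensor([1, 2])
format_label(torch.tensor(7))     # 0-d tensor is unwrapped -> tensor([7])
```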
def format_score(value: SCORE_TYPE) -> Union[torch.Tensor, Dict]:
"""Convert various python types to score-format tensor.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`.
Args:
value (torch.Tensor | numpy.ndarray | Sequence | dict):
Score values or dict of scores values.
Returns:
:obj:`torch.Tensor` | dict: The formatted scores.
"""
if isinstance(value, np.ndarray):
value = torch.from_numpy(value).float()
elif isinstance(value, Sequence) and not is_str(value):
value = torch.tensor(value).float()
elif isinstance(value, dict):
for k, v in value.items():
value[k] = format_score(v)
elif not isinstance(value, torch.Tensor):
raise TypeError(f'Type {type(value)} is not an available label type.')
return value | Convert various python types to score-format tensor.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`.
Args:
value (torch.Tensor | numpy.ndarray | Sequence | dict):
Score values or dict of scores values.
Returns:
:obj:`torch.Tensor` | dict: The formatted scores.
| format_score | python | open-mmlab/mmaction2 | mmaction/structures/action_data_sample.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/action_data_sample.py | Apache-2.0 |
def bbox_target(pos_bboxes_list: List[torch.Tensor],
neg_bboxes_list: List[torch.Tensor],
gt_labels: List[torch.Tensor],
cfg: Union[dict, mmengine.ConfigDict]) -> tuple:
"""Generate classification targets for bboxes.
Args:
pos_bboxes_list (List[torch.Tensor]): Positive bboxes list.
neg_bboxes_list (List[torch.Tensor]): Negative bboxes list.
gt_labels (List[torch.Tensor]): Groundtruth classification label list.
cfg (dict | mmengine.ConfigDict): RCNN config.
Returns:
tuple: Label and label_weight for bboxes.
"""
labels, label_weights = [], []
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
assert len(pos_bboxes_list) == len(neg_bboxes_list) == len(gt_labels)
length = len(pos_bboxes_list)
for i in range(length):
pos_bboxes = pos_bboxes_list[i]
neg_bboxes = neg_bboxes_list[i]
gt_label = gt_labels[i]
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
label = F.pad(gt_label, (0, 0, 0, num_neg))
label_weight = pos_bboxes.new_zeros(num_samples)
label_weight[:num_pos] = pos_weight
label_weight[-num_neg:] = 1.
labels.append(label)
label_weights.append(label_weight)
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
return labels, label_weights | Generate classification targets for bboxes.
Args:
pos_bboxes_list (List[torch.Tensor]): Positive bboxes list.
neg_bboxes_list (List[torch.Tensor]): Negative bboxes list.
gt_labels (List[torch.Tensor]): Groundtruth classification label list.
cfg (dict | mmengine.ConfigDict): RCNN config.
Returns:
tuple: Label and label_weight for bboxes.
| bbox_target | python | open-mmlab/mmaction2 | mmaction/structures/bbox/bbox_target.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/bbox/bbox_target.py | Apache-2.0 |
def bbox2result(bboxes: torch.Tensor,
labels: torch.Tensor,
num_classes: int,
thr: float = 0.01) -> list:
"""Convert detection results to a list of numpy arrays.
This identifies single-label classification (as opposed to multi-label)
through the thr parameter which is set to a negative value.
ToDo: The ideal way would be for this to be automatically set when the
model cfg uses multilabel=False; however, this could be a breaking change
and is left as a future exercise. Currently, the way to set this is to
set ``test_cfg.rcnn.action_thr=-1.0``.
NB - this should not interfere with the evaluation in any case.
Args:
bboxes (torch.Tensor): shape ``(n, 4)``.
labels (torch.Tensor): shape ``(n, num_classes)``.
num_classes (int): class number, including background class.
thr (float): The score threshold used when converting predictions to
detection results. If a single negative value, uses single-label
classification.
Returns:
List(ndarray): bbox results of each class.
"""
if bboxes.shape[0] == 0:
return list(np.zeros((num_classes - 1, 0, 5), dtype=np.float32))
bboxes = bboxes.cpu().numpy()
scores = labels.cpu().numpy() # rename for clarification
# Although we can handle single-label classification, we still want scores
assert scores.shape[-1] > 1
# Robustly check for multi/single-label:
if not hasattr(thr, '__len__'):
multilabel = thr >= 0
thr = (thr, ) * num_classes
else:
multilabel = True
# Check Shape
assert scores.shape[1] == num_classes
assert len(thr) == num_classes
result = []
for i in range(num_classes - 1):
if multilabel:
where = (scores[:, i + 1] > thr[i + 1])
else:
where = (scores[:, 1:].argmax(axis=1) == i)
result.append(
np.concatenate((bboxes[where, :4], scores[where, i + 1:i + 2]),
axis=1))
return result | Convert detection results to a list of numpy arrays.
This identifies single-label classification (as opposed to multi-label)
through the thr parameter which is set to a negative value.
ToDo: The ideal way would be for this to be automatically set when the
model cfg uses multilabel=False; however, this could be a breaking change
and is left as a future exercise. Currently, the way to set this is to
set ``test_cfg.rcnn.action_thr=-1.0``.
NB - this should not interfere with the evaluation in any case.
Args:
bboxes (torch.Tensor): shape ``(n, 4)``.
labels (torch.Tensor): shape ``(n, num_classes)``.
num_classes (int): class number, including background class.
thr (float): The score threshold used when converting predictions to
detection results. If a single negative value, uses single-label
classification.
Returns:
List(ndarray): bbox results of each class.
| bbox2result | python | open-mmlab/mmaction2 | mmaction/structures/bbox/transforms.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/structures/bbox/transforms.py | Apache-2.0 |
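A toy run of ``bbox2result`` as defined above, with two boxes and three classes (background plus two actions); all numbers are made up. A positive ``thr`` keeps every (box, class) pair above the threshold, while a negative ``thr`` keeps only the argmax class per box.

```python
import torch

bboxes = torch.tensor([[0., 0., 10., 10.],
                       [5., 5., 20., 20.]])
scores = torch.tensor([[0.1, 0.9, 0.3],    # box 0: confident for class 1
                       [0.2, 0.2, 0.7]])   # box 1: confident for class 2

multi = bbox2result(bboxes, scores, num_classes=3, thr=0.5)
# multi[0] -> [[0., 0., 10., 10., 0.9]], multi[1] -> [[5., 5., 20., 20., 0.7]]

single = bbox2result(bboxes, scores, num_classes=3, thr=-1.0)
# same output on this toy data, but each box contributes to at most one class
```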
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True | Check if norm layer is in correct train state. | check_norm_state | python | open-mmlab/mmaction2 | mmaction/testing/_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/testing/_utils.py | Apache-2.0 |
def generate_backbone_demo_inputs(input_shape=(1, 3, 64, 64)):
"""Create a superset of inputs needed to run backbone.
Args:
input_shape (tuple): input batch dimensions.
Defaults to ``(1, 3, 64, 64)``.
"""
imgs = np.random.random(input_shape)
imgs = torch.FloatTensor(imgs)
return imgs | Create a superset of inputs needed to run backbone.
Args:
input_shape (tuple): input batch dimensions.
Defaults to ``(1, 3, 64, 64)``.
| generate_backbone_demo_inputs | python | open-mmlab/mmaction2 | mmaction/testing/_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/testing/_utils.py | Apache-2.0 |
def generate_recognizer_demo_inputs(
input_shape=(1, 3, 3, 224, 224), model_type='2D'):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple): input batch dimensions.
Defaults to ``(1, 3, 3, 224, 224)``.
model_type (str): Model type for data generation, from
{'2D', '3D', 'skeleton', 'audio'}. Defaults to ``'2D'``.
"""
if len(input_shape) == 5:
(N, L, _, _, _) = input_shape
elif len(input_shape) == 6:
(N, M, _, L, _, _) = input_shape
imgs = np.random.random(input_shape)
if model_type == '2D' or model_type == 'skeleton':
gt_labels = torch.LongTensor([2] * N)
elif model_type == '3D':
gt_labels = torch.LongTensor([2] * M)
elif model_type == 'audio':
gt_labels = torch.LongTensor([2] * L)
else:
raise ValueError(f'Data type {model_type} is not available')
inputs = {'imgs': torch.FloatTensor(imgs), 'gt_labels': gt_labels}
return inputs | Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple): input batch dimensions.
Defaults to ``(1, 3, 3, 224, 224)``.
model_type (str): Model type for data generation, from
{'2D', '3D', 'skeleton', 'audio'}. Defaults to ``'2D'``.
| generate_recognizer_demo_inputs | python | open-mmlab/mmaction2 | mmaction/testing/_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/testing/_utils.py | Apache-2.0 |
def get_cfg(config_type, fname):
"""Grab configs necessary to create a recognizer.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config_types = ('recognition', 'recognition_audio', 'localization',
'detection', 'skeleton', 'retrieval')
assert config_type in config_types
repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dpath, 'configs/' + config_type)
config_fpath = osp.join(config_dpath, fname)
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config = mmengine.Config.fromfile(config_fpath)
return config | Grab configs necessary to create a recognizer.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
| get_cfg | python | open-mmlab/mmaction2 | mmaction/testing/_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/testing/_utils.py | Apache-2.0 |
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_basic_env()
env_info['MMAction2'] = (
mmaction.__version__ + '+' + get_git_hash(digits=7))
env_info['MMCV'] = (mmcv.__version__)
try:
import mmdet
env_info['MMDetection'] = (mmdet.__version__)
except ImportError:
pass
try:
import mmpose
env_info['MMPose'] = (mmpose.__version__)
except ImportError:
pass
return env_info | Collect the information of the running environments. | collect_env | python | open-mmlab/mmaction2 | mmaction/utils/collect_env.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/collect_env.py | Apache-2.0 |
def require(dep, install=None):
"""A wrapper of function for extra package requirements.
Args:
dep (str): The dependency package name, like ``transformers``
or ``transformers>=4.28.0``.
install (str, optional): The installation command hint. Defaults
to None, which means to use "pip install dep".
"""
def wrapper(fn):
assert isfunction(fn)
@wraps(fn)
def ask_install(*args, **kwargs):
name = fn.__qualname__.replace('.__init__', '')
ins = install or f'pip install "{dep}"'
raise ImportError(
f'{name} requires {dep}, please install it by `{ins}`.')
if satisfy_requirement(dep):
fn._verify_require = getattr(fn, '_verify_require', lambda: None)
return fn
ask_install._verify_require = ask_install
return ask_install
return wrapper | A wrapper of function for extra package requirements.
Args:
dep (str): The dependency package name, like ``transformers``
or ``transformers>=4.28.0``.
install (str, optional): The installation command hint. Defaults
to None, which means to use "pip install dep".
| require | python | open-mmlab/mmaction2 | mmaction/utils/dependency.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/dependency.py | Apache-2.0 |
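A hedged usage sketch of the ``require`` decorator above. ``fancy_pkg`` and ``build_fancy_model`` are made-up names, and ``satisfy_requirement`` is assumed to be the helper from the same module; when the dependency is missing, calling the wrapped function raises an ImportError with the install hint.

```python
@require('fancy_pkg>=1.0', install='pip install fancy-pkg')
def build_fancy_model(cfg):
    import fancy_pkg  # only imported when the requirement is satisfied
    return fancy_pkg.build(cfg)

# Without fancy_pkg installed, calling build_fancy_model(cfg) raises:
#   ImportError: build_fancy_model requires fancy_pkg>=1.0, please install it
#   by `pip install fancy-pkg`.
```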
def _register_hooks(self, layer_name: str) -> None:
"""Register forward and backward hook to a layer, given layer_name, to
obtain gradients and activations.
Args:
layer_name (str): name of the layer.
"""
def get_gradients(module, grad_input, grad_output):
self.target_gradients = grad_output[0].detach()
def get_activations(module, input, output):
self.target_activations = output.clone().detach()
layer_ls = layer_name.split('/')
prev_module = self.model
for layer in layer_ls:
prev_module = prev_module._modules[layer]
target_layer = prev_module
target_layer.register_forward_hook(get_activations)
target_layer.register_backward_hook(get_gradients) | Register forward and backward hook to a layer, given layer_name, to
obtain gradients and activations.
Args:
layer_name (str): name of the layer.
| _register_hooks | python | open-mmlab/mmaction2 | mmaction/utils/gradcam_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/gradcam_utils.py | Apache-2.0 |
def _calculate_localization_map(self,
data: dict,
use_labels: bool,
delta=1e-20) -> tuple:
"""Calculate localization map for all inputs with Grad-CAM.
Args:
data (dict): model inputs, generated by test pipeline,
use_labels (bool): Whether to use given labels to generate
localization map.
delta (float): used in localization map normalization,
must be small enough. Please make sure
`localization_map_max - localization_map_min >> delta`
Returns:
localization_map (torch.Tensor): the localization map for
input imgs.
preds (torch.Tensor): Model predictions with shape
(batch_size, num_classes).
"""
inputs = data['inputs']
# use score before softmax
self.model.cls_head.average_clips = 'score'
# model forward & backward
results = self.model.test_step(data)
preds = [result.pred_score for result in results]
preds = torch.stack(preds)
if use_labels:
labels = [result.gt_label for result in results]
labels = torch.stack(labels)
score = torch.gather(preds, dim=1, index=labels)
else:
score = torch.max(preds, dim=-1)[0]
self.model.zero_grad()
score = torch.sum(score)
score.backward()
imgs = torch.stack(inputs)
if self.is_recognizer2d:
# [batch_size, num_segments, 3, H, W]
b, t, _, h, w = imgs.size()
else:
# [batch_size, num_crops*num_clips, 3, clip_len, H, W]
b1, b2, _, t, h, w = imgs.size()
b = b1 * b2
gradients = self.target_gradients
activations = self.target_activations
if self.is_recognizer2d:
# [B*Tg, C', H', W']
b_tg, c, _, _ = gradients.size()
tg = b_tg // b
else:
# source shape: [B, C', Tg, H', W']
_, c, tg, _, _ = gradients.size()
# target shape: [B, Tg, C', H', W']
gradients = gradients.permute(0, 2, 1, 3, 4)
activations = activations.permute(0, 2, 1, 3, 4)
# calculate & resize to [B, 1, T, H, W]
weights = torch.mean(gradients.view(b, tg, c, -1), dim=3)
weights = weights.view(b, tg, c, 1, 1)
activations = activations.view([b, tg, c] +
list(activations.size()[-2:]))
localization_map = torch.sum(
weights * activations, dim=2, keepdim=True)
localization_map = F.relu(localization_map)
localization_map = localization_map.permute(0, 2, 1, 3, 4)
localization_map = F.interpolate(
localization_map,
size=(t, h, w),
mode='trilinear',
align_corners=False)
# Normalize the localization map.
localization_map_min, localization_map_max = (
torch.min(localization_map.view(b, -1), dim=-1, keepdim=True)[0],
torch.max(localization_map.view(b, -1), dim=-1, keepdim=True)[0])
localization_map_min = torch.reshape(
localization_map_min, shape=(b, 1, 1, 1, 1))
localization_map_max = torch.reshape(
localization_map_max, shape=(b, 1, 1, 1, 1))
localization_map = (localization_map - localization_map_min) / (
localization_map_max - localization_map_min + delta)
localization_map = localization_map.data
return localization_map.squeeze(dim=1), preds | Calculate localization map for all inputs with Grad-CAM.
Args:
data (dict): model inputs, generated by test pipeline,
use_labels (bool): Whether to use given labels to generate
localization map.
delta (float): used in localization map normalization,
must be small enough. Please make sure
`localization_map_max - localization_map_min >> delta`
Returns:
localization_map (torch.Tensor): the localization map for
input imgs.
preds (torch.Tensor): Model predictions with shape
(batch_size, num_classes).
| _calculate_localization_map | python | open-mmlab/mmaction2 | mmaction/utils/gradcam_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/gradcam_utils.py | Apache-2.0 |
def _alpha_blending(self, localization_map: torch.Tensor,
input_imgs: torch.Tensor,
alpha: float) -> torch.Tensor:
"""Blend heatmaps and model input images and get visulization results.
Args:
localization_map (torch.Tensor): localization map for all inputs,
generated with Grad-CAM.
input_imgs (torch.Tensor): model inputs, raw images.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
torch.Tensor: blending results for localization map and input
images, with shape [B, T, H, W, 3] and pixel values in
RGB order within range [0, 1].
"""
# localization_map shape [B, T, H, W]
localization_map = localization_map.cpu()
# heatmap shape [B, T, H, W, 3] in RGB order
heatmap = self.colormap(localization_map.detach().numpy())
heatmap = heatmap[..., :3]
heatmap = torch.from_numpy(heatmap)
input_imgs = torch.stack(input_imgs)
# Permute input imgs to [B, T, H, W, 3], like heatmap
if self.is_recognizer2d:
# Recognizer2D input (B, T, C, H, W)
curr_inp = input_imgs.permute(0, 1, 3, 4, 2)
else:
# Recognizer3D input (B', num_clips*num_crops, C, T, H, W)
# B = B' * num_clips * num_crops
curr_inp = input_imgs.view([-1] + list(input_imgs.size()[2:]))
curr_inp = curr_inp.permute(0, 2, 3, 4, 1)
# renormalize input imgs to [0, 1]
curr_inp = curr_inp.cpu().float()
curr_inp /= 255.
# alpha blending
blended_imgs = alpha * heatmap + (1 - alpha) * curr_inp
return blended_imgs | Blend heatmaps and model input images and get visualization results.
Args:
localization_map (torch.Tensor): localization map for all inputs,
generated with Grad-CAM.
input_imgs (torch.Tensor): model inputs, raw images.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
torch.Tensor: blending results for localization map and input
images, with shape [B, T, H, W, 3] and pixel values in
RGB order within range [0, 1].
| _alpha_blending | python | open-mmlab/mmaction2 | mmaction/utils/gradcam_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/gradcam_utils.py | Apache-2.0 |
def __call__(self,
data: dict,
use_labels: bool = False,
alpha: float = 0.5) -> tuple:
"""Visualize the localization maps on their corresponding inputs as
heatmap, using Grad-CAM.
Generate visualization results for **ALL CROPS**.
For example, for I3D model, if `clip_len=32, num_clips=10` and
use `ThreeCrop` in test pipeline, then for every model inputs,
there are 960(32*10*3) images generated.
Args:
data (dict): model inputs, generated by test pipeline.
use_labels (bool): Whether to use given labels to generate
localization map.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
blended_imgs (torch.Tensor): Visualization results, blended by
localization maps and model inputs.
preds (torch.Tensor): Model predictions for inputs.
"""
# localization_map shape [B, T, H, W]
# preds shape [batch_size, num_classes]
localization_map, preds = self._calculate_localization_map(
data, use_labels=use_labels)
# blended_imgs shape [B, T, H, W, 3]
blended_imgs = self._alpha_blending(localization_map, data['inputs'],
alpha)
# blended_imgs shape [B, T, H, W, 3]
# preds shape [batch_size, num_classes]
# Recognizer2D: B = batch_size, T = num_segments
# Recognizer3D: B = batch_size * num_crops * num_clips, T = clip_len
return blended_imgs, preds | Visualize the localization maps on their corresponding inputs as
heatmap, using Grad-CAM.
Generate visualization results for **ALL CROPS**.
For example, for I3D model, if `clip_len=32, num_clips=10` and
use `ThreeCrop` in test pipeline, then for every model inputs,
there are 960(32*10*3) images generated.
Args:
data (dict): model inputs, generated by test pipeline.
use_labels (bool): Whether to use given labels to generate
localization map.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
blended_imgs (torch.Tensor): Visualization results, blended by
localization maps and model inputs.
preds (torch.Tensor): Model predictions for inputs.
| __call__ | python | open-mmlab/mmaction2 | mmaction/utils/gradcam_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/gradcam_utils.py | Apache-2.0 |
def get_random_string(length: int = 15) -> str:
"""Get random string with letters and digits.
Args:
length (int): Length of random string. Defaults to 15.
"""
return ''.join(
random.choice(string.ascii_letters + string.digits)
for _ in range(length)) | Get random string with letters and digits.
Args:
length (int): Length of random string. Defaults to 15.
| get_random_string | python | open-mmlab/mmaction2 | mmaction/utils/misc.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/misc.py | Apache-2.0 |
def frame_extract(video_path: str,
short_side: Optional[int] = None,
out_dir: str = './tmp'):
"""Extract frames given video_path.
Args:
video_path (str): The video path.
short_side (int): Target short-side of the output image.
Defaults to None, means keeping original shape.
out_dir (str): The output directory. Defaults to ``'./tmp'``.
"""
# Load the video, extract frames into OUT_DIR/video_name
target_dir = osp.join(out_dir, osp.basename(osp.splitext(video_path)[0]))
os.makedirs(target_dir, exist_ok=True)
# Should be able to handle videos up to several hours
frame_tmpl = osp.join(target_dir, 'img_{:06d}.jpg')
assert osp.exists(video_path), f'file does not exist: {video_path}'
vid = cv2.VideoCapture(video_path)
frames = []
frame_paths = []
flag, frame = vid.read()
cnt = 0
new_h, new_w = None, None
while flag:
if short_side is not None:
if new_h is None:
h, w, _ = frame.shape
new_w, new_h = mmcv.rescale_size((w, h), (short_side, np.Inf))
frame = mmcv.imresize(frame, (new_w, new_h))
frames.append(frame)
frame_path = frame_tmpl.format(cnt + 1)
frame_paths.append(frame_path)
cv2.imwrite(frame_path, frame)
cnt += 1
flag, frame = vid.read()
return frame_paths, frames | Extract frames given video_path.
Args:
video_path (str): The video path.
short_side (int): Target short-side of the output image.
Defaults to None, means keeping original shape.
out_dir (str): The output directory. Defaults to ``'./tmp'``.
| frame_extract | python | open-mmlab/mmaction2 | mmaction/utils/misc.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/misc.py | Apache-2.0 |
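A usage sketch of ``frame_extract`` (assuming it is exported from ``mmaction.utils``, as in the demo scripts); the video path is a placeholder.

```python
from mmaction.utils import frame_extract

frame_paths, frames = frame_extract(
    'demo/demo.mp4', short_side=256, out_dir='./tmp')
# frames: list of BGR np.ndarray images rescaled so the short side is 256;
# frame_paths: ./tmp/demo/img_000001.jpg, img_000002.jpg, ...
```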
def get_str_type(module: Union[str, ModuleType, FunctionType]) -> str:
"""Return the string type name of module.
Args:
module (str | ModuleType | FunctionType):
The target module class
Returns:
Class name of the module
"""
if isinstance(module, str):
str_type = module
elif inspect.isclass(module) or inspect.isfunction(module):
str_type = module.__name__
else:
return None
return str_type | Return the string type name of module.
Args:
module (str | ModuleType | FunctionType):
The target module class
Returns:
Class name of the module
| get_str_type | python | open-mmlab/mmaction2 | mmaction/utils/misc.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/misc.py | Apache-2.0 |
def register_all_modules(init_default_scope: bool = True) -> None:
"""Register all modules in mmaction into the registries.
Args:
init_default_scope (bool): Whether initialize the mmaction default
scope. If True, the global default scope will be set to `mmaction`,
and all registries will build modules from mmaction's registry
node. To understand more about the registry, please refer to
https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
Defaults to True.
"""
import mmaction.datasets # noqa: F401,F403
import mmaction.engine # noqa: F401,F403
import mmaction.evaluation # noqa: F401,F403
import mmaction.models # noqa: F401,F403
import mmaction.structures # noqa: F401,F403
import mmaction.visualization # noqa: F401,F403
if init_default_scope:
never_created = DefaultScope.get_current_instance() is None \
or not DefaultScope.check_instance_created('mmaction')
if never_created:
DefaultScope.get_instance('mmaction', scope_name='mmaction')
return
current_scope = DefaultScope.get_current_instance()
if current_scope.scope_name != 'mmaction':
warnings.warn('The current default scope '
f'"{current_scope.scope_name}" is not "mmaction", '
'`register_all_modules` will force set the current'
'default scope to "mmaction". If this is not as '
'expected, please set `init_default_scope=False`.')
# avoid name conflict
new_instance_name = f'mmaction-{datetime.datetime.now()}'
DefaultScope.get_instance(new_instance_name, scope_name='mmaction') | Register all modules in mmaction into the registries.
Args:
init_default_scope (bool): Whether initialize the mmaction default
scope. If True, the global default scope will be set to `mmaction`,
and all registries will build modules from mmaction's registry
node. To understand more about the registry, please refer to
https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
Defaults to True.
| register_all_modules | python | open-mmlab/mmaction2 | mmaction/utils/setup_env.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/utils/setup_env.py | Apache-2.0 |
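Typical usage is a single call before building models or datasets from configs (a minimal sketch; the import path follows the module shown above):

```python
from mmaction.utils import register_all_modules

# Registers all mmaction modules and sets 'mmaction' as the default scope.
register_all_modules(init_default_scope=True)
```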
def _load_video(self,
video: Union[np.ndarray, Sequence[np.ndarray], str],
target_resolution: Optional[Tuple[int]] = None):
"""Load video from multiple source and convert to target resolution.
Args:
video (np.ndarray, str): The video to draw.
target_resolution (Tuple[int], optional): Set to
(desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
"""
if isinstance(video, np.ndarray) or isinstance(video, list):
frames = video
elif isinstance(video, str):
# video file path
if isfile(video):
try:
import decord
except ImportError:
raise ImportError(
'Please install decord to load video file.')
video = decord.VideoReader(video)
frames = [x.asnumpy()[..., ::-1] for x in video]
# rawframes folder path
elif isdir(video):
frame_list = sorted(list_dir_or_file(video, list_dir=False))
frames = [mmcv.imread(join_path(video, x)) for x in frame_list]
else:
raise TypeError(f'type of video {type(video)} not supported')
if target_resolution is not None:
w, h = target_resolution
frame_h, frame_w, _ = frames[0].shape
if w == -1:
w = int(h / frame_h * frame_w)
if h == -1:
h = int(w / frame_w * frame_h)
frames = [mmcv.imresize(f, (w, h)) for f in frames]
return frames | Load video from multiple source and convert to target resolution.
Args:
video (np.ndarray, str): The video to draw.
target_resolution (Tuple[int], optional): Set to
(desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
| _load_video | python | open-mmlab/mmaction2 | mmaction/visualization/action_visualizer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/action_visualizer.py | Apache-2.0 |
def add_datasample(self,
name: str,
video: Union[np.ndarray, Sequence[np.ndarray], str],
data_sample: Optional[ActionDataSample] = None,
draw_gt: bool = True,
draw_pred: bool = True,
draw_score: bool = True,
rescale_factor: Optional[float] = None,
show_frames: bool = False,
text_cfg: dict = dict(),
wait_time: float = 0.1,
out_path: Optional[str] = None,
out_type: str = 'img',
target_resolution: Optional[Tuple[int]] = None,
step: int = 0,
fps: int = 4) -> None:
"""Draw datasample and save to all backends.
- If ``out_path`` is specified, all storage backends are ignored
and save the videos to the ``out_path``.
- If ``show_frames`` is True, plot the frames in a window sequentially;
please confirm you are able to access the graphical interface.
Args:
name (str): The frame identifier.
video (np.ndarray, str): The video to draw. supports decoded
np.ndarray, video file path, rawframes folder path.
data_sample (:obj:`ActionDataSample`, optional): The annotation of
the frame. Defaults to None.
draw_gt (bool): Whether to draw ground truth labels.
Defaults to True.
draw_pred (bool): Whether to draw prediction labels.
Defaults to True.
draw_score (bool): Whether to draw the prediction scores
of prediction categories. Defaults to True.
rescale_factor (float, optional): Rescale the frame by the rescale
factor before visualization. Defaults to None.
show_frames (bool): Whether to display the frames of the video.
Defaults to False.
text_cfg (dict): Extra text setting, which accepts
arguments of :attr:`mmengine.Visualizer.draw_texts`.
Defaults to an empty dict.
wait_time (float): Delay in seconds. 0 is the special
value that means "forever". Defaults to 0.1.
out_path (str, optional): Extra folder to save the visualization
result. If specified, the visualizer will only save the result
frame to the out_path and ignore its storage backends.
Defaults to None.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'img'``.
target_resolution (Tuple[int], optional): Set to
(desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
step (int): Global step value to record. Defaults to 0.
fps (int): Frames per second for saving video. Defaults to 4.
"""
classes = None
video = self._load_video(video, target_resolution)
tol_video = len(video)
if self.dataset_meta is not None:
classes = self.dataset_meta.get('classes', None)
if data_sample is None:
data_sample = ActionDataSample()
resulted_video = []
for frame_idx, frame in enumerate(video):
frame_name = 'frame %d of %s' % (frame_idx + 1, name)
if rescale_factor is not None:
frame = mmcv.imrescale(frame, rescale_factor)
texts = ['Frame %d of total %d frames' % (frame_idx, tol_video)]
self.set_image(frame)
if draw_gt and 'gt_label' in data_sample:
gt_labels = data_sample.gt_label
idx = gt_labels.tolist()
class_labels = [''] * len(idx)
if classes is not None:
class_labels = [f' ({classes[i]})' for i in idx]
labels = [
str(idx[i]) + class_labels[i] for i in range(len(idx))
]
prefix = 'Ground truth: '
texts.append(prefix + ('\n' + ' ' * len(prefix)).join(labels))
if draw_pred and 'pred_labels' in data_sample:
pred_labels = data_sample.pred_labels
idx = pred_labels.item.tolist()
score_labels = [''] * len(idx)
class_labels = [''] * len(idx)
if draw_score and 'score' in pred_labels:
score_labels = [
f', {pred_labels.score[i].item():.2f}' for i in idx
]
if classes is not None:
class_labels = [f' ({classes[i]})' for i in idx]
labels = [
str(idx[i]) + score_labels[i] + class_labels[i]
for i in range(len(idx))
]
prefix = 'Prediction: '
texts.append(prefix + ('\n' + ' ' * len(prefix)).join(labels))
img_scale = _get_adaptive_scale(frame.shape[:2])
_text_cfg = {
'positions':
np.array([(img_scale * 5, ) * 2]).astype(np.int32),
'font_sizes': int(img_scale * 7),
'font_families': 'monospace',
'colors': 'white',
'bboxes': dict(facecolor='black', alpha=0.5, boxstyle='Round'),
}
_text_cfg.update(text_cfg)
self.draw_texts('\n'.join(texts), **_text_cfg)
drawn_img = self.get_image()
resulted_video.append(drawn_img)
if show_frames:
frame_wait_time = 1. / fps
for frame_idx, drawn_img in enumerate(resulted_video):
frame_name = 'frame %d of %s' % (frame_idx + 1, name)
if frame_idx < len(resulted_video) - 1:
wait_time = frame_wait_time
else:
wait_time = wait_time
self.show(
drawn_img[:, :, ::-1],
win_name=frame_name,
wait_time=wait_time)
resulted_video = np.array(resulted_video)
if out_path is not None:
save_dir, save_name = osp.split(out_path)
vis_backend_cfg = dict(type='LocalVisBackend', save_dir=save_dir)
tmp_local_vis_backend = VISBACKENDS.build(vis_backend_cfg)
tmp_local_vis_backend.add_video(
save_name,
resulted_video,
step=step,
fps=fps,
out_type=out_type)
else:
self.add_video(
name, resulted_video, step=step, fps=fps, out_type=out_type)
return resulted_video | Draw datasample and save to all backends.
- If ``out_path`` is specified, all storage backends are ignored
and save the videos to the ``out_path``.
- If ``show_frames`` is True, plot the frames in a window sequentially;
please confirm you are able to access the graphical interface.
Args:
name (str): The frame identifier.
video (np.ndarray, str): The video to draw. Supports decoded
np.ndarray, video file path, rawframes folder path.
data_sample (:obj:`ActionDataSample`, optional): The annotation of
the frame. Defaults to None.
draw_gt (bool): Whether to draw ground truth labels.
Defaults to True.
draw_pred (bool): Whether to draw prediction labels.
Defaults to True.
draw_score (bool): Whether to draw the prediction scores
of prediction categories. Defaults to True.
rescale_factor (float, optional): Rescale the frame by the rescale
factor before visualization. Defaults to None.
show_frames (bool): Whether to display the frames of the video.
Defaults to False.
text_cfg (dict): Extra text setting, which accepts
arguments of :attr:`mmengine.Visualizer.draw_texts`.
Defaults to an empty dict.
wait_time (float): Delay in seconds. 0 is the special
value that means "forever". Defaults to 0.1.
out_path (str, optional): Extra folder to save the visualization
result. If specified, the visualizer will only save the result
frame to the out_path and ignore its storage backends.
Defaults to None.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'img'``.
target_resolution (Tuple[int], optional): Set to
(desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
step (int): Global step value to record. Defaults to 0.
fps (int): Frames per second for saving video. Defaults to 4.
| add_datasample | python | open-mmlab/mmaction2 | mmaction/visualization/action_visualizer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/action_visualizer.py | Apache-2.0 |
def add_video(
self,
name: str,
image: np.ndarray,
step: int = 0,
fps: int = 4,
out_type: str = 'img',
) -> None:
"""Record the image.
Args:
name (str): The image identifier.
image (np.ndarray, optional): The image to be saved. The format
should be RGB. Default to None.
step (int): Global step value to record. Default to 0.
fps (int): Frames per second for saving video. Defaults to 4.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'img'``.
"""
for vis_backend in self._vis_backends.values():
vis_backend.add_video(
name, image, step=step, fps=fps,
out_type=out_type) # type: ignore | Record the image.
Args:
name (str): The image identifier.
image (np.ndarray, optional): The image to be saved. The format
should be RGB. Default to None.
step (int): Global step value to record. Default to 0.
fps (int): Frames per second for saving video. Defaults to 4.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'img'``.
| add_video | python | open-mmlab/mmaction2 | mmaction/visualization/action_visualizer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/action_visualizer.py | Apache-2.0 |
def add_video(self,
name: str,
frames: np.ndarray,
step: int = 0,
fps: Optional[int] = 4,
out_type: Optional[int] = 'img',
**kwargs) -> None:
"""Record the frames of a video to disk.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step (int): Global step value to record. Defaults to 0.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'img'``.
fps (int): Frames per second for saving video. Defaults to 4.
"""
assert frames.dtype == np.uint8
if out_type == 'img':
frames_dir = osp.join(self._save_dir, name, f'frames_{step}')
os.makedirs(frames_dir, exist_ok=True)
for idx, frame in enumerate(frames):
drawn_image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
save_file_name = f'{idx}.png'
cv2.imwrite(osp.join(frames_dir, save_file_name), drawn_image)
else:
try:
from moviepy.editor import ImageSequenceClip
except ImportError:
raise ImportError('Please install moviepy to enable '
'output file.')
frames = [x[..., ::-1] for x in frames]
video_clips = ImageSequenceClip(frames, fps=fps)
name = osp.splitext(name)[0]
if out_type == 'gif':
out_path = osp.join(self._save_dir, name + '.gif')
video_clips.write_gif(out_path, logger=None)
elif out_type == 'video':
out_path = osp.join(self._save_dir, name + '.mp4')
video_clips.write_videofile(
out_path, remove_temp=True, logger=None) | Record the frames of a video to disk.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step (int): Global step value to record. Defaults to 0.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'img'``.
fps (int): Frames per second for saving video. Defaults to 4.
| add_video | python | open-mmlab/mmaction2 | mmaction/visualization/video_backend.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/video_backend.py | Apache-2.0 |
def add_video(self,
name: str,
frames: np.ndarray,
fps: int = 4,
**kwargs) -> None:
"""Record the frames of a video to wandb.
Note that this requires the ``moviepy`` package.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step is an unused placeholder, as Wandb does not need it.
fps (int): Frames per second. Defaults to 4.
"""
frames = frames.transpose(0, 3, 1, 2)
self._wandb.log({'video': wandb.Video(frames, fps=fps, format='gif')}) | Record the frames of a video to wandb.
Note that this requires the ``moviepy`` package.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step is an unused placeholder, as Wandb does not need it.
fps (int): Frames per second. Defaults to 4.
| add_video | python | open-mmlab/mmaction2 | mmaction/visualization/video_backend.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/video_backend.py | Apache-2.0 |
def add_video(self,
name: str,
frames: np.ndarray,
step: int = 0,
fps: int = 4,
**kwargs) -> None:
"""Record the frames of a video to tensorboard.
Note that this requires the ``moviepy`` package.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step (int): Global step value to record. Defaults to 0.
fps (int): Frames per second. Defaults to 4.
"""
frames = frames.transpose(0, 3, 1, 2)
frames = frames.reshape(1, *frames.shape)
self._tensorboard.add_video(name, frames, global_step=step, fps=fps) | Record the frames of a video to tensorboard.
Note that this requires the ``moviepy`` package.
Args:
name (str): The video identifier (frame folder).
frames (np.ndarray): The frames to be saved. The format
should be RGB. The shape should be (T, H, W, C).
step (int): Global step value to record. Defaults to 0.
fps (int): Frames per second. Defaults to 4.
| add_video | python | open-mmlab/mmaction2 | mmaction/visualization/video_backend.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/visualization/video_backend.py | Apache-2.0 |
def check_crop(origin_imgs, result_imgs, result_bbox, num_crops=1):
"""Check if the result_bbox is in correspond to result_imgs."""
def check_single_crop(origin_imgs, result_imgs, result_bbox):
result_img_shape = result_imgs[0].shape[:2]
crop_w = result_bbox[2] - result_bbox[0]
crop_h = result_bbox[3] - result_bbox[1]
crop_shape = (crop_h, crop_w)
if not crop_shape == result_img_shape:
return False
left, top, right, bottom = result_bbox
return np.array_equal(
np.array(origin_imgs)[:, top:bottom, left:right, :],
np.array(result_imgs))
if result_bbox.ndim == 1:
return check_single_crop(origin_imgs, result_imgs, result_bbox)
if result_bbox.ndim == 2:
num_batch = len(origin_imgs)
for i, bbox in enumerate(result_bbox):
if num_crops == 10:
if (i // num_batch) % 2 == 0:
flag = check_single_crop([origin_imgs[i % num_batch]],
[result_imgs[i]], bbox)
else:
flag = check_single_crop([origin_imgs[i % num_batch]],
[np.flip(result_imgs[i], axis=1)],
bbox)
else:
flag = check_single_crop([origin_imgs[i % num_batch]],
[result_imgs[i]], bbox)
if not flag:
return False
return True
else:
# bbox has a wrong dimension
return False | Check if the result_bbox is in correspond to result_imgs. | check_crop | python | open-mmlab/mmaction2 | tests/datasets/transforms/test_processing.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/datasets/transforms/test_processing.py | Apache-2.0 |
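A self-contained sanity check for the helper above; the image and bbox values are made up for illustration:
import numpy as np
imgs = [np.arange(48).reshape(4, 4, 3)]  # one 4x4 RGB image in (n, h, w, c) layout
bbox = np.array([1, 0, 3, 2])  # left, top, right, bottom
cropped = [imgs[0][0:2, 1:3, :]]  # rows top:bottom, cols left:right
assert check_crop(imgs, cropped, bbox)  # shape and pixel values both match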
def check_flip(origin_imgs, result_imgs, flip_type):
"""Check if the origin_imgs are flipped correctly into result_imgs in
different flip_types."""
n, _, _, _ = np.shape(origin_imgs)
if flip_type == 'horizontal':
for i in range(n):
if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])):
return False
else:
# yapf: disable
for i in range(n):
if np.any(result_imgs[i] != np.transpose(np.fliplr(np.transpose(origin_imgs[i], (1, 0, 2))), (1, 0, 2))): # noqa:E501
return False
# yapf: enable
return True | Check if the origin_imgs are flipped correctly into result_imgs in
different flip_types. | check_flip | python | open-mmlab/mmaction2 | tests/datasets/transforms/test_processing.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/datasets/transforms/test_processing.py | Apache-2.0 |
def check_flip(origin_imgs, result_imgs, flip_type):
"""Check if the origin_imgs are flipped correctly into result_imgs in
different flip_types."""
n, _, _, _ = np.shape(origin_imgs)
if flip_type == 'horizontal':
for i in range(n):
if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])):
return False
else:
# yapf: disable
for i in range(n):
if np.any(result_imgs[i] != np.transpose(np.fliplr(np.transpose(origin_imgs[i], (1, 0, 2))), (1, 0, 2))): # noqa:E501
return False
# yapf: enable
return True | Check if the origin_imgs are flipped correctly into result_imgs in
different flip_types. | check_flip | python | open-mmlab/mmaction2 | tests/datasets/transforms/test_wrappers.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/datasets/transforms/test_wrappers.py | Apache-2.0 |
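A quick sanity check for check_flip, assuming the helper is available as defined above; the array contents are arbitrary:
import numpy as np
origin = np.arange(18).reshape(1, 2, 3, 3)  # one 2x3 RGB image in (n, h, w, c) layout
flipped = origin[:, :, ::-1, :]  # reverse the width axis, i.e. np.fliplr applied per image
assert check_flip(origin, flipped, 'horizontal')
assert not check_flip(origin, origin.copy(), 'horizontal')  # an unflipped copy fails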
def test_evaluate(self):
"""Test using the metric in the same way as Evalutor."""
pred = [
ActionDataSample().set_pred_score(i).set_pred_label(
j).set_gt_label(k).to_dict() for i, j, k in zip([
torch.tensor([0.7, 0.0, 0.3]),
torch.tensor([0.5, 0.2, 0.3]),
torch.tensor([0.4, 0.5, 0.1]),
torch.tensor([0.0, 0.0, 1.0]),
torch.tensor([0.0, 0.0, 1.0]),
torch.tensor([0.0, 0.0, 1.0]),
], [0, 0, 1, 2, 2, 2], [0, 0, 1, 2, 1, 0])
]
# Test with score (use score instead of label if score exists)
metric = METRICS.build(dict(type='ConfusionMatrix'))
metric.process(None, pred)
res = metric.evaluate(6)
self.assertIsInstance(res, dict)
self.assertTensorEqual(
res['confusion_matrix/result'],
torch.tensor([
[2, 0, 1],
[0, 1, 1],
[0, 0, 1],
]))
# Test with label
for sample in pred:
del sample['pred_score']
metric = METRICS.build(dict(type='ConfusionMatrix'))
metric.process(None, pred)
with self.assertRaisesRegex(AssertionError,
'Please specify the `num_classes`'):
metric.evaluate(6)
metric = METRICS.build(dict(type='ConfusionMatrix', num_classes=3))
metric.process(None, pred)
res = metric.evaluate(6)
self.assertIsInstance(res, dict)
self.assertTensorEqual(
res['confusion_matrix/result'],
torch.tensor([
[2, 0, 1],
[0, 1, 1],
[0, 0, 1],
])) | Test using the metric in the same way as Evalutor. | test_evaluate | python | open-mmlab/mmaction2 | tests/evaluation/metrics/test_acc_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/evaluation/metrics/test_acc_metric.py | Apache-2.0 |
def gt_confusion_matrix(gt_labels, pred_labels, normalize=None):
"""Calculate the ground truth confusion matrix."""
max_index = max(max(gt_labels), max(pred_labels))
confusion_mat = np.zeros((max_index + 1, max_index + 1), dtype=np.int64)
for gt, pred in zip(gt_labels, pred_labels):
confusion_mat[gt][pred] += 1
del_index = []
for i in range(max_index):
if sum(confusion_mat[i]) == 0 and sum(confusion_mat[:, i]) == 0:
del_index.append(i)
confusion_mat = np.delete(confusion_mat, del_index, axis=0)
confusion_mat = np.delete(confusion_mat, del_index, axis=1)
if normalize is not None:
confusion_mat = np.array(confusion_mat, dtype=np.float64)
m, n = confusion_mat.shape
if normalize == 'true':
for i in range(m):
s = np.sum(confusion_mat[i], dtype=float)
if s == 0:
continue
confusion_mat[i, :] = confusion_mat[i, :] / s
print(confusion_mat[i, :])
elif normalize == 'pred':
for i in range(n):
s = sum(confusion_mat[:, i])
if s == 0:
continue
confusion_mat[:, i] = confusion_mat[:, i] / s
elif normalize == 'all':
s = np.sum(confusion_mat)
if s != 0:
confusion_mat /= s
return confusion_mat | Calculate the ground truth confusion matrix. | gt_confusion_matrix | python | open-mmlab/mmaction2 | tests/evaluation/metrics/test_metric_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/evaluation/metrics/test_metric_utils.py | Apache-2.0 |
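A small worked example for the helper above (label values chosen purely for illustration):
gt = [0, 0, 1, 2]
pred = [0, 1, 1, 2]
gt_confusion_matrix(gt, pred)
# -> array([[1, 1, 0],
#           [0, 1, 0],
#           [0, 0, 1]])
# row i counts samples whose ground truth is class i, column j the predicted class;
# with normalize='true' each row is divided by its own sum instead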
def test_evaluate(self):
"""Test using the metric in the same way as Evalutor."""
pred = [
ActionDataSample().set_pred_score(i).set_gt_label(k).to_dict()
for i, k in zip([
torch.tensor([0.7, 0.0, 0.3]),
torch.tensor([0.5, 0.2, 0.3]),
torch.tensor([0.4, 0.5, 0.1]),
torch.tensor([0.0, 0.0, 1.0]),
torch.tensor([0.0, 0.0, 1.0]),
torch.tensor([0.0, 0.0, 1.0]),
], [[0], [0], [1], [2], [2], [0]])
]
# Test with score (use score instead of label if score exists)
metric = METRICS.build(dict(type='RetrievalRecall', topk=1))
metric.process(None, pred)
recall = metric.evaluate(6)
self.assertIsInstance(recall, dict)
self.assertAlmostEqual(
recall['retrieval/Recall@1'], 5 / 6 * 100, places=4)
# Test with invalid topk
with self.assertRaisesRegex(RuntimeError, 'selected index k'):
metric = METRICS.build(dict(type='RetrievalRecall', topk=10))
metric.process(None, pred)
metric.evaluate(6)
with self.assertRaisesRegex(ValueError, '`topk` must be a'):
METRICS.build(dict(type='RetrievalRecall', topk=-1))
# Test initialization
metric = METRICS.build(dict(type='RetrievalRecall', topk=5))
self.assertEqual(metric.topk, (5, ))
# Test initialization
metric = METRICS.build(dict(type='RetrievalRecall', topk=(1, 2, 5)))
self.assertEqual(metric.topk, (1, 2, 5)) | Test using the metric in the same way as Evalutor. | test_evaluate | python | open-mmlab/mmaction2 | tests/evaluation/metrics/test_retrieval_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/evaluation/metrics/test_retrieval_metric.py | Apache-2.0 |
def test_calculate(self):
"""Test using the metric from static method."""
# seq of indices format
y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
y_pred = [np.arange(10)] * 2
# test with average is 'macro'
recall_score = RetrievalRecall.calculate(
y_pred, y_true, topk=1, pred_indices=True, target_indices=True)
expect_recall = 50.
self.assertEqual(recall_score[0].item(), expect_recall)
# test with tensor input
y_true = torch.Tensor([[1, 0, 1, 0, 0, 1, 0, 0, 1, 1],
[0, 1, 0, 0, 1, 0, 1, 0, 0, 0]])
y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2)
recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=1)
expect_recall = 50.
self.assertEqual(recall_score[0].item(), expect_recall)
# test with topk is 2
y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2)
recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=2)
expect_recall = 100.
self.assertEqual(recall_score[0].item(), expect_recall)
# test with topk is (1, 5)
y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2)
recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=(1, 5))
expect_recalls = [50., 100.]
self.assertEqual(len(recall_score), len(expect_recalls))
for i in range(len(expect_recalls)):
self.assertEqual(recall_score[i].item(), expect_recalls[i])
# Test with invalid pred
y_pred = dict()
y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
with self.assertRaisesRegex(AssertionError, '`pred` must be Seq'):
RetrievalRecall.calculate(y_pred, y_true, True, True)
# Test with invalid target
y_true = dict()
y_pred = [np.arange(10)] * 2
with self.assertRaisesRegex(AssertionError, '`target` must be Seq'):
RetrievalRecall.calculate(
y_pred, y_true, topk=1, pred_indices=True, target_indices=True)
# Test with different length `pred` with `target`
y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
y_pred = [np.arange(10)] * 3
with self.assertRaisesRegex(AssertionError, 'Length of `pred`'):
RetrievalRecall.calculate(
y_pred, y_true, topk=1, pred_indices=True, target_indices=True)
# Test with invalid pred
y_true = [[0, 2, 5, 8, 9], dict()]
y_pred = [np.arange(10)] * 2
with self.assertRaisesRegex(AssertionError, '`target` should be'):
RetrievalRecall.calculate(
y_pred, y_true, topk=1, pred_indices=True, target_indices=True)
# Test with invalid target
y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
y_pred = [np.arange(10), dict()]
with self.assertRaisesRegex(AssertionError, '`pred` should be'):
RetrievalRecall.calculate(
y_pred, y_true, topk=1, pred_indices=True, target_indices=True) | Test using the metric from static method. | test_calculate | python | open-mmlab/mmaction2 | tests/evaluation/metrics/test_retrieval_metric.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/evaluation/metrics/test_retrieval_metric.py | Apache-2.0 |
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False | Check if is one of the norms. | is_norm | python | open-mmlab/mmaction2 | tests/models/backbones/test_mobilenet_v2.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_mobilenet_v2.py | Apache-2.0 |
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (InvertedResidual, )):
return True
return False | Check if is ResNet building block. | is_block | python | open-mmlab/mmaction2 | tests/models/backbones/test_mobilenet_v2.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/backbones/test_mobilenet_v2.py | Apache-2.0 |
def test_i3d_head():
"""Test loss method, layer construction, attributes and forward function in
i3d head."""
i3d_head = I3DHead(num_classes=4, in_channels=2048)
i3d_head.init_weights()
assert i3d_head.num_classes == 4
assert i3d_head.dropout_ratio == 0.5
assert i3d_head.in_channels == 2048
assert i3d_head.init_std == 0.01
assert isinstance(i3d_head.dropout, nn.Dropout)
assert i3d_head.dropout.p == i3d_head.dropout_ratio
assert isinstance(i3d_head.fc_cls, nn.Linear)
assert i3d_head.fc_cls.in_features == i3d_head.in_channels
assert i3d_head.fc_cls.out_features == i3d_head.num_classes
assert isinstance(i3d_head.avg_pool, nn.AdaptiveAvgPool3d)
assert i3d_head.avg_pool.output_size == (1, 1, 1)
input_shape = (3, 2048, 4, 7, 7)
feat = torch.rand(input_shape)
# i3d head inference
cls_scores = i3d_head(feat)
assert cls_scores.shape == torch.Size([3, 4]) | Test loss method, layer construction, attributes and forward function in
i3d head. | test_i3d_head | python | open-mmlab/mmaction2 | tests/models/heads/test_i3d_head.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_i3d_head.py | Apache-2.0 |
def test_slowfast_head():
"""Test loss method, layer construction, attributes and forward function in
slowfast head."""
sf_head = SlowFastHead(num_classes=4, in_channels=2304)
sf_head.init_weights()
assert sf_head.num_classes == 4
assert sf_head.dropout_ratio == 0.8
assert sf_head.in_channels == 2304
assert sf_head.init_std == 0.01
assert isinstance(sf_head.dropout, nn.Dropout)
assert sf_head.dropout.p == sf_head.dropout_ratio
assert isinstance(sf_head.fc_cls, nn.Linear)
assert sf_head.fc_cls.in_features == sf_head.in_channels
assert sf_head.fc_cls.out_features == sf_head.num_classes
assert isinstance(sf_head.avg_pool, nn.AdaptiveAvgPool3d)
assert sf_head.avg_pool.output_size == (1, 1, 1)
input_shape = (3, 2048, 32, 7, 7)
feat_slow = torch.rand(input_shape)
input_shape = (3, 256, 4, 7, 7)
feat_fast = torch.rand(input_shape)
sf_head = SlowFastHead(num_classes=4, in_channels=2304)
cls_scores = sf_head((feat_slow, feat_fast))
assert cls_scores.shape == torch.Size([3, 4]) | Test loss method, layer construction, attributes and forward function in
slowfast head. | test_slowfast_head | python | open-mmlab/mmaction2 | tests/models/heads/test_slowfast_head.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_slowfast_head.py | Apache-2.0 |
def test_timesformer_head():
"""Test loss method, layer construction, attributes and forward function in
timesformer head."""
timesformer_head = TimeSformerHead(num_classes=4, in_channels=64)
timesformer_head.init_weights()
assert timesformer_head.num_classes == 4
assert timesformer_head.in_channels == 64
assert timesformer_head.init_std == 0.02
input_shape = (2, 64)
feat = torch.rand(input_shape)
cls_scores = timesformer_head(feat)
assert cls_scores.shape == torch.Size([2, 4]) | Test loss method, layer construction, attributes and forward function in
timesformer head. | test_timesformer_head | python | open-mmlab/mmaction2 | tests/models/heads/test_timesformer_head.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_timesformer_head.py | Apache-2.0 |
def test_tpn_head():
"""Test loss method, layer construction, attributes and forward function in
tpn head."""
tpn_head = TPNHead(num_classes=4, in_channels=2048)
tpn_head.init_weights()
assert hasattr(tpn_head, 'avg_pool2d')
assert hasattr(tpn_head, 'avg_pool3d')
assert isinstance(tpn_head.avg_pool3d, nn.AdaptiveAvgPool3d)
assert tpn_head.avg_pool3d.output_size == (1, 1, 1)
assert tpn_head.avg_pool2d is None
input_shape = (4, 2048, 7, 7)
feat = torch.rand(input_shape)
# tpn head inference with num_segs
num_segs = 2
cls_scores = tpn_head(feat, num_segs)
assert isinstance(tpn_head.avg_pool2d, nn.AvgPool3d)
assert tpn_head.avg_pool2d.kernel_size == (1, 7, 7)
assert cls_scores.shape == torch.Size([2, 4])
# tpn head inference with no num_segs
input_shape = (2, 2048, 3, 7, 7)
feat = torch.rand(input_shape)
cls_scores = tpn_head(feat)
assert isinstance(tpn_head.avg_pool2d, nn.AvgPool3d)
assert tpn_head.avg_pool2d.kernel_size == (1, 7, 7)
assert cls_scores.shape == torch.Size([2, 4]) | Test loss method, layer construction, attributes and forward function in
tpn head. | test_tpn_head | python | open-mmlab/mmaction2 | tests/models/heads/test_tpn_head.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_tpn_head.py | Apache-2.0 |
def test_trn_head():
"""Test loss method, layer construction, attributes and forward function in
trn head."""
from mmaction.models.heads.trn_head import (RelationModule,
RelationModuleMultiScale)
trn_head = TRNHead(num_classes=4, in_channels=2048, relation_type='TRN')
trn_head.init_weights()
assert trn_head.num_classes == 4
assert trn_head.dropout_ratio == 0.8
assert trn_head.in_channels == 2048
assert trn_head.init_std == 0.001
assert trn_head.spatial_type == 'avg'
relation_module = trn_head.consensus
assert isinstance(relation_module, RelationModule)
assert relation_module.hidden_dim == 256
assert isinstance(relation_module.classifier[3], nn.Linear)
assert relation_module.classifier[3].out_features == trn_head.num_classes
assert trn_head.dropout.p == trn_head.dropout_ratio
assert isinstance(trn_head.dropout, nn.Dropout)
assert isinstance(trn_head.fc_cls, nn.Linear)
assert trn_head.fc_cls.in_features == trn_head.in_channels
assert trn_head.fc_cls.out_features == trn_head.hidden_dim
assert isinstance(trn_head.avg_pool, nn.AdaptiveAvgPool2d)
assert trn_head.avg_pool.output_size == 1
input_shape = (8, 2048, 7, 7)
feat = torch.rand(input_shape)
# trn head inference with no init
num_segs = input_shape[0]
cls_scores = trn_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
# trn head inference with init
trn_head = TRNHead(
num_classes=4,
in_channels=2048,
num_segments=8,
relation_type='TRNMultiScale')
trn_head.init_weights()
assert isinstance(trn_head.consensus, RelationModuleMultiScale)
assert trn_head.consensus.scales == range(8, 1, -1)
cls_scores = trn_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
with pytest.raises(ValueError):
trn_head = TRNHead(
num_classes=4,
in_channels=2048,
num_segments=8,
relation_type='RelationModlue') | Test loss method, layer construction, attributes and forward function in
trn head. | test_trn_head | python | open-mmlab/mmaction2 | tests/models/heads/test_trn_head.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_trn_head.py | Apache-2.0 |
def test_tsm_head():
"""Test loss method, layer construction, attributes and forward function in
tsm head."""
tsm_head = TSMHead(num_classes=4, in_channels=2048)
tsm_head.init_weights()
assert tsm_head.num_classes == 4
assert tsm_head.dropout_ratio == 0.8
assert tsm_head.in_channels == 2048
assert tsm_head.init_std == 0.001
assert tsm_head.consensus.dim == 1
assert tsm_head.spatial_type == 'avg'
assert isinstance(tsm_head.dropout, nn.Dropout)
assert tsm_head.dropout.p == tsm_head.dropout_ratio
assert isinstance(tsm_head.fc_cls, nn.Linear)
assert tsm_head.fc_cls.in_features == tsm_head.in_channels
assert tsm_head.fc_cls.out_features == tsm_head.num_classes
assert isinstance(tsm_head.avg_pool, nn.AdaptiveAvgPool2d)
assert tsm_head.avg_pool.output_size == 1
input_shape = (8, 2048, 7, 7)
feat = torch.rand(input_shape)
# tsm head inference with no init
num_segs = input_shape[0]
cls_scores = tsm_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
# tsm head inference with init
tsm_head = TSMHead(num_classes=4, in_channels=2048, temporal_pool=True)
tsm_head.init_weights()
cls_scores = tsm_head(feat, num_segs)
assert cls_scores.shape == torch.Size([2, 4]) | Test loss method, layer construction, attributes and forward function in
tsm head. | test_tsm_head | python | open-mmlab/mmaction2 | tests/models/heads/test_tsm_head.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_tsm_head.py | Apache-2.0 |
def test_tsn_head():
"""Test loss method, layer construction, attributes and forward function in
tsn head."""
tsn_head = TSNHead(num_classes=4, in_channels=2048)
tsn_head.init_weights()
assert tsn_head.num_classes == 4
assert tsn_head.dropout_ratio == 0.4
assert tsn_head.in_channels == 2048
assert tsn_head.init_std == 0.01
assert tsn_head.consensus.dim == 1
assert tsn_head.spatial_type == 'avg'
assert isinstance(tsn_head.dropout, nn.Dropout)
assert tsn_head.dropout.p == tsn_head.dropout_ratio
assert isinstance(tsn_head.fc_cls, nn.Linear)
assert tsn_head.fc_cls.in_features == tsn_head.in_channels
assert tsn_head.fc_cls.out_features == tsn_head.num_classes
assert isinstance(tsn_head.avg_pool, nn.AdaptiveAvgPool2d)
assert tsn_head.avg_pool.output_size == (1, 1)
input_shape = (8, 2048, 7, 7)
feat = torch.rand(input_shape)
# tsn head inference
num_segs = input_shape[0]
cls_scores = tsn_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
# Test multi-class recognition
multi_tsn_head = TSNHead(
num_classes=4,
in_channels=2048,
loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0),
multi_class=True,
label_smooth_eps=0.01)
multi_tsn_head.init_weights()
assert multi_tsn_head.num_classes == 4
assert multi_tsn_head.dropout_ratio == 0.4
assert multi_tsn_head.in_channels == 2048
assert multi_tsn_head.init_std == 0.01
assert multi_tsn_head.consensus.dim == 1
assert isinstance(multi_tsn_head.dropout, nn.Dropout)
assert multi_tsn_head.dropout.p == multi_tsn_head.dropout_ratio
assert isinstance(multi_tsn_head.fc_cls, nn.Linear)
assert multi_tsn_head.fc_cls.in_features == multi_tsn_head.in_channels
assert multi_tsn_head.fc_cls.out_features == multi_tsn_head.num_classes
assert isinstance(multi_tsn_head.avg_pool, nn.AdaptiveAvgPool2d)
assert multi_tsn_head.avg_pool.output_size == (1, 1)
input_shape = (8, 2048, 7, 7)
feat = torch.rand(input_shape)
# multi-class tsn head inference
num_segs = input_shape[0]
cls_scores = tsn_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4]) | Test loss method, layer construction, attributes and forward function in
tsn head. | test_tsn_head | python | open-mmlab/mmaction2 | tests/models/heads/test_tsn_head.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_tsn_head.py | Apache-2.0 |
def test_x3d_head():
"""Test loss method, layer construction, attributes and forward function in
x3d head."""
x3d_head = X3DHead(in_channels=432, num_classes=4, fc1_bias=False)
x3d_head.init_weights()
assert x3d_head.num_classes == 4
assert x3d_head.dropout_ratio == 0.5
assert x3d_head.in_channels == 432
assert x3d_head.init_std == 0.01
assert isinstance(x3d_head.dropout, nn.Dropout)
assert x3d_head.dropout.p == x3d_head.dropout_ratio
assert isinstance(x3d_head.fc1, nn.Linear)
assert x3d_head.fc1.in_features == x3d_head.in_channels
assert x3d_head.fc1.out_features == x3d_head.mid_channels
assert x3d_head.fc1.bias is None
assert isinstance(x3d_head.fc2, nn.Linear)
assert x3d_head.fc2.in_features == x3d_head.mid_channels
assert x3d_head.fc2.out_features == x3d_head.num_classes
assert isinstance(x3d_head.pool, nn.AdaptiveAvgPool3d)
assert x3d_head.pool.output_size == (1, 1, 1)
input_shape = (3, 432, 4, 7, 7)
feat = torch.rand(input_shape)
# x3d head inference
cls_scores = x3d_head(feat)
assert cls_scores.shape == torch.Size([3, 4]) | Test loss method, layer construction, attributes and forward function in
x3d head. | test_x3d_head | python | open-mmlab/mmaction2 | tests/models/heads/test_x3d_head.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/heads/test_x3d_head.py | Apache-2.0 |
def test_bbox_head_ava():
"""Test loss method, layer construction, attributes and forward function in
bbox head."""
with pytest.raises(TypeError):
# topk must be None, int or tuple[int]
BBoxHeadAVA(background_class=True, topk=0.1)
with pytest.raises(AssertionError):
# topk should be smaller than num_classes
BBoxHeadAVA(background_class=True, num_classes=5, topk=(3, 5))
bbox_head = BBoxHeadAVA(
background_class=True, in_channels=10, num_classes=4, topk=1)
input = torch.randn([3, 10, 2, 2, 2])
ret = bbox_head(input)
assert ret.shape == (3, 4)
cls_score = torch.tensor(
[[0.568, -0.162, 0.273, -0.390, 0.447, 0.102, -0.409],
[2.388, 0.609, 0.369, 1.630, -0.808, -0.212, 0.296],
[0.252, -0.533, -0.644, -0.591, 0.148, 0.963, -0.525],
[0.134, -0.311, -0.764, -0.752, 0.656, -1.517, 0.185]])
# Test topk_to_matrix()
assert torch.equal(
BBoxHeadAVA.topk_to_matrix(cls_score[:, 1:], 1),
torch.tensor([[0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0]],
dtype=bool))
assert torch.equal(
BBoxHeadAVA.topk_to_matrix(cls_score[:, 1:], 2),
torch.tensor([[0, 1, 0, 1, 0, 0], [1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 0, 1]],
dtype=bool))
assert torch.equal(
BBoxHeadAVA.topk_to_matrix(cls_score[:, 1:], 3),
torch.tensor([[0, 1, 0, 1, 1, 0], [1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1], [1, 0, 0, 1, 0, 1]],
dtype=bool))
assert torch.equal(
BBoxHeadAVA.topk_to_matrix(cls_score[:, 1:], 6),
torch.ones([4, 6], dtype=bool))
# Test Multi-Label Loss
bbox_head = BBoxHeadAVA(
background_class=True) # Why is this here? isn't this redundant?
bbox_head.init_weights()
bbox_head = BBoxHeadAVA(
background_class=True,
temporal_pool_type='max',
spatial_pool_type='avg')
bbox_head.init_weights()
# test without background class
"""
losses = bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=None,
labels=labels,
label_weights=label_weights)
assert torch.isclose(losses['loss_action_cls'], torch.tensor(0.7162495))
assert torch.isclose(losses['recall@thr=0.5'], torch.tensor(0.6666666))
assert torch.isclose(losses['prec@thr=0.5'], torch.tensor(0.4791665))
assert torch.isclose(losses['recall@top3'], torch.tensor(0.75))
assert torch.isclose(losses['prec@top3'], torch.tensor(0.5))
assert torch.isclose(losses['recall@top5'], torch.tensor(1.0))
assert torch.isclose(losses['prec@top5'], torch.tensor(0.45))
# Test Single-Label Loss
bbox_head = BBoxHeadAVA(multilabel=False)
losses = bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=None,
labels=labels,
label_weights=label_weights)
assert torch.isclose(losses['loss_action_cls'], torch.tensor(1.639561))
assert torch.isclose(losses['recall@thr=0.5'], torch.tensor(0.25))
assert torch.isclose(losses['prec@thr=0.5'], torch.tensor(0.25))
assert torch.isclose(losses['recall@top3'], torch.tensor(0.75))
assert torch.isclose(losses['prec@top3'], torch.tensor(0.5))
assert torch.isclose(losses['recall@top5'], torch.tensor(1.0))
assert torch.isclose(losses['prec@top5'], torch.tensor(0.45))
# Test ROI
rois = torch.tensor([[0.0, 0.1, 0.2, 0.3, 0.4], [0.0, 0.5, 0.6, 0.7, 0.8]])
rois[1::2] *= 380
rois[2::2] *= 220
crop_quadruple = np.array([0.1, 0.2, 0.8, 0.7])
cls_score = torch.tensor([0.995, 0.728])
img_shape = (320, 480)
flip = True
bbox_head = BBoxHeadAVA(multilabel=True)
bboxes, scores = bbox_head.get_det_bboxes(
rois=rois,
cls_score=cls_score,
img_shape=img_shape,
flip=flip,
crop_quadruple=crop_quadruple)
assert torch.all(
torch.isclose(
bboxes,
torch.tensor([[0.89783341, 0.20043750, 0.89816672, 0.20087500],
[0.45499998, 0.69875002, 0.58166665, 0.86499995]])))
assert torch.all(
torch.isclose(scores, torch.tensor([0.73007441, 0.67436624])))
bbox_head = BBoxHeadAVA(multilabel=False)
bboxes, scores = bbox_head.get_det_bboxes(
rois=rois,
cls_score=cls_score,
img_shape=img_shape,
flip=flip,
crop_quadruple=crop_quadruple)
assert torch.all(
torch.isclose(
bboxes,
torch.tensor([[0.89783341, 0.20043750, 0.89816672, 0.20087500],
[0.45499998, 0.69875002, 0.58166665, 0.86499995]])))
assert torch.all(torch.isclose(scores, torch.tensor([0.56636, 0.43364])))
""" | Test loss method, layer construction, attributes and forward function in
bbox head. | test_bbox_head_ava | python | open-mmlab/mmaction2 | tests/models/roi_heads/test_bbox_heads.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/roi_heads/test_bbox_heads.py | Apache-2.0 |
def test_fbo_head():
"""Test layer construction, attributes and forward function in fbo head."""
lfb_prefix_path = osp.normpath(
osp.join(osp.dirname(__file__), '../../data/lfb'))
st_feat_shape = (1, 16, 1, 8, 8)
st_feat = torch.rand(st_feat_shape)
rois = torch.randn(1, 5)
rois[0][0] = 0
img_metas = [dict(img_key='video_1, 930')]
# non local fbo
fbo_head = FBOHead(
lfb_cfg=dict(
lfb_prefix_path=lfb_prefix_path,
max_num_sampled_feat=5,
window_size=60,
lfb_channels=16,
dataset_modes=('unittest'),
device='cpu'),
fbo_cfg=dict(
type='non_local',
st_feat_channels=16,
lt_feat_channels=16,
latent_channels=8,
num_st_feat=1,
num_lt_feat=5 * 60,
))
fbo_head.init_weights()
out = fbo_head(st_feat, rois, img_metas)
assert out.shape == (1, 24, 1, 1, 1)
# avg fbo
fbo_head = FBOHead(
lfb_cfg=dict(
lfb_prefix_path=lfb_prefix_path,
max_num_sampled_feat=5,
window_size=60,
lfb_channels=16,
dataset_modes=('unittest'),
device='cpu'),
fbo_cfg=dict(type='avg'))
fbo_head.init_weights()
out = fbo_head(st_feat, rois, img_metas)
assert out.shape == (1, 32, 1, 1, 1)
# max fbo
fbo_head = FBOHead(
lfb_cfg=dict(
lfb_prefix_path=lfb_prefix_path,
max_num_sampled_feat=5,
window_size=60,
lfb_channels=16,
dataset_modes=('unittest'),
device='cpu'),
fbo_cfg=dict(type='max'))
fbo_head.init_weights()
out = fbo_head(st_feat, rois, img_metas)
assert out.shape == (1, 32, 1, 1, 1) | Test layer construction, attributes and forward function in fbo head. | test_fbo_head | python | open-mmlab/mmaction2 | tests/models/roi_heads/test_fbo_head.py | https://github.com/open-mmlab/mmaction2/blob/master/tests/models/roi_heads/test_fbo_head.py | Apache-2.0 |
def __call__(self, results):
"""Select frames to verify.
Select the first, last and three random frames, Required key is
"total_frames", added or modified key is "frame_inds".
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
assert results['total_frames'] > 0
# first and last frames
results['frame_inds'] = np.array([0, results['total_frames'] - 1])
# choose 3 random frames
if results['total_frames'] > 2:
results['frame_inds'] = np.concatenate([
results['frame_inds'],
np.random.randint(1, results['total_frames'] - 1, 3)
])
return results | Select frames to verify.
Select the first, last and three random frames. Required key is
"total_frames", added or modified key is "frame_inds".
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
| __call__ | python | open-mmlab/mmaction2 | tools/analysis_tools/check_videos.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/analysis_tools/check_videos.py | Apache-2.0 |
def cuhk17_top1():
"""Assign label for each proposal with the cuhk17 result, which is the #2
entry in http://activity-net.org/challenges/2017/evaluation.html."""
if not osp.exists('cuhk_anet17_pred.json'):
os.system('wget https://download.openmmlab.com/'
'mmaction/localization/cuhk_anet17_pred.json')
proposal = mmengine.load(args.proposal)
results = proposal['results']
cuhk_pred = mmengine.load('cuhk_anet17_pred.json')['results']
def get_topk(preds, k):
preds.sort(key=lambda x: x['score'])
return preds[-k:]
for k, v in results.items():
action_pred = cuhk_pred[k]
top1 = get_topk(action_pred, 1)
top1_label = top1[0]['label']
new_value = []
for item in v:
x = dict(label=top1_label)
x.update(item)
new_value.append(x)
results[k] = new_value
proposal['results'] = results
mmengine.dump(proposal, args.det_output) | Assign label for each proposal with the cuhk17 result, which is the #2
entry in http://activity-net.org/challenges/2017/evaluation.html. | cuhk17_top1 | python | open-mmlab/mmaction2 | tools/analysis_tools/report_map.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/analysis_tools/report_map.py | Apache-2.0 |
def lines2dictlist(lines, format):
"""Convert lines in 'txt' format to dictionaries in 'json' format.
Currently support single-label and multi-label.
Example of a single-label rawframes annotation txt file:
.. code-block:: txt
(frame_dir num_frames label)
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
Example of a multi-label rawframes annotation txt file:
.. code-block:: txt
(frame_dir num_frames label1 label2 ...)
some/directory-1 163 1 3 5
some/directory-2 122 1 2
some/directory-3 258 2
Example of a single-label videos annotation txt file:
.. code-block:: txt
(filename label)
some/path/000.mp4 1
some/path/001.mp4 1
some/path/002.mp4 2
Example of a multi-label videos annotation txt file:
.. code-block:: txt
(filename label1 label2 ...)
some/path/000.mp4 1 3 5
some/path/001.mp4 1 4 8
some/path/002.mp4 2 4 9
Args:
lines (list): List of lines in 'txt' label format.
format (str): Data format, choices are 'rawframes' and 'videos'.
Returns:
list[dict]: For rawframes format, each dict has keys: frame_dir,
total_frames, label; for videos format, each dict has keys:
filename, label.
"""
lines = [x.split() for x in lines]
if format == 'rawframes':
data = [
dict(
frame_dir=line[0],
total_frames=int(line[1]),
label=[int(x) for x in line[2:]]) for line in lines
]
elif format == 'videos':
data = [
dict(filename=line[0], label=[int(x) for x in line[1:]])
for line in lines
]
return data | Convert lines in 'txt' format to dictionaries in 'json' format.
Currently support single-label and multi-label.
Example of a single-label rawframes annotation txt file:
.. code-block:: txt
(frame_dir num_frames label)
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
Example of a multi-label rawframes annotation txt file:
.. code-block:: txt
(frame_dir num_frames label1 label2 ...)
some/directory-1 163 1 3 5
some/directory-2 122 1 2
some/directory-3 258 2
Example of a single-label videos annotation txt file:
.. code-block:: txt
(filename label)
some/path/000.mp4 1
some/path/001.mp4 1
some/path/002.mp4 2
Example of a multi-label videos annotation txt file:
.. code-block:: txt
(filename label1 label2 ...)
some/path/000.mp4 1 3 5
some/path/001.mp4 1 4 8
some/path/002.mp4 2 4 9
Args:
lines (list): List of lines in 'txt' label format.
format (str): Data format, choices are 'rawframes' and 'videos'.
Returns:
list[dict]: For rawframes format, each dict has keys: frame_dir,
total_frames, label; for videos format, each diction has keys:
filename, label.
| lines2dictlist | python | open-mmlab/mmaction2 | tools/data/anno_txt2json.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/anno_txt2json.py | Apache-2.0 |
def generate_spectrogram_magphase(self, audio, with_phase=False):
"""Separate a complex-valued spectrogram D into its magnitude (S)
and phase (P) components, so that D = S * P.
Args:
audio (np.ndarray): The input audio signal.
with_phase (bool): Determines whether to output the
phase components. Default: False.
Returns:
np.ndarray: magnitude and phase component of the complex-valued
spectrogram.
"""
spectro = librosa.core.stft(
audio,
hop_length=self.get_hop_size(),
n_fft=self.fft_size,
center=True)
spectro_mag, spectro_phase = librosa.core.magphase(spectro)
spectro_mag = np.expand_dims(spectro_mag, axis=0)
if with_phase:
spectro_phase = np.expand_dims(np.angle(spectro_phase), axis=0)
return spectro_mag, spectro_phase
return spectro_mag | Separate a complex-valued spectrogram D into its magnitude (S)
and phase (P) components, so that D = S * P.
Args:
audio (np.ndarray): The input audio signal.
with_phase (bool): Determines whether to output the
phase components. Default: False.
Returns:
np.ndarray: magnitude and phase component of the complex-valued
spectrogram.
| generate_spectrogram_magphase | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def adjust_time_resolution(self, quantized, mel):
"""Adjust time resolution by repeating features.
Args:
quantized (np.ndarray): (T,)
mel (np.ndarray): (N, D)
Returns:
tuple: Tuple of (T,) and (T, D)
"""
assert quantized.ndim == 1
assert mel.ndim == 2
upsample_factor = quantized.size // mel.shape[0]
mel = np.repeat(mel, upsample_factor, axis=0)
n_pad = quantized.size - mel.shape[0]
if n_pad != 0:
assert n_pad > 0
mel = np.pad(
mel, [(0, n_pad), (0, 0)], mode='constant', constant_values=0)
# trim
start, end = self.start_and_end_indices(quantized,
self.silence_threshold)
return quantized[start:end], mel[start:end, :] | Adjust time resolution by repeating features.
Args:
quantized (np.ndarray): (T,)
mel (np.ndarray): (N, D)
Returns:
tuple: Tuple of (T,) and (T, D)
| adjust_time_resolution | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def start_and_end_indices(quantized, silence_threshold=2):
"""Trim the audio file when reaches the silence threshold."""
for start in range(quantized.size):
if abs(quantized[start] - 127) > silence_threshold:
break
for end in range(quantized.size - 1, 1, -1):
if abs(quantized[end] - 127) > silence_threshold:
break
assert abs(quantized[start] - 127) > silence_threshold
assert abs(quantized[end] - 127) > silence_threshold
return start, end | Trim the audio file when reaches the silence threshold. | start_and_end_indices | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
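A short illustration of the trimming helper above; it is called as a plain function here for simplicity, although in the script it lives on the audio feature-extraction class:
import numpy as np
quantized = np.array([127, 127, 131, 140, 126, 127])  # 127 is the silent level
start, end = start_and_end_indices(quantized, silence_threshold=2)
assert (start, end) == (2, 3)  # first and last samples deviating from 127 by more than 2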
def lws_num_frames(length, fsize, fshift):
"""Compute number of time frames of lws spectrogram.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_.
"""
pad = (fsize - fshift)
if length % fshift == 0:
M = (length + pad * 2 - fsize) // fshift + 1
else:
M = (length + pad * 2 - fsize) // fshift + 2
return M | Compute number of time frames of lws spectrogram.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_.
| lws_num_frames | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def lws_pad_lr(self, x, fsize, fshift):
"""Compute left and right padding lws internally uses.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_.
"""
M = self.lws_num_frames(len(x), fsize, fshift)
pad = (fsize - fshift)
T = len(x) + 2 * pad
r = (M - 1) * fshift + fsize - T
return pad, pad + r | Compute left and right padding lws internally uses.
Please refer to <https://pypi.org/project/lws/1.2.6/>`_.
| lws_pad_lr | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def _linear_to_mel(self, spectrogram):
"""Warp linear scale spectrograms to the mel scale.
Please refer to <https://github.com/r9y9/deepvoice3_pytorch>`_
"""
global _mel_basis
_mel_basis = self._build_mel_basis()
return np.dot(_mel_basis, spectrogram) | Warp linear scale spectrograms to the mel scale.
Please refer to <https://github.com/r9y9/deepvoice3_pytorch>`_
| _linear_to_mel | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def _build_mel_basis(self):
"""Build mel filters.
Please refer to <https://github.com/r9y9/deepvoice3_pytorch>`_
"""
assert self.fmax <= self.sample_rate // 2
return librosa.filters.mel(
self.sample_rate,
self.fft_size,
fmin=self.fmin,
fmax=self.fmax,
n_mels=self.num_mels) | Build mel filters.
Please refer to <https://github.com/r9y9/deepvoice3_pytorch>`_
| _build_mel_basis | python | open-mmlab/mmaction2 | tools/data/build_audio_features.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_audio_features.py | Apache-2.0 |
def build_file_list(splits, frame_info, shuffle=False):
"""Build file list for a certain data split.
Args:
splits (tuple): Data split to generate file list.
frame_info (dict): Dict mapping from frames to path. e.g.,
'Skiing/v_Skiing_g18_c02': ('data/ucf101/rawframes/Skiing/v_Skiing_g18_c02', 0, 0). # noqa: E501
shuffle (bool): Whether to shuffle the file list.
Returns:
tuple: RGB file list for training and testing, together with
Flow file list for training and testing.
"""
def build_list(split):
"""Build RGB and Flow file list with a given split.
Args:
split (list): Split used to generate the file list.
Returns:
tuple[list, list]: (rgb_list, flow_list), rgb_list is the
generated file list for rgb, flow_list is the generated
file list for flow.
"""
rgb_list, flow_list = list(), list()
for item in split:
if item[0] not in frame_info:
continue
if frame_info[item[0]][1] > 0:
# rawframes
rgb_cnt = frame_info[item[0]][1]
flow_cnt = frame_info[item[0]][2]
if isinstance(item[1], int):
rgb_list.append(f'{item[0]} {rgb_cnt} {item[1]}\n')
flow_list.append(f'{item[0]} {flow_cnt} {item[1]}\n')
elif isinstance(item[1], list):
# only for multi-label datasets like mmit
rgb_list.append(f'{item[0]} {rgb_cnt} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
rgb_list.append(f'{item[0]} {flow_cnt} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
else:
raise ValueError(
'frame_info should be ' +
'[`video`(str), `label`(int)|`labels(list[int])`')
else:
# videos
if isinstance(item[1], int):
rgb_list.append(f'{frame_info[item[0]][0]} {item[1]}\n')
flow_list.append(f'{frame_info[item[0]][0]} {item[1]}\n')
elif isinstance(item[1], list):
# only for multi-label datasets like mmit
rgb_list.append(f'{frame_info[item[0]][0]} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
flow_list.append(
f'{frame_info[item[0]][0]} ' +
' '.join([str(digit) for digit in item[1]]) + '\n')
else:
raise ValueError(
'frame_info should be ' +
'[`video`(str), `label`(int)|`labels(list[int])`')
if shuffle:
random.shuffle(rgb_list)
random.shuffle(flow_list)
return rgb_list, flow_list
train_rgb_list, train_flow_list = build_list(splits[0])
test_rgb_list, test_flow_list = build_list(splits[1])
return (train_rgb_list, test_rgb_list), (train_flow_list, test_flow_list) | Build file list for a certain data split.
Args:
splits (tuple): Data split to generate file list.
frame_info (dict): Dict mapping from frames to path. e.g.,
'Skiing/v_Skiing_g18_c02': ('data/ucf101/rawframes/Skiing/v_Skiing_g18_c02', 0, 0). # noqa: E501
shuffle (bool): Whether to shuffle the file list.
Returns:
tuple: RGB file list for training and testing, together with
Flow file list for training and testing.
| build_file_list | python | open-mmlab/mmaction2 | tools/data/build_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_file_list.py | Apache-2.0 |
def build_list(split):
"""Build RGB and Flow file list with a given split.
Args:
split (list): Split used to generate the file list.
Returns:
tuple[list, list]: (rgb_list, flow_list), rgb_list is the
generated file list for rgb, flow_list is the generated
file list for flow.
"""
rgb_list, flow_list = list(), list()
for item in split:
if item[0] not in frame_info:
continue
if frame_info[item[0]][1] > 0:
# rawframes
rgb_cnt = frame_info[item[0]][1]
flow_cnt = frame_info[item[0]][2]
if isinstance(item[1], int):
rgb_list.append(f'{item[0]} {rgb_cnt} {item[1]}\n')
flow_list.append(f'{item[0]} {flow_cnt} {item[1]}\n')
elif isinstance(item[1], list):
# only for multi-label datasets like mmit
rgb_list.append(f'{item[0]} {rgb_cnt} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
rgb_list.append(f'{item[0]} {flow_cnt} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
else:
raise ValueError(
'frame_info should be ' +
'[`video`(str), `label`(int)|`labels(list[int])`')
else:
# videos
if isinstance(item[1], int):
rgb_list.append(f'{frame_info[item[0]][0]} {item[1]}\n')
flow_list.append(f'{frame_info[item[0]][0]} {item[1]}\n')
elif isinstance(item[1], list):
# only for multi-label datasets like mmit
rgb_list.append(f'{frame_info[item[0]][0]} ' +
' '.join([str(digit)
for digit in item[1]]) + '\n')
flow_list.append(
f'{frame_info[item[0]][0]} ' +
' '.join([str(digit) for digit in item[1]]) + '\n')
else:
raise ValueError(
'frame_info should be ' +
'[`video`(str), `label`(int)|`labels(list[int])`')
if shuffle:
random.shuffle(rgb_list)
random.shuffle(flow_list)
return rgb_list, flow_list | Build RGB and Flow file list with a given split.
Args:
split (list): Split used to generate the file list.
Returns:
tuple[list, list]: (rgb_list, flow_list), rgb_list is the
generated file list for rgb, flow_list is the generated
file list for flow.
| build_list | python | open-mmlab/mmaction2 | tools/data/build_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_file_list.py | Apache-2.0 |
def extract_frame(vid_item):
"""Generate optical flow using dense flow.
Args:
vid_item (list): Video item containing video full path,
video (short) path, video id.
Returns:
bool: Whether the optical flow was generated successfully.
"""
full_path, vid_path, vid_id, method, task, report_file = vid_item
if '/' in vid_path:
act_name = osp.basename(osp.dirname(vid_path))
out_full_path = osp.join(args.out_dir, act_name)
else:
out_full_path = args.out_dir
run_success = -1
if task == 'rgb':
if args.use_opencv:
# Not like using denseflow,
# Use OpenCV will not make a sub directory with the video name
try:
video_name = osp.splitext(osp.basename(vid_path))[0]
out_full_path = osp.join(out_full_path, video_name)
vr = mmcv.VideoReader(full_path)
for i, vr_frame in enumerate(vr):
if vr_frame is not None:
w, h, _ = np.shape(vr_frame)
if args.new_short == 0:
if args.new_width == 0 or args.new_height == 0:
# Keep original shape
out_img = vr_frame
else:
out_img = mmcv.imresize(
vr_frame,
(args.new_width, args.new_height))
else:
if min(h, w) == h:
new_h = args.new_short
new_w = int((new_h / h) * w)
else:
new_w = args.new_short
new_h = int((new_w / w) * h)
out_img = mmcv.imresize(vr_frame, (new_h, new_w))
mmcv.imwrite(out_img,
f'{out_full_path}/img_{i + 1:05d}.jpg')
else:
warnings.warn(
'Length inconsistent!'
f'Early stop with {i + 1} out of {len(vr)} frames.'
)
break
run_success = 0
except Exception:
run_success = -1
else:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -nw={args.new_width} -nh={args.new_height} -v')
else:
cmd = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
run_success = os.system(cmd)
elif task == 'flow':
if args.input_frames:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -nw={args.new_width} --nh={args.new_height} -v --if')
else:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v --if')
else:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -nw={args.new_width} --nh={args.new_height} -v')
else:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v')
run_success = os.system(cmd)
else:
if args.new_short == 0:
cmd_rgb = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -nw={args.new_width} -nh={args.new_height} -v')
cmd_flow = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -nw={args.new_width} -nh={args.new_height} -v')
else:
cmd_rgb = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
cmd_flow = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501
f' -ns={args.new_short} -v')
run_success_rgb = os.system(cmd_rgb)
run_success_flow = os.system(cmd_flow)
if run_success_flow == 0 and run_success_rgb == 0:
run_success = 0
if run_success == 0:
print(f'{task} {vid_id} {vid_path} {method} done')
sys.stdout.flush()
lock.acquire()
with open(report_file, 'a') as f:
line = full_path + '\n'
f.write(line)
lock.release()
else:
print(f'{task} {vid_id} {vid_path} {method} got something wrong')
sys.stdout.flush()
return True | Generate optical flow using dense flow.
Args:
vid_item (list): Video item containing video full path,
video (short) path, video id.
Returns:
bool: Whether the optical flow was generated successfully.
| extract_frame | python | open-mmlab/mmaction2 | tools/data/build_rawframes.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_rawframes.py | Apache-2.0 |
def encode_video(frame_dir_item):
"""Encode frames to video using ffmpeg.
Args:
frame_dir_item (list): Rawframe item containing raw frame directory
full path, rawframe directory (short) path, rawframe directory id.
Returns:
bool: Whether the video was synthesized successfully.
"""
full_path, frame_dir_path, frame_dir_id = frame_dir_item
out_full_path = args.out_dir
img_name_tmpl = args.filename_tmpl + '.' + args.in_format
img_path = osp.join(full_path, img_name_tmpl)
out_vid_name = frame_dir_path + '.' + args.ext
out_vid_path = osp.join(out_full_path, out_vid_name)
cmd = osp.join(
f"ffmpeg -start_number {args.start_idx} -r {args.fps} -i '{img_path}' "
f"-vcodec {args.vcodec} '{out_vid_path}'")
os.system(cmd)
print(f'{frame_dir_id} {frame_dir_path} done')
sys.stdout.flush()
return True | Encode frames to video using ffmpeg.
Args:
frame_dir_item (list): Rawframe item containing raw frame directory
full path, rawframe directory (short) path, rawframe directory id.
Returns:
bool: Whether the video was synthesized successfully.
| encode_video | python | open-mmlab/mmaction2 | tools/data/build_videos.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/build_videos.py | Apache-2.0 |
def process_norm_proposal_file(norm_proposal_file, frame_dict):
"""Process the normalized proposal file and denormalize it.
Args:
norm_proposal_file (str): Name of normalized proposal file.
frame_dict (dict): Information of frame folders.
"""
proposal_file = norm_proposal_file.replace('normalized_', '')
norm_proposals = load_localize_proposal_file(norm_proposal_file)
processed_proposal_list = []
for idx, norm_proposal in enumerate(norm_proposals):
video_id = norm_proposal[0]
frame_info = frame_dict[video_id]
num_frames = frame_info[1]
frame_path = osp.basename(frame_info[0])
gt = [[
int(x[0]),
int(float(x[1]) * num_frames),
int(float(x[2]) * num_frames)
] for x in norm_proposal[2]]
proposal = [[
int(x[0]),
float(x[1]),
float(x[2]),
int(float(x[3]) * num_frames),
int(float(x[4]) * num_frames)
] for x in norm_proposal[3]]
gt_dump = '\n'.join(['{} {} {}'.format(*x) for x in gt])
gt_dump += '\n' if len(gt) else ''
proposal_dump = '\n'.join(
['{} {:.04f} {:.04f} {} {}'.format(*x) for x in proposal])
proposal_dump += '\n' if len(proposal) else ''
processed_proposal_list.append(
f'# {idx}\n{frame_path}\n{num_frames}\n1'
f'\n{len(gt)}\n{gt_dump}{len(proposal)}\n{proposal_dump}')
with open(proposal_file, 'w') as f:
f.writelines(processed_proposal_list) | Process the normalized proposal file and denormalize it.
Args:
norm_proposal_file (str): Name of normalized proposal file.
frame_dict (dict): Information of frame folders.
| process_norm_proposal_file | python | open-mmlab/mmaction2 | tools/data/denormalize_proposal_file.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/denormalize_proposal_file.py | Apache-2.0 |
def extract_audio_wav(line):
"""Extract the audio wave from video streams using FFMPEG."""
video_id, _ = osp.splitext(osp.basename(line))
video_dir = osp.dirname(line)
video_rel_dir = osp.relpath(video_dir, args.root)
dst_dir = osp.join(args.dst_root, video_rel_dir)
os.popen(f'mkdir -p {dst_dir}')
try:
if osp.exists(f'{dst_dir}/{video_id}.wav'):
return
cmd = f'ffmpeg -i ./{line} -map 0:a -y {dst_dir}/{video_id}.wav'
os.popen(cmd)
except BaseException:
with open('extract_wav_err_file.txt', 'a+') as f:
f.write(f'{line}\n') | Extract the audio wave from video streams using FFMPEG. | extract_audio_wav | python | open-mmlab/mmaction2 | tools/data/extract_audio.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/extract_audio.py | Apache-2.0 |
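A hedged variant of the same audio extraction using subprocess.run so failures raise instead of passing silently; the video path and output directory are placeholders.
import os
import os.path as osp
import subprocess

video = '/data/kinetics400/videos_train/example.mp4'  # hypothetical input
dst_dir = '/data/kinetics400/audios'                  # hypothetical output dir
os.makedirs(dst_dir, exist_ok=True)
video_id, _ = osp.splitext(osp.basename(video))
wav_path = osp.join(dst_dir, f'{video_id}.wav')
subprocess.run(['ffmpeg', '-i', video, '-map', '0:a', '-y', wav_path],
               check=True)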
def parse_directory(path,
rgb_prefix='img_',
flow_x_prefix='flow_x_',
flow_y_prefix='flow_y_',
level=1):
"""Parse directories holding extracted frames from standard benchmarks.
Args:
path (str): Directory path to parse frames.
rgb_prefix (str): Prefix of generated rgb frames name.
default: 'img_'.
flow_x_prefix (str): Prefix of generated flow x name.
default: `flow_x_`.
flow_y_prefix (str): Prefix of generated flow y name.
default: `flow_y_`.
level (int): Directory level for glob searching. Options are 1 and 2.
default: 1.
Returns:
dict: frame info dict with video id as key and tuple(path(str),
rgb_num(int), flow_x_num(int)) as value.
"""
print(f'parse frames under directory {path}')
if level == 1:
# Only search for one-level directory
def locate_directory(x):
return osp.basename(x)
frame_dirs = glob.glob(osp.join(path, '*'))
elif level == 2:
# search for two-level directory
def locate_directory(x):
return osp.join(osp.basename(osp.dirname(x)), osp.basename(x))
frame_dirs = glob.glob(osp.join(path, '*', '*'))
else:
raise ValueError('level can be only 1 or 2')
def count_files(directory, prefix_list):
"""Count file number with a given directory and prefix.
Args:
            directory (str): Data directory to be searched.
            prefix_list (list): List of prefixes.
Returns:
            list[int]: Number of files matching each prefix.
"""
lst = os.listdir(directory)
cnt_list = [len(fnmatch.filter(lst, x + '*')) for x in prefix_list]
return cnt_list
# check RGB
frame_dict = {}
for i, frame_dir in enumerate(frame_dirs):
total_num = count_files(frame_dir,
(rgb_prefix, flow_x_prefix, flow_y_prefix))
dir_name = locate_directory(frame_dir)
num_x = total_num[1]
num_y = total_num[2]
if num_x != num_y:
raise ValueError(f'x and y direction have different number '
f'of flow images in video directory: {frame_dir}')
if i % 200 == 0:
print(f'{i} videos parsed')
frame_dict[dir_name] = (frame_dir, total_num[0], num_x)
print('frame directory analysis done')
return frame_dict | Parse directories holding extracted frames from standard benchmarks.
Args:
path (str): Directory path to parse frames.
rgb_prefix (str): Prefix of generated rgb frames name.
default: 'img_'.
flow_x_prefix (str): Prefix of generated flow x name.
default: `flow_x_`.
flow_y_prefix (str): Prefix of generated flow y name.
default: `flow_y_`.
level (int): Directory level for glob searching. Options are 1 and 2.
default: 1.
Returns:
dict: frame info dict with video id as key and tuple(path(str),
rgb_num(int), flow_x_num(int)) as value.
| parse_directory | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
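A short usage sketch, assuming the parse_directory function above is in scope and that '/data/ucf101/rawframes' (a hypothetical path) holds two-level frame folders.
frame_info = parse_directory('/data/ucf101/rawframes', level=2)
for video_id, (frame_dir, num_rgb, num_flow) in list(frame_info.items())[:3]:
    print(video_id, num_rgb, num_flow)  # rgb frame count and flow pair count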
def count_files(directory, prefix_list):
"""Count file number with a given directory and prefix.
Args:
        directory (str): Data directory to be searched.
        prefix_list (list): List of prefixes.
Returns:
        list[int]: Number of files matching each prefix.
"""
lst = os.listdir(directory)
cnt_list = [len(fnmatch.filter(lst, x + '*')) for x in prefix_list]
return cnt_list | Count file number with a given directory and prefix.
Args:
        directory (str): Data directory to be searched.
        prefix_list (list): List of prefixes.
Returns:
        list[int]: Number of files matching each prefix.
| count_files | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_ucf101_splits(level):
"""Parse UCF-101 dataset into "train", "val", "test" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of UCF-101.
"""
class_index_file = 'data/ucf101/annotations/classInd.txt'
train_file_template = 'data/ucf101/annotations/trainlist{:02d}.txt'
test_file_template = 'data/ucf101/annotations/testlist{:02d}.txt'
with open(class_index_file, 'r') as fin:
class_index = [x.strip().split() for x in fin]
class_mapping = {x[1]: int(x[0]) - 1 for x in class_index}
def line_to_map(line):
"""A function to map line string to video and label.
Args:
line (str): A long directory path, which is a text path.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
"""
items = line.strip().split()
video = osp.splitext(items[0])[0]
if level == 1:
video = osp.basename(video)
label = items[0]
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
label = class_mapping[osp.dirname(items[0])]
return video, label
splits = []
for i in range(1, 4):
with open(train_file_template.format(i), 'r') as fin:
train_list = [line_to_map(x) for x in fin]
with open(test_file_template.format(i), 'r') as fin:
test_list = [line_to_map(x) for x in fin]
splits.append((train_list, test_list))
return splits | Parse UCF-101 dataset into "train", "val", "test" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of UCF-101.
| parse_ucf101_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
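A hedged usage sketch, assuming parse_ucf101_splits above is in scope and the UCF-101 annotation files sit under data/ucf101/annotations; the output filename is a hypothetical example.
splits = parse_ucf101_splits(level=2)
train_list, test_list = splits[0]       # first of the three official splits
lines = [f'{video} {label}' for video, label in train_list]
with open('ucf101_train_split_1_videos.txt', 'w') as f:
    f.write('\n'.join(lines) + '\n')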
def line_to_map(line):
"""A function to map line string to video and label.
Args:
line (str): A long directory path, which is a text path.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
"""
items = line.strip().split()
video = osp.splitext(items[0])[0]
if level == 1:
video = osp.basename(video)
label = items[0]
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
label = class_mapping[osp.dirname(items[0])]
return video, label | A function to map line string to video and label.
Args:
line (str): A long directory path, which is a text path.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
| line_to_map | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_jester_splits(level):
"""Parse Jester into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Jester dataset.
"""
# Read the annotations
class_index_file = 'data/jester/annotations/jester-v1-labels.csv'
train_file = 'data/jester/annotations/jester-v1-train.csv'
val_file = 'data/jester/annotations/jester-v1-validation.csv'
test_file = 'data/jester/annotations/jester-v1-test.csv'
with open(class_index_file, 'r') as fin:
class_index = [x.strip() for x in fin]
class_mapping = {class_index[idx]: idx for idx in range(len(class_index))}
def line_to_map(line, test_mode=False):
items = line.strip().split(';')
video = items[0]
if level == 1:
video = osp.basename(video)
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
if test_mode:
return video
label = class_mapping[items[1]]
return video, label
with open(train_file, 'r') as fin:
train_list = [line_to_map(x) for x in fin]
with open(val_file, 'r') as fin:
val_list = [line_to_map(x) for x in fin]
with open(test_file, 'r') as fin:
test_list = [line_to_map(x, test_mode=True) for x in fin]
splits = ((train_list, val_list, test_list), )
return splits | Parse Jester into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Jester dataset.
| parse_jester_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_sthv1_splits(level):
"""Parse Something-Something dataset V1 into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Something-Something V1 dataset.
"""
# Read the annotations
# yapf: disable
class_index_file = 'data/sthv1/annotations/something-something-v1-labels.csv' # noqa
# yapf: enable
train_file = 'data/sthv1/annotations/something-something-v1-train.csv'
val_file = 'data/sthv1/annotations/something-something-v1-validation.csv'
test_file = 'data/sthv1/annotations/something-something-v1-test.csv'
with open(class_index_file, 'r') as fin:
class_index = [x.strip() for x in fin]
class_mapping = {class_index[idx]: idx for idx in range(len(class_index))}
def line_to_map(line, test_mode=False):
items = line.strip().split(';')
video = items[0]
if level == 1:
video = osp.basename(video)
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
if test_mode:
return video
label = class_mapping[items[1]]
return video, label
with open(train_file, 'r') as fin:
train_list = [line_to_map(x) for x in fin]
with open(val_file, 'r') as fin:
val_list = [line_to_map(x) for x in fin]
with open(test_file, 'r') as fin:
test_list = [line_to_map(x, test_mode=True) for x in fin]
splits = ((train_list, val_list, test_list), )
return splits | Parse Something-Something dataset V1 into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Something-Something V1 dataset.
| parse_sthv1_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_sthv2_splits(level):
"""Parse Something-Something dataset V2 into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Something-Something V2 dataset.
"""
# Read the annotations
# yapf: disable
class_index_file = 'data/sthv2/annotations/something-something-v2-labels.json' # noqa
# yapf: enable
train_file = 'data/sthv2/annotations/something-something-v2-train.json'
val_file = 'data/sthv2/annotations/something-something-v2-validation.json'
test_file = 'data/sthv2/annotations/something-something-v2-test.json'
with open(class_index_file, 'r') as fin:
class_mapping = json.loads(fin.read())
def line_to_map(item, test_mode=False):
video = item['id']
if level == 1:
video = osp.basename(video)
elif level == 2:
video = osp.join(
osp.basename(osp.dirname(video)), osp.basename(video))
if test_mode:
return video
template = item['template'].replace('[', '')
template = template.replace(']', '')
label = int(class_mapping[template])
return video, label
with open(train_file, 'r') as fin:
items = json.loads(fin.read())
train_list = [line_to_map(item) for item in items]
with open(val_file, 'r') as fin:
items = json.loads(fin.read())
val_list = [line_to_map(item) for item in items]
with open(test_file, 'r') as fin:
items = json.loads(fin.read())
test_list = [line_to_map(item, test_mode=True) for item in items]
splits = ((train_list, val_list, test_list), )
return splits | Parse Something-Something dataset V2 into "train", "val" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Something-Something V2 dataset.
| parse_sthv2_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
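A hedged usage sketch, assuming parse_sthv2_splits above is in scope and the Something-Something V2 annotation JSONs are present under data/sthv2/annotations.
(train_list, val_list, test_list), = parse_sthv2_splits(level=1)
print(len(train_list), len(val_list), len(test_list))
video, label = train_list[0]
print(video, label)  # video id string and integer label index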
def parse_mmit_splits():
"""Parse Multi-Moments in Time dataset into "train", "val" splits.
Returns:
list: "train", "val", "test" splits of Multi-Moments in Time.
"""
# Read the annotations
def line_to_map(x):
video = osp.splitext(x[0])[0]
labels = [int(digit) for digit in x[1:]]
return video, labels
csv_reader = csv.reader(open('data/mmit/annotations/trainingSet.csv'))
train_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open('data/mmit/annotations/validationSet.csv'))
val_list = [line_to_map(x) for x in csv_reader]
    test_list = val_list  # no test split for mmit
splits = ((train_list, val_list, test_list), )
return splits | Parse Multi-Moments in Time dataset into "train", "val" splits.
Returns:
list: "train", "val", "test" splits of Multi-Moments in Time.
| parse_mmit_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
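A hedged usage sketch, assuming parse_mmit_splits above is in scope; Multi-Moments in Time is multi-label, so each entry carries a list of label indices.
(train_list, val_list, _), = parse_mmit_splits()
video, labels = train_list[0]
print(video, labels)  # one video id and its list of integer labels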
def parse_kinetics_splits(level, dataset):
"""Parse Kinetics dataset into "train", "val", "test" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
dataset (str): Denotes the version of Kinetics that needs to be parsed,
choices are "kinetics400", "kinetics600" and "kinetics700".
Returns:
list: "train", "val", "test" splits of Kinetics.
"""
def convert_label(s, keep_whitespaces=False):
"""Convert label name to a formal string.
Remove redundant '"' and convert whitespace to '_'.
Args:
s (str): String to be converted.
keep_whitespaces(bool): Whether to keep whitespace. Default: False.
Returns:
str: Converted string.
"""
if not keep_whitespaces:
return s.replace('"', '').replace(' ', '_')
return s.replace('"', '')
def line_to_map(x, test=False):
"""A function to map line string to video and label.
Args:
x (str): A single line from Kinetics csv file.
test (bool): Indicate whether the line comes from test
annotation file.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
"""
if test:
# video = f'{x[0]}_{int(x[1]):06d}_{int(x[2]):06d}'
video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
label = -1 # label unknown
return video, label
video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
if level == 2:
video = f'{convert_label(x[0])}/{video}'
else:
assert level == 1
label = class_mapping[convert_label(x[0])]
return video, label
train_file = f'data/{dataset}/annotations/kinetics_train.csv'
val_file = f'data/{dataset}/annotations/kinetics_val.csv'
test_file = f'data/{dataset}/annotations/kinetics_test.csv'
csv_reader = csv.reader(open(train_file))
# skip the first line
next(csv_reader)
labels_sorted = sorted({convert_label(row[0]) for row in csv_reader})
class_mapping = {label: i for i, label in enumerate(labels_sorted)}
csv_reader = csv.reader(open(train_file))
next(csv_reader)
train_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open(val_file))
next(csv_reader)
val_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open(test_file))
next(csv_reader)
test_list = [line_to_map(x, test=True) for x in csv_reader]
splits = ((train_list, val_list, test_list), )
return splits | Parse Kinetics dataset into "train", "val", "test" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
dataset (str): Denotes the version of Kinetics that needs to be parsed,
choices are "kinetics400", "kinetics600" and "kinetics700".
Returns:
list: "train", "val", "test" splits of Kinetics.
| parse_kinetics_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
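A hedged usage sketch, assuming parse_kinetics_splits above is in scope and the kinetics_train/val/test csv files exist under data/kinetics400/annotations.
(train_list, val_list, test_list), = parse_kinetics_splits(
    level=2, dataset='kinetics400')
print(train_list[0])    # ('label_dir/ytid_start_end', class_index) style tuple
print(test_list[0][1])  # test labels are set to -1 (unknown)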
def convert_label(s, keep_whitespaces=False):
"""Convert label name to a formal string.
Remove redundant '"' and convert whitespace to '_'.
Args:
s (str): String to be converted.
keep_whitespaces(bool): Whether to keep whitespace. Default: False.
Returns:
str: Converted string.
"""
if not keep_whitespaces:
return s.replace('"', '').replace(' ', '_')
return s.replace('"', '') | Convert label name to a formal string.
Remove redundant '"' and convert whitespace to '_'.
Args:
s (str): String to be converted.
keep_whitespaces(bool): Whether to keep whitespace. Default: False.
Returns:
str: Converted string.
| convert_label | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def line_to_map(x, test=False):
"""A function to map line string to video and label.
Args:
x (str): A single line from Kinetics csv file.
test (bool): Indicate whether the line comes from test
annotation file.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
"""
if test:
# video = f'{x[0]}_{int(x[1]):06d}_{int(x[2]):06d}'
video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
label = -1 # label unknown
return video, label
video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
if level == 2:
video = f'{convert_label(x[0])}/{video}'
else:
assert level == 1
label = class_mapping[convert_label(x[0])]
return video, label | A function to map line string to video and label.
Args:
x (str): A single line from Kinetics csv file.
test (bool): Indicate whether the line comes from test
annotation file.
Returns:
tuple[str, str]: (video, label), video is the video id,
label is the video label.
| line_to_map | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def parse_mit_splits():
"""Parse Moments in Time dataset into "train", "val" splits.
Returns:
list: "train", "val", "test" splits of Moments in Time.
"""
# Read the annotations
class_mapping = {}
with open('data/mit/annotations/moments_categories.txt') as f_cat:
for line in f_cat.readlines():
cat, digit = line.rstrip().split(',')
class_mapping[cat] = int(digit)
def line_to_map(x):
video = osp.splitext(x[0])[0]
label = class_mapping[osp.dirname(x[0])]
return video, label
csv_reader = csv.reader(open('data/mit/annotations/trainingSet.csv'))
train_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open('data/mit/annotations/validationSet.csv'))
val_list = [line_to_map(x) for x in csv_reader]
test_list = val_list # no test for mit
splits = ((train_list, val_list, test_list), )
return splits | Parse Moments in Time dataset into "train", "val" splits.
Returns:
list: "train", "val", "test" splits of Moments in Time.
| parse_mit_splits | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
def generate_class_index_file():
"""This function will generate a `ClassInd.txt` for HMDB51 in a format
like UCF101, where class id starts with 1."""
video_path = 'data/hmdb51/videos'
annotation_dir = 'data/hmdb51/annotations'
class_list = sorted(os.listdir(video_path))
class_dict = dict()
if not osp.exists(class_index_file):
with open(class_index_file, 'w') as f:
content = []
for class_id, class_name in enumerate(class_list):
# like `ClassInd.txt` in UCF-101,
# the class_id begins with 1
class_dict[class_name] = class_id + 1
cur_line = ' '.join([str(class_id + 1), class_name])
content.append(cur_line)
content = '\n'.join(content)
f.write(content)
else:
print(f'{class_index_file} has been generated before.')
class_dict = {
class_name: class_id + 1
for class_id, class_name in enumerate(class_list)
}
for i in range(1, 4):
train_content = []
test_content = []
for class_name in class_dict:
filename = class_name + f'_test_split{i}.txt'
filename_path = osp.join(annotation_dir, filename)
with open(filename_path, 'r') as fin:
for line in fin:
video_info = line.strip().split()
video_name = video_info[0]
if video_info[1] == '1':
target_line = ' '.join([
osp.join(class_name, video_name),
str(class_dict[class_name])
])
train_content.append(target_line)
elif video_info[1] == '2':
target_line = ' '.join([
osp.join(class_name, video_name),
str(class_dict[class_name])
])
test_content.append(target_line)
train_content = '\n'.join(train_content)
test_content = '\n'.join(test_content)
with open(train_file_template.format(i), 'w') as fout:
fout.write(train_content)
with open(test_file_template.format(i), 'w') as fout:
fout.write(test_content) | This function will generate a `ClassInd.txt` for HMDB51 in a format
like UCF101, where class id starts with 1. | generate_class_index_file | python | open-mmlab/mmaction2 | tools/data/parse_file_list.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/parse_file_list.py | Apache-2.0 |
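A hedged sketch of the ClassInd-style file the function above writes, using a hypothetical three-class subset; ids start at 1 to match the UCF-101 convention.
class_list = ['brush_hair', 'cartwheel', 'catch']  # illustrative subset
lines = [f'{i + 1} {name}' for i, name in enumerate(class_list)]
with open('classInd_example.txt', 'w') as f:       # hypothetical output name
    f.write('\n'.join(lines))
# Resulting file:
# 1 brush_hair
# 2 cartwheel
# 3 catch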
def resize_videos(vid_item):
"""Generate resized video cache.
Args:
vid_item (list): Video item containing video full path,
video relative path.
Returns:
        bool: Whether the video cache is generated successfully.
"""
full_path, vid_path = vid_item
# Change the output video extension to .mp4 if '--to-mp4' flag is set
if args.to_mp4:
vid_path = vid_path.split('.')
assert len(vid_path) == 2, \
f"Video path '{vid_path}' contain more than one dot"
vid_path = vid_path[0] + '.mp4'
out_full_path = osp.join(args.out_dir, vid_path)
dir_name = osp.dirname(vid_path)
out_dir = osp.join(args.out_dir, dir_name)
if not osp.exists(out_dir):
os.makedirs(out_dir)
result = os.popen(
f'ffprobe -hide_banner -loglevel error -select_streams v:0 -show_entries stream=width,height -of csv=p=0 {full_path}' # noqa:E501
)
w, h = [int(d) for d in result.readline().rstrip().split(',')]
if w > h:
cmd = (f'ffmpeg -hide_banner -loglevel error -i {full_path} '
f'-vf {"mpdecimate," if args.remove_dup else ""}'
f'scale=-2:{args.scale} '
f'{"-vsync vfr" if args.remove_dup else ""} '
f'-c:v libx264 {"-g 16" if args.dense else ""} '
f'-an {out_full_path} -y')
else:
cmd = (f'ffmpeg -hide_banner -loglevel error -i {full_path} '
f'-vf {"mpdecimate," if args.remove_dup else ""}'
f'scale={args.scale}:-2 '
f'{"-vsync vfr" if args.remove_dup else ""} '
f'-c:v libx264 {"-g 16" if args.dense else ""} '
f'-an {out_full_path} -y')
os.popen(cmd)
print(f'{vid_path} done')
sys.stdout.flush()
return True | Generate resized video cache.
Args:
vid_item (list): Video item containing video full path,
video relative path.
Returns:
        bool: Whether the video cache is generated successfully.
| resize_videos | python | open-mmlab/mmaction2 | tools/data/resize_videos.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/resize_videos.py | Apache-2.0 |
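A hedged sketch of the probe-then-scale logic above using subprocess; the input path is hypothetical and ffprobe/ffmpeg must be installed.
import subprocess

full_path = '/data/videos/example.mp4'  # hypothetical input
out = subprocess.run(
    ['ffprobe', '-hide_banner', '-loglevel', 'error', '-select_streams', 'v:0',
     '-show_entries', 'stream=width,height', '-of', 'csv=p=0', full_path],
    capture_output=True, text=True, check=True)
w, h = (int(d) for d in out.stdout.strip().split(','))
scale = 256
vf = f'scale=-2:{scale}' if w > h else f'scale={scale}:-2'  # keep the short side
print(vf)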
def pool_feature(data, num_proposals=100, num_sample_bins=3, pool_type='mean'):
"""Pool features with arbitrary temporal length.
Args:
data (list[np.ndarray] | np.ndarray): Features of an untrimmed video,
with arbitrary temporal length.
num_proposals (int): The temporal dim of pooled feature. Default: 100.
num_sample_bins (int): How many points to sample to get the feature
vector at one timestamp. Default: 3.
pool_type (str): Type of pooling to pool features. Choices are
['mean', 'max']. Default: 'mean'.
Returns:
np.ndarray: The pooled feature with shape num_proposals x feature_dim.
"""
if len(data) == 1:
return np.concatenate([data] * num_proposals)
x_range = list(range(len(data)))
f = scipy.interpolate.interp1d(x_range, data, axis=0)
eps = 1e-4
start, end = eps, len(data) - 1 - eps
anchor_size = (end - start) / num_proposals
ptr = start
feature = []
for _ in range(num_proposals):
x_new = [
ptr + i / num_sample_bins * anchor_size
for i in range(num_sample_bins)
]
y_new = f(x_new)
if pool_type == 'mean':
y_new = np.mean(y_new, axis=0)
elif pool_type == 'max':
y_new = np.max(y_new, axis=0)
else:
raise NotImplementedError('Unsupported pool type')
feature.append(y_new)
ptr += anchor_size
feature = np.stack(feature)
return feature | Pool features with arbitrary temporal length.
Args:
data (list[np.ndarray] | np.ndarray): Features of an untrimmed video,
with arbitrary temporal length.
num_proposals (int): The temporal dim of pooled feature. Default: 100.
num_sample_bins (int): How many points to sample to get the feature
vector at one timestamp. Default: 3.
pool_type (str): Type of pooling to pool features. Choices are
['mean', 'max']. Default: 'mean'.
Returns:
np.ndarray: The pooled feature with shape num_proposals x feature_dim.
| pool_feature | python | open-mmlab/mmaction2 | tools/data/activitynet/activitynet_feature_postprocessing.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/activitynet_feature_postprocessing.py | Apache-2.0 |
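A hedged usage sketch, assuming pool_feature above is in scope (it relies on numpy and scipy): pool a random untrimmed-video feature of arbitrary temporal length down to 100 steps.
import numpy as np

feat = np.random.rand(137, 400)  # hypothetical feature sequence: 137 steps, 400-d
pooled = pool_feature(feat, num_proposals=100, num_sample_bins=3,
                      pool_type='mean')
print(pooled.shape)              # (100, 400)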
def load_annotations(ann_file):
"""Load the annotation according to ann_file into video_infos."""
video_infos = []
anno_database = mmengine.load(ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos | Load the annotation according to ann_file into video_infos. | load_annotations | python | open-mmlab/mmaction2 | tools/data/activitynet/convert_proposal_format.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/convert_proposal_format.py | Apache-2.0 |
def dump_formatted_proposal(video_idx, video_id, num_frames, fps, gts,
proposals, tiou, t_overlap_self,
formatted_proposal_file):
"""dump the formatted proposal file, which is the input proposal file of
action classifier (e.g: SSN).
Args:
video_idx (int): Index of video.
video_id (str): ID of video.
num_frames (int): Total frames of the video.
fps (float): Fps of the video.
gts (np.ndarray[float]): t_start, t_end and label of groundtruths.
proposals (np.ndarray[float]): t_start, t_end and score of proposals.
tiou (np.ndarray[float]): 2-dim array with IoU ratio.
t_overlap_self (np.ndarray[float]): 2-dim array with overlap_self
(union / self_len) ratio.
formatted_proposal_file (open file object): Open file object of
formatted_proposal_file.
"""
formatted_proposal_file.write(
f'#{video_idx}\n{video_id}\n{num_frames}\n{fps}\n{gts.shape[0]}\n')
for gt in gts:
formatted_proposal_file.write(f'{int(gt[2])} {gt[0]} {gt[1]}\n')
formatted_proposal_file.write(f'{proposals.shape[0]}\n')
best_iou = np.amax(tiou, axis=0)
best_iou_index = np.argmax(tiou, axis=0)
best_overlap = np.amax(t_overlap_self, axis=0)
best_overlap_index = np.argmax(t_overlap_self, axis=0)
for i in range(proposals.shape[0]):
index_iou = best_iou_index[i]
index_overlap = best_overlap_index[i]
label_iou = gts[index_iou][2]
label_overlap = gts[index_overlap][2]
if label_iou != label_overlap:
label = label_iou if label_iou != 0 else label_overlap
else:
label = label_iou
if best_iou[i] == 0 and best_overlap[i] == 0:
formatted_proposal_file.write(
f'0 0 0 {proposals[i][0]} {proposals[i][1]}\n')
else:
formatted_proposal_file.write(
f'{int(label)} {best_iou[i]} {best_overlap[i]} '
f'{proposals[i][0]} {proposals[i][1]}\n') | dump the formatted proposal file, which is the input proposal file of
action classifier (e.g: SSN).
Args:
video_idx (int): Index of video.
video_id (str): ID of video.
num_frames (int): Total frames of the video.
fps (float): Fps of the video.
gts (np.ndarray[float]): t_start, t_end and label of groundtruths.
proposals (np.ndarray[float]): t_start, t_end and score of proposals.
tiou (np.ndarray[float]): 2-dim array with IoU ratio.
t_overlap_self (np.ndarray[float]): 2-dim array with overlap_self
(union / self_len) ratio.
formatted_proposal_file (open file object): Open file object of
formatted_proposal_file.
| dump_formatted_proposal | python | open-mmlab/mmaction2 | tools/data/activitynet/convert_proposal_format.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/convert_proposal_format.py | Apache-2.0 |
def download_clip(video_identifier,
output_filename,
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
"""Download a video from youtube if exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
"""
# Defensive argument checking.
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
if not os.path.exists(output_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',
'-f', 'mp4', '-o',
'"%s"' % output_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
attempts += 1
if attempts == num_attempts:
return status, 'Fail'
else:
break
# Check if the video was successfully saved.
status = os.path.exists(output_filename)
    return status, 'Downloaded' | Download a video from youtube if it exists and is not blocked.
arguments:
---------
video_identifier: str
Unique YouTube video identifier (11 characters)
output_filename: str
File path where the video will be stored.
| download_clip | python | open-mmlab/mmaction2 | tools/data/activitynet/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/download.py | Apache-2.0 |
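A hedged usage sketch, assuming download_clip above is in scope and youtube-dl is installed; the 11-character id below is a placeholder, not a real ActivityNet clip.
status, message = download_clip(
    'XXXXXXXXXXX', '/tmp/v_XXXXXXXXXXX.mp4', num_attempts=3)
print(status, message)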
def parse_activitynet_annotations(input_csv, is_bsn_case=False):
"""Returns a list of YoutubeID.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'video,numFrame,seconds,fps,rfps,subset,featureFrame'
returns:
-------
youtube_ids: list
List of all YoutubeIDs in ActivityNet.
"""
if is_bsn_case:
lines = open(input_csv).readlines()
lines = lines[1:]
# YoutubeIDs do not have prefix `v_`
youtube_ids = [x.split(',')[0][2:] for x in lines]
else:
data = mmengine.load(anno_file)['database']
youtube_ids = list(data.keys())
return youtube_ids | Returns a list of YoutubeID.
arguments:
---------
input_csv: str
Path to CSV file containing the following columns:
'video,numFrame,seconds,fps,rfps,subset,featureFrame'
returns:
-------
youtube_ids: list
List of all YoutubeIDs in ActivityNet.
| parse_activitynet_annotations | python | open-mmlab/mmaction2 | tools/data/activitynet/download.py | https://github.com/open-mmlab/mmaction2/blob/master/tools/data/activitynet/download.py | Apache-2.0 |