code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def load_original_weights(self, logger):
"""Load weights from original checkpoint, which required converting
keys."""
state_dict_torchvision = _load_checkpoint(
self.pretrained, map_location='cpu')
if 'state_dict' in state_dict_torchvision:
state_dict_torchvision = state_dict_torchvision['state_dict']
wrapped_layers_map = dict()
for name, module in self.named_modules():
# convert torchvision keys
ori_name = name
for wrap_prefix in self._get_wrap_prefix():
if wrap_prefix in ori_name:
ori_name = ori_name.replace(wrap_prefix, '')
wrapped_layers_map[ori_name] = name
if isinstance(module, ConvModule):
if 'downsample' in ori_name:
# layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
tv_conv_name = ori_name + '.0'
# layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
tv_bn_name = ori_name + '.1'
else:
# layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
tv_conv_name = ori_name
# layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
tv_bn_name = ori_name.replace('conv', 'bn')
for conv_param in ['.weight', '.bias']:
if tv_conv_name + conv_param in state_dict_torchvision:
state_dict_torchvision[ori_name+'.conv'+conv_param] = \
state_dict_torchvision.pop(tv_conv_name+conv_param)
for bn_param in [
'.weight', '.bias', '.running_mean', '.running_var'
]:
if tv_bn_name + bn_param in state_dict_torchvision:
state_dict_torchvision[ori_name+'.bn'+bn_param] = \
state_dict_torchvision.pop(tv_bn_name+bn_param)
# convert wrapped keys
for param_name in list(state_dict_torchvision.keys()):
layer_name = '.'.join(param_name.split('.')[:-1])
if layer_name in wrapped_layers_map:
wrapped_name = param_name.replace(
layer_name, wrapped_layers_map[layer_name])
print(f'wrapped_name {wrapped_name}')
state_dict_torchvision[
wrapped_name] = state_dict_torchvision.pop(param_name)
msg = self.load_state_dict(state_dict_torchvision, strict=False)
logger.info(msg) | Load weights from the original checkpoint, which requires converting
keys. | load_original_weights | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_tsm.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py | Apache-2.0 |
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if self.pretrained2d:
logger = MMLogger.get_current_instance()
self.load_original_weights(logger)
else:
if self.pretrained:
self.init_cfg = dict(
type='Pretrained', checkpoint=self.pretrained)
super().init_weights() | Initiate the parameters either from existing checkpoint or from
scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/backbones/resnet_tsm.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py | Apache-2.0 |
def init_weights(self) -> None:
"""Initiate the parameters either from existing checkpoint or from
scratch."""
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if isinstance(self.pretrained, str):
logger = MMLogger.get_current_instance()
msg = f'load model from: {self.pretrained}'
print_log(msg, logger=logger)
load_checkpoint(self, self.pretrained, strict=True, logger=logger)
elif self.pretrained is None:
# Init two branch separately.
self.rgb_path.init_weights()
self.pose_path.init_weights()
else:
raise TypeError('pretrained must be a str or None') | Initiate the parameters either from existing checkpoint or from
scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/backbones/rgbposeconv3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/rgbposeconv3d.py | Apache-2.0 |
def forward(self, imgs: torch.Tensor, heatmap_imgs: torch.Tensor) -> tuple:
"""Defines the computation performed at every call.
Args:
imgs (torch.Tensor): The input RGB frames.
heatmap_imgs (torch.Tensor): The input pose heatmap volumes.
Returns:
tuple[torch.Tensor]: The feature of the input
samples extracted by the backbone.
"""
if self.training:
rgb_drop_path = torch.rand(1) < self.rgb_drop_path
pose_drop_path = torch.rand(1) < self.pose_drop_path
else:
rgb_drop_path, pose_drop_path = False, False
# We assume base_channel for RGB and Pose are 64 and 32.
x_rgb = self.rgb_path.conv1(imgs)
x_rgb = self.rgb_path.maxpool(x_rgb)
# N x 64 x 8 x 56 x 56
x_pose = self.pose_path.conv1(heatmap_imgs)
x_pose = self.pose_path.maxpool(x_pose)
x_rgb = self.rgb_path.layer1(x_rgb)
x_rgb = self.rgb_path.layer2(x_rgb)
x_pose = self.pose_path.layer1(x_pose)
if hasattr(self.rgb_path, 'layer2_lateral'):
feat = x_pose.detach() if self.rgb_detach else x_pose
x_pose_lateral = self.rgb_path.layer2_lateral(feat)
if rgb_drop_path:
x_pose_lateral = x_pose_lateral.new_zeros(x_pose_lateral.shape)
if hasattr(self.pose_path, 'layer1_lateral'):
feat = x_rgb.detach() if self.pose_detach else x_rgb
x_rgb_lateral = self.pose_path.layer1_lateral(feat)
if pose_drop_path:
x_rgb_lateral = x_rgb_lateral.new_zeros(x_rgb_lateral.shape)
if hasattr(self.rgb_path, 'layer2_lateral'):
x_rgb = torch.cat((x_rgb, x_pose_lateral), dim=1)
if hasattr(self.pose_path, 'layer1_lateral'):
x_pose = torch.cat((x_pose, x_rgb_lateral), dim=1)
x_rgb = self.rgb_path.layer3(x_rgb)
x_pose = self.pose_path.layer2(x_pose)
if hasattr(self.rgb_path, 'layer3_lateral'):
feat = x_pose.detach() if self.rgb_detach else x_pose
x_pose_lateral = self.rgb_path.layer3_lateral(feat)
if rgb_drop_path:
x_pose_lateral = x_pose_lateral.new_zeros(x_pose_lateral.shape)
if hasattr(self.pose_path, 'layer2_lateral'):
feat = x_rgb.detach() if self.pose_detach else x_rgb
x_rgb_lateral = self.pose_path.layer2_lateral(feat)
if pose_drop_path:
x_rgb_lateral = x_rgb_lateral.new_zeros(x_rgb_lateral.shape)
if hasattr(self.rgb_path, 'layer3_lateral'):
x_rgb = torch.cat((x_rgb, x_pose_lateral), dim=1)
if hasattr(self.pose_path, 'layer2_lateral'):
x_pose = torch.cat((x_pose, x_rgb_lateral), dim=1)
x_rgb = self.rgb_path.layer4(x_rgb)
x_pose = self.pose_path.layer3(x_pose)
return x_rgb, x_pose | Defines the computation performed at every call.
Args:
imgs (torch.Tensor): The input RGB frames.
heatmap_imgs (torch.Tensor): The input pose heatmap volumes.
Returns:
tuple[torch.Tensor]: The feature of the input
samples extracted by the backbone.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/rgbposeconv3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/rgbposeconv3d.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
res = self.residual(x)
x = self.tcn(self.gcn(x)) + res
return self.relu(x) | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/stgcn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/stgcn.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
N, M, T, V, C = x.size()
x = x.permute(0, 1, 3, 4, 2).contiguous()
if self.data_bn_type == 'MVC':
x = self.data_bn(x.view(N, M * V * C, T))
else:
x = self.data_bn(x.view(N * M, V * C, T))
x = x.view(N, M, V, C, T).permute(0, 1, 3, 4,
2).contiguous().view(N * M, C, T, V)
for i in range(self.num_stages):
x = self.gcn[i](x)
x = x.reshape((N, M) + x.shape[1:])
return x | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/stgcn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/stgcn.py | Apache-2.0 |
def window_partition(x: torch.Tensor,
window_size: Sequence[int]) -> torch.Tensor:
"""
Args:
x (torch.Tensor): The input features of shape :math:`(B, D, H, W, C)`.
window_size (Sequence[int]): The window size, :math:`(w_d, w_h, w_w)`.
Returns:
torch.Tensor: The partitioned windows of shape
:math:`(B*num_windows, w_d*w_h*w_w, C)`.
"""
B, D, H, W, C = x.shape
x = x.view(B, D // window_size[0], window_size[0], H // window_size[1],
window_size[1], W // window_size[2], window_size[2], C)
windows = x.permute(0, 1, 3, 5, 2, 4, 6,
7).contiguous().view(-1, reduce(mul, window_size), C)
return windows |
Args:
x (torch.Tensor): The input features of shape :math:`(B, D, H, W, C)`.
window_size (Sequence[int]): The window size, :math:`(w_d, w_h, w_w)`.
Returns:
torch.Tensor: The partitioned windows of shape
:math:`(B*num_windows, w_d*w_h*w_w, C)`.
| window_partition | python | open-mmlab/mmaction2 | mmaction/models/backbones/swin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py | Apache-2.0 |
def window_reverse(windows: torch.Tensor, window_size: Sequence[int], B: int,
D: int, H: int, W: int) -> torch.Tensor:
"""
Args:
windows (torch.Tensor): Input windows of shape
:math:`(B*num_windows, w_d, w_h, w_w, C)`.
window_size (Sequence[int]): The window size, :math:`(w_d, w_h, w_w)`.
B (int): Batch size of feature maps.
D (int): Temporal length of feature maps.
H (int): Height of feature maps.
W (int): Width of feature maps.
Returns:
torch.Tensor: The feature maps reversed from windows of
shape :math:`(B, D, H, W, C)`.
"""
x = windows.view(B, D // window_size[0], H // window_size[1],
W // window_size[2], window_size[0], window_size[1],
window_size[2], -1)
x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(B, D, H, W, -1)
return x |
Args:
windows (torch.Tensor): Input windows of shape
:math:`(B*num_windows, w_d, w_h, w_w, C)`.
window_size (Sequence[int]): The window size, :math:`(w_d, w_h, w_w)`.
B (int): Batch size of feature maps.
D (int): Temporal length of feature maps.
H (int): Height of feature maps.
W (int): Width of feature maps.
Returns:
torch.Tensor: The feature maps reversed from windows of
shape :math:`(B, D, H, W, C)`.
| window_reverse | python | open-mmlab/mmaction2 | mmaction/models/backbones/swin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py | Apache-2.0 |
def get_window_size(
x_size: Sequence[int],
window_size: Sequence[int],
shift_size: Optional[Sequence[int]] = None
) -> Union[Tuple[int], Tuple[Tuple[int]]]:
"""Calculate window size and shift size according to the input size.
Args:
x_size (Sequence[int]): The input size.
window_size (Sequence[int]): The expected window size.
shift_size (Sequence[int], optional): The expected shift size.
Defaults to None.
Returns:
tuple: The calculated window size and shift size.
"""
use_window_size = list(window_size)
if shift_size is not None:
use_shift_size = list(shift_size)
for i in range(len(x_size)):
if x_size[i] <= window_size[i]:
use_window_size[i] = x_size[i]
if shift_size is not None:
use_shift_size[i] = 0
if shift_size is None:
return tuple(use_window_size)
else:
return tuple(use_window_size), tuple(use_shift_size) | Calculate window size and shift size according to the input size.
Args:
x_size (Sequence[int]): The input size.
window_size (Sequence[int]): The expected window size.
shift_size (Sequence[int], optional): The expected shift size.
Defaults to None.
Returns:
tuple: The calculated window size and shift size.
| get_window_size | python | open-mmlab/mmaction2 | mmaction/models/backbones/swin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py | Apache-2.0 |
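In practice this clamps any window dimension that exceeds the input and disables the corresponding shift. A small sketch restating the clamping logic, with an assumed 4-frame clip against an 8-frame window:

def get_window_size(x_size, window_size, shift_size=None):
    """Clamp window/shift sizes to the actual input size (as above)."""
    use_window_size = list(window_size)
    use_shift_size = list(shift_size) if shift_size is not None else None
    for i in range(len(x_size)):
        if x_size[i] <= window_size[i]:
            use_window_size[i] = x_size[i]
            if use_shift_size is not None:
                use_shift_size[i] = 0
    if use_shift_size is None:
        return tuple(use_window_size)
    return tuple(use_window_size), tuple(use_shift_size)


# An 8-frame temporal window cannot fit a 4-frame clip, so the temporal
# window shrinks to 4 and its shift is disabled.
assert get_window_size((4, 56, 56), (8, 7, 7), (4, 3, 3)) == \
    ((4, 7, 7), (0, 3, 3))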
def compute_mask(D: int, H: int, W: int, window_size: Sequence[int],
shift_size: Sequence[int],
device: Union[str, torch.device]) -> torch.Tensor:
"""Compute attention mask.
Args:
D (int): Temporal length of feature maps.
H (int): Height of feature maps.
W (int): Width of feature maps.
window_size (Sequence[int]): The window size.
shift_size (Sequence[int]): The shift size.
device (str or :obj:`torch.device`): The device of the mask.
Returns:
torch.Tensor: The attention mask used for shifted window attention.
"""
img_mask = torch.zeros((1, D, H, W, 1), device=device) # 1 Dp Hp Wp 1
cnt = 0
for d in slice(-window_size[0]), slice(-window_size[0],
-shift_size[0]), slice(
-shift_size[0], None):
for h in slice(-window_size[1]), slice(-window_size[1],
-shift_size[1]), slice(
-shift_size[1], None):
for w in slice(-window_size[2]), slice(-window_size[2],
-shift_size[2]), slice(
-shift_size[2], None):
img_mask[:, d, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask,
window_size) # nW, ws[0]*ws[1]*ws[2], 1
mask_windows = mask_windows.squeeze(-1) # nW, ws[0]*ws[1]*ws[2]
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0,
float(-100.0)).masked_fill(
attn_mask == 0, float(0.0))
return attn_mask | Compute attention mask.
Args:
D (int): Temporal length of feature maps.
H (int): Height of feature maps.
W (int): Width of feature maps.
window_size (Sequence[int]): The window size.
shift_size (Sequence[int]): The shift size.
device (str or :obj:`torch.device`): The device of the mask.
Returns:
torch.Tensor: The attention mask used for shifted window attention.
| compute_mask | python | open-mmlab/mmaction2 | mmaction/models/backbones/swin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py | Apache-2.0 |
def forward(self,
x: torch.Tensor,
mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Forward function.
Args:
x (torch.Tensor): Input feature maps of shape
:math:`(B*num_windows, N, C)`.
mask (torch.Tensor, optional): (0/-inf) mask of shape
:math:`(num_windows, N, N)`. Defaults to None.
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads,
C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # B_, nH, N, C
q = q * self.scale
attn = q @ k.transpose(-2, -1)
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index[:N, :N].reshape(-1)].reshape(
N, N, -1) # Wd*Wh*Ww,Wd*Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1).contiguous() # nH, Wd*Wh*Ww, Wd*Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0) # B_, nH, N, N
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N,
N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x | Forward function.
Args:
x (torch.Tensor): Input feature maps of shape
:math:`(B*num_windows, N, C)`.
mask (torch.Tensor, optional): (0/-inf) mask of shape
:math:`(num_windows, N, N)`. Defaults to None.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/swin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py | Apache-2.0 |
def forward(self, x: torch.Tensor,
mask_matrix: torch.Tensor) -> torch.Tensor:
"""
Args:
x (torch.Tensor): Input features of shape :math:`(B, D, H, W, C)`.
mask_matrix (torch.Tensor): Attention mask for cyclic shift.
"""
shortcut = x
if self.with_cp:
x = checkpoint.checkpoint(self.forward_part1, x, mask_matrix)
else:
x = self.forward_part1(x, mask_matrix)
x = shortcut + self.drop_path(x)
if self.with_cp:
x = x + checkpoint.checkpoint(self.forward_part2, x)
else:
x = x + self.forward_part2(x)
return x |
Args:
x (torch.Tensor): Input features of shape :math:`(B, D, H, W, C)`.
mask_matrix (torch.Tensor): Attention mask for cyclic shift.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/swin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Perform patch merging.
Args:
x (torch.Tensor): Input feature maps of shape
:math:`(B, D, H, W, C)`.
Returns:
torch.Tensor: The merged feature maps of shape
:math:`(B, D, H/2, W/2, 2*C)`.
"""
B, D, H, W, C = x.shape
# padding
pad_input = (H % 2 == 1) or (W % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
x0 = x[:, :, 0::2, 0::2, :] # B D H/2 W/2 C
x1 = x[:, :, 1::2, 0::2, :] # B D H/2 W/2 C
x2 = x[:, :, 0::2, 1::2, :] # B D H/2 W/2 C
x3 = x[:, :, 1::2, 1::2, :] # B D H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B D H/2 W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x | Perform patch merging.
Args:
x (torch.Tensor): Input feature maps of shape
:math:`(B, D, H, W, C)`.
Returns:
torch.Tensor: The merged feature maps of shape
:math:`(B, D, H/2, W/2, 2*C)`.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/swin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py | Apache-2.0 |
def forward(self,
x: torch.Tensor,
do_downsample: bool = True) -> torch.Tensor:
"""Forward function.
Args:
x (torch.Tensor): Input feature maps of shape
:math:`(B, C, D, H, W)`.
do_downsample (bool): Whether to downsample the output of
the current layer. Defaults to True.
"""
# calculate attention mask for SW-MSA
B, C, D, H, W = x.shape
window_size, shift_size = get_window_size((D, H, W), self.window_size,
self.shift_size)
x = rearrange(x, 'b c d h w -> b d h w c')
Dp = int(np.ceil(D / window_size[0])) * window_size[0]
Hp = int(np.ceil(H / window_size[1])) * window_size[1]
Wp = int(np.ceil(W / window_size[2])) * window_size[2]
attn_mask = compute_mask(Dp, Hp, Wp, window_size, shift_size, x.device)
for blk in self.blocks:
x = blk(x, attn_mask)
if self.downsample is not None and do_downsample:
x = self.downsample(x)
return x | Forward function.
Args:
x (torch.Tensor): Input feature maps of shape
:math:`(B, C, D, H, W)`.
do_downsample (bool): Whether to downsample the output of
the current layer. Defaults to True.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/swin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Perform video to patch embedding.
Args:
x (torch.Tensor): The input videos of shape
:math:`(B, C, D, H, W)`. In most cases, C is 3.
Returns:
torch.Tensor: The video patches of shape
:math:`(B, embed_dims, Dp, Hp, Wp)`.
"""
_, _, D, H, W = x.size()
if W % self.patch_size[2] != 0:
x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2]))
if H % self.patch_size[1] != 0:
x = F.pad(x,
(0, 0, 0, self.patch_size[1] - H % self.patch_size[1]))
if D % self.patch_size[0] != 0:
x = F.pad(x, (0, 0, 0, 0, 0,
self.patch_size[0] - D % self.patch_size[0]))
x = self.proj(x)  # B C Dp Hp Wp
if self.norm is not None:
Dp, Hp, Wp = x.size(2), x.size(3), x.size(4)
x = x.flatten(2).transpose(1, 2) # B Dp*Hp*Wp C
x = self.norm(x)
x = x.transpose(1, 2).view(-1, self.embed_dims, Dp, Hp, Wp)
return x | Perform video to patch embedding.
Args:
x (torch.Tensor): The input videos of shape
:math:`(B, C, D, H, W)`. In most cases, C is 3.
Returns:
torch.Tensor: The video patches of shape
:math:`(B, embed_dims, Dp, Hp, Wp)`.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/swin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py | Apache-2.0 |
def _freeze_stages(self) -> None:
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.frozen_stages >= 1:
self.pos_drop.eval()
for i in range(0, self.frozen_stages):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False | Prevent all the parameters from being optimized before
``self.frozen_stages``. | _freeze_stages | python | open-mmlab/mmaction2 | mmaction/models/backbones/swin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py | Apache-2.0 |
def inflate_weights(self, logger: MMLogger) -> None:
"""Inflate the swin2d parameters to swin3d.
The differences between swin3d and swin2d mainly lie in an extra
axis. To utilize the pretrained parameters in 2d model, the weight
of swin2d models should be inflated to fit in the shapes of the
3d counterpart.
Args:
logger (MMLogger): The logger used to print debugging information.
"""
checkpoint = _load_checkpoint(self.pretrained, map_location='cpu')
state_dict = checkpoint['model']
# delete relative_position_index since we always re-init it
relative_position_index_keys = [
k for k in state_dict.keys() if 'relative_position_index' in k
]
for k in relative_position_index_keys:
del state_dict[k]
# delete attn_mask since we always re-init it
attn_mask_keys = [k for k in state_dict.keys() if 'attn_mask' in k]
for k in attn_mask_keys:
del state_dict[k]
state_dict['patch_embed.proj.weight'] = \
state_dict['patch_embed.proj.weight'].unsqueeze(2).\
repeat(1, 1, self.patch_size[0], 1, 1) / self.patch_size[0]
# bicubic interpolate relative_position_bias_table if not match
relative_position_bias_table_keys = [
k for k in state_dict.keys() if 'relative_position_bias_table' in k
]
for k in relative_position_bias_table_keys:
relative_position_bias_table_pretrained = state_dict[k]
relative_position_bias_table_current = self.state_dict()[k]
L1, nH1 = relative_position_bias_table_pretrained.size()
L2, nH2 = relative_position_bias_table_current.size()
L2 = (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)
wd = self.window_size[0]
if nH1 != nH2:
logger.warning(f'Error in loading {k}, passing')
else:
if L1 != L2:
S1 = int(L1**0.5)
relative_position_bias_table_pretrained_resized = \
torch.nn.functional.interpolate(
relative_position_bias_table_pretrained.permute(
1, 0).view(1, nH1, S1, S1),
size=(2 * self.window_size[1] - 1,
2 * self.window_size[2] - 1),
mode='bicubic')
relative_position_bias_table_pretrained = \
relative_position_bias_table_pretrained_resized. \
view(nH2, L2).permute(1, 0)
state_dict[k] = relative_position_bias_table_pretrained.repeat(
2 * wd - 1, 1)
# In the original swin2d checkpoint, the last layer of the
# backbone is the norm layer, and the original attribute
# name is `norm`. We changed it to `norm3` which means it
# is the last norm layer of stage 4.
if hasattr(self, 'norm3'):
state_dict['norm3.weight'] = state_dict['norm.weight']
state_dict['norm3.bias'] = state_dict['norm.bias']
del state_dict['norm.weight']
del state_dict['norm.bias']
msg = self.load_state_dict(state_dict, strict=False)
logger.info(msg) | Inflate the swin2d parameters to swin3d.
The differences between swin3d and swin2d mainly lie in an extra
axis. To utilize the pretrained parameters in 2d model, the weight
of swin2d models should be inflated to fit in the shapes of the
3d counterpart.
Args:
logger (MMLogger): The logger used to print debugging information.
| inflate_weights | python | open-mmlab/mmaction2 | mmaction/models/backbones/swin.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
assert isinstance(self.block, Bottleneck)
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.block.conv1(x)
out = self.tam(out)
out = self.block.conv2(out)
out = self.block.conv3(out)
if self.block.downsample is not None:
identity = self.block.downsample(x)
out = out + identity
return out
if self.block.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.block.relu(out)
return out | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/tanet.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/tanet.py | Apache-2.0 |
def make_tam_modeling(self):
"""Replace ResNet-Block with TA-Block."""
def make_tam_block(stage, num_segments, tam_cfg=dict()):
blocks = list(stage.children())
for i, block in enumerate(blocks):
blocks[i] = TABlock(block, num_segments, deepcopy(tam_cfg))
return nn.Sequential(*blocks)
for i in range(self.num_stages):
layer_name = f'layer{i + 1}'
res_layer = getattr(self, layer_name)
setattr(self, layer_name,
make_tam_block(res_layer, self.num_segments, self.tam_cfg)) | Replace ResNet-Block with TA-Block. | make_tam_modeling | python | open-mmlab/mmaction2 | mmaction/models/backbones/tanet.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/tanet.py | Apache-2.0 |
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The output of the module.
"""
x = rearrange(x, 'b c t h w -> (b t) c h w')
x = self.projection(x).flatten(2).transpose(1, 2)
return x | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The output of the module.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/timesformer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/timesformer.py | Apache-2.0 |
def init_weights(self, pretrained=None):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
if pretrained:
self.pretrained = pretrained
if isinstance(self.pretrained, str):
logger = MMLogger.get_current_instance()
logger.info(f'load model from: {self.pretrained}')
state_dict = _load_checkpoint(self.pretrained, map_location='cpu')
if 'state_dict' in state_dict:
state_dict = state_dict['state_dict']
if self.attention_type == 'divided_space_time':
# modify the key names of norm layers
old_state_dict_keys = list(state_dict.keys())
for old_key in old_state_dict_keys:
if 'norms' in old_key:
new_key = old_key.replace('norms.0',
'attentions.0.norm')
new_key = new_key.replace('norms.1', 'ffns.0.norm')
state_dict[new_key] = state_dict.pop(old_key)
# copy the parameters of space attention to time attention
old_state_dict_keys = list(state_dict.keys())
for old_key in old_state_dict_keys:
if 'attentions.0' in old_key:
new_key = old_key.replace('attentions.0',
'attentions.1')
state_dict[new_key] = state_dict[old_key].clone()
load_state_dict(self, state_dict, strict=False, logger=logger) | Initiate the parameters either from existing checkpoint or from
scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/backbones/timesformer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/timesformer.py | Apache-2.0 |
def forward(self, x):
"""Defines the computation performed at every call."""
# x [batch_size * num_frames, num_patches, embed_dims]
batches = x.shape[0]
x = self.patch_embed(x)
# x [batch_size * num_frames, num_patches + 1, embed_dims]
cls_tokens = self.cls_token.expand(x.size(0), -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed
x = self.drop_after_pos(x)
# Add Time Embedding
if self.attention_type != 'space_only':
# x [batch_size, num_patches * num_frames + 1, embed_dims]
cls_tokens = x[:batches, 0, :].unsqueeze(1)
x = rearrange(x[:, 1:, :], '(b t) p m -> (b p) t m', b=batches)
x = x + self.time_embed
x = self.drop_after_time(x)
x = rearrange(x, '(b p) t m -> b (p t) m', b=batches)
x = torch.cat((cls_tokens, x), dim=1)
x = self.transformer_layers(x, None, None)
if self.attention_type == 'space_only':
# x [batch_size, num_patches + 1, embed_dims]
x = x.view(-1, self.num_frames, *x.size()[-2:])
x = torch.mean(x, 1)
x = self.norm(x)
# Return Class Token
return x[:, 0] | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/timesformer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/timesformer.py | Apache-2.0 |
def conv_3xnxn(inp: int,
oup: int,
kernel_size: int = 3,
stride: int = 3,
groups: int = 1):
"""3D convolution with kernel size of 3xnxn.
Args:
inp (int): Dimension of input features.
oup (int): Dimension of output features.
kernel_size (int): The spatial kernel size (i.e., n).
Defaults to 3.
stride (int): The spatial stride.
Defaults to 3.
groups (int): Group number of operated features.
Defaults to 1.
"""
return nn.Conv3d(
inp,
oup, (3, kernel_size, kernel_size), (2, stride, stride), (1, 0, 0),
groups=groups) | 3D convolution with kernel size of 3xnxn.
Args:
inp (int): Dimension of input features.
oup (int): Dimension of output features.
kernel_size (int): The spatial kernel size (i.e., n).
Defaults to 3.
stride (int): The spatial stride.
Defaults to 3.
groups (int): Group number of operated features.
Defaults to 1.
| conv_3xnxn | python | open-mmlab/mmaction2 | mmaction/models/backbones/uniformer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py | Apache-2.0 |
def conv_1xnxn(inp: int,
oup: int,
kernel_size: int = 3,
stride: int = 3,
groups: int = 1):
"""3D convolution with kernel size of 1xnxn.
Args:
inp (int): Dimension of input features.
oup (int): Dimension of output features.
kernel_size (int): The spatial kernel size (i.e., n).
Defaults to 3.
stride (int): The spatial stride.
Defaults to 3.
groups (int): Group number of operated features.
Defaults to 1.
"""
return nn.Conv3d(
inp,
oup, (1, kernel_size, kernel_size), (1, stride, stride), (0, 0, 0),
groups=groups) | 3D convolution with kernel size of 1xnxn.
Args:
inp (int): Dimension of input features.
oup (int): Dimension of output features.
kernel_size (int): The spatial kernel size (i.e., n).
Defaults to 3.
stride (int): The spatial stride.
Defaults to 3.
groups (int): Group number of operated features.
Defaults to 1.
| conv_1xnxn | python | open-mmlab/mmaction2 | mmaction/models/backbones/uniformer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py | Apache-2.0 |
def _load_pretrained(self, pretrained: str = None) -> None:
"""Load ImageNet-1K pretrained model.
The model is pretrained with ImageNet-1K.
https://github.com/Sense-X/UniFormer
Args:
pretrained (str): Model name of ImageNet-1K pretrained model.
Defaults to None.
"""
if pretrained is not None:
model_path = _MODELS[pretrained]
logger.info(f'Load ImageNet pretrained model from {model_path}')
state_dict = _load_checkpoint(model_path, map_location='cpu')
state_dict_3d = self.state_dict()
for k in state_dict.keys():
if k in state_dict_3d.keys(
) and state_dict[k].shape != state_dict_3d[k].shape:
if len(state_dict_3d[k].shape) <= 2:
logger.info(f'Ignore: {k}')
continue
logger.info(f'Inflate: {k}, {state_dict[k].shape}' +
f' => {state_dict_3d[k].shape}')
time_dim = state_dict_3d[k].shape[2]
state_dict[k] = self._inflate_weight(
state_dict[k], time_dim)
self.load_state_dict(state_dict, strict=False) | Load ImageNet-1K pretrained model.
The model is pretrained with ImageNet-1K.
https://github.com/Sense-X/UniFormer
Args:
pretrained (str): Model name of ImageNet-1K pretrained model.
Defaults to None.
| _load_pretrained | python | open-mmlab/mmaction2 | mmaction/models/backbones/uniformer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py | Apache-2.0 |
def _load_pretrained(self, pretrained: str = None) -> None:
"""Load CLIP pretrained visual encoder.
The visual encoder is extracted from CLIP.
https://github.com/openai/CLIP
Args:
pretrained (str): Model name of pretrained CLIP visual encoder.
Defaults to None.
"""
assert pretrained is not None, \
'please specify clip pretrained checkpoint'
model_path = _MODELS[pretrained]
logger.info(f'Load CLIP pretrained model from {model_path}')
state_dict = _load_checkpoint(model_path, map_location='cpu')
state_dict_3d = self.state_dict()
for k in state_dict.keys():
if k in state_dict_3d.keys(
) and state_dict[k].shape != state_dict_3d[k].shape:
if len(state_dict_3d[k].shape) <= 2:
logger.info(f'Ignore: {k}')
continue
logger.info(f'Inflate: {k}, {state_dict[k].shape}' +
f' => {state_dict_3d[k].shape}')
time_dim = state_dict_3d[k].shape[2]
state_dict[k] = self._inflate_weight(state_dict[k], time_dim)
self.load_state_dict(state_dict, strict=False) | Load CLIP pretrained visual encoder.
The visual encoder is extracted from CLIP.
https://github.com/openai/CLIP
Args:
pretrained (str): Model name of pretrained CLIP visual encoder.
Defaults to None.
| _load_pretrained | python | open-mmlab/mmaction2 | mmaction/models/backbones/uniformerv2.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformerv2.py | Apache-2.0 |
def forward(self, x: Tensor) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data with size of (B, N, C).
Returns:
Tensor: The output of the attention block, same size as inputs.
"""
B, N, C = x.shape
if hasattr(self, 'q_bias'):
k_bias = torch.zeros_like(self.v_bias, requires_grad=False)
qkv_bias = torch.cat((self.q_bias, k_bias, self.v_bias))
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
else:
qkv = self.qkv(x)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x | Defines the computation performed at every call.
Args:
x (Tensor): The input data with size of (B, N, C).
Returns:
Tensor: The output of the attention block, same size as inputs.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/vit_mae.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py | Apache-2.0 |
def forward(self, x: Tensor) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data with size of (B, N, C).
Returns:
Tensor: The output of the transformer block, same size as inputs.
"""
if hasattr(self, 'gamma_1'):
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x | Defines the computation performed at every call.
Args:
x (Tensor): The input data with size of (B, N, C).
Returns:
Tensor: The output of the transformer block, same size as inputs.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/vit_mae.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py | Apache-2.0 |
def get_sinusoid_encoding(n_position: int, embed_dims: int) -> Tensor:
"""Generate sinusoid encoding table.
Sinusoid encoding is a kind of relative position encoding method that comes from
`Attention Is All You Need<https://arxiv.org/abs/1706.03762>`_.
Args:
n_position (int): The length of the input token.
embed_dims (int): The position embedding dimension.
Returns:
:obj:`torch.FloatTensor`: The sinusoid encoding table of size
(1, n_position, embed_dims)
"""
vec = torch.arange(embed_dims, dtype=torch.float64)
vec = (vec - vec % 2) / embed_dims
vec = torch.pow(10000, -vec).view(1, -1)
sinusoid_table = torch.arange(n_position).view(-1, 1) * vec
sinusoid_table[:, 0::2].sin_() # dim 2i
sinusoid_table[:, 1::2].cos_() # dim 2i+1
sinusoid_table = sinusoid_table.to(torch.float32)
return sinusoid_table.unsqueeze(0) | Generate sinusoid encoding table.
Sinusoid encoding is a kind of relative position encoding method that comes from
`Attention Is All You Need<https://arxiv.org/abs/1706.03762>`_.
Args:
n_position (int): The length of the input token.
embed_dims (int): The position embedding dimension.
Returns:
:obj:`torch.FloatTensor`: The sinusoid encoding table of size
(1, n_position, embed_dims)
| get_sinusoid_encoding | python | open-mmlab/mmaction2 | mmaction/models/backbones/vit_mae.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py | Apache-2.0 |
def forward(self, x: Tensor) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The feature of the input
samples extracted by the backbone.
"""
b, _, _, h, w = x.shape
h //= self.patch_size
w //= self.patch_size
x = self.patch_embed(x)[0]
if (h, w) != self.grid_size:
pos_embed = self.pos_embed.reshape(-1, *self.grid_size,
self.embed_dims)
pos_embed = pos_embed.permute(0, 3, 1, 2)
pos_embed = F.interpolate(
pos_embed, size=(h, w), mode='bicubic', align_corners=False)
pos_embed = pos_embed.permute(0, 2, 3, 1).flatten(1, 2)
pos_embed = pos_embed.reshape(1, -1, self.embed_dims)
else:
pos_embed = self.pos_embed
x = x + pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
if self.return_feat_map:
x = x.reshape(b, -1, h, w, self.embed_dims)
x = x.permute(0, 4, 1, 2, 3)
return x
if self.fc_norm is not None:
return self.fc_norm(x.mean(1))
return x[:, 0] | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The feature of the input
samples extracted by the backbone.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/vit_mae.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py | Apache-2.0 |
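When the spatial grid at inference differs from the pretraining grid, the learned position embedding is resampled with bicubic interpolation, as in the branch above. A standalone sketch of that resampling for the spatial-only case (sizes are illustrative):

import torch
import torch.nn.functional as F

embed_dims, old_grid, new_grid = 384, (14, 14), (10, 10)
pos_embed = torch.randn(1, old_grid[0] * old_grid[1], embed_dims)

# (1, N, C) -> (1, C, h, w) -> bicubic resample -> (1, N', C)
grid = pos_embed.reshape(1, *old_grid, embed_dims).permute(0, 3, 1, 2)
grid = F.interpolate(grid, size=new_grid, mode='bicubic', align_corners=False)
resampled = grid.permute(0, 2, 3, 1).flatten(1, 2)
assert resampled.shape == (1, new_grid[0] * new_grid[1], embed_dims)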
def _round_width(width, multiplier, min_width=8, divisor=8):
"""Round width of filters based on width multiplier."""
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width,
int(width + divisor / 2) // divisor * divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out) | Round width of filters based on width multiplier. | _round_width | python | open-mmlab/mmaction2 | mmaction/models/backbones/x3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py | Apache-2.0 |
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The output of the module.
"""
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The output of the module.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/x3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py | Apache-2.0 |
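The module above is a standard squeeze-and-excitation gate: global average pooling produces per-channel statistics, two 1x1x1 convs form a bottleneck, and the sigmoid output rescales the input channels. A minimal sketch assuming a 1/16 reduction ratio (the hidden width here is a simple choice and may not match the module's exact rounding):

import torch
import torch.nn as nn


class SEModule3D(nn.Module):
    """Minimal squeeze-and-excitation gate for (B, C, T, H, W) inputs."""

    def __init__(self, channels, ratio=1 / 16):
        super().__init__()
        hidden = max(1, int(channels * ratio))
        self.avg_pool = nn.AdaptiveAvgPool3d(1)
        self.fc1 = nn.Conv3d(channels, hidden, kernel_size=1)
        self.relu = nn.ReLU()
        self.fc2 = nn.Conv3d(hidden, channels, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        scale = self.sigmoid(self.fc2(self.relu(self.fc1(self.avg_pool(x)))))
        return x * scale  # broadcast over T, H, W


out = SEModule3D(64)(torch.randn(2, 64, 8, 14, 14))
assert out.shape == (2, 64, 8, 14, 14)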
def forward(self, x):
"""Defines the computation performed at every call."""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.se_ratio is not None:
out = self.se_module(out)
out = self.swish(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/x3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py | Apache-2.0 |
def _round_width(width, multiplier, min_depth=8, divisor=8):
"""Round width of filters based on width multiplier."""
if not multiplier:
return width
width *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth,
int(width + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * width:
new_filters += divisor
return int(new_filters) | Round width of filters based on width multiplier. | _round_width | python | open-mmlab/mmaction2 | mmaction/models/backbones/x3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py | Apache-2.0 |
def _round_repeats(repeats, multiplier):
"""Round number of layers based on depth multiplier."""
if not multiplier:
return repeats
return int(math.ceil(multiplier * repeats)) | Round number of layers based on depth multiplier. | _round_repeats | python | open-mmlab/mmaction2 | mmaction/models/backbones/x3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py | Apache-2.0 |
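Both helpers keep channel counts on hardware-friendly multiples of the divisor (8 by default) while scaling by the multiplier, and layer counts are simply rounded up. A couple of worked values (plain arithmetic; the functions are restated from above):

import math


def round_width(width, multiplier, min_depth=8, divisor=8):
    if not multiplier:
        return width
    width *= multiplier
    min_depth = min_depth or divisor
    new_filters = max(min_depth, int(width + divisor / 2) // divisor * divisor)
    if new_filters < 0.9 * width:
        new_filters += divisor
    return int(new_filters)


def round_repeats(repeats, multiplier):
    return repeats if not multiplier else int(math.ceil(multiplier * repeats))


assert round_width(24, 2.0) == 48  # 48 is already a multiple of 8
assert round_width(24, 2.2) == 56  # 52.8 rounds to the nearest multiple of 8
assert round_repeats(3, 2.2) == 7  # ceil(6.6)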
def make_res_layer(self,
block,
layer_inplanes,
inplanes,
planes,
blocks,
spatial_stride=1,
se_style='half',
se_ratio=None,
use_swish=True,
norm_cfg=None,
act_cfg=None,
conv_cfg=None,
with_cp=False,
**kwargs):
"""Build residual layer for ResNet3D.
Args:
block (nn.Module): Residual module to be built.
layer_inplanes (int): Number of channels for the input feature
of the res layer.
inplanes (int): Number of channels for the input feature in each
block, which equals to base_channels * gamma_w.
planes (int): Number of channels for the output feature in each
block, which equals to base_channel * gamma_w * gamma_b.
blocks (int): Number of residual blocks.
spatial_stride (int): Spatial strides in residual and conv layers.
Default: 1.
se_style (str): The style of inserting SE modules into BlockX3D,
'half' denotes insert into half of the blocks, while 'all'
denotes insert into all blocks. Default: 'half'.
se_ratio (float | None): The reduction ratio of squeeze and
excitation unit. If set as None, it means not using SE unit.
Default: None.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
conv_cfg (dict | None): Config for conv layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool | None): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
downsample = None
if spatial_stride != 1 or layer_inplanes != inplanes:
downsample = ConvModule(
layer_inplanes,
inplanes,
kernel_size=1,
stride=(1, spatial_stride, spatial_stride),
padding=0,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
use_se = [False] * blocks
if self.se_style == 'all':
use_se = [True] * blocks
elif self.se_style == 'half':
use_se = [i % 2 == 0 for i in range(blocks)]
else:
raise NotImplementedError
layers = []
layers.append(
block(
layer_inplanes,
planes,
inplanes,
spatial_stride=spatial_stride,
downsample=downsample,
se_ratio=se_ratio if use_se[0] else None,
use_swish=use_swish,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
inplanes,
spatial_stride=1,
se_ratio=se_ratio if use_se[i] else None,
use_swish=use_swish,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
return nn.Sequential(*layers) | Build residual layer for ResNet3D.
Args:
block (nn.Module): Residual module to be built.
layer_inplanes (int): Number of channels for the input feature
of the res layer.
inplanes (int): Number of channels for the input feature in each
block, which equals to base_channels * gamma_w.
planes (int): Number of channels for the output feature in each
block, which equals to base_channel * gamma_w * gamma_b.
blocks (int): Number of residual blocks.
spatial_stride (int): Spatial strides in residual and conv layers.
Default: 1.
se_style (str): The style of inserting SE modules into BlockX3D,
'half' denotes insert into half of the blocks, while 'all'
denotes insert into all blocks. Default: 'half'.
se_ratio (float | None): The reduction ratio of squeeze and
excitation unit. If set as None, it means not using SE unit.
Default: None.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
conv_cfg (dict | None): Config for conv layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool | None): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
Returns:
nn.Module: A residual layer for the given config.
| make_res_layer | python | open-mmlab/mmaction2 | mmaction/models/backbones/x3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py | Apache-2.0 |
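With `se_style='half'`, squeeze-and-excitation units are inserted only into the even-indexed blocks, which is exactly what the `use_se` list above encodes; `'all'` enables them everywhere. For example, with 5 blocks:

blocks = 5
use_se_half = [i % 2 == 0 for i in range(blocks)]  # se_style='half'
use_se_all = [True] * blocks                       # se_style='all'
print(use_se_half)  # [True, False, True, False, True]
print(use_se_all)   # [True, True, True, True, True]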
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1_s = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=(1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None)
self.conv1_t = ConvModule(
self.base_channels,
self.base_channels,
kernel_size=(5, 1, 1),
stride=(1, 1, 1),
padding=(2, 0, 0),
groups=self.base_channels,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg) | Construct the stem layers, which consist of a spatial conv module
followed by a depthwise temporal conv module. | _make_stem_layer | python | open-mmlab/mmaction2 | mmaction/models/backbones/x3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py | Apache-2.0 |
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1_s.eval()
self.conv1_t.eval()
for param in self.conv1_s.parameters():
param.requires_grad = False
for param in self.conv1_t.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False | Prevent all the parameters from being optimized before
``self.frozen_stages``. | _freeze_stages | python | open-mmlab/mmaction2 | mmaction/models/backbones/x3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py | Apache-2.0 |
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = MMLogger.get_current_instance()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, BlockX3D):
constant_init(m.conv3.bn, 0)
else:
raise TypeError('pretrained must be a str or None') | Initiate the parameters either from existing checkpoint or from
scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/backbones/x3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py | Apache-2.0 |
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1_s(x)
x = self.conv1_t(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
x = res_layer(x)
x = self.conv5(x)
return x | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
| forward | python | open-mmlab/mmaction2 | mmaction/models/backbones/x3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py | Apache-2.0 |
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval() | Set the optimization status when training. | train | python | open-mmlab/mmaction2 | mmaction/models/backbones/x3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = self.conv_s(x)
x = self.bn_s(x)
x = self.relu(x)
x = self.conv_t(x)
return x | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
| forward | python | open-mmlab/mmaction2 | mmaction/models/common/conv2plus1d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/common/conv2plus1d.py | Apache-2.0 |
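The forward above is the (2+1)D factorization: a 1xkxk spatial conv, a norm and activation, then a kx1x1 temporal conv, which together approximate a full kxkxk 3D conv with fewer parameters and one extra nonlinearity. A minimal sketch (the intermediate channel count here is a simple choice, not necessarily the one this module computes):

import torch
import torch.nn as nn


class Conv2Plus1d(nn.Module):
    """Minimal (2+1)D conv: spatial 1xkxk conv, BN+ReLU, temporal kx1x1 conv."""

    def __init__(self, in_channels, out_channels, k=3, mid_channels=None):
        super().__init__()
        mid_channels = mid_channels or out_channels
        self.conv_s = nn.Conv3d(in_channels, mid_channels, (1, k, k),
                                padding=(0, k // 2, k // 2), bias=False)
        self.bn_s = nn.BatchNorm3d(mid_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv_t = nn.Conv3d(mid_channels, out_channels, (k, 1, 1),
                                padding=(k // 2, 0, 0), bias=False)

    def forward(self, x):
        return self.conv_t(self.relu(self.bn_s(self.conv_s(x))))


out = Conv2Plus1d(3, 64)(torch.randn(1, 3, 8, 56, 56))
assert out.shape == (1, 64, 8, 56, 56)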
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x_1 = self.conv_1(x)
x_2 = self.conv_2(x)
if self.op == 'concat':
out = torch.cat([x_1, x_2], 1)
else:
out = x_1 + x_2
return out | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
| forward | python | open-mmlab/mmaction2 | mmaction/models/common/conv_audio.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/common/conv_audio.py | Apache-2.0 |
def aggregate_stats(self):
"""Synchronize running_mean, and running_var to self.bn.
Call this before eval, then call model.eval(); When eval, forward
function will call self.bn instead of self.split_bn, During this time
the running_mean, and running_var of self.bn has been obtained from
self.split_bn.
"""
if self.split_bn.track_running_stats:
aggre_func = self._get_aggregated_mean_std
self.bn.running_mean.data, self.bn.running_var.data = aggre_func(
self.split_bn.running_mean, self.split_bn.running_var,
self.num_splits)
self.bn.num_batches_tracked = self.split_bn.num_batches_tracked.detach(
) | Synchronize running_mean and running_var to self.bn.
Call this before evaluation, then call model.eval(). In eval mode, the
forward function uses self.bn instead of self.split_bn, and by that time
the running_mean and running_var of self.bn have already been obtained
from self.split_bn.
| aggregate_stats | python | open-mmlab/mmaction2 | mmaction/models/common/sub_batchnorm3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/common/sub_batchnorm3d.py | Apache-2.0 |
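The aggregation itself lives in `_get_aggregated_mean_std` (not shown here); a standard way to fuse per-split statistics, and presumably what it does, is to average the split means and recombine the variances through the second moments. A standalone sketch under that assumption:

import torch

num_splits, channels = 4, 8
split_mean = torch.randn(num_splits * channels)  # split_bn.running_mean
split_var = torch.rand(num_splits * channels)    # split_bn.running_var

# Global mean is the average of the per-split means.
mean = split_mean.view(num_splits, channels).mean(0)
# Global variance via second moments: E[x^2] - (E[x])^2 across splits.
second_moment = (split_var + split_mean ** 2).view(num_splits, channels).mean(0)
var = second_moment - mean ** 2

assert mean.shape == (channels,) and var.shape == (channels,)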
def forward(self, x):
"""Defines the computation performed at every call."""
if self.training:
n, c, t, h, w = x.shape
assert n % self.num_splits == 0
x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
x = self.split_bn(x)
x = x.view(n, c, t, h, w)
else:
x = self.bn(x)
if self.affine:
x = x * self.weight.view(-1, 1, 1, 1)
x = x + self.bias.view(-1, 1, 1, 1)
return x | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/common/sub_batchnorm3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/common/sub_batchnorm3d.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# [n, c, h, w]
n, c, h, w = x.size()
num_segments = self.num_segments
num_batches = n // num_segments
assert c == self.in_channels
# [num_batches, c, num_segments, h, w]
x = x.view(num_batches, num_segments, c, h, w)
x = x.permute(0, 2, 1, 3, 4).contiguous()
# [num_batches * c, num_segments, 1, 1]
theta_out = F.adaptive_avg_pool2d(
x.view(-1, num_segments, h, w), (1, 1))
# [num_batches * c, 1, adaptive_kernel_size, 1]
conv_kernel = self.G(theta_out.view(-1, num_segments)).view(
num_batches * c, 1, -1, 1)
# [num_batches, c, num_segments, 1, 1]
local_activation = self.L(theta_out.view(-1, c, num_segments)).view(
num_batches, c, num_segments, 1, 1)
# [num_batches, c, num_segments, h, w]
new_x = x * local_activation
# [1, num_batches * c, num_segments, h * w]
y = F.conv2d(
new_x.view(1, num_batches * c, num_segments, h * w),
conv_kernel,
bias=None,
stride=(self.adaptive_convolution_stride, 1),
padding=(self.adaptive_convolution_padding, 0),
groups=num_batches * c)
# [n, c, h, w]
y = y.view(num_batches, c, num_segments, h, w)
y = y.permute(0, 2, 1, 3, 4).contiguous().view(n, c, h, w)
return y | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
| forward | python | open-mmlab/mmaction2 | mmaction/models/common/tam.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/common/tam.py | Apache-2.0 |
def forward(self, query, key=None, value=None, residual=None, **kwargs):
"""Defines the computation performed at every call."""
assert residual is None, (
'Always adding the shortcut in the forward function')
init_cls_token = query[:, 0, :].unsqueeze(1)
identity = query_t = query[:, 1:, :]
# query_t [batch_size, num_patches * num_frames, embed_dims]
b, pt, m = query_t.size()
p, t = pt // self.num_frames, self.num_frames
# res_temporal [batch_size * num_patches, num_frames, embed_dims]
query_t = self.norm(query_t.reshape(b * p, t, m)).permute(1, 0, 2)
res_temporal = self.attn(query_t, query_t, query_t)[0].permute(1, 0, 2)
res_temporal = self.dropout_layer(
self.proj_drop(res_temporal.contiguous()))
res_temporal = self.temporal_fc(res_temporal)
# res_temporal [batch_size, num_patches * num_frames, embed_dims]
res_temporal = res_temporal.reshape(b, p * t, m)
# ret_value [batch_size, num_patches * num_frames + 1, embed_dims]
new_query_t = identity + res_temporal
new_query = torch.cat((init_cls_token, new_query_t), 1)
return new_query | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/common/transformer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/common/transformer.py | Apache-2.0 |
def forward(self, query, key=None, value=None, residual=None, **kwargs):
"""Defines the computation performed at every call."""
assert residual is None, (
'Always adding the shortcut in the forward function')
identity = query
init_cls_token = query[:, 0, :].unsqueeze(1)
query_s = query[:, 1:, :]
# query_s [batch_size, num_patches * num_frames, embed_dims]
b, pt, m = query_s.size()
p, t = pt // self.num_frames, self.num_frames
# cls_token [batch_size * num_frames, 1, embed_dims]
cls_token = init_cls_token.repeat(1, t, 1).reshape(b * t,
m).unsqueeze(1)
# query_s [batch_size * num_frames, num_patches + 1, embed_dims]
query_s = rearrange(query_s, 'b (p t) m -> (b t) p m', p=p, t=t)
query_s = torch.cat((cls_token, query_s), 1)
# res_spatial [batch_size * num_frames, num_patches + 1, embed_dims]
query_s = self.norm(query_s).permute(1, 0, 2)
res_spatial = self.attn(query_s, query_s, query_s)[0].permute(1, 0, 2)
res_spatial = self.dropout_layer(
self.proj_drop(res_spatial.contiguous()))
# cls_token [batch_size, 1, embed_dims]
cls_token = res_spatial[:, 0, :].reshape(b, t, m)
cls_token = torch.mean(cls_token, 1, True)
# res_spatial [batch_size * num_frames, num_patches + 1, embed_dims]
res_spatial = rearrange(
res_spatial[:, 1:, :], '(b t) p m -> b (p t) m', p=p, t=t)
res_spatial = torch.cat((cls_token, res_spatial), 1)
new_query = identity + res_spatial
return new_query | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/common/transformer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/common/transformer.py | Apache-2.0 |
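Both divided-attention variants rely on the same einops regrouping: patch tokens of shape (batch, patches*frames, dims) are reshaped so temporal attention sees length-t sequences per patch and spatial attention sees length-p sequences per frame. A small shape demo with illustrative sizes:

import torch
from einops import rearrange

b, p, t, m = 2, 196, 8, 768        # batch, patches, frames, dims
tokens = torch.randn(b, p * t, m)  # patch tokens without the cls token

temporal = rearrange(tokens, 'b (p t) m -> (b p) t m', p=p, t=t)
assert temporal.shape == (b * p, t, m)  # attend over time per patch

spatial = rearrange(tokens, 'b (p t) m -> (b t) p m', p=p, t=t)
assert spatial.shape == (b * t, p, m)   # attend over space per frame

# Regrouping back restores the original layout exactly.
assert torch.equal(rearrange(temporal, '(b p) t m -> b (p t) m', b=b), tokens)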
def forward(self, x, residual=None):
"""Defines the computation performed at every call."""
assert residual is None, ('Cannot apply pre-norm with FFNWithNorm')
return super().forward(self.norm(x), x) | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/common/transformer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/common/transformer.py | Apache-2.0 |
def forward(self,
data: Union[dict, Tuple[dict]],
training: bool = False) -> Union[dict, Tuple[dict]]:
"""Perform normalization, padding, bgr2rgb conversion and batch
augmentation based on ``BaseDataPreprocessor``.
Args:
data (dict or Tuple[dict]): data sampled from dataloader.
training (bool): Whether to enable training time augmentation.
Returns:
dict or Tuple[dict]: Data in the same format as the model input.
"""
data = self.cast_data(data)
if isinstance(data, dict):
return self.forward_onesample(data, training=training)
elif isinstance(data, (tuple, list)):
outputs = []
for data_sample in data:
output = self.forward_onesample(data_sample, training=training)
outputs.append(output)
return tuple(outputs)
else:
raise TypeError(f'Unsupported data type: {type(data)}!') | Perform normalization, padding, bgr2rgb conversion and batch
augmentation based on ``BaseDataPreprocessor``.
Args:
data (dict or Tuple[dict]): data sampled from dataloader.
training (bool): Whether to enable training time augmentation.
Returns:
dict or Tuple[dict]: Data in the same format as the model input.
| forward | python | open-mmlab/mmaction2 | mmaction/models/data_preprocessors/data_preprocessor.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/data_preprocessors/data_preprocessor.py | Apache-2.0 |
def forward_onesample(self, data, training: bool = False) -> dict:
"""Perform normalization, padding, bgr2rgb conversion and batch
augmentation on one data sample.
Args:
data (dict): data sampled from dataloader.
training (bool): Whether to enable training time augmentation.
Returns:
dict: Data in the same format as the model input.
"""
inputs, data_samples = data['inputs'], data['data_samples']
inputs, data_samples = self.preprocess(inputs, data_samples, training)
data['inputs'] = inputs
data['data_samples'] = data_samples
return data | Perform normalization, padding, bgr2rgb conversion and batch
augmentation on one data sample.
Args:
data (dict): data sampled from dataloader.
training (bool): Whether to enable training time augmentation.
Returns:
dict: Data in the same format as the model input.
| forward_onesample | python | open-mmlab/mmaction2 | mmaction/models/data_preprocessors/data_preprocessor.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/data_preprocessors/data_preprocessor.py | Apache-2.0 |
def forward(self, data: Dict, training: bool = False) -> Dict:
"""Preprocesses the data into the model input format.
Args:
data (dict): Data returned by dataloader.
training (bool): Whether to enable training time augmentation.
Returns:
dict: Data in the same format as the model input.
"""
data = self.cast_data(data)
inputs, data_samples = data['inputs'], data['data_samples']
for modality, modality_data in inputs.items():
preprocessor = self.preprocessors[modality]
modality_data, data_samples = preprocessor.preprocess(
modality_data, data_samples, training)
inputs[modality] = modality_data
data['inputs'] = inputs
data['data_samples'] = data_samples
return data | Preprocesses the data into the model input format.
Args:
data (dict): Data returned by dataloader.
training (bool): Whether to enable training time augmentation.
Returns:
dict: Data in the same format as the model input.
| forward | python | open-mmlab/mmaction2 | mmaction/models/data_preprocessors/multimodal_data_preprocessor.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/data_preprocessors/multimodal_data_preprocessor.py | Apache-2.0 |
def loss(self, feats: Union[torch.Tensor, Tuple[torch.Tensor]],
data_samples: SampleList, **kwargs) -> Dict:
"""Perform forward propagation of head and loss calculation on the
features of the upstream network.
Args:
feats (torch.Tensor | tuple[torch.Tensor]): Features from
upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
"""
cls_scores = self(feats, **kwargs)
return self.loss_by_feat(cls_scores, data_samples) | Perform forward propagation of head and loss calculation on the
features of the upstream network.
Args:
feats (torch.Tensor | tuple[torch.Tensor]): Features from
upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
| loss | python | open-mmlab/mmaction2 | mmaction/models/heads/base.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py | Apache-2.0 |
def loss_by_feat(self, cls_scores: torch.Tensor,
data_samples: SampleList) -> Dict:
"""Calculate the loss based on the features extracted by the head.
Args:
cls_scores (torch.Tensor): Classification prediction results of
all class, has shape (batch_size, num_classes).
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
"""
labels = [x.gt_label for x in data_samples]
labels = torch.stack(labels).to(cls_scores.device)
labels = labels.squeeze()
losses = dict()
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
elif labels.dim() == 1 and labels.size()[0] == self.num_classes \
and cls_scores.size()[0] == 1:
# Fix a bug when training with soft labels and batch size is 1.
# When using soft labels, `labels` and `cls_score` share the same
# shape.
labels = labels.unsqueeze(0)
if cls_scores.size() != labels.size():
top_k_acc = top_k_accuracy(cls_scores.detach().cpu().numpy(),
labels.detach().cpu().numpy(),
self.topk)
for k, a in zip(self.topk, top_k_acc):
losses[f'top{k}_acc'] = torch.tensor(
a, device=cls_scores.device)
if self.label_smooth_eps != 0:
if cls_scores.size() != labels.size():
labels = F.one_hot(labels, num_classes=self.num_classes)
labels = ((1 - self.label_smooth_eps) * labels +
self.label_smooth_eps / self.num_classes)
loss_cls = self.loss_cls(cls_scores, labels)
# loss_cls may be dictionary or single tensor
if isinstance(loss_cls, dict):
losses.update(loss_cls)
else:
losses['loss_cls'] = loss_cls
return losses | Calculate the loss based on the features extracted by the head.
Args:
cls_scores (torch.Tensor): Classification prediction results of
all class, has shape (batch_size, num_classes).
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
| loss_by_feat | python | open-mmlab/mmaction2 | mmaction/models/heads/base.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py | Apache-2.0 |
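The label-smoothing branch above blends a one-hot target with a uniform distribution. A short numeric check of that formula; the class count, epsilon and labels are example values only.

import torch
import torch.nn.functional as F

num_classes, eps = 4, 0.1
labels = torch.tensor([2, 0])                        # hard labels, shape (N,)
one_hot = F.one_hot(labels, num_classes=num_classes).float()
smoothed = (1 - eps) * one_hot + eps / num_classes   # same formula as above
# The true class keeps 0.925 of the mass, every other class gets 0.025,
# and each row still sums to 1.
assert torch.allclose(smoothed.sum(dim=1), torch.ones(len(labels)))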
def predict(self, feats: Union[torch.Tensor, Tuple[torch.Tensor]],
data_samples: SampleList, **kwargs) -> SampleList:
"""Perform forward propagation of head and predict recognition results
on the features of the upstream network.
Args:
feats (torch.Tensor | tuple[torch.Tensor]): Features from
upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
list[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
"""
cls_scores = self(feats, **kwargs)
return self.predict_by_feat(cls_scores, data_samples) | Perform forward propagation of head and predict recognition results
on the features of the upstream network.
Args:
feats (torch.Tensor | tuple[torch.Tensor]): Features from
upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
list[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
| predict | python | open-mmlab/mmaction2 | mmaction/models/heads/base.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py | Apache-2.0 |
def predict_by_feat(self, cls_scores: torch.Tensor,
data_samples: SampleList) -> SampleList:
"""Transform a batch of output features extracted from the head into
prediction results.
Args:
cls_scores (torch.Tensor): Classification scores, has a shape
(B*num_segs, num_classes)
data_samples (list[:obj:`ActionDataSample`]): The
            annotation data of every sample. It usually includes
information such as `gt_label`.
Returns:
List[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
"""
num_segs = cls_scores.shape[0] // len(data_samples)
cls_scores = self.average_clip(cls_scores, num_segs=num_segs)
pred_labels = cls_scores.argmax(dim=-1, keepdim=True).detach()
for data_sample, score, pred_label in zip(data_samples, cls_scores,
pred_labels):
data_sample.set_pred_score(score)
data_sample.set_pred_label(pred_label)
return data_samples | Transform a batch of output features extracted from the head into
prediction results.
Args:
cls_scores (torch.Tensor): Classification scores, has a shape
(B*num_segs, num_classes)
data_samples (list[:obj:`ActionDataSample`]): The
        annotation data of every sample. It usually includes
information such as `gt_label`.
Returns:
List[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
| predict_by_feat | python | open-mmlab/mmaction2 | mmaction/models/heads/base.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py | Apache-2.0 |
def average_clip(self,
cls_scores: torch.Tensor,
num_segs: int = 1) -> torch.Tensor:
"""Averaging class scores over multiple clips.
Using different averaging types ('score' or 'prob' or None,
    which are defined in test_cfg) to compute the final averaged
class score. Only called in test mode.
Args:
cls_scores (torch.Tensor): Class scores to be averaged.
num_segs (int): Number of clips for each input sample.
Returns:
torch.Tensor: Averaged class scores.
"""
if self.average_clips not in ['score', 'prob', None]:
raise ValueError(f'{self.average_clips} is not supported. '
f'Currently supported ones are '
f'["score", "prob", None]')
batch_size = cls_scores.shape[0]
cls_scores = cls_scores.view((batch_size // num_segs, num_segs) +
cls_scores.shape[1:])
if self.average_clips is None:
return cls_scores
elif self.average_clips == 'prob':
cls_scores = F.softmax(cls_scores, dim=2).mean(dim=1)
elif self.average_clips == 'score':
cls_scores = cls_scores.mean(dim=1)
return cls_scores | Averaging class scores over multiple clips.
Using different averaging types ('score' or 'prob' or None,
which are defined in test_cfg) to compute the final averaged
class score. Only called in test mode.
Args:
cls_scores (torch.Tensor): Class scores to be averaged.
num_segs (int): Number of clips for each input sample.
Returns:
torch.Tensor: Averaged class scores.
| average_clip | python | open-mmlab/mmaction2 | mmaction/models/heads/base.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py | Apache-2.0 |
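A compact sketch of what ``average_clip`` computes: scores from the ``num_segs`` clips of one video are regrouped and reduced along the clip axis, with an optional softmax first for the 'prob' mode. Shapes below are illustrative.

import torch
import torch.nn.functional as F

batch, num_segs, num_classes = 2, 3, 5
cls_scores = torch.randn(batch * num_segs, num_classes)    # (B*num_segs, C)
scores = cls_scores.view(batch, num_segs, num_classes)

avg_by_score = scores.mean(dim=1)                          # 'score' mode
avg_by_prob = F.softmax(scores, dim=2).mean(dim=1)         # 'prob' mode
assert avg_by_score.shape == avg_by_prob.shape == (batch, num_classes)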
def forward(self,
x: Tensor,
num_segs: Optional[int] = None,
**kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int): For 2D backbone. Number of segments into which
a video is divided. Defaults to None.
Returns:
Tensor: The output features after pooling.
"""
if isinstance(x, Tensor):
n_dims = x.ndim
elif isinstance(x, tuple):
n_dims = x[0].ndim
assert self.backbone_name == 'slowfast', \
'Only support SlowFast backbone to input tuple'
else:
raise NotImplementedError(f'Unsupported feature type: {type(x)}')
# For 2D backbone with spatial dimension
if n_dims == 4:
assert num_segs is not None
if self.backbone_name == 'tsm':
assert self.num_segments is not None, \
'Please Specify num_segments for TSM'
num_segs = self.num_segments
# [N, T, channels, H, W]
x = x.view((-1, num_segs) + x.shape[1:])
feat = self.pool1d(self.pool2d(x, dim=[-2, -1]), dim=1)
elif n_dims == 5:
if self.backbone_name == 'slowfast':
x_slow, x_fast = x
assert self.temporal_type is not None, \
'slowfast backbone has to pool temporal dimension'
x_fast = self.pool1d(self.pool2d(x_fast, dim=[-2, -1]), dim=2)
x_slow = self.pool1d(self.pool2d(x_slow, dim=[-2, -1]), dim=2)
feat = torch.cat((x_slow, x_fast), dim=1)
# For GCN-based backbone
elif self.backbone_name == 'gcn':
# N, M, C, T, V
feat = self.pool1d(self.pool2d(x, dim=[-2, -1]), dim=1)
# For 3D backbone with spatial dimension
else:
# [N, channels, T, H, W]
feat = self.pool1d(self.pool2d(x, dim=[-2, -1]), dim=2)
# For backbone output feature without spatial and temporal dimension
elif n_dims == 2:
# [N, channels]
feat = x
return feat | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int): For 2D backbone. Number of segments into which
a video is divided. Defaults to None.
Returns:
Tensor: The output features after pooling.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/feature_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/feature_head.py | Apache-2.0 |
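For the 2D-backbone branch above, the reshaping and pooling can be summarized in a few lines; plain ``mean`` is used here for simplicity, while the real head picks mean or max through its pooling configuration.

import torch

N, num_segs, C, H, W = 2, 8, 256, 7, 7
feat_2d = torch.randn(N * num_segs, C, H, W)     # per-frame feature maps

x = feat_2d.view(N, num_segs, C, H, W)           # regroup frames by video
x = x.mean(dim=[-2, -1])                         # spatial pooling -> (N, T, C)
video_feat = x.mean(dim=1)                       # temporal pooling -> (N, C)
assert video_feat.shape == (N, C)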
def predict_by_feat(self, feats: Union[Tensor, Tuple[Tensor]],
data_samples) -> Tensor:
"""Integrate multi-view features into one tensor.
Args:
feats (torch.Tensor | tuple[torch.Tensor]): Features from
upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
Tensor: The integrated multi-view features.
"""
num_segs = feats.shape[0] // len(data_samples)
feats = self.average_clip(feats, num_segs=num_segs)
return feats | Integrate multi-view features into one tensor.
Args:
feats (torch.Tensor | tuple[torch.Tensor]): Features from
upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
Tensor: The integrated multi-view features.
| predict_by_feat | python | open-mmlab/mmaction2 | mmaction/models/heads/feature_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/feature_head.py | Apache-2.0 |
def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
"""Forward features from the upstream network.
Args:
x (torch.Tensor): Features from the upstream network.
Returns:
torch.Tensor: Classification scores with shape (B, num_classes).
"""
N, M, C, T, V = x.shape
x = x.view(N * M, C, T, V)
x = self.pool(x)
x = x.view(N, M, C)
x = x.mean(dim=1)
assert x.shape[1] == self.in_channels
if self.dropout is not None:
x = self.dropout(x)
cls_scores = self.fc(x)
return cls_scores | Forward features from the upstream network.
Args:
x (torch.Tensor): Features from the upstream network.
Returns:
torch.Tensor: Classification scores with shape (B, num_classes).
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/gcn_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/gcn_head.py | Apache-2.0 |
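A hedged sketch of the skeleton-feature pooling above: GCN backbones output (N, M, C, T, V) with M persons, T frames and V joints; the (T, V) grid is pooled away and person features are averaged, leaving one (N, C) vector per sample. ``AdaptiveAvgPool2d`` is assumed here as the pooling layer.

import torch
import torch.nn as nn

N, M, C, T, V = 2, 2, 256, 25, 17      # batch, persons, channels, frames, joints
x = torch.randn(N, M, C, T, V)

pool = nn.AdaptiveAvgPool2d(1)
x = pool(x.view(N * M, C, T, V))        # -> (N*M, C, 1, 1)
x = x.view(N, M, C).mean(dim=1)         # average over persons -> (N, C)
assert x.shape == (N, C)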
def forward(self, x: Tensor, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# [N, in_channels, 4, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N, in_channels, 1, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/i3d_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/i3d_head.py | Apache-2.0 |
def pre_logits(self, feats: Tuple[List[Tensor]]) -> Tensor:
"""The process before the final classification head.
The input ``feats`` is a tuple of list of tensor, and each tensor is
the feature of a backbone stage.
"""
if self.with_cls_token:
_, cls_token = feats[-1]
return cls_token
else:
patch_token = feats[-1]
return patch_token.mean(dim=(2, 3, 4)) | The process before the final classification head.
The input ``feats`` is a tuple of list of tensor, and each tensor is
the feature of a backbone stage.
| pre_logits | python | open-mmlab/mmaction2 | mmaction/models/heads/mvit_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/mvit_head.py | Apache-2.0 |
def forward(self, x: Tuple[List[Tensor]], **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tuple[List[Tensor]]): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
x = self.pre_logits(x)
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score | Defines the computation performed at every call.
Args:
x (Tuple[List[Tensor]]): The input data.
Returns:
Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/mvit_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/mvit_head.py | Apache-2.0 |
def forward(self, x: Tensor, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
if len(x.shape) == 4:
cls_score = self.fc2d(x)
else:
cls_score = self.fc3d(x)
return cls_score | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/omni_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/omni_head.py | Apache-2.0 |
def loss_by_feat(self, cls_scores: Union[Tensor, Tuple[Tensor]],
data_samples: SampleList) -> dict:
"""Calculate the loss based on the features extracted by the head.
Args:
cls_scores (Tensor): Classification prediction results of
all class, has shape (batch_size, num_classes).
data_samples (List[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
"""
labels = [x.gt_label for x in data_samples]
labels = torch.stack(labels).to(cls_scores.device)
labels = labels.squeeze()
losses = dict()
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
elif labels.dim() == 1 and cls_scores.size()[0] == 1:
# Fix a bug when training with soft labels and batch size is 1.
        # When using soft labels, `labels` and `cls_score` share the same
# shape.
labels = labels.unsqueeze(0)
if cls_scores.size() != labels.size():
top_k_acc = top_k_accuracy(cls_scores.detach().cpu().numpy(),
labels.detach().cpu().numpy(),
self.topk)
for k, a in zip(self.topk, top_k_acc):
losses[f'top{k}_acc'] = torch.tensor(
a, device=cls_scores.device)
if self.label_smooth_eps != 0:
if cls_scores.size() != labels.size():
labels = F.one_hot(labels, num_classes=self.num_classes)
labels = ((1 - self.label_smooth_eps) * labels +
self.label_smooth_eps / self.num_classes)
loss_cls = self.loss_cls(cls_scores, labels)
# loss_cls may be dictionary or single tensor
if isinstance(loss_cls, dict):
losses.update(loss_cls)
else:
losses['loss_cls'] = loss_cls
return losses | Calculate the loss based on the features extracted by the head.
Args:
cls_scores (Tensor): Classification prediction results of
all class, has shape (batch_size, num_classes).
data_samples (List[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
| loss_by_feat | python | open-mmlab/mmaction2 | mmaction/models/heads/omni_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/omni_head.py | Apache-2.0 |
def forward(self, x: Tuple[torch.Tensor]) -> Dict:
"""Defines the computation performed at every call."""
x_rgb, x_pose = self.avg_pool(x[0]), self.avg_pool(x[1])
x_rgb = x_rgb.view(x_rgb.size(0), -1)
x_pose = x_pose.view(x_pose.size(0), -1)
x_rgb = self.dropout_rgb(x_rgb)
x_pose = self.dropout_pose(x_pose)
cls_scores = dict()
cls_scores['rgb'] = self.fc_rgb(x_rgb)
cls_scores['pose'] = self.fc_pose(x_pose)
return cls_scores | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/heads/rgbpose_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py | Apache-2.0 |
def loss(self, feats: Tuple[torch.Tensor], data_samples: SampleList,
**kwargs) -> Dict:
"""Perform forward propagation of head and loss calculation on the
features of the upstream network.
Args:
feats (tuple[torch.Tensor]): Features from upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
"""
cls_scores = self(feats, **kwargs)
return self.loss_by_feat(cls_scores, data_samples) | Perform forward propagation of head and loss calculation on the
features of the upstream network.
Args:
feats (tuple[torch.Tensor]): Features from upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
| loss | python | open-mmlab/mmaction2 | mmaction/models/heads/rgbpose_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py | Apache-2.0 |
def loss_by_feat(self, cls_scores: Dict[str, torch.Tensor],
data_samples: SampleList) -> Dict:
"""Calculate the loss based on the features extracted by the head.
Args:
cls_scores (dict[str, torch.Tensor]): The dict of
classification scores,
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
"""
labels = torch.stack([x.gt_label for x in data_samples])
labels = labels.squeeze()
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
elif labels.dim() == 1 and labels.size()[0] == self.num_classes \
and cls_scores.size()[0] == 1:
# Fix a bug when training with soft labels and batch size is 1.
# When using soft labels, `labels` and `cls_score` share the same
# shape.
labels = labels.unsqueeze(0)
losses = dict()
for loss_name, weight in zip(self.loss_components, self.loss_weights):
cls_score = cls_scores[loss_name]
loss_cls = self.loss_by_scores(cls_score, labels)
loss_cls = {loss_name + '_' + k: v for k, v in loss_cls.items()}
loss_cls[f'{loss_name}_loss_cls'] *= weight
losses.update(loss_cls)
return losses | Calculate the loss based on the features extracted by the head.
Args:
cls_scores (dict[str, torch.Tensor]): The dict of
classification scores,
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
dict: A dictionary of loss components.
| loss_by_feat | python | open-mmlab/mmaction2 | mmaction/models/heads/rgbpose_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py | Apache-2.0 |
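The weighting above keeps every metric from each branch but scales only the classification term. A toy rendering with made-up numbers; the keys follow the '<branch>_loss_cls' pattern produced by the loop.

import torch

loss_components, loss_weights = ['rgb', 'pose'], [1.0, 0.5]
per_branch = {   # pretend outputs of loss_by_scores for each branch
    'rgb': {'loss_cls': torch.tensor(2.0), 'top1_acc': torch.tensor(0.80)},
    'pose': {'loss_cls': torch.tensor(1.0), 'top1_acc': torch.tensor(0.70)},
}

losses = {}
for name, weight in zip(loss_components, loss_weights):
    branch = {f'{name}_{k}': v for k, v in per_branch[name].items()}
    branch[f'{name}_loss_cls'] = branch[f'{name}_loss_cls'] * weight
    losses.update(branch)
# losses['pose_loss_cls'] is 0.5 while losses['pose_top1_acc'] stays 0.70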
def loss_by_scores(self, cls_scores: torch.Tensor,
labels: torch.Tensor) -> Dict:
"""Calculate the loss based on the features extracted by the head.
Args:
cls_scores (torch.Tensor): Classification prediction
results of all class, has shape (batch_size, num_classes).
labels (torch.Tensor): The labels used to calculate the loss.
Returns:
dict: A dictionary of loss components.
"""
losses = dict()
if cls_scores.size() != labels.size():
top_k_acc = top_k_accuracy(cls_scores.detach().cpu().numpy(),
labels.detach().cpu().numpy(),
self.topk)
for k, a in zip(self.topk, top_k_acc):
losses[f'top{k}_acc'] = torch.tensor(
a, device=cls_scores.device)
if self.label_smooth_eps != 0:
if cls_scores.size() != labels.size():
labels = F.one_hot(labels, num_classes=self.num_classes)
labels = ((1 - self.label_smooth_eps) * labels +
self.label_smooth_eps / self.num_classes)
loss_cls = self.loss_cls(cls_scores, labels)
# loss_cls may be dictionary or single tensor
if isinstance(loss_cls, dict):
losses.update(loss_cls)
else:
losses['loss_cls'] = loss_cls
return losses | Calculate the loss based on the features extracted by the head.
Args:
cls_scores (torch.Tensor): Classification prediction
results of all class, has shape (batch_size, num_classes).
labels (torch.Tensor): The labels used to calculate the loss.
Returns:
dict: A dictionary of loss components.
| loss_by_scores | python | open-mmlab/mmaction2 | mmaction/models/heads/rgbpose_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py | Apache-2.0 |
def predict(self, feats: Tuple[torch.Tensor], data_samples: SampleList,
**kwargs) -> SampleList:
"""Perform forward propagation of head and predict recognition results
on the features of the upstream network.
Args:
feats (tuple[torch.Tensor]): Features from upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
list[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
"""
cls_scores = self(feats, **kwargs)
return self.predict_by_feat(cls_scores, data_samples) | Perform forward propagation of head and predict recognition results
on the features of the upstream network.
Args:
feats (tuple[torch.Tensor]): Features from upstream network.
data_samples (list[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
list[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
| predict | python | open-mmlab/mmaction2 | mmaction/models/heads/rgbpose_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py | Apache-2.0 |
def predict_by_feat(self, cls_scores: Dict[str, torch.Tensor],
data_samples: SampleList) -> SampleList:
"""Transform a batch of output features extracted from the head into
prediction results.
Args:
cls_scores (dict[str, torch.Tensor]): The dict of
classification scores,
data_samples (list[:obj:`ActionDataSample`]): The
            annotation data of every sample. It usually includes
information such as `gt_label`.
Returns:
list[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
"""
pred_scores = [dict() for _ in range(len(data_samples))]
for name in self.loss_components:
cls_score = cls_scores[name]
cls_score = self.predict_by_scores(cls_score, data_samples)
for pred_score, score in zip(pred_scores, cls_score):
pred_score[f'{name}'] = score
for data_sample, pred_score, in zip(data_samples, pred_scores):
data_sample.set_pred_score(pred_score)
return data_samples | Transform a batch of output features extracted from the head into
prediction results.
Args:
cls_scores (dict[str, torch.Tensor]): The dict of
classification scores,
data_samples (list[:obj:`ActionDataSample`]): The
annotation data of every samples. It usually includes
information such as `gt_label`.
Returns:
list[:obj:`ActionDataSample`]: Recognition results wrapped
by :obj:`ActionDataSample`.
| predict_by_feat | python | open-mmlab/mmaction2 | mmaction/models/heads/rgbpose_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py | Apache-2.0 |
def predict_by_scores(self, cls_scores: torch.Tensor,
data_samples: SampleList) -> torch.Tensor:
"""Transform a batch of output features extracted from the head into
prediction results.
Args:
cls_scores (torch.Tensor): Classification scores, has a shape
(B*num_segs, num_classes)
data_samples (list[:obj:`ActionDataSample`]): The annotation
            data of every sample.
Returns:
torch.Tensor: The averaged classification scores.
"""
num_segs = cls_scores.shape[0] // len(data_samples)
cls_scores = self.average_clip(cls_scores, num_segs=num_segs)
return cls_scores | Transform a batch of output features extracted from the head into
prediction results.
Args:
cls_scores (torch.Tensor): Classification scores, has a shape
(B*num_segs, num_classes)
data_samples (list[:obj:`ActionDataSample`]): The annotation
        data of every sample.
Returns:
torch.Tensor: The averaged classification scores.
| predict_by_scores | python | open-mmlab/mmaction2 | mmaction/models/heads/rgbpose_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py | Apache-2.0 |
def forward(self, x: Tuple[Tensor], **kwargs) -> None:
"""Defines the computation performed at every call.
Args:
x (tuple[torch.Tensor]): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# ([N, channel_slow, T1, H, W], [(N, channel_fast, T2, H, W)])
x_slow, x_fast = x
# ([N, channel_slow, 1, 1, 1], [N, channel_fast, 1, 1, 1])
x_slow = self.avg_pool(x_slow)
x_fast = self.avg_pool(x_fast)
# [N, channel_fast + channel_slow, 1, 1, 1]
x = torch.cat((x_fast, x_slow), dim=1)
if self.dropout is not None:
x = self.dropout(x)
# [N x C]
x = x.view(x.size(0), -1)
# [N x num_classes]
cls_score = self.fc_cls(x)
return cls_score | Defines the computation performed at every call.
Args:
x (tuple[torch.Tensor]): The input data.
Returns:
Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/slowfast_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/slowfast_head.py | Apache-2.0 |
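A brief sketch of the two-pathway fusion above: both pathways are globally pooled and concatenated along channels, so the classifier must be built with ``channel_fast + channel_slow`` input features. The channel sizes and the 400-way classifier below are example numbers (typical SlowFast-R50 values), not read from any config.

import torch
import torch.nn as nn

x_slow = torch.randn(2, 2048, 8, 7, 7)     # (N, C_slow, T, H, W)
x_fast = torch.randn(2, 256, 32, 7, 7)     # (N, C_fast, T, H, W)

pool = nn.AdaptiveAvgPool3d((1, 1, 1))
fused = torch.cat((pool(x_fast), pool(x_slow)), dim=1)    # fast first, as above
fused = fused.view(fused.size(0), -1)                     # (N, 2304)
cls_score = nn.Linear(256 + 2048, 400)(fused)             # (N, num_classes)
assert cls_score.shape == (2, 400)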
def forward(self, x: Tensor, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# [N, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/timesformer_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/timesformer_head.py | Apache-2.0 |
def forward(self,
x,
num_segs: Optional[int] = None,
fcn_test: bool = False,
**kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int, optional): Number of segments into which a video
is divided. Defaults to None.
fcn_test (bool): Whether to apply full convolution (fcn) testing.
Defaults to False.
Returns:
Tensor: The classification scores for input samples.
"""
if fcn_test:
if self.avg_pool3d:
x = self.avg_pool3d(x)
if self.new_cls is None:
self._init_new_cls()
x = self.new_cls(x)
cls_score_feat_map = x.view(x.size(0), -1)
return cls_score_feat_map
if self.avg_pool2d is None:
kernel_size = (1, x.shape[-2], x.shape[-1])
self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0)
if num_segs is None:
# [N, in_channels, 3, 7, 7]
x = self.avg_pool3d(x)
else:
# [N * num_segs, in_channels, 7, 7]
x = self.avg_pool2d(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int, optional): Number of segments into which a video
is divided. Defaults to None.
fcn_test (bool): Whether to apply full convolution (fcn) testing.
Defaults to False.
Returns:
Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/tpn_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/tpn_head.py | Apache-2.0 |
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# [N, num_segs * hidden_dim]
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/trn_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/trn_head.py | Apache-2.0 |
def forward(self, x, num_segs, **kwargs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Useless in TRNHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TRN models. The `self.num_segments` we need is a
hyper parameter to build TRN models.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = torch.flatten(x, 1)
# [N * num_segs, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N, num_segs, hidden_dim]
cls_score = self.fc_cls(x)
cls_score = cls_score.view((-1, self.num_segments) +
cls_score.size()[1:])
# [N, num_classes]
cls_score = self.consensus(cls_score)
return cls_score | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Useless in TRNHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TRN models. The `self.num_segments` we need is a
hyper parameter to build TRN models.
Returns:
torch.Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/trn_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/trn_head.py | Apache-2.0 |
def forward(self, x: Tensor, num_segs: int, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int): Useless in TSMHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TSM models. The `self.num_segments` we need is a
hyper parameter to build TSM models.
Returns:
Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = torch.flatten(x, 1)
# [N * num_segs, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N * num_segs, num_classes]
cls_score = self.fc_cls(x)
if self.is_shift and self.temporal_pool:
# [2 * N, num_segs // 2, num_classes]
cls_score = cls_score.view((-1, self.num_segments // 2) +
cls_score.size()[1:])
else:
# [N, num_segs, num_classes]
cls_score = cls_score.view((-1, self.num_segments) +
cls_score.size()[1:])
# [N, 1, num_classes]
cls_score = self.consensus(cls_score)
# [N, num_classes]
return cls_score.squeeze(1) | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int): Useless in TSMHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TSM models. The `self.num_segments` we need is a
hyper parameter to build TSM models.
Returns:
Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/tsm_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/tsm_head.py | Apache-2.0 |
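The segment-consensus step used by the TSM/TSN heads boils down to regrouping per-segment scores by video and averaging them; ``AvgConsensus`` is assumed to be a simple mean over the segment axis, which is what the shape comments above imply.

import torch

num_segments, num_classes = 8, 174
cls_score = torch.randn(4 * num_segments, num_classes)        # (N*segs, C)

cls_score = cls_score.view(-1, num_segments, num_classes)     # (N, segs, C)
video_score = cls_score.mean(dim=1, keepdim=True).squeeze(1)  # (N, C)
assert video_score.shape == (4, num_classes)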
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, h, w]
x = self.avg_pool(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score | Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/tsn_audio_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/tsn_audio_head.py | Apache-2.0 |
def forward(self, x: Tensor, num_segs: int, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int): Number of segments into which a video
is divided.
Returns:
Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
if isinstance(x, tuple):
shapes = [y.shape for y in x]
assert 1 == 0, f'x is tuple {shapes}'
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
num_segs (int): Number of segments into which a video
is divided.
Returns:
Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/tsn_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/tsn_head.py | Apache-2.0 |
def forward(self, x: Tensor, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# [N, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/uniformer_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/uniformer_head.py | Apache-2.0 |
def forward(self, x: Tensor, **kwargs) -> Tensor:
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
"""
# [N, in_channels, T, H, W]
assert self.pool is not None
x = self.pool(x)
    # [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
x = self.fc1(x)
# [N, 2048]
x = self.relu(x)
if self.dropout is not None:
x = self.dropout(x)
cls_score = self.fc2(x)
# [N, num_classes]
return cls_score | Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The classification scores for input samples.
| forward | python | open-mmlab/mmaction2 | mmaction/models/heads/x3d_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/x3d_head.py | Apache-2.0 |
def forward(self, inputs, data_samples, mode, **kwargs):
"""The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
    Note that this method handles neither back propagation nor
optimizer updating, which are done in the :meth:`train_step`.
Args:
inputs (Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (List[:obj:`ActionDataSample`], optional): The
            annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
"""
inputs = torch.stack(inputs)
if mode == 'tensor':
return self._forward(inputs, **kwargs)
if mode == 'predict':
return self.predict(inputs, data_samples, **kwargs)
elif mode == 'loss':
return self.loss(inputs, data_samples, **kwargs)
else:
raise RuntimeError(f'Invalid mode "{mode}". '
'Only supports loss, predict and tensor mode') | The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method handles neither back propagation nor
optimizer updating, which are done in the :meth:`train_step`.
Args:
inputs (Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (List[:obj:`ActionDataSample`], optional): The
        annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
| forward | python | open-mmlab/mmaction2 | mmaction/models/localizers/bmn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py | Apache-2.0 |
def loss(self, batch_inputs, batch_data_samples, **kwargs):
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Raw Inputs of the recognizer.
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`ActionDataSample`]): The batch
data samples. It usually includes information such
as ``gt_labels``.
Returns:
dict: A dictionary of loss components.
"""
gt_bbox = [
sample.gt_instances['gt_bbox'] for sample in batch_data_samples
]
label_confidence, label_start, label_end = self.generate_labels(
gt_bbox)
device = batch_inputs.device
label_confidence = label_confidence.to(device)
label_start = label_start.to(device)
label_end = label_end.to(device)
confidence_map, start, end = self._forward(batch_inputs)
loss = self.loss_cls(confidence_map, start, end, label_confidence,
label_start, label_end, self.bm_mask)
loss_dict = dict(loss=loss[0])
return loss_dict | Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Raw Inputs of the recognizer.
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`ActionDataSample`]): The batch
data samples. It usually includes information such
as ``gt_labels``.
Returns:
dict: A dictionary of loss components.
| loss | python | open-mmlab/mmaction2 | mmaction/models/localizers/bmn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py | Apache-2.0 |
def predict(self, batch_inputs, batch_data_samples, **kwargs):
"""Define the computation performed at every call when testing."""
confidence_map, start, end = self._forward(batch_inputs)
start_scores = start[0].cpu().numpy()
end_scores = end[0].cpu().numpy()
cls_confidence = (confidence_map[0][1]).cpu().numpy()
reg_confidence = (confidence_map[0][0]).cpu().numpy()
max_start = max(start_scores)
max_end = max(end_scores)
# generate the set of start points and end points
start_bins = np.zeros(len(start_scores))
start_bins[0] = 1 # [1,0,0...,0,0]
end_bins = np.zeros(len(end_scores))
end_bins[-1] = 1 # [0,0,0...,0,1]
for idx in range(1, self.tscale - 1):
if start_scores[idx] > start_scores[
idx + 1] and start_scores[idx] > start_scores[idx - 1]:
start_bins[idx] = 1
elif start_scores[idx] > (0.5 * max_start):
start_bins[idx] = 1
if end_scores[idx] > end_scores[
idx + 1] and end_scores[idx] > end_scores[idx - 1]:
end_bins[idx] = 1
elif end_scores[idx] > (0.5 * max_end):
end_bins[idx] = 1
# iterate through all combinations of start_index and end_index
new_proposals = []
for idx in range(self.tscale):
for jdx in range(self.tscale):
start_index = jdx
end_index = start_index + idx + 1
if end_index < self.tscale and start_bins[
start_index] == 1 and end_bins[end_index] == 1:
tmin = start_index / self.tscale
tmax = end_index / self.tscale
tmin_score = start_scores[start_index]
tmax_score = end_scores[end_index]
cls_score = cls_confidence[idx, jdx]
reg_score = reg_confidence[idx, jdx]
score = tmin_score * tmax_score * cls_score * reg_score
new_proposals.append([
tmin, tmax, tmin_score, tmax_score, cls_score,
reg_score, score
])
new_proposals = np.stack(new_proposals)
video_info = batch_data_samples[0].metainfo
proposal_list = post_processing(new_proposals, video_info,
self.soft_nms_alpha,
self.soft_nms_low_threshold,
self.soft_nms_high_threshold,
self.post_process_top_k,
self.feature_extraction_interval)
output = [
dict(
video_name=video_info['video_name'],
proposal_list=proposal_list)
]
return output | Define the computation performed at every call when testing. | predict | python | open-mmlab/mmaction2 | mmaction/models/localizers/bmn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py | Apache-2.0 |
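The boundary-point selection above keeps a position as a candidate start (or end) when its probability is a local maximum or exceeds half of the global maximum. A small numeric illustration with a made-up score vector:

import numpy as np

start_scores = np.array([0.10, 0.80, 0.30, 0.55, 0.20, 0.60, 0.05])
start_bins = np.zeros_like(start_scores)
start_bins[0] = 1                                  # first position always kept
max_start = start_scores.max()
for idx in range(1, len(start_scores) - 1):
    local_peak = (start_scores[idx] > start_scores[idx + 1]
                  and start_scores[idx] > start_scores[idx - 1])
    if local_peak or start_scores[idx] > 0.5 * max_start:
        start_bins[idx] = 1
# start_bins -> [1., 1., 0., 1., 0., 1., 0.]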
def _get_interp1d_bin_mask(seg_tmin, seg_tmax, tscale, num_samples,
num_samples_per_bin):
"""Generate sample mask for a boundary-matching pair."""
plen = float(seg_tmax - seg_tmin)
plen_sample = plen / (num_samples * num_samples_per_bin - 1.0)
total_samples = [
seg_tmin + plen_sample * i
for i in range(num_samples * num_samples_per_bin)
]
p_mask = []
for idx in range(num_samples):
bin_samples = total_samples[idx * num_samples_per_bin:(idx + 1) *
num_samples_per_bin]
bin_vector = np.zeros(tscale)
for sample in bin_samples:
sample_upper = math.ceil(sample)
sample_decimal, sample_down = math.modf(sample)
if 0 <= int(sample_down) <= (tscale - 1):
bin_vector[int(sample_down)] += 1 - sample_decimal
if 0 <= int(sample_upper) <= (tscale - 1):
bin_vector[int(sample_upper)] += sample_decimal
bin_vector = 1.0 / num_samples_per_bin * bin_vector
p_mask.append(bin_vector)
p_mask = np.stack(p_mask, axis=1)
return p_mask | Generate sample mask for a boundary-matching pair. | _get_interp1d_bin_mask | python | open-mmlab/mmaction2 | mmaction/models/localizers/bmn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py | Apache-2.0 |
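The helper above builds each bin of the sampling mask by spreading every fractional sample position onto its two neighbouring temporal indices with linear-interpolation weights. The core of that step in isolation, with toy numbers:

import math

import numpy as np

tscale, sample = 10, 3.7              # one fractional temporal position
vec = np.zeros(tscale)
frac, lower = math.modf(sample)       # 0.7, 3.0
if 0 <= int(lower) <= tscale - 1:
    vec[int(lower)] += 1 - frac       # weight 0.3 -> index 3
if 0 <= math.ceil(sample) <= tscale - 1:
    vec[math.ceil(sample)] += frac    # weight 0.7 -> index 4
assert np.isclose(vec.sum(), 1.0)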
def _get_interp1d_mask(self):
"""Generate sample mask for each point in Boundary-Matching Map."""
mask_mat = []
for start_index in range(self.tscale):
mask_mat_vector = []
for duration_index in range(self.tscale):
if start_index + duration_index < self.tscale:
p_tmin = start_index
p_tmax = start_index + duration_index
center_len = float(p_tmax - p_tmin) + 1
sample_tmin = p_tmin - (center_len * self.boundary_ratio)
sample_tmax = p_tmax + (center_len * self.boundary_ratio)
p_mask = self._get_interp1d_bin_mask(
sample_tmin, sample_tmax, self.tscale,
self.num_samples, self.num_samples_per_bin)
else:
p_mask = np.zeros([self.tscale, self.num_samples])
mask_mat_vector.append(p_mask)
mask_mat_vector = np.stack(mask_mat_vector, axis=2)
mask_mat.append(mask_mat_vector)
mask_mat = np.stack(mask_mat, axis=3)
mask_mat = mask_mat.astype(np.float32)
self.sample_mask = nn.Parameter(
torch.tensor(mask_mat).view(self.tscale, -1), requires_grad=False) | Generate sample mask for each point in Boundary-Matching Map. | _get_interp1d_mask | python | open-mmlab/mmaction2 | mmaction/models/localizers/bmn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py | Apache-2.0 |
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
"""Generate temporal anchors.
Args:
tmin_offset (int): Offset for the minimum value of temporal anchor.
Default: 0.
tmax_offset (int): Offset for the maximum value of temporal anchor.
Default: 1.
Returns:
tuple[Sequence[float]]: The minimum and maximum values of temporal
anchors.
"""
temporal_gap = 1. / self.tscale
anchors_tmins = []
anchors_tmaxs = []
for i in range(self.tscale):
anchors_tmins.append(temporal_gap * (i + tmin_offset))
anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
return anchors_tmins, anchors_tmaxs | Generate temporal anchors.
Args:
tmin_offset (int): Offset for the minimum value of temporal anchor.
Default: 0.
tmax_offset (int): Offset for the maximum value of temporal anchor.
Default: 1.
Returns:
tuple[Sequence[float]]: The minimum and maximum values of temporal
anchors.
| _temporal_anchors | python | open-mmlab/mmaction2 | mmaction/models/localizers/bmn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py | Apache-2.0 |
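With the default offsets the anchors tile the normalized [0, 1] timeline in consecutive windows of length 1/tscale; a worked example with tscale = 5:

tscale = 5
temporal_gap = 1.0 / tscale
anchors_tmins = [temporal_gap * (i + 0.) for i in range(tscale)]
anchors_tmaxs = [temporal_gap * (i + 1.) for i in range(tscale)]
# anchors_tmins -> [0.0, 0.2, 0.4, 0.6, 0.8]   (up to float rounding)
# anchors_tmaxs -> [0.2, 0.4, 0.6, 0.8, 1.0]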
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# x.shape [batch_size, self.feat_dim, self.tscale]
base_feature = self.x_1d_b(x)
# base_feature.shape [batch_size, self.hidden_dim_1d, self.tscale]
start = self.x_1d_s(base_feature).squeeze(1)
# start.shape [batch_size, self.tscale]
end = self.x_1d_e(base_feature).squeeze(1)
# end.shape [batch_size, self.tscale]
confidence_map = self.x_1d_p(base_feature)
# [batch_size, self.hidden_dim_1d, self.tscale]
confidence_map = self._boundary_matching_layer(confidence_map)
    # [batch_size, self.hidden_dim_1d, self.num_samples, self.tscale, self.tscale]  # noqa
confidence_map = self.x_3d_p(confidence_map).squeeze(2)
# [batch_size, self.hidden_dim_3d, self.tscale, self.tscale]
confidence_map = self.x_2d_p(confidence_map)
# [batch_size, 2, self.tscale, self.tscale]
return confidence_map, start, end | Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
| _forward | python | open-mmlab/mmaction2 | mmaction/models/localizers/bmn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py | Apache-2.0 |
def init_weights(self) -> None:
"""Initiate the parameters either from existing checkpoint or from
scratch."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1) | Initiate the parameters either from existing checkpoint or from
scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/localizers/bsn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py | Apache-2.0 |
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
"""Generate temporal anchors.
Args:
tmin_offset (int): Offset for the minimum value of temporal anchor.
Default: 0.
tmax_offset (int): Offset for the maximum value of temporal anchor.
Default: 1.
Returns:
tuple[Sequence[float]]: The minimum and maximum values of temporal
anchors.
"""
temporal_gap = 1. / self.temporal_dim
anchors_tmins = []
anchors_tmaxs = []
for i in range(self.temporal_dim):
anchors_tmins.append(temporal_gap * (i + tmin_offset))
anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
return anchors_tmins, anchors_tmaxs | Generate temporal anchors.
Args:
tmin_offset (int): Offset for the minimum value of temporal anchor.
Default: 0.
tmax_offset (int): Offset for the maximum value of temporal anchor.
Default: 1.
Returns:
tuple[Sequence[float]]: The minimum and maximum values of temporal
anchors.
| _temporal_anchors | python | open-mmlab/mmaction2 | mmaction/models/localizers/bsn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py | Apache-2.0 |
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = F.relu(self.conv1_ratio * self.conv1(x))
x = F.relu(self.conv2_ratio * self.conv2(x))
x = torch.sigmoid(self.conv3_ratio * self.conv3(x))
return x | Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
| _forward | python | open-mmlab/mmaction2 | mmaction/models/localizers/bsn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py | Apache-2.0 |
def loss(self, batch_inputs, batch_data_samples, **kwargs):
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Raw Inputs of the recognizer.
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`ActionDataSample`]): The batch
data samples. It usually includes information such
as ``gt_labels``.
Returns:
dict: A dictionary of loss components.
"""
tem_output = self._forward(batch_inputs)
score_action = tem_output[:, 0, :]
score_start = tem_output[:, 1, :]
score_end = tem_output[:, 2, :]
gt_bbox = [
sample.gt_instances['gt_bbox'] for sample in batch_data_samples
]
label_action, label_start, label_end = self.generate_labels(gt_bbox)
device = batch_inputs.device
label_action = label_action.to(device)
label_start = label_start.to(device)
label_end = label_end.to(device)
loss_action = self.loss_cls(score_action, label_action,
self.match_threshold)
loss_start = self.loss_cls(score_start, label_start,
self.match_threshold)
loss_end = self.loss_cls(score_end, label_end, self.match_threshold)
loss_dict = {
'loss_action': loss_action * self.loss_weight,
'loss_start': loss_start,
'loss_end': loss_end
}
return loss_dict | Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Raw Inputs of the recognizer.
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`ActionDataSample`]): The batch
data samples. It usually includes information such
as ``gt_labels``.
Returns:
dict: A dictionary of loss components.
| loss | python | open-mmlab/mmaction2 | mmaction/models/localizers/bsn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py | Apache-2.0 |
def predict(self, batch_inputs, batch_data_samples, **kwargs):
"""Define the computation performed at every call when testing."""
tem_output = self._forward(batch_inputs).cpu().numpy()
batch_action = tem_output[:, 0, :]
batch_start = tem_output[:, 1, :]
batch_end = tem_output[:, 2, :]
video_results = []
for batch_idx, _ in enumerate(batch_action):
video_name = batch_data_samples[batch_idx].metainfo['video_name']
video_action = batch_action[batch_idx]
video_start = batch_start[batch_idx]
video_end = batch_end[batch_idx]
video_result = np.stack((video_action, video_start, video_end,
self.anchors_tmins, self.anchors_tmaxs),
axis=1)
video_results.append((video_name, video_result))
return video_results | Define the computation performed at every call when testing. | predict | python | open-mmlab/mmaction2 | mmaction/models/localizers/bsn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py | Apache-2.0 |
def forward(self, inputs, data_samples, mode, **kwargs):
"""The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
    Note that this method handles neither back propagation nor
optimizer updating, which are done in the :meth:`train_step`.
Args:
inputs (Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (List[:obj:`ActionDataSample`], optional): The
            annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
"""
if type(inputs) is not torch.Tensor:
inputs = torch.stack(inputs)
if mode == 'tensor':
return self._forward(inputs, **kwargs)
if mode == 'predict':
return self.predict(inputs, data_samples, **kwargs)
elif mode == 'loss':
return self.loss(inputs, data_samples, **kwargs)
else:
raise RuntimeError(f'Invalid mode "{mode}". '
'Only supports loss, predict and tensor mode') | The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method handles neither back propagation nor
optimizer updating, which are done in the :meth:`train_step`.
Args:
inputs (Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (List[:obj:`ActionDataSample`], optional): The
        annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
| forward | python | open-mmlab/mmaction2 | mmaction/models/localizers/bsn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py | Apache-2.0 |
def init_weights(self) -> None:
"""Initiate the parameters either from existing checkpoint or from
scratch."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1) | Initiate the parameters either from existing checkpoint or from
scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/localizers/bsn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py | Apache-2.0 |
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = F.relu(self.fc1_ratio * self.fc1(x))
x = torch.sigmoid(self.fc2_ratio * self.fc2(x))
return x | Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
| _forward | python | open-mmlab/mmaction2 | mmaction/models/localizers/bsn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py | Apache-2.0 |
def loss(self, batch_inputs, batch_data_samples, **kwargs):
"""Calculate losses from a batch of inputs and data samples.
Args:
            batch_inputs (Tensor): Raw inputs of the localizer.
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`ActionDataSample`]): The batch
data samples. It usually includes information such
as ``gt_labels``.
Returns:
dict: A dictionary of loss components.
"""
device = self.fc1.weight.device
bsp_feature = torch.cat([
sample.gt_instances['bsp_feature'] for sample in batch_data_samples
]).to(device)
reference_temporal_iou = torch.cat([
sample.gt_instances['reference_temporal_iou']
for sample in batch_data_samples
]).to(device)
pem_output = self._forward(bsp_feature)
anchors_temporal_iou = pem_output.view(-1)
u_hmask = (reference_temporal_iou >
self.pem_high_temporal_iou_threshold).float()
u_mmask = (
(reference_temporal_iou <= self.pem_high_temporal_iou_threshold)
& (reference_temporal_iou > self.pem_low_temporal_iou_threshold)
).float()
u_lmask = (reference_temporal_iou <=
self.pem_low_temporal_iou_threshold).float()
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = self.u_ratio_m * num_h / (num_m)
r_m = torch.min(r_m, torch.Tensor([1.0]).to(device))[0]
u_smmask = torch.rand(u_hmask.size()[0], device=device)
u_smmask = u_smmask * u_mmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = self.u_ratio_l * num_h / (num_l)
r_l = torch.min(r_l, torch.Tensor([1.0]).to(device))[0]
u_slmask = torch.rand(u_hmask.size()[0], device=device)
u_slmask = u_slmask * u_lmask
u_slmask = (u_slmask > (1. - r_l)).float()
temporal_iou_weights = u_hmask + u_smmask + u_slmask
temporal_iou_loss = F.smooth_l1_loss(anchors_temporal_iou,
reference_temporal_iou)
temporal_iou_loss = torch.sum(
temporal_iou_loss *
temporal_iou_weights) / torch.sum(temporal_iou_weights)
loss_dict = dict(temporal_iou_loss=temporal_iou_loss)
return loss_dict | Calculate losses from a batch of inputs and data samples.
Args:
            batch_inputs (Tensor): Raw inputs of the localizer.
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`ActionDataSample`]): The batch
data samples. It usually includes information such
as ``gt_labels``.
Returns:
dict: A dictionary of loss components.
| loss | python | open-mmlab/mmaction2 | mmaction/models/localizers/bsn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py | Apache-2.0 |
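The core of the ``loss`` above is a balanced sampling scheme: every high-IoU proposal is kept, while medium- and low-IoU proposals are randomly subsampled to roughly ``u_ratio_m`` and ``u_ratio_l`` times the number of high-IoU ones, and a smooth-L1 regression loss is averaged over the selected proposals. The sketch below re-implements that scheme as a pure function; the thresholds and ratios are illustrative defaults, and it uses an element-wise (``reduction='none'``) smooth-L1 so the sampling weights actually modulate the loss, whereas the code above calls ``F.smooth_l1_loss`` with its default mean reduction.

import torch
import torch.nn.functional as F


def pem_temporal_iou_loss(pred_iou, ref_iou, high_thr=0.7, low_thr=0.3,
                          u_ratio_m=1.0, u_ratio_l=2.0):
    """IoU-balanced smooth-L1 regression loss (standalone sketch).

    Thresholds and sampling ratios are illustrative, not values taken
    from this file.
    """
    u_hmask = (ref_iou > high_thr).float()
    u_mmask = ((ref_iou <= high_thr) & (ref_iou > low_thr)).float()
    u_lmask = (ref_iou <= low_thr).float()

    num_h, num_m, num_l = u_hmask.sum(), u_mmask.sum(), u_lmask.sum()

    # Keep medium/low proposals with probability r_m / r_l, so their expected
    # counts are u_ratio_m * num_h and u_ratio_l * num_h respectively.
    r_m = torch.clamp(u_ratio_m * num_h / num_m, max=1.0)
    u_smmask = ((torch.rand_like(ref_iou) * u_mmask) > (1.0 - r_m)).float()

    r_l = torch.clamp(u_ratio_l * num_h / num_l, max=1.0)
    u_slmask = ((torch.rand_like(ref_iou) * u_lmask) > (1.0 - r_l)).float()

    weights = u_hmask + u_smmask + u_slmask
    loss = F.smooth_l1_loss(pred_iou, ref_iou, reduction='none')
    return (loss * weights).sum() / weights.sum()


print(pem_temporal_iou_loss(torch.rand(100), torch.rand(100)))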
def predict(self, batch_inputs, batch_data_samples, **kwargs):
"""Define the computation performed at every call when testing."""
device = self.fc1.weight.device
bsp_feature = torch.cat([
sample.gt_instances['bsp_feature'] for sample in batch_data_samples
]).to(device)
pem_output = self._forward(bsp_feature).view(-1).cpu().numpy()
pem_output = pem_output.reshape(-1, 1)
gt_instances = [sample.gt_instances for sample in batch_data_samples]
tmin = self._parse(gt_instances, 'tmin')
tmax = self._parse(gt_instances, 'tmax')
tmin_score = self._parse(gt_instances, 'tmin_score')
tmax_score = self._parse(gt_instances, 'tmax_score')
score = np.array(pem_output * tmin_score * tmax_score).reshape(-1, 1)
result = np.concatenate(
(tmin, tmax, tmin_score, tmax_score, pem_output, score), axis=1)
result = result.reshape(-1, 6)
video_info = batch_data_samples[0].metainfo
proposal_list = post_processing(result, video_info,
self.soft_nms_alpha,
self.soft_nms_low_threshold,
self.soft_nms_high_threshold,
self.post_process_top_k,
self.feature_extraction_interval)
output = [
dict(
video_name=video_info['video_name'],
proposal_list=proposal_list)
]
return output | Define the computation performed at every call when testing. | predict | python | open-mmlab/mmaction2 | mmaction/models/localizers/bsn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py | Apache-2.0 |
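``predict`` above fuses the PEM confidence with the two boundary scores (``pem_output * tmin_score * tmax_score``) and hands the resulting ``(tmin, tmax, ..., score)`` array to a Soft-NMS based ``post_processing`` step. As a rough illustration of that last step only, here is a generic temporal Soft-NMS sketch with Gaussian score decay; it is not the mmaction2 implementation and its parameters are illustrative.

import numpy as np


def temporal_iou(t0, t1, s0, s1):
    """IoU between segment (t0, t1) and an array of segments (s0, s1)."""
    inter = np.maximum(0.0, np.minimum(t1, s1) - np.maximum(t0, s0))
    union = (t1 - t0) + (s1 - s0) - inter
    return inter / np.maximum(union, 1e-8)


def soft_nms(proposals, alpha=0.4, low_thr=0.5, top_k=100):
    """Generic temporal Soft-NMS (sketch). `proposals` is (N, 3): (tmin, tmax, score)."""
    props = proposals.copy()
    kept = []
    while len(kept) < top_k and len(props) > 0:
        best = props[:, 2].argmax()
        tmin, tmax, _ = props[best]
        kept.append(props[best])
        props = np.delete(props, best, axis=0)
        if len(props) == 0:
            break
        ious = temporal_iou(tmin, tmax, props[:, 0], props[:, 1])
        decay = np.where(ious > low_thr, np.exp(-ious ** 2 / alpha), 1.0)
        props[:, 2] *= decay
    return np.stack(kept) if kept else np.empty((0, 3))


demo = np.array([[0.10, 0.40, 0.9], [0.12, 0.42, 0.8], [0.60, 0.90, 0.7]])
print(soft_nms(demo))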
def forward(self, inputs, data_samples, mode, **kwargs):
"""The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
        Note that this method does not handle back propagation or optimizer
        updating, which are done in :meth:`train_step`.
Args:
            inputs (Tensor): The input tensor with shape
                (N, C, ...) in general.
            data_samples (List[:obj:`ActionDataSample`], optional): The
                annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
"""
inputs = torch.stack(inputs)
if mode == 'tensor':
return self._forward(inputs, **kwargs)
if mode == 'predict':
return self.predict(inputs, data_samples, **kwargs)
elif mode == 'loss':
return self.loss(inputs, data_samples, **kwargs)
else:
raise RuntimeError(f'Invalid mode "{mode}". '
'Only supports loss, predict and tensor mode') | The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
        Note that this method does not handle back propagation or optimizer
        updating, which are done in :meth:`train_step`.
Args:
            inputs (Tensor): The input tensor with shape
                (N, C, ...) in general.
            data_samples (List[:obj:`ActionDataSample`], optional): The
                annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
| forward | python | open-mmlab/mmaction2 | mmaction/models/localizers/bsn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py | Apache-2.0 |