code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def forward(self, x: Tensor) -> Tensor:
"""Forward call for LGTE.
Args:
x (torch.Tensor): The input tensor with shape (B, C, L)
"""
x = x.permute(2, 0, 1)
mask = self.mask.repeat(x.size(1), 1, 1, 1)
L = x.shape[0]
x = self.atten(x, attn_mask=mask.reshape(-1, L, L))
x = self.norm1(x)
x = self.ffn(x)
x = self.norm2(x)
x = x.permute(1, 2, 0)
    return x
| forward | python | open-mmlab/mmaction2 | mmaction/models/localizers/tcanet.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py | Apache-2.0 |
def StartEndRegressor(sample_num: int, feat_dim: int) -> nn.Module:
"""Start and End Regressor in the Temporal Boundary Regressor.
Args:
sample_num (int): number of samples for the start & end.
feat_dim (int): feature dimension.
Returns:
A pytorch module that works as the start and end regressor. The input
of the module should have a shape of (B, feat_dim * 2, sample_num).
"""
hidden_dim = 128
regressor = nn.Sequential(
nn.Conv1d(
feat_dim * 2,
hidden_dim * 2,
kernel_size=3,
padding=1,
groups=8,
stride=2), nn.ReLU(inplace=True),
nn.Conv1d(
hidden_dim * 2,
hidden_dim * 2,
kernel_size=3,
padding=1,
groups=8,
stride=2), nn.ReLU(inplace=True),
nn.Conv1d(hidden_dim * 2, 2, kernel_size=sample_num // 4, groups=2),
nn.Flatten())
    return regressor
| StartEndRegressor | python | open-mmlab/mmaction2 | mmaction/models/localizers/tcanet.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py | Apache-2.0 |
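A quick shape check makes the factory above easier to follow. The snippet below is a sketch only: it assumes mmaction2 is installed so the factory can be imported from the path in this row, and `sample_num=32`, `feat_dim=512` are illustrative values rather than defaults taken from the source.

```python
import torch

from mmaction.models.localizers.tcanet import StartEndRegressor  # assumed import path

sample_num, feat_dim = 32, 512               # illustrative values, not library defaults
regressor = StartEndRegressor(sample_num, feat_dim)

# The docstring above asks for input of shape (B, feat_dim * 2, sample_num).
x = torch.randn(2, feat_dim * 2, sample_num)
print(regressor(x).shape)  # torch.Size([2, 2]): two boundary regression values per sample
```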
def CenterWidthRegressor(temporal_len: int, feat_dim: int) -> nn.Module:
"""Center Width in the Temporal Boundary Regressor.
Args:
temporal_len (int): temporal dimension of the inputs.
feat_dim (int): feature dimension.
Returns:
        A pytorch module that works as the center-width regressor. The input
        of the module should have a shape of (B, feat_dim, temporal_len).
"""
hidden_dim = 512
regressor = nn.Sequential(
nn.Conv1d(
feat_dim, hidden_dim, kernel_size=3, padding=1, groups=4,
stride=2), nn.ReLU(inplace=True),
nn.Conv1d(
hidden_dim,
hidden_dim,
kernel_size=3,
padding=1,
groups=4,
stride=2), nn.ReLU(inplace=True),
nn.Conv1d(
hidden_dim, hidden_dim, kernel_size=temporal_len // 4, groups=4),
nn.ReLU(inplace=True), nn.Conv1d(hidden_dim, 3, kernel_size=1))
    return regressor
| CenterWidthRegressor | python | open-mmlab/mmaction2 | mmaction/models/localizers/tcanet.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py | Apache-2.0 |
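The same kind of check applies to `CenterWidthRegressor`: two stride-2 convolutions plus the `temporal_len // 4` kernel collapse the temporal axis to length 1, leaving three regression channels per sample. The values below are again illustrative assumptions, not source defaults.

```python
import torch

from mmaction.models.localizers.tcanet import CenterWidthRegressor  # assumed import path

temporal_len, feat_dim = 32, 512             # illustrative values
regressor = CenterWidthRegressor(temporal_len, feat_dim)

x = torch.randn(2, feat_dim, temporal_len)   # (B, feat_dim, temporal_len)
print(regressor(x).shape)  # torch.Size([2, 3, 1]): three regression channels per sample
```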
def forward(self, inputs, data_samples, mode, **kwargs):
"""The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
    Note that this method does not handle back propagation or optimizer
    updating, which are done in the :meth:`train_step`.
Args:
inputs (Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (List[:obj:`ActionDataSample`], optional): The
            annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
"""
    if not isinstance(inputs, Tensor):
inputs = torch.stack(inputs)
if mode == 'tensor':
return self._forward(inputs, **kwargs)
if mode == 'predict':
return self.predict(inputs, data_samples, **kwargs)
elif mode == 'loss':
return self.loss(inputs, data_samples, **kwargs)
else:
raise RuntimeError(f'Invalid mode "{mode}". '
                           'Only supports loss, predict and tensor mode')
| forward | python | open-mmlab/mmaction2 | mmaction/models/localizers/tcanet.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py | Apache-2.0 |
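The three-way `mode` argument above follows the usual MMEngine-style entry point. The toy module below is not the mmaction2 class, just a minimal sketch of the same dispatch contract; every name in it is invented for illustration.

```python
import torch
from torch import Tensor, nn


class ToyRecognizer(nn.Module):
    """Minimal illustration of the tensor / predict / loss dispatch."""

    def __init__(self, num_classes: int = 4):
        super().__init__()
        self.fc = nn.Linear(8, num_classes)

    def forward(self, inputs: Tensor, data_samples=None, mode: str = 'tensor'):
        feats = self.fc(inputs)
        if mode == 'tensor':
            return feats                              # raw network output
        if mode == 'predict':
            return feats.argmax(dim=-1).tolist()      # post-processed predictions
        if mode == 'loss':
            labels = torch.as_tensor(data_samples)
            return dict(loss_cls=nn.functional.cross_entropy(feats, labels))
        raise RuntimeError(f'Invalid mode "{mode}". '
                           'Only supports loss, predict and tensor mode')


model = ToyRecognizer()
x = torch.randn(2, 8)
print(model(x, mode='tensor').shape)   # torch.Size([2, 4])
print(model(x, [0, 3], mode='loss'))   # {'loss_cls': tensor(...)}
```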
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = self.x_1d_b_f(x)
for layer in self.lgtes:
x = layer(x)
    return x
| _forward | python | open-mmlab/mmaction2 | mmaction/models/localizers/tcanet.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py | Apache-2.0 |
def __init__(self, pre_nms_thresh, pre_nms_top_n, nms_thresh,
fpn_post_nms_top_n, min_size, num_classes, is_first_stage):
"""
Arguments:
pre_nms_thresh (float)
pre_nms_top_n (int)
nms_thresh (float)
fpn_post_nms_top_n (int)
min_size (int)
num_classes (int)
box_coder (BoxCoder)
"""
super(FCOSPostProcessor, self).__init__()
self.pre_nms_thresh = pre_nms_thresh
self.pre_nms_top_n = pre_nms_top_n
self.nms_thresh = nms_thresh
self.fpn_post_nms_top_n = fpn_post_nms_top_n
self.min_size = min_size
self.num_classes = num_classes
self.innerness_threshold = 0.15
self.downsample_scale = 32
    self.is_first_stage = is_first_stage
| __init__ | python | open-mmlab/mmaction2 | mmaction/models/localizers/drn/drn_utils/inference.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/drn/drn_utils/inference.py | Apache-2.0 |
def forward_for_single_feature_map(self, locations, box_cls,
box_regression, level, iou_scores):
"""
Arguments:
anchors: list[BoxList]
box_cls: tensor of size N, A * C, H, W
box_regression: tensor of size N, A * 4, H, W
"""
N, C, T = box_cls.shape
# put in the same format as locations
box_cls = box_cls.permute(0, 2, 1).contiguous().sigmoid()
iou_scores = iou_scores.permute(0, 2, 1).contiguous().sigmoid()
box_regression = box_regression.permute(0, 2, 1)
# centerness = centerness.permute(0, 2, 1)
# centerness = centerness.reshape(N, -1).sigmoid()
# inner = inner.squeeze().sigmoid()
candidate_inds = (box_cls > self.pre_nms_thresh)
pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)
# multiply the classification scores with centerness scores
# box_cls = box_cls * centerness[:, :, None]
# box_cls = box_cls + centerness[:, :, None]
if not self.is_first_stage:
box_cls = box_cls * iou_scores
results = []
for i in range(N):
# per_centerness = centerness[i]
per_box_cls = box_cls[i]
per_candidate_inds = candidate_inds[i]
per_box_cls = per_box_cls[per_candidate_inds]
per_candidate_nonzeros = per_candidate_inds.nonzero()
per_box_loc = per_candidate_nonzeros[:, 0]
per_class = per_candidate_nonzeros[:, 1] + 1
per_box_regression = box_regression[i]
per_box_regression = per_box_regression[per_box_loc]
per_locations = locations[per_box_loc]
# per_centerness = per_centerness[per_box_loc]
per_pre_nms_top_n = pre_nms_top_n[i]
if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
per_box_cls, top_k_indices = \
per_box_cls.topk(per_pre_nms_top_n, sorted=False)
per_class = per_class[top_k_indices]
per_box_regression = per_box_regression[top_k_indices]
per_locations = per_locations[top_k_indices]
# per_centerness = per_centerness[top_k_indices]
detections = torch.stack([
per_locations - per_box_regression[:, 0],
per_locations + per_box_regression[:, 1],
],
dim=1) / self.downsample_scale
detections[:, 0].clamp_(min=0, max=1)
detections[:, 1].clamp_(min=0, max=1)
# remove small boxes
p_start, p_end = detections.unbind(dim=1)
duration = p_end - p_start
keep = (duration >= self.min_size).nonzero().squeeze(1)
detections = detections[keep]
temp_dict = {}
temp_dict['detections'] = detections
temp_dict['labels'] = per_class
temp_dict['scores'] = torch.sqrt(per_box_cls)
temp_dict['level'] = [level]
# temp_dict['centerness'] = per_centerness
temp_dict['locations'] = per_locations / 32
results.append(temp_dict)
    return results
| forward_for_single_feature_map | python | open-mmlab/mmaction2 | mmaction/models/localizers/drn/drn_utils/inference.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/drn/drn_utils/inference.py | Apache-2.0 |
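Most of the per-video loop above is confidence thresholding followed by an optional top-k cut. A self-contained toy of that selection step, with made-up tensor sizes and thresholds:

```python
import torch

torch.manual_seed(0)
pre_nms_thresh, pre_nms_top_n = 0.6, 5       # invented thresholds

box_cls = torch.rand(20, 3)                  # (T positions, C classes), already sigmoided
candidate_inds = box_cls > pre_nms_thresh    # confident (location, class) pairs
per_box_cls = box_cls[candidate_inds]
loc_idx, cls_idx = candidate_inds.nonzero(as_tuple=True)

if per_box_cls.numel() > pre_nms_top_n:      # keep only the top-k most confident pairs
    per_box_cls, keep = per_box_cls.topk(pre_nms_top_n, sorted=False)
    loc_idx, cls_idx = loc_idx[keep], cls_idx[keep]

print(per_box_cls.shape, loc_idx.tolist(), cls_idx.tolist())
```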
def forward(self, locations, box_cls, box_regression, iou_scores):
"""
Arguments:
anchors: list[list[BoxList]]
box_cls: list[tensor]
box_regression: list[tensor]
image_sizes: list[(h, w)]
Returns:
boxlists (list[BoxList]): the post-processed anchors, after
applying box decoding and NMS
"""
sampled_boxes = []
for i, (l, o, b, iou_s) in enumerate(
zip(locations, box_cls, box_regression, iou_scores)):
sampled_boxes.append(
self.forward_for_single_feature_map(l, o, b, i, iou_s))
boxlists = list(zip(*sampled_boxes))
# boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]
boxlists = self.select_over_all_levels(boxlists)
    return boxlists
| forward | python | open-mmlab/mmaction2 | mmaction/models/localizers/drn/drn_utils/inference.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/drn/drn_utils/inference.py | Apache-2.0 |
def generate_candidate_proposals(video_list,
video_infos,
tem_results_dir,
temporal_scale,
peak_threshold,
tem_results_ext='.csv',
result_dict=None):
"""Generate Candidate Proposals with given temporal evaluation results.
Each proposal file will contain:
'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'.
Args:
video_list (list[int]): List of video indexes to generate proposals.
video_infos (list[dict]): List of video_info dict that contains
'video_name', 'duration_frame', 'duration_second',
'feature_frame', and 'annotations'.
tem_results_dir (str): Directory to load temporal evaluation
results.
temporal_scale (int): The number (scale) on temporal axis.
peak_threshold (float): The threshold for proposal generation.
tem_results_ext (str): File extension for temporal evaluation
model output. Default: '.csv'.
result_dict (dict | None): The dict to save the results. Default: None.
Returns:
dict: A dict contains video_name as keys and proposal list as value.
If result_dict is not None, save the results to it.
"""
if tem_results_ext != '.csv':
raise NotImplementedError('Only support csv format now.')
tscale = temporal_scale
tgap = 1. / tscale
proposal_dict = {}
for video_index in video_list:
video_name = video_infos[video_index]['video_name']
tem_path = osp.join(tem_results_dir, video_name + tem_results_ext)
tem_results = np.loadtxt(
tem_path, dtype=np.float32, delimiter=',', skiprows=1)
start_scores = tem_results[:, 1]
end_scores = tem_results[:, 2]
max_start = max(start_scores)
max_end = max(end_scores)
start_bins = np.zeros(len(start_scores))
start_bins[[0, -1]] = 1
end_bins = np.zeros(len(end_scores))
end_bins[[0, -1]] = 1
for idx in range(1, tscale - 1):
if start_scores[idx] > start_scores[
idx + 1] and start_scores[idx] > start_scores[idx - 1]:
start_bins[idx] = 1
elif start_scores[idx] > (peak_threshold * max_start):
start_bins[idx] = 1
if end_scores[idx] > end_scores[
idx + 1] and end_scores[idx] > end_scores[idx - 1]:
end_bins[idx] = 1
elif end_scores[idx] > (peak_threshold * max_end):
end_bins[idx] = 1
tmin_list = []
tmin_score_list = []
tmax_list = []
tmax_score_list = []
for idx in range(tscale):
if start_bins[idx] == 1:
tmin_list.append(tgap / 2 + tgap * idx)
tmin_score_list.append(start_scores[idx])
if end_bins[idx] == 1:
tmax_list.append(tgap / 2 + tgap * idx)
tmax_score_list.append(end_scores[idx])
new_props = []
for tmax, tmax_score in zip(tmax_list, tmax_score_list):
for tmin, tmin_score in zip(tmin_list, tmin_score_list):
if tmin >= tmax:
break
new_props.append([tmin, tmax, tmin_score, tmax_score])
new_props = np.stack(new_props)
score = (new_props[:, 2] * new_props[:, 3]).reshape(-1, 1)
new_props = np.concatenate((new_props, score), axis=1)
new_props = new_props[new_props[:, -1].argsort()[::-1]]
video_info = video_infos[video_index]
video_frame = video_info['duration_frame']
video_second = video_info['duration_second']
feature_frame = video_info['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second
gt_tmins = []
gt_tmaxs = []
for annotations in video_info['annotations']:
gt_tmins.append(annotations['segment'][0] / corrected_second)
gt_tmaxs.append(annotations['segment'][1] / corrected_second)
new_iou_list = []
new_ioa_list = []
for new_prop in new_props:
new_iou = max(
temporal_iou(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs))
new_ioa = max(
temporal_iop(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs))
new_iou_list.append(new_iou)
new_ioa_list.append(new_ioa)
new_iou_list = np.array(new_iou_list).reshape(-1, 1)
new_ioa_list = np.array(new_ioa_list).reshape(-1, 1)
new_props = np.concatenate((new_props, new_iou_list), axis=1)
new_props = np.concatenate((new_props, new_ioa_list), axis=1)
proposal_dict[video_name] = new_props
if result_dict is not None:
result_dict[video_name] = new_props
    return proposal_dict
| generate_candidate_proposals | python | open-mmlab/mmaction2 | mmaction/models/localizers/utils/bsn_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/bsn_utils.py | Apache-2.0 |
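The boundary selection in the middle of this function keeps a temporal position if it is a local maximum of the start/end curve or if it exceeds `peak_threshold` times the curve's maximum. A small NumPy sketch of that rule on invented scores:

```python
import numpy as np

scores = np.array([0.1, 0.4, 0.2, 0.15, 0.9, 0.3, 0.25], dtype=np.float32)
peak_threshold = 0.5

bins = np.zeros(len(scores))
bins[[0, -1]] = 1                            # endpoints are always kept, as above
for idx in range(1, len(scores) - 1):
    is_local_peak = scores[idx] > scores[idx - 1] and scores[idx] > scores[idx + 1]
    if is_local_peak or scores[idx] > peak_threshold * scores.max():
        bins[idx] = 1

print(bins)  # [1. 1. 0. 0. 1. 0. 1.] -> indices 1 and 4 are peaks, 0 and 6 are endpoints
```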
def generate_bsp_feature(video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
top_k=1000,
bsp_boundary_ratio=0.2,
num_sample_start=8,
num_sample_end=8,
num_sample_action=16,
num_sample_interp=3,
tem_results_ext='.csv',
pgm_proposal_ext='.csv',
result_dict=None):
"""Generate Boundary-Sensitive Proposal Feature with given proposals.
Args:
video_list (list[int]): List of video indexes to generate bsp_feature.
video_infos (list[dict]): List of video_info dict that contains
'video_name'.
tem_results_dir (str): Directory to load temporal evaluation
results.
pgm_proposals_dir (str): Directory to load proposals.
top_k (int): Number of proposals to be considered. Default: 1000
bsp_boundary_ratio (float): Ratio for proposal boundary
(start/end). Default: 0.2.
num_sample_start (int): Num of samples for actionness in
start region. Default: 8.
num_sample_end (int): Num of samples for actionness in end region.
Default: 8.
num_sample_action (int): Num of samples for actionness in center
region. Default: 16.
num_sample_interp (int): Num of samples for interpolation for
each sample point. Default: 3.
tem_results_ext (str): File extension for temporal evaluation
model output. Default: '.csv'.
pgm_proposal_ext (str): File extension for proposals. Default: '.csv'.
result_dict (dict | None): The dict to save the results. Default: None.
Returns:
bsp_feature_dict (dict): A dict contains video_name as keys and
bsp_feature as value. If result_dict is not None, save the
results to it.
"""
if tem_results_ext != '.csv' or pgm_proposal_ext != '.csv':
raise NotImplementedError('Only support csv format now.')
bsp_feature_dict = {}
for video_index in video_list:
video_name = video_infos[video_index]['video_name']
# Load temporal evaluation results
tem_path = osp.join(tem_results_dir, video_name + tem_results_ext)
tem_results = np.loadtxt(
tem_path, dtype=np.float32, delimiter=',', skiprows=1)
score_action = tem_results[:, 0]
seg_tmins = tem_results[:, 3]
seg_tmaxs = tem_results[:, 4]
video_scale = len(tem_results)
video_gap = seg_tmaxs[0] - seg_tmins[0]
video_extend = int(video_scale / 4 + 10)
# Load proposals results
proposal_path = osp.join(pgm_proposals_dir,
video_name + pgm_proposal_ext)
pgm_proposals = np.loadtxt(
proposal_path, dtype=np.float32, delimiter=',', skiprows=1)
pgm_proposals = pgm_proposals[:top_k]
# Generate temporal sample points
boundary_zeros = np.zeros([video_extend])
score_action = np.concatenate(
(boundary_zeros, score_action, boundary_zeros))
begin_tp = []
middle_tp = []
end_tp = []
for i in range(video_extend):
begin_tp.append(-video_gap / 2 -
(video_extend - 1 - i) * video_gap)
end_tp.append(video_gap / 2 + seg_tmaxs[-1] + i * video_gap)
for i in range(video_scale):
middle_tp.append(video_gap / 2 + i * video_gap)
t_points = begin_tp + middle_tp + end_tp
bsp_feature = []
for pgm_proposal in pgm_proposals:
tmin = pgm_proposal[0]
tmax = pgm_proposal[1]
tlen = tmax - tmin
# Temporal range for start
tmin_0 = tmin - tlen * bsp_boundary_ratio
tmin_1 = tmin + tlen * bsp_boundary_ratio
# Temporal range for end
tmax_0 = tmax - tlen * bsp_boundary_ratio
tmax_1 = tmax + tlen * bsp_boundary_ratio
# Generate features at start boundary
tlen_start = (tmin_1 - tmin_0) / (num_sample_start - 1)
tlen_start_sample = tlen_start / num_sample_interp
t_new = [
tmin_0 - tlen_start / 2 + tlen_start_sample * i
for i in range(num_sample_start * num_sample_interp + 1)
]
y_new_start_action = np.interp(t_new, t_points, score_action)
y_new_start = [
np.mean(y_new_start_action[i * num_sample_interp:(i + 1) *
num_sample_interp + 1])
for i in range(num_sample_start)
]
# Generate features at end boundary
tlen_end = (tmax_1 - tmax_0) / (num_sample_end - 1)
tlen_end_sample = tlen_end / num_sample_interp
t_new = [
tmax_0 - tlen_end / 2 + tlen_end_sample * i
for i in range(num_sample_end * num_sample_interp + 1)
]
y_new_end_action = np.interp(t_new, t_points, score_action)
y_new_end = [
np.mean(y_new_end_action[i * num_sample_interp:(i + 1) *
num_sample_interp + 1])
for i in range(num_sample_end)
]
# Generate features for action
tlen_action = (tmax - tmin) / (num_sample_action - 1)
tlen_action_sample = tlen_action / num_sample_interp
t_new = [
tmin - tlen_action / 2 + tlen_action_sample * i
for i in range(num_sample_action * num_sample_interp + 1)
]
y_new_action = np.interp(t_new, t_points, score_action)
y_new_action = [
np.mean(y_new_action[i * num_sample_interp:(i + 1) *
num_sample_interp + 1])
for i in range(num_sample_action)
]
feature = np.concatenate([y_new_action, y_new_start, y_new_end])
bsp_feature.append(feature)
bsp_feature = np.array(bsp_feature)
bsp_feature_dict[video_name] = bsp_feature
if result_dict is not None:
result_dict[video_name] = bsp_feature
    return bsp_feature_dict
| generate_bsp_feature | python | open-mmlab/mmaction2 | mmaction/models/localizers/utils/bsn_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/bsn_utils.py | Apache-2.0 |
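The feature construction above boils down to repeated `np.interp` calls: sample the actionness curve on a dense grid over a region, then average every `num_sample_interp` consecutive values into one feature bin. A stripped-down sketch of that pooling step with toy numbers:

```python
import numpy as np

np.random.seed(0)
# Toy actionness curve defined at the centres of 10 temporal bins.
t_points = np.arange(10) * 0.1 + 0.05
score_action = np.random.rand(10)

num_sample, num_sample_interp = 4, 3
region_start, region_end = 0.2, 0.6
step = (region_end - region_start) / (num_sample - 1)
sub_step = step / num_sample_interp

# Dense grid, then one averaged value per coarse sample -- mirrors the loops above.
t_new = [region_start - step / 2 + sub_step * i
         for i in range(num_sample * num_sample_interp + 1)]
dense = np.interp(t_new, t_points, score_action)
feature = [dense[i * num_sample_interp:(i + 1) * num_sample_interp + 1].mean()
           for i in range(num_sample)]
print(np.round(feature, 3))   # 4 pooled actionness values for this region
```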
def temporal_iou(proposal_min, proposal_max, gt_min, gt_max):
"""Compute IoU score between a groundtruth bbox and the proposals.
Args:
proposal_min (list[float]): List of temporal anchor min.
proposal_max (list[float]): List of temporal anchor max.
gt_min (float): Groundtruth temporal box min.
gt_max (float): Groundtruth temporal box max.
Returns:
list[float]: List of iou scores.
"""
len_anchors = proposal_max - proposal_min
int_tmin = np.maximum(proposal_min, gt_min)
int_tmax = np.minimum(proposal_max, gt_max)
inter_len = np.maximum(int_tmax - int_tmin, 0.)
union_len = len_anchors - inter_len + gt_max - gt_min
jaccard = np.divide(inter_len, union_len)
    return jaccard
| temporal_iou | python | open-mmlab/mmaction2 | mmaction/models/localizers/utils/proposal_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py | Apache-2.0 |
def temporal_iop(proposal_min, proposal_max, gt_min, gt_max):
"""Compute IoP score between a groundtruth bbox and the proposals.
Compute the IoP which is defined as the overlap ratio with
groundtruth proportional to the duration of this proposal.
Args:
proposal_min (list[float]): List of temporal anchor min.
proposal_max (list[float]): List of temporal anchor max.
gt_min (float): Groundtruth temporal box min.
gt_max (float): Groundtruth temporal box max.
Returns:
list[float]: List of intersection over anchor scores.
"""
len_anchors = np.array(proposal_max - proposal_min)
int_tmin = np.maximum(proposal_min, gt_min)
int_tmax = np.minimum(proposal_max, gt_max)
inter_len = np.maximum(int_tmax - int_tmin, 0.)
scores = np.divide(inter_len, len_anchors)
    return scores
| temporal_iop | python | open-mmlab/mmaction2 | mmaction/models/localizers/utils/proposal_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py | Apache-2.0 |
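A concrete number makes the difference between the two metrics clear: a proposal [0.2, 0.6] against a ground-truth segment [0.4, 0.8] overlaps by 0.2, so IoU = 0.2 / 0.6 ≈ 0.33 while IoP = 0.2 / 0.4 = 0.5. The check below assumes mmaction2 is installed so both helpers can be imported from the path listed in these rows.

```python
import numpy as np

# Assumed import path, taken from the row metadata above.
from mmaction.models.localizers.utils.proposal_utils import temporal_iop, temporal_iou

prop_min, prop_max = np.array([0.2]), np.array([0.6])
gt_min, gt_max = 0.4, 0.8

print(temporal_iou(prop_min, prop_max, gt_min, gt_max))  # [0.333...] -> inter 0.2 / union 0.6
print(temporal_iop(prop_min, prop_max, gt_min, gt_max))  # [0.5]      -> inter 0.2 / proposal 0.4
```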
def soft_nms(proposals, alpha, low_threshold, high_threshold, top_k):
"""Soft NMS for temporal proposals.
Args:
proposals (np.ndarray): Proposals generated by network.
alpha (float): Alpha value of Gaussian decaying function.
low_threshold (float): Low threshold for soft nms.
high_threshold (float): High threshold for soft nms.
top_k (int): Top k values to be considered.
Returns:
np.ndarray: The updated proposals.
"""
proposals = proposals[proposals[:, -1].argsort()[::-1]]
tstart = list(proposals[:, 0])
tend = list(proposals[:, 1])
tscore = list(proposals[:, -1])
rstart = []
rend = []
rscore = []
while len(tscore) > 0 and len(rscore) <= top_k:
max_index = np.argmax(tscore)
max_width = tend[max_index] - tstart[max_index]
iou_list = temporal_iou(tstart[max_index], tend[max_index],
np.array(tstart), np.array(tend))
iou_exp_list = np.exp(-np.square(iou_list) / alpha)
for idx, _ in enumerate(tscore):
if idx != max_index:
current_iou = iou_list[idx]
if current_iou > low_threshold + (high_threshold -
low_threshold) * max_width:
tscore[idx] = tscore[idx] * iou_exp_list[idx]
rstart.append(tstart[max_index])
rend.append(tend[max_index])
rscore.append(tscore[max_index])
tstart.pop(max_index)
tend.pop(max_index)
tscore.pop(max_index)
rstart = np.array(rstart).reshape(-1, 1)
rend = np.array(rend).reshape(-1, 1)
rscore = np.array(rscore).reshape(-1, 1)
new_proposals = np.concatenate((rstart, rend, rscore), axis=1)
    return new_proposals
| soft_nms | python | open-mmlab/mmaction2 | mmaction/models/localizers/utils/proposal_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py | Apache-2.0 |
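A minimal call to the function above, with invented proposals and thresholds; the columns are (tmin, tmax, score), which is what the slicing in the implementation expects. The import path is assumed from the row metadata.

```python
import numpy as np

from mmaction.models.localizers.utils.proposal_utils import soft_nms  # assumed import

proposals = np.array([
    [0.10, 0.50, 0.90],   # tmin, tmax, score
    [0.12, 0.52, 0.85],   # heavy overlap with the first -> its score gets decayed
    [0.60, 0.90, 0.80],
])
kept = soft_nms(proposals, alpha=0.4, low_threshold=0.0,
                high_threshold=0.9, top_k=10)
print(kept)   # rows in pick order; overlapping proposals are decayed, not dropped
```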
def post_processing(result, video_info, soft_nms_alpha, soft_nms_low_threshold,
soft_nms_high_threshold, post_process_top_k,
feature_extraction_interval):
"""Post process for temporal proposals generation.
Args:
result (np.ndarray): Proposals generated by network.
video_info (dict): Meta data of video. Required keys are
'duration_frame', 'duration_second'.
soft_nms_alpha (float): Alpha value of Gaussian decaying function.
soft_nms_low_threshold (float): Low threshold for soft nms.
soft_nms_high_threshold (float): High threshold for soft nms.
post_process_top_k (int): Top k values to be considered.
feature_extraction_interval (int): Interval used in feature extraction.
Returns:
list[dict]: The updated proposals, e.g.
[{'score': 0.9, 'segment': [0, 1]},
{'score': 0.8, 'segment': [0, 2]},
...].
"""
if len(result) > 1:
result = soft_nms(result, soft_nms_alpha, soft_nms_low_threshold,
soft_nms_high_threshold, post_process_top_k)
result = result[result[:, -1].argsort()[::-1]]
video_duration = float(
video_info['duration_frame'] // feature_extraction_interval *
feature_extraction_interval
) / video_info['duration_frame'] * video_info['duration_second']
proposal_list = []
for j in range(min(post_process_top_k, len(result))):
proposal = {}
proposal['score'] = float(result[j, -1])
proposal['segment'] = [
max(0, result[j, 0]) * video_duration,
min(1, result[j, 1]) * video_duration
]
proposal_list.append(proposal)
    return proposal_list
| post_processing | python | open-mmlab/mmaction2 | mmaction/models/localizers/utils/proposal_utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py | Apache-2.0 |
def forward(self, *args, **kwargs):
"""Defines the computation performed at every call.
Args:
*args: The positional arguments for the corresponding
loss.
**kwargs: The keyword arguments for the corresponding
loss.
Returns:
torch.Tensor: The calculated loss.
"""
ret = self._forward(*args, **kwargs)
if isinstance(ret, dict):
for k in ret:
if 'loss' in k:
ret[k] *= self.loss_weight
else:
ret *= self.loss_weight
    return ret
| forward | python | open-mmlab/mmaction2 | mmaction/models/losses/base.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/base.py | Apache-2.0 |
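This is the usual weighted-loss base-class pattern: subclasses implement `_forward`, and the public `forward` scales whatever comes back (a plain tensor, or every `loss*` entry of a dict) by `loss_weight`. A minimal standalone sketch of the same idea, not the actual mmaction2 base class:

```python
import torch
import torch.nn.functional as F
from torch import nn


class ToyWeightedLoss(nn.Module):
    """Illustrative re-implementation of the weighting pattern above."""

    def __init__(self, loss_weight: float = 0.5):
        super().__init__()
        self.loss_weight = loss_weight

    def _forward(self, pred, target):
        return F.mse_loss(pred, target)

    def forward(self, *args, **kwargs):
        ret = self._forward(*args, **kwargs)
        if isinstance(ret, dict):
            for k in ret:
                if 'loss' in k:
                    ret[k] *= self.loss_weight
        else:
            ret *= self.loss_weight
        return ret


loss = ToyWeightedLoss(loss_weight=0.5)
print(loss(torch.ones(4), torch.zeros(4)))  # tensor(0.5000): MSE of 1.0 scaled by 0.5
```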
def forward(self,
reg_score,
label,
threshold=0.5,
ratio_range=(1.05, 21),
eps=1e-5):
"""Calculate Binary Logistic Regression Loss.
Args:
reg_score (torch.Tensor): Predicted score by model.
label (torch.Tensor): Groundtruth labels.
threshold (float): Threshold for positive instances.
Default: 0.5.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
return binary_logistic_regression_loss(reg_score, label, threshold,
                                           ratio_range, eps)
| forward | python | open-mmlab/mmaction2 | mmaction/models/losses/binary_logistic_regression_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/binary_logistic_regression_loss.py | Apache-2.0 |
def tem_loss(pred_start, pred_end, gt_start, gt_end):
"""Calculate Temporal Evaluation Module Loss.
    This function calculates the binary_logistic_regression_loss for start
and end respectively and returns the sum of their losses.
Args:
pred_start (torch.Tensor): Predicted start score by BMN model.
pred_end (torch.Tensor): Predicted end score by BMN model.
gt_start (torch.Tensor): Groundtruth confidence score for start.
gt_end (torch.Tensor): Groundtruth confidence score for end.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
loss_start = binary_logistic_regression_loss(pred_start, gt_start)
loss_end = binary_logistic_regression_loss(pred_end, gt_end)
loss = loss_start + loss_end
    return loss
| tem_loss | python | open-mmlab/mmaction2 | mmaction/models/losses/bmn_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py | Apache-2.0 |
def pem_reg_loss(pred_score,
gt_iou_map,
mask,
high_temporal_iou_threshold=0.7,
low_temporal_iou_threshold=0.3):
"""Calculate Proposal Evaluation Module Regression Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
high_temporal_iou_threshold (float): Higher threshold of
temporal_iou. Default: 0.7.
        low_temporal_iou_threshold (float): Lower threshold of
            temporal_iou. Default: 0.3.
Returns:
torch.Tensor: Proposal evaluation regression loss.
"""
u_hmask = (gt_iou_map > high_temporal_iou_threshold).float()
u_mmask = ((gt_iou_map <= high_temporal_iou_threshold) &
(gt_iou_map > low_temporal_iou_threshold)).float()
u_lmask = ((gt_iou_map <= low_temporal_iou_threshold) &
(gt_iou_map > 0.)).float()
u_lmask = u_lmask * mask
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = num_h / num_m
u_smmask = torch.rand_like(gt_iou_map)
u_smmask = u_mmask * u_smmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = num_h / num_l
u_slmask = torch.rand_like(gt_iou_map)
u_slmask = u_lmask * u_slmask
u_slmask = (u_slmask > (1. - r_l)).float()
weights = u_hmask + u_smmask + u_slmask
loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
loss = 0.5 * torch.sum(
loss * torch.ones_like(weights)) / torch.sum(weights)
    return loss
| pem_reg_loss | python | open-mmlab/mmaction2 | mmaction/models/losses/bmn_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py | Apache-2.0 |
def pem_cls_loss(pred_score,
gt_iou_map,
mask,
threshold=0.9,
ratio_range=(1.05, 21),
eps=1e-5):
"""Calculate Proposal Evaluation Module Classification Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
threshold (float): Threshold of temporal_iou for positive
instances. Default: 0.9.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5
Returns:
torch.Tensor: Proposal evaluation classification loss.
"""
pmask = (gt_iou_map > threshold).float()
nmask = (gt_iou_map <= threshold).float()
nmask = nmask * mask
num_positive = max(torch.sum(pmask), 1)
num_entries = num_positive + torch.sum(nmask)
ratio = num_entries / num_positive
ratio = torch.clamp(ratio, ratio_range[0], ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss_pos = coef_1 * torch.log(pred_score + eps) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + eps) * nmask
loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
    return loss
| pem_cls_loss | python | open-mmlab/mmaction2 | mmaction/models/losses/bmn_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py | Apache-2.0 |
def forward(self,
pred_bm,
pred_start,
pred_end,
gt_iou_map,
gt_start,
gt_end,
bm_mask,
weight_tem=1.0,
weight_pem_reg=10.0,
weight_pem_cls=1.0):
"""Calculate Boundary Matching Network Loss.
Args:
pred_bm (torch.Tensor): Predicted confidence score for boundary
matching map.
pred_start (torch.Tensor): Predicted confidence score for start.
pred_end (torch.Tensor): Predicted confidence score for end.
gt_iou_map (torch.Tensor): Groundtruth score for boundary matching
map.
gt_start (torch.Tensor): Groundtruth temporal_iou score for start.
gt_end (torch.Tensor): Groundtruth temporal_iou score for end.
bm_mask (torch.Tensor): Boundary-Matching mask.
weight_tem (float): Weight for tem loss. Default: 1.0.
weight_pem_reg (float): Weight for pem regression loss.
Default: 10.0.
weight_pem_cls (float): Weight for pem classification loss.
Default: 1.0.
Returns:
tuple([torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]):
(loss, tem_loss, pem_reg_loss, pem_cls_loss). Loss is the bmn
loss, tem_loss is the temporal evaluation loss, pem_reg_loss is
the proposal evaluation regression loss, pem_cls_loss is the
proposal evaluation classification loss.
"""
pred_bm_reg = pred_bm[:, 0].contiguous()
pred_bm_cls = pred_bm[:, 1].contiguous()
gt_iou_map = gt_iou_map * bm_mask
pem_reg_loss = self.pem_reg_loss(pred_bm_reg, gt_iou_map, bm_mask)
pem_cls_loss = self.pem_cls_loss(pred_bm_cls, gt_iou_map, bm_mask)
tem_loss = self.tem_loss(pred_start, pred_end, gt_start, gt_end)
loss = (
weight_tem * tem_loss + weight_pem_reg * pem_reg_loss +
weight_pem_cls * pem_cls_loss)
    return loss, tem_loss, pem_reg_loss, pem_cls_loss
| forward | python | open-mmlab/mmaction2 | mmaction/models/losses/bmn_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py | Apache-2.0 |
def _forward(self, cls_score: torch.Tensor, label: torch.Tensor,
**kwargs) -> torch.Tensor:
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
CrossEntropy loss.
Returns:
torch.Tensor: The returned CrossEntropy loss.
"""
if cls_score.size() == label.size():
# calculate loss for soft label
assert cls_score.dim() == 2, 'Only support 2-dim soft label'
assert len(kwargs) == 0, \
('For now, no extra args are supported for soft label, '
             f'but got {kwargs}')
lsm = F.log_softmax(cls_score, 1)
if self.class_weight is not None:
self.class_weight = self.class_weight.to(cls_score.device)
lsm = lsm * self.class_weight.unsqueeze(0)
loss_cls = -(label * lsm).sum(1)
# default reduction 'mean'
if self.class_weight is not None:
# Use weighted average as pytorch CrossEntropyLoss does.
# For more information, please visit https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html # noqa
loss_cls = loss_cls.sum() / torch.sum(
self.class_weight.unsqueeze(0) * label)
else:
loss_cls = loss_cls.mean()
else:
# calculate loss for hard label
if self.class_weight is not None:
assert 'weight' not in kwargs, \
"The key 'weight' already exists."
kwargs['weight'] = self.class_weight.to(cls_score.device)
loss_cls = F.cross_entropy(cls_score, label, **kwargs)
    return loss_cls
| _forward | python | open-mmlab/mmaction2 | mmaction/models/losses/cross_entropy_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/cross_entropy_loss.py | Apache-2.0 |
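For a one-hot soft label the soft-label branch above reduces to ordinary cross-entropy, which is a convenient sanity check. A small standalone verification with toy logits and no class weights:

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
cls_score = torch.randn(3, 5)                 # toy logits
hard_label = torch.tensor([1, 4, 2])
soft_label = F.one_hot(hard_label, num_classes=5).float()

# Soft-label branch above (no class_weight): mean of -(label * log_softmax).sum(1)
soft_loss = -(soft_label * F.log_softmax(cls_score, 1)).sum(1).mean()
hard_loss = F.cross_entropy(cls_score, hard_label)

print(torch.allclose(soft_loss, hard_loss))   # True
```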
def _forward(self, cls_score: torch.Tensor, label: torch.Tensor,
**kwargs) -> torch.Tensor:
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
bce loss with logits.
Returns:
torch.Tensor: The returned bce loss with logits.
"""
if self.class_weight is not None:
assert 'weight' not in kwargs, "The key 'weight' already exists."
kwargs['weight'] = self.class_weight.to(cls_score.device)
loss_cls = F.binary_cross_entropy_with_logits(cls_score, label,
**kwargs)
return loss_cls | Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
bce loss with logits.
Returns:
torch.Tensor: The returned bce loss with logits.
| _forward | python | open-mmlab/mmaction2 | mmaction/models/losses/cross_entropy_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/cross_entropy_loss.py | Apache-2.0 |
def _forward(self, cls_score: torch.Tensor, label: torch.Tensor,
**kwargs) -> torch.Tensor:
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
bce loss with logits.
Returns:
torch.Tensor: The returned bce loss with logits.
"""
weights = torch.tensor(self.weights).float().to(cls_score.device)
label_one_hot = F.one_hot(label, self.num_classes).float()
weights = weights.unsqueeze(0)
weights = weights.repeat(label_one_hot.shape[0], 1) * label_one_hot
weights = weights.sum(1)
weights = weights.unsqueeze(1)
weights = weights.repeat(1, self.num_classes)
BCELoss = F.binary_cross_entropy_with_logits(
input=cls_score, target=label_one_hot, reduction='none')
modulator = 1.0
if self.gamma:
modulator = torch.exp(-self.gamma * label_one_hot * cls_score -
self.gamma *
torch.log(1 + torch.exp(-1.0 * cls_score)))
loss = modulator * BCELoss
weighted_loss = weights * loss
focal_loss = torch.sum(weighted_loss)
focal_loss /= torch.sum(label_one_hot)
    return focal_loss
| _forward | python | open-mmlab/mmaction2 | mmaction/models/losses/cross_entropy_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/cross_entropy_loss.py | Apache-2.0 |
def _forward(self, cls_score, label, mask, category_mask):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
mask (torch.Tensor): The mask of tags. 0 indicates that the
category of this tag is missing in the label of the video.
        category_mask (torch.Tensor): The category mask. For each sample,
            it is a tensor with length `len(self.categories)`, denoting
            whether each category is labeled for this video.
Returns:
torch.Tensor: The returned CrossEntropy loss.
"""
if self.loss_type == 'all':
loss_cls = F.binary_cross_entropy_with_logits(
cls_score, label, reduction='none')
if self.with_mask:
w_loss_cls = mask * loss_cls
w_loss_cls = torch.sum(w_loss_cls, dim=1)
if self.reduction == 'mean':
w_loss_cls = w_loss_cls / torch.sum(mask, dim=1)
w_loss_cls = torch.mean(w_loss_cls)
return dict(loss_cls=w_loss_cls)
if self.reduction == 'sum':
loss_cls = torch.sum(loss_cls, dim=-1)
return dict(loss_cls=torch.mean(loss_cls))
if self.loss_type == 'individual':
losses = {}
loss_weights = {}
for name, num, start_idx in zip(self.categories,
self.category_nums,
self.category_startidx):
category_score = cls_score[:, start_idx:start_idx + num]
category_label = label[:, start_idx:start_idx + num]
category_loss = F.binary_cross_entropy_with_logits(
category_score, category_label, reduction='none')
if self.reduction == 'mean':
category_loss = torch.mean(category_loss, dim=1)
elif self.reduction == 'sum':
category_loss = torch.sum(category_loss, dim=1)
idx = self.categories.index(name)
if self.with_mask:
category_mask_i = category_mask[:, idx].reshape(-1)
# there should be at least one sample which contains tags
# in this category
if torch.sum(category_mask_i) < 0.5:
losses[f'{name}_LOSS'] = torch.tensor(
.0, device=get_device())
loss_weights[f'{name}_LOSS'] = .0
continue
category_loss = torch.sum(category_loss * category_mask_i)
category_loss = category_loss / torch.sum(category_mask_i)
else:
category_loss = torch.mean(category_loss)
# We name the loss of each category as 'LOSS', since we only
# want to monitor them, not backward them. We will also provide
# the loss used for backward in the losses dictionary
losses[f'{name}_LOSS'] = category_loss
loss_weights[f'{name}_LOSS'] = self.category_loss_weights[idx]
loss_weight_sum = sum(loss_weights.values())
loss_weights = {
k: v / loss_weight_sum
for k, v in loss_weights.items()
}
loss_cls = sum([losses[k] * loss_weights[k] for k in losses])
losses['loss_cls'] = loss_cls
# We also trace the loss weights
losses.update({
k + '_weight': torch.tensor(v).to(losses[k].device)
for k, v in loss_weights.items()
})
# Note that the loss weights are just for reference.
return losses
else:
raise ValueError("loss_type should be 'all' or 'individual', "
                         f'but got {self.loss_type}')
| _forward | python | open-mmlab/mmaction2 | mmaction/models/losses/hvu_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/hvu_loss.py | Apache-2.0 |
def forward(ctx, pred, labels, is_positive, ohem_ratio, group_size):
"""Calculate OHEM hinge loss.
Args:
pred (torch.Tensor): Predicted completeness score.
labels (torch.Tensor): Groundtruth class label.
is_positive (int): Set to 1 when proposals are positive and
set to -1 when proposals are incomplete.
ohem_ratio (float): Ratio of hard examples.
group_size (int): Number of proposals sampled per video.
Returns:
torch.Tensor: Returned class-wise hinge loss.
"""
num_samples = pred.size(0)
if num_samples != len(labels):
raise ValueError(f'Number of samples should be equal to that '
f'of labels, but got {num_samples} samples and '
f'{len(labels)} labels.')
losses = torch.zeros(num_samples, device=pred.device)
slopes = torch.zeros(num_samples, device=pred.device)
for i in range(num_samples):
losses[i] = max(0, 1 - is_positive * pred[i, labels[i] - 1])
slopes[i] = -is_positive if losses[i] != 0 else 0
losses = losses.view(-1, group_size).contiguous()
sorted_losses, indices = torch.sort(losses, dim=1, descending=True)
keep_length = int(group_size * ohem_ratio)
loss = torch.zeros(1, device=pred.device)
for i in range(losses.size(0)):
loss += sorted_losses[i, :keep_length].sum()
ctx.loss_index = indices[:, :keep_length]
ctx.labels = labels
ctx.slopes = slopes
ctx.shape = pred.size()
ctx.group_size = group_size
ctx.num_groups = losses.size(0)
    return loss
| forward | python | open-mmlab/mmaction2 | mmaction/models/losses/ohem_hinge_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ohem_hinge_loss.py | Apache-2.0 |
def backward(ctx, grad_output):
"""Defines a formula for differentiating the operation with backward
mode automatic differentiation."""
labels = ctx.labels
slopes = ctx.slopes
grad_in = torch.zeros(ctx.shape, device=ctx.slopes.device)
for group in range(ctx.num_groups):
for idx in ctx.loss_index[group]:
loc = idx + group * ctx.group_size
grad_in[loc, labels[loc] - 1] = (
slopes[loc] * grad_output.data[0])
    return torch.autograd.Variable(grad_in), None, None, None, None
| backward | python | open-mmlab/mmaction2 | mmaction/models/losses/ohem_hinge_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ohem_hinge_loss.py | Apache-2.0 |
def forward(self, activity_score, completeness_score, bbox_pred,
proposal_type, labels, bbox_targets, train_cfg):
"""Calculate Boundary Matching Network Loss.
Args:
activity_score (torch.Tensor): Predicted activity score.
completeness_score (torch.Tensor): Predicted completeness score.
bbox_pred (torch.Tensor): Predicted interval center and span
of positive proposals.
proposal_type (torch.Tensor): Type index slices of proposals.
labels (torch.Tensor): Groundtruth class label.
bbox_targets (torch.Tensor): Groundtruth center and span
of positive proposals.
train_cfg (dict): Config for training.
Returns:
dict([torch.Tensor, torch.Tensor, torch.Tensor]):
(loss_activity, loss_completeness, loss_reg).
Loss_activity is the activity loss, loss_completeness is
the class-wise completeness loss,
loss_reg is the class-wise regression loss.
"""
self.sampler = train_cfg.ssn.sampler
self.loss_weight = train_cfg.ssn.loss_weight
losses = dict()
proposal_type = proposal_type.view(-1)
labels = labels.view(-1)
activity_indexer = ((proposal_type == 0) +
(proposal_type == 2)).nonzero().squeeze(1)
completeness_indexer = ((proposal_type == 0) +
(proposal_type == 1)).nonzero().squeeze(1)
total_ratio = (
self.sampler.positive_ratio + self.sampler.background_ratio +
self.sampler.incomplete_ratio)
positive_per_video = int(self.sampler.num_per_video *
(self.sampler.positive_ratio / total_ratio))
background_per_video = int(
self.sampler.num_per_video *
(self.sampler.background_ratio / total_ratio))
incomplete_per_video = (
self.sampler.num_per_video - positive_per_video -
background_per_video)
losses['loss_activity'] = self.activity_loss(activity_score, labels,
activity_indexer)
losses['loss_completeness'] = self.completeness_loss(
completeness_score,
labels,
completeness_indexer,
positive_per_video,
incomplete_per_video,
ohem_ratio=positive_per_video / incomplete_per_video)
losses['loss_completeness'] *= self.loss_weight.comp_loss_weight
if bbox_pred is not None:
regression_indexer = (proposal_type == 0).nonzero().squeeze(1)
bbox_targets = bbox_targets.view(-1, 2)
losses['loss_reg'] = self.classwise_regression_loss(
bbox_pred, labels, bbox_targets, regression_indexer)
losses['loss_reg'] *= self.loss_weight.reg_loss_weight
    return losses
| forward | python | open-mmlab/mmaction2 | mmaction/models/losses/ssn_loss.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ssn_loss.py | Apache-2.0 |
def forward(self, hidden_states: torch.Tensor):
"""forward function.
Args:
hidden_states (torch.Tensor): The input. Shape: [b,t,l,c]
Returns: TODO
"""
b = hidden_states.shape[0]
output = einops.rearrange(hidden_states, 'b t l c -> (b l) t c')
output = self.layernorm_before(output)
output = self.attention(output)
output = einops.rearrange(output[0], '(b l) t c -> b t l c', b=b)
    return hidden_states + self.drop_path(output[0]) * self.scale
| forward | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/beit3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/beit3d.py | Apache-2.0 |
def forward(self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.BoolTensor] = None
) -> torch.Tensor:
"""
Args:
pixel_values (torch.Tensor): The input image patches.
Shape: [B, T, C, H, W].
"""
t = pixel_values.shape[1]
pixel_values = einops.rearrange(pixel_values,
'b t c h w -> (b t) c h w')
embeddings = self.patch_embeddings(pixel_values)
batch_size, seq_len, _ = embeddings.size() # [(b t) l c]
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_tokens
w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1 - w) + mask_tokens * w
if self.prompt_tokens is not None:
prompt_tokens = self.prompt_tokens.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings, prompt_tokens),
dim=1)
else:
embeddings = torch.cat((cls_tokens, embeddings),
dim=1) # [B*T, L, C]
if self.position_embeddings is not None:
embeddings = embeddings + self.position_embeddings
embeddings = einops.rearrange(embeddings, '(b t) l c -> b t l c', t=t)
if self.temporal_position_embeddings is not None:
if t <= self.temporal_position_embeddings.shape[1]:
embeddings = embeddings + \
self.temporal_position_embeddings[:, :t]
else:
tpe = interpolate_temporal_pos_embed(
self.temporal_position_embeddings, t)
embeddings = embeddings + tpe
embeddings = self.dropout(embeddings)
    return embeddings
| forward | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/beit3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/beit3d.py | Apache-2.0 |
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
logger = MMLogger.get_current_instance()
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.'
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info('Converting TensorFlow checkpoint from {}'.format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info('Loading TF weight {} with shape {}'.format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in [
'adam_v',
'adam_m',
'AdamWeightDecayOptimizer',
'AdamWeightDecayOptimizer_1',
'global_step',
] for n in name):
logger.info('Skipping {}'.format('/'.join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
scope_names = re.split(r'_(\d+)', m_name)
else:
scope_names = [m_name]
if scope_names[0] == 'kernel' or scope_names[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif scope_names[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif scope_names[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info('Skipping {}'.format('/'.join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched'
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info('Initialize PyTorch weight {}'.format(name))
pointer.data = torch.from_numpy(array)
return model | Load tf checkpoints in a pytorch model. | load_tf_weights_in_bert | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/modeling_bert.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py | Apache-2.0 |
def get_extended_attention_mask(self, attention_mask: Tensor,
input_shape: Tuple[int], device: device,
is_decoder: bool) -> Tensor:
"""Makes broadcastable attention and causal masks so that future and
masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <=
seq_ids[None, :, None])
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[
1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = (
causal_mask[:, None, :, :] *
attention_mask[:, None, None, :])
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
'Wrong shape for input_ids (shape {}) or attention_mask (shape {})'
.format(input_shape, attention_mask.shape))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask | Makes broadcastable attention and causal masks so that future and
masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
| get_extended_attention_mask | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/modeling_bert.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py | Apache-2.0 |
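The non-decoder branch of this mask expansion can be reproduced standalone. A minimal sketch, assuming a toy 2D padding mask (the causal/decoder branch is omitted):
# Sketch: expand a [batch, seq_len] padding mask to [batch, 1, 1, seq_len]
# and convert it to additive form (-10000.0 on padded positions).
import torch

attention_mask = torch.tensor([[1, 1, 1, 0],     # last token is padding
                               [1, 1, 0, 0]])

extended = attention_mask[:, None, None, :].float()   # broadcastable over heads and query positions
extended = (1.0 - extended) * -10000.0

print(extended.shape)        # torch.Size([2, 1, 1, 4])
print(extended[0, 0, 0])     # tensor([-0., -0., -0., -10000.])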
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
mode='multi_modal',
normalize_attention=True,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = (
output_attentions if output_attentions is not None else
self.config.output_attentions)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else
self.config.output_hidden_states)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
'You cannot specify both input_ids and inputs_embeds at the same time'
)
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = inputs_embeds.device
elif encoder_embeds is not None:
input_shape = encoder_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = encoder_embeds.device
else:
raise ValueError(
'You have to specify either input_ids or inputs_embeds or encoder_embeds'
)
# past_key_values_length
past_key_values_length = (
past_key_values[0][0].shape[2]
if past_key_values is not None else 0)
if attention_mask is None:
attention_mask = torch.ones(
((batch_size, seq_length + past_key_values_length)),
device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, input_shape, device, is_decoder)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
if type(encoder_hidden_states) == list:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[
0].size()
else:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size(
)
encoder_hidden_shape = (encoder_batch_size,
encoder_sequence_length)
if type(encoder_attention_mask) == list:
encoder_extended_attention_mask = [
self.invert_attention_mask(mask)
for mask in encoder_attention_mask
]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(
encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask)
else:
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask,
self.config.num_hidden_layers)
if encoder_embeds is None:
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
else:
embedding_output = encoder_embeds
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
mode=mode,
normalize_attention=normalize_attention,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(
sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
) |
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
| forward | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/modeling_bert.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py | Apache-2.0 |
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size),
labels.view(-1))
next_sentence_loss = loss_fct(
seq_relationship_score.view(-1, 2),
next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss, ) +
output) if total_loss is not None else output
return BertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
| forward | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/modeling_bert.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py | Apache-2.0 |
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=True,
reduction='mean',
mode='multi_modal',
normalize_attention=True,
soft_labels=None,
alpha=0,
return_logits=False,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
mode=mode,
normalize_attention=normalize_attention,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores[:, :-1, :].contiguous()
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :
-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss(reduction=reduction)
lm_loss = loss_fct(
shifted_prediction_scores.view(-1, self.config.vocab_size),
labels.view(-1))
lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
if soft_labels is not None:
loss_distill = -torch.sum(
F.log_softmax(shifted_prediction_scores, dim=1) * soft_labels,
dim=-1)
loss_distill = (loss_distill * (labels != -100)).sum(1)
lm_loss = (1 - alpha) * lm_loss + alpha * loss_distill
if not return_dict:
output = (prediction_scores, ) + outputs[2:]
return ((lm_loss, ) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
) |
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
| forward | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/modeling_bert.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py | Apache-2.0 |
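The shift-by-one alignment between logits and labels used for the causal LM loss above can be isolated as follows. This is a self-contained sketch with random logits, not the model's actual loss path:
# Sketch: left-to-right LM loss with the "shift by one" alignment.
import torch
import torch.nn.functional as F

vocab_size, b, L = 11, 2, 6
prediction_scores = torch.randn(b, L, vocab_size)      # logits for every position
labels = torch.randint(0, vocab_size, (b, L))
labels[:, -1] = -100                                   # e.g. a padded position is ignored

shifted_scores = prediction_scores[:, :-1, :]          # predictions at positions 0..L-2
shifted_labels = labels[:, 1:]                         # targets are the *next* tokens 1..L-1

loss = F.cross_entropy(shifted_scores.reshape(-1, vocab_size),
                       shifted_labels.reshape(-1),
                       ignore_index=-100)
print(loss)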
def tie_aux_decoder_weights(self, module, aux_modules):
"""Tie decoder weights of all `aux_modules` to `module`, (not bias)"""
for m in aux_modules:
m.predictions.decoder.weight = module.predictions.decoder.weight | Tie the decoder weights (not the bias) of all `aux_modules` to `module`. | tie_aux_decoder_weights | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/modeling_bert.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py | Apache-2.0 |
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
mode='multi_modal',
normalize_attention=True,
soft_labels=None,
alpha=0,
return_logits=False,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_embeds=encoder_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
mode=mode,
normalize_attention=normalize_attention,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores
masked_lm_loss = None
masked_lm_loss_aux = 0.0
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size),
labels.view(-1))
if soft_labels is not None:
loss_distill = -torch.sum(
F.log_softmax(prediction_scores, dim=1) * soft_labels, dim=-1)
loss_distill = loss_distill[labels != -100].mean()
masked_lm_loss = (1 -
alpha) * masked_lm_loss + alpha * loss_distill
if not return_dict:
output = (prediction_scores, ) + outputs[2:]
return ((masked_lm_loss, ) +
output) if masked_lm_loss is not None else output
# changed from MaskedLMOutput to MaskedLMOutputWithDistill
return MaskedLMOutputWithDistill(
loss=masked_lm_loss,
loss_aux=masked_lm_loss_aux,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
| forward | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/modeling_bert.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/modeling_bert.py | Apache-2.0 |
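When `soft_labels` is given, the masked-LM loss above is blended with a distillation term. A hedged sketch of that mix follows; the dimensions and the softmax axis (vocabulary axis) are assumptions, and a momentum teacher would normally supply `soft_labels`:
# Sketch: blend the hard-label MLM loss with a soft-label (distillation) term,
# in the spirit of the (1 - alpha) * ce + alpha * distill mix above.
import torch
import torch.nn.functional as F

vocab_size, n = 7, 4
alpha = 0.4
logits = torch.randn(n, vocab_size)
labels = torch.tensor([2, -100, 5, 1])                       # -100 marks unmasked positions
soft_labels = F.softmax(torch.randn(n, vocab_size), dim=-1)  # stand-in for a teacher's probabilities

ce = F.cross_entropy(logits, labels, ignore_index=-100)
distill = -(F.log_softmax(logits, dim=-1) * soft_labels).sum(dim=-1)
distill = distill[labels != -100].mean()

loss = (1 - alpha) * ce + alpha * distill
print(loss)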
def forward(self, x: torch.Tensor):
"""forward.
Args:
x (torch.Tensor): input features.
Shape: [bs, nframes, l, c]. l = 1 + h*w
Returns: features after adapter. The same shape as input.
"""
if x.shape[1] == 1: # for single frame, return itself.
return x
shortcut = x
x = self.linear1(x)
cls = x[:, :, :1, :]
tokens = x[:, :, 1:, :]
tokens = einops.rearrange(
tokens, 'b t (h w) c -> b c t h w', h=self.h).contiguous()
tokens = self.conv(tokens)
tokens = einops.rearrange(tokens, 'b c t h w -> b t (h w) c')
x = torch.cat([cls, tokens], dim=2) # [b, t, 1+h*w, c]
x = self.act(x)
x = self.linear2(x)
return shortcut + self.scale * self.droppath(x) | forward.
Args:
x (torch.Tensor): input features.
Shape: [bs, nframes, l, c]. l = 1 + h*w
Returns: features after adapter. The same shape as input.
| forward | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/temporal_model.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py | Apache-2.0 |
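The adapter above keeps the cls token aside and runs a 3D convolution over the (t, h, w) token grid. A minimal sketch of that reshape / conv / reshape round trip, with an illustrative depth-wise Conv3d standing in for the module's grouped convolution:
# Sketch: split off the cls token, reshape patch tokens back to a frame grid,
# mix them with a 3D conv, then reassemble. Shapes are illustrative.
import torch
import torch.nn as nn
import einops

b, t, h, w, c = 2, 4, 7, 7, 32
x = torch.randn(b, t, 1 + h * w, c)                 # [b, t, 1+h*w, c], token 0 is cls

cls, tokens = x[:, :, :1, :], x[:, :, 1:, :]
tokens = einops.rearrange(tokens, 'b t (h w) c -> b c t h w', h=h)
conv = nn.Conv3d(c, c, kernel_size=3, padding=1, groups=c)   # depth-wise 3D conv (assumed config)
tokens = conv(tokens)
tokens = einops.rearrange(tokens, 'b c t h w -> b t (h w) c')

out = torch.cat([cls, tokens], dim=2)               # [b, t, 1+h*w, c]
print(out.shape)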
def __init__(self, input_dim=768, droppath_rate=0.1):
"""
Kwargs:
input_dim (int): The input feature dimension.
"""
super().__init__()
self._input_dim = input_dim
self.temporal_attn = MultiheadAttention(
input_dim, num_heads=input_dim // 64)
self.norm = LayerNorm(input_dim, eps=1e-12)
self.linear = Linear(input_dim, input_dim)
self.droppath = DropPath(droppath_rate)
self.scale = nn.parameter.Parameter(torch.zeros([])) |
Kwargs:
input_dim (int): The input feature dimension.
| __init__ | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/temporal_model.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py | Apache-2.0 |
def forward(self, x: torch.Tensor):
"""forward.
Args:
x (torch.Tensor): input features.
Shape: [bs, nframes, l, c]. l = 1 + h*w
Returns: features after adapter. The same shape as input.
"""
if x.shape[1] == 1: # for single frame, return itself.
return x
shortcut = x
x = einops.rearrange(x, 'b t l c -> t (b l) c')
x = self.norm(x)
x = self.temporal_attn(x, x, x)[0]
x = einops.rearrange(x, 't (b l) c -> b t l c', b=shortcut.shape[0])
return shortcut + self.scale * self.droppath(x) | forward.
Args:
x (torch.Tensor): input features.
Shape: [bs, nframes, l, c]. l = 1 + h*w
Returns: features after adapter. The same shape as input.
| forward | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/temporal_model.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py | Apache-2.0 |
def __init__(self, input_dim=768, droppath_rate=0.1, window_size=(2, 2)):
"""
Kwargs:
input_dim (int): The input feature dimension.
"""
super().__init__()
self._input_dim = input_dim
self.temporal_attn = MultiheadAttention(
input_dim, num_heads=input_dim // 64)
self.norm = LayerNorm(input_dim, eps=1e-12)
self.droppath = DropPath(droppath_rate)
self.scale = nn.parameter.Parameter(torch.zeros([]))
self.wh, self.ww = window_size |
Kwargs:
input_dim (int): The input feature dimension.
| __init__ | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/temporal_model.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py | Apache-2.0 |
def forward(self, x: torch.Tensor):
"""forward.
Args:
x (torch.Tensor): input features.
Shape: [bs, nframes, l, c]. l = 1 + h*w
Returns: features after adapter. The same shape as input.
"""
if x.shape[1] == 1: # for single frame, return itself.
return x
shortcut = x
h = w = int(math.sqrt(x.shape[2] - 1))
cls_token = x[:, :, :1, :]
x = einops.rearrange(
x[:, :, 1:, :],
'b t (nh wh nw ww) c -> (t wh ww) (b nh nw) c',
nh=h // self.wh,
wh=self.wh,
nw=w // self.ww,
ww=self.ww,
)
x = self.norm(x)
x = self.temporal_attn(x, x, x)[0]
x = einops.rearrange(
x,
'(t wh ww) (b nh nw) c -> b t (nh wh nw ww) c',
wh=self.wh,
ww=self.ww,
nh=h // self.wh,
nw=w // self.ww,
)
# add back cls token.
x = torch.concat([cls_token, x], dim=2)
return shortcut + self.scale * self.droppath(x) | forward.
Args:
x (torch.Tensor): input features.
Shape: [bs, nframes, l, c]. l = 1 + h*w
Returns: features after adapter. The same shape as input.
| forward | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/temporal_model.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py | Apache-2.0 |
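The window partition used here groups the h*w patch tokens into (wh, ww) local windows and attends over time within each window. The rearrange round trip can be checked standalone; the sizes below are illustrative:
# Sketch: partition tokens into non-overlapping (wh, ww) windows, fold the
# windows into the batch, then undo the partition losslessly.
import torch
import einops

b, t, c = 2, 4, 32
h = w = 6
wh = ww = 2
x = torch.randn(b, t, h * w, c)

windows = einops.rearrange(
    x, 'b t (nh wh nw ww) c -> (t wh ww) (b nh nw) c',
    nh=h // wh, wh=wh, nw=w // ww, ww=ww)
print(windows.shape)         # (t*wh*ww, b*nh*nw, c) = torch.Size([16, 18, 32])

back = einops.rearrange(
    windows, '(t wh ww) (b nh nw) c -> b t (nh wh nw ww) c',
    wh=wh, ww=ww, nh=h // wh, nw=w // ww)
print(torch.equal(back, x))  # True: the round trip is lossless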
def __init__(self, input_dim=768, droppath_rate=0.1, num_prompts=1):
"""
Kwargs:
input_dim (int): The input feature dimension.
"""
super().__init__()
d_model = input_dim
self.message_fc = nn.Linear(d_model, d_model)
self.message_ln = LayerNorm(d_model, eps=1e-12)
self.message_attn = nn.MultiheadAttention(d_model, d_model // 64)
self.num_prompts = num_prompts
self.droppath = DropPath(droppath_rate) |
Kwargs:
input_dim (int): The input feature dimension.
| __init__ | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/temporal_model.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py | Apache-2.0 |
def forward(self, x: torch.Tensor):
"""forward.
Args:
x (torch.Tensor): input features.
Shape: [bs, nframes, l, c]. l = 1 + h*w
Returns: features after adapter. The same shape as input.
"""
if x.shape[1] == 1: # for single frame, return itself.
return x
msg_token = self.message_ln(self.message_fc(x[:, :,
0, :])) # [b, t, c]
msg_token = rearrange(msg_token, 'b t c -> t b c')
msg_token = msg_token + self.droppath(
self.message_attn(msg_token, msg_token, msg_token)[0])
msg_token = rearrange(msg_token, 't b c -> b t c')
# replace the last prompt token with msg_token.
x = torch.cat([x[:, :, :-1, :],
msg_token.unsqueeze(2)], dim=2) # [b, t, l+1, c]
return x | forward.
Args:
x (torch.Tensor): input features.
Shape: [bs, nframes, l, c]. l = 1 + h*w
Returns: features after adapter. The same shape as input.
| forward | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/temporal_model.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py | Apache-2.0 |
def build_inputs_with_special_tokens(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None) -> List[int]:
"""Build model inputs from a sequence or a pair of sequence for
sequence classification tasks by concatenating and adding special
tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with
the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep | Build model inputs from a sequence or a pair of sequences for
sequence classification tasks by concatenating and adding special
tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with
the appropriate special tokens.
| build_inputs_with_special_tokens | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/tokenizer.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/tokenizer.py | Apache-2.0 |
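For reference, the token layout described above can be exercised without a tokenizer. The ids below are made up for illustration; note that the single-sequence case here only prepends [CLS], with no trailing [SEP]:
# Sketch: how the special-token layout composes token id lists.
cls_token_id, sep_token_id = 101, 102

def build_inputs(token_ids_0, token_ids_1=None):
    if token_ids_1 is None:
        return [cls_token_id] + token_ids_0          # single sequence: [CLS] X
    return ([cls_token_id] + token_ids_0 + [sep_token_id]
            + token_ids_1 + [sep_token_id])          # pair: [CLS] A [SEP] B [SEP]

print(build_inputs([7, 8, 9]))            # [101, 7, 8, 9]
print(build_inputs([7, 8], [5, 6]))       # [101, 7, 8, 102, 5, 6, 102]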
def all_gather_concat(data: torch.Tensor) -> torch.Tensor:
"""Gather tensors with different first-dimension size and concat to one
tenosr.
Note:
Only the first dimension should be different.
Args:
data (Tensor): Tensor to be gathered.
Returns:
torch.Tensor: The concatenated tenosr.
"""
if dist.get_world_size() == 1:
return data
data_size = torch.tensor(data.size(0), device=data.device)
sizes_list = dist.all_gather(data_size)
total_length = sum(sizes_list)
max_length = max(sizes_list)
size_diff = max_length.item() - data_size.item()
if size_diff:
padding = torch.zeros(
size_diff, *data.size()[1:], device=data.device, dtype=data.dtype)
data = torch.cat((data, padding))
gather_list = dist.all_gather(data)
# gather all data according to the default DDP sampler. For instance,
# 8 samples on 2 GPUs, GPU0: [0,2,4,6], GPU1: [1,3,5,7], will be gathered
# as [0,1,2,3,4,5,6,7]
all_data = []
for gather_batch in zip(*gather_list):
all_data.extend(gather_batch)
return torch.stack(all_data)[:total_length] | Gather tensors with different first-dimension sizes and concatenate them into one
tensor.
Note:
Only the first dimension should be different.
Args:
data (Tensor): Tensor to be gathered.
Returns:
torch.Tensor: The concatenated tensor.
| all_gather_concat | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py | Apache-2.0 |
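The pad / gather / interleave / truncate bookkeeping of `all_gather_concat` can be simulated on a single process. In the sketch below, plain lists stand in for `torch.distributed`; 5 samples split across 2 ranks by a default DDP sampler come back in their original order:
# Sketch: single-process simulation of the gather-and-restore-order logic.
import torch

chunks = [torch.tensor([0, 2, 4]), torch.tensor([1, 3])]   # per-rank tensors (rank0, rank1)
sizes = [c.size(0) for c in chunks]
total, longest = sum(sizes), max(sizes)

padded = [torch.cat([c, c.new_zeros(longest - c.size(0))]) for c in chunks]

gathered = []
for batch in zip(*padded):             # interleave rank0[i], rank1[i], ...
    gathered.extend(batch)
print(torch.stack(gathered)[:total])   # tensor([0, 1, 2, 3, 4])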
def interpolate_pos_embed_beit(state_dict, new_model):
"""interpolate the positional embeddings. The spatial pe is relative and
temporal pe is absolute. additional temporal pe is padded with 0.
Args:
state_dict (dict): The state_dict.
new_model (nn.Module): The created model.
Returns: dict. The state_dict with updated positional embeddings.
"""
state_dict = interpolate_pos_relative_bias_beit(
state_dict_old=state_dict,
state_dict_new=new_model.state_dict(),
patch_shape_new=new_model.vision_encoder.embeddings.patch_embeddings.
patch_shape,
)
# absolute temporal pos bias
temporal_pe_key = 'vision_encoder.embeddings.temporal_position_embeddings'
if temporal_pe_key in state_dict:
logger = MMLogger.get_current_instance()
logger.info(
f'interpolate temporal positional embeddings: {temporal_pe_key}')
state_dict[temporal_pe_key] = load_temp_embed_with_mismatch(
temp_embed_old=state_dict[temporal_pe_key],
temp_embed_new=new_model.state_dict()[temporal_pe_key],
)
return state_dict | Interpolate the positional embeddings. The spatial PE is relative and the
temporal PE is absolute. The additional temporal PE is padded with 0.
Args:
state_dict (dict): The state_dict.
new_model (nn.Module): The created model.
Returns: dict. The state_dict with updated positional embeddings.
| interpolate_pos_embed_beit | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py | Apache-2.0 |
def load_temp_embed_with_mismatch(temp_embed_old,
temp_embed_new,
add_zero=True):
"""Add/Remove extra temporal_embeddings as needed.
https://arxiv.org/abs/2104.00650 shows adding zero paddings works.
temp_embed_old: (1, num_frames_old, 1, d)
temp_embed_new: (1, num_frames_new, 1, d)
add_zero: bool, if True, pad with zeros; otherwise, interpolate the trained embeddings.
"""
# TODO zero pad
num_frms_new = temp_embed_new.shape[1]
num_frms_old = temp_embed_old.shape[1]
logger = MMLogger.get_current_instance()
logger.info(
f'Load temporal_embeddings, lengths: {num_frms_old}-->{num_frms_new}')
if num_frms_new > num_frms_old:
if add_zero:
temp_embed_new[:, :num_frms_old] \
= temp_embed_old # untrained embeddings are zeros.
else:
temp_embed_new = interpolate_temporal_pos_embed(
temp_embed_old, num_frms_new)
elif num_frms_new < num_frms_old:
temp_embed_new = temp_embed_old[:, :num_frms_new]
else: # =
temp_embed_new = temp_embed_old
return temp_embed_new | Add/Remove extra temporal_embeddings as needed.
https://arxiv.org/abs/2104.00650 shows adding zero paddings works.
temp_embed_old: (1, num_frames_old, 1, d)
temp_embed_new: (1, num_frames_new, 1, d)
add_zero: bool, if True, pad with zeros; otherwise, interpolate the trained embeddings.
| load_temp_embed_with_mismatch | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py | Apache-2.0 |
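The `add_zero` branch above amounts to copying the trained temporal slots and leaving the new ones at zero. A minimal sketch with the assumed (1, T, 1, d) shapes:
# Sketch: grow a temporal position embedding from 4 to 8 frames by zero padding.
import torch

d = 16
temp_embed_old = torch.randn(1, 4, 1, d)       # trained with 4 frames
temp_embed_new = torch.zeros(1, 8, 1, d)       # target model expects 8 frames

temp_embed_new[:, :temp_embed_old.shape[1]] = temp_embed_old   # first 4 slots copied, rest stay zero
print(temp_embed_new.shape)                    # torch.Size([1, 8, 1, 16])
print(temp_embed_new[0, 5, 0].abs().sum())     # tensor(0.) -- untrained slots are zeros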
def interpolate_pos_relative_bias_beit(state_dict_old, state_dict_new,
patch_shape_new):
"""
Args:
state_dict_old: loaded state dict
state_dict_new: state dict for model with new image size
patch_shape_new: new model patch_shape
ref: https://github.com/microsoft/unilm/blob/master/beit/run_class_finetuning.py # noqa: E501
"""
all_keys = list(state_dict_old.keys())
for key in all_keys:
if 'relative_position_index' in key:
state_dict_old.pop(key)
if 'relative_position_bias_table' in key:
rel_pos_bias = state_dict_old[key]
src_num_pos, num_attn_heads = rel_pos_bias.size()
dst_num_pos, _ = state_dict_new[key].size()
dst_patch_shape = patch_shape_new
if dst_patch_shape[0] != dst_patch_shape[1]:
raise NotImplementedError()
num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (
dst_patch_shape[1] * 2 - 1)
src_size = int((src_num_pos - num_extra_tokens)**0.5)
dst_size = int((dst_num_pos - num_extra_tokens)**0.5)
if src_size != dst_size:
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
def geometric_progression(a, r, n):
return a * (1.0 - r**n) / (1.0 - r)
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src_size // 2)
if gp > dst_size // 2:
right = q
else:
left = q
dis = []
cur = 1
for i in range(src_size // 2):
dis.append(cur)
cur += q**(i + 1)
r_ids = [-_ for _ in reversed(dis)]
x = r_ids + [0] + dis
y = r_ids + [0] + dis
t = dst_size // 2.0
dx = np.arange(-t, t + 0.1, 1.0)
dy = np.arange(-t, t + 0.1, 1.0)
all_rel_pos_bias = []
for i in range(num_attn_heads):
z = rel_pos_bias[:, i].view(src_size,
src_size).float().numpy()
f = interpolate.interp2d(x, y, z, kind='cubic')
all_rel_pos_bias.append(
torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(
rel_pos_bias.device))
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens),
dim=0)
state_dict_old[key] = new_rel_pos_bias
return state_dict_old |
Args:
state_dict_old: loaded state dict
state_dict_new: state dict for model with new image size
patch_shape_new: new model patch_shape
ref: https://github.com/microsoft/unilm/blob/master/beit/run_class_finetuning.py # noqa: E501
| interpolate_pos_relative_bias_beit | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/utils.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py | Apache-2.0 |
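The bisection over the geometric ratio q, which decides how the source relative-position coordinates are spread out before the cubic interpolation, is self-contained and worth seeing in isolation. A standalone sketch for an assumed 14x14 -> 16x16 resize:
# Sketch: find q so the cumulative geometric spacing covers the new half-size,
# then lay out the source coordinates, mirroring the logic above.
def geometric_progression(a, r, n):
    return a * (1.0 - r ** n) / (1.0 - r)

src_size, dst_size = 14, 16
left, right = 1.01, 1.5
while right - left > 1e-6:
    q = (left + right) / 2.0
    if geometric_progression(1, q, src_size // 2) > dst_size // 2:
        right = q
    else:
        left = q

# positions of the source bias entries, spaced geometrically away from the center
dis, cur = [], 1
for i in range(src_size // 2):
    dis.append(cur)
    cur += q ** (i + 1)
coords = [-d for d in reversed(dis)] + [0] + dis
print(round(q, 4), len(coords))   # ratio and 2*(src_size//2)+1 = 15 source coordinates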
def loss(self, inputs: torch.Tensor, data_samples: SampleList,
**kwargs) -> dict:
"""Calculate losses from a batch of inputs and data samples.""" | Calculate losses from a batch of inputs and data samples. | loss | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py | Apache-2.0 |
def forward(self, inputs, data_samples, mode: str = 'loss'):
"""The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method handles neither back propagation nor
optimizer updating, which are done in the :meth:`train_step`.
Args:
inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (List[``ActionDataSample``], optional): The
annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``loss``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
"""
if mode == 'tensor':
return self.extract_feat(inputs, data_samples)
elif mode == 'loss':
return self.loss(inputs, data_samples)
elif mode == 'predict':
return self.predict(inputs, data_samples)
else:
raise RuntimeError(f'Invalid mode "{mode}".') | The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method handles neither back propagation nor
optimizer updating, which are done in the :meth:`train_step`.
Args:
inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (List[``ActionDataSample``], optional): The
annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``loss``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
| forward | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py | Apache-2.0 |
def encode_vision(self, image):
"""encode image / videos as features.
Args:
image (torch.Tensor): The input images.
Returns: tuple.
- vision_embeds (torch.Tensor): The features of all patches.
Shape: [B,T,L,C].
- pooled_vision_embeds (torch.Tensor): The pooled features.
Shape: [B,T,C].
"""
output_dict = self.vision_encoder(image)
vision_embeds = self.vision_layernorm(output_dict.last_hidden_state)
pooled_vision_embeds = output_dict.pooler_output
return vision_embeds, pooled_vision_embeds | encode image / videos as features.
Args:
image (torch.Tensor): The input images.
Returns: tuple.
- vision_embeds (torch.Tensor): The features of all patches.
Shape: [B,T,L,C].
- pooled_vision_embeds (torch.Tensor): The pooled features.
Shape: [B,T,C].
| encode_vision | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py | Apache-2.0 |
def encode_text(self, text):
"""encode text.
Args:
text (dict): The output of huggingface's `PreTrainedTokenizer`.
contains keys:
- input_ids (torch.Tensor): Token ids to be fed to a model.
Shape: [B,L].
- attention_mask (torch.Tensor): The mask indicating padded tokens.
Shape: [B,L]. 0 denotes a padded token.
- other keys refer to "https://huggingface.co/docs/transformers/v4.21.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__". # noqa: E501
Returns: tuple.
- text_embeds (torch.Tensor): The features of all tokens. Shape: [B,L,C].
- pooled_text_embeds (torch.Tensor): The pooled features. Shape: [B,C].
"""
text_output = self.text_encoder(
text.input_ids,
attention_mask=text.attention_mask,
return_dict=True,
mode='text',
)
text_embeds = text_output.last_hidden_state
pooled_text_embeds = text_embeds[:, 0]
return text_embeds, pooled_text_embeds | encode text.
Args:
text (dict): The output of huggingface's `PreTrainedTokenizer`.
contains keys:
- input_ids (torch.Tensor): Token ids to be fed to a model.
Shape: [B,L].
- attention_mask (torch.Tensor): The mask indicating padded tokens.
Shape: [B,L]. 0 denotes a padded token.
- other keys refer to "https://huggingface.co/docs/transformers/v4.21.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__". # noqa: E501
Returns: tuple.
- text_embeds (torch.Tensor): The features of all tokens. Shape: [B,L,C].
- pooled_text_embeds (torch.Tensor): The pooled features. Shape: [B,C].
| encode_text | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py | Apache-2.0 |
def loss(
self,
inputs: torch.Tensor,
data_samples: Optional[List[ActionDataSample]] = None,
) -> Dict[str, torch.tensor]:
"""Calculate losses from a batch of inputs and data samples.
Args:
inputs (dict): A batch of inputs. The input tensor with
at least one modality. For image, the value is a tensor
of shape (N, C, ...) in general.
For text, the value is a dict of tokenized text inputs.
data_samples (Optional[List[DataSample]]):
The annotation data of every sample. Defaults to None.
Returns:
Dict[str, torch.tensor]: a dictionary of loss components.
"""
output = self.extract_feat(inputs, data_samples)
text_embeds = output['text_embeds']
text_attn_mask = output['text_attn_mask']
image_embeds = output['image_embeds']
image_feat = output['image_feat']
text_feat = output['text_feat']
image_atts = torch.ones(
image_embeds.size()[:-1], dtype=torch.long).to(self.device)
# ITC Loss
# B*world_size, D
image_feat_all = torch.cat(dist.all_gather(image_feat))
# B*world_size, D
text_feat_all = torch.cat(dist.all_gather(text_feat))
# image to text similarity
# B, B*world_size
sim_i2t = torch.einsum('mld,nd->mln', image_feat,
text_feat_all).mean(1) / self.temp
# text-image similarity
# B, B*world_size
sim_t2i = torch.einsum('md,nld->mln', text_feat,
image_feat_all).mean(1) / self.temp
rank = dist.get_rank()
bs = inputs.size(0)
itc_targets = torch.linspace(
rank * bs, rank * bs + bs - 1, bs, dtype=int).to(self.device)
itc_loss = (F.cross_entropy(sim_i2t, itc_targets) +
F.cross_entropy(sim_t2i, itc_targets)) / 2
# prepare for itm
output_pos = self.text_encoder(
encoder_embeds=text_embeds,
attention_mask=text_attn_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
mode='fusion',
)
idx = torch.tensor([i.gt_video_id for i in data_samples]).view(-1, 1)
bs = idx.size(0)
if self.negative_all_rank:
idxs = torch.cat(dist.all_gather(idx))
image_feat_world = torch.cat(dist.all_gather(image_feat))
text_feat_world = torch.cat(dist.all_gather(text_feat))
att_mask_world = torch.cat(dist.all_gather(text_attn_mask))
text_embeds_world = torch.cat(all_gather_with_grad(text_embeds))
image_embeds_world = torch.cat(all_gather_with_grad(image_embeds))
else:
idxs = idx
image_feat_world = image_feat.detach()
text_feat_world = text_feat.detach()
image_embeds_world = image_embeds
text_embeds_world = text_embeds
att_mask_world = text_attn_mask
with torch.no_grad():
# compute sample similarity
sim_i2t = torch.einsum('mld,nd->mln', image_feat,
text_feat_world).mean(1) / self.temp
sim_t2i = torch.einsum('md,nld->mln', text_feat,
image_feat_world).mean(1) / self.temp
mask = torch.eq(idx, idxs.t()).to(self.device)
weights_i2t = F.softmax(sim_i2t + 1e-4, dim=1)
weights_i2t.masked_fill_(mask, 0)
weights_t2i = F.softmax(sim_t2i + 1e-4, dim=1)
weights_t2i.masked_fill_(mask, 0)
# select a negative image for each text
neg_idx = torch.multinomial(weights_t2i, 1).squeeze()
image_embeds_neg = image_embeds_world[neg_idx]
# select a negative text for each image
neg_idx = torch.multinomial(weights_i2t, 1).squeeze()
text_embeds_neg = text_embeds_world[neg_idx]
text_atts_neg = att_mask_world[neg_idx]
text_embeds_all = torch.cat([text_embeds, text_embeds_neg], dim=0)
text_atts_all = torch.cat([text_attn_mask, text_atts_neg], dim=0)
image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
image_atts_all = torch.cat([image_atts, image_atts], dim=0)
output_neg = self.text_encoder(
encoder_embeds=text_embeds_all,
attention_mask=text_atts_all,
encoder_hidden_states=image_embeds_all,
encoder_attention_mask=image_atts_all,
return_dict=True,
mode='fusion',
)
vl_embeddings = torch.cat(
[
output_pos.last_hidden_state[:, 0, :],
output_neg.last_hidden_state[:, 0, :],
],
dim=0,
)
itm_targets = torch.ones((3 * bs, ),
dtype=torch.long,
device=inputs.device)
itm_targets[bs:] = 0
itm_logit = self.itm_head(vl_embeddings)
itm_loss = F.cross_entropy(itm_logit, itm_targets)
return dict(itc_loss=itc_loss, itm_loss=itm_loss) | Calculate losses from a batch of inputs and data samples.
Args:
inputs (dict): A batch of inputs. The input tensor with
at least one modality. For image, the value is a tensor
of shape (N, C, ...) in general.
For text, the value is a dict of tokenized text inputs.
data_samples (Optional[List[DataSample]]):
The annotation data of every sample. Defaults to None.
Returns:
Dict[str, torch.tensor]: a dictionary of loss components.
| loss | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu_ret.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py | Apache-2.0 |
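The ITC part of this loss is the standard bidirectional contrastive objective over a similarity matrix. A single-GPU sketch (no all_gather, no frame axis; shapes are illustrative, not the model's actual feature path):
# Sketch: image-text contrastive loss with the diagonal as the matching target.
import torch
import torch.nn.functional as F

B, d, temp = 4, 32, 0.07
image_feat = F.normalize(torch.randn(B, d), dim=-1)
text_feat = F.normalize(torch.randn(B, d), dim=-1)

sim_i2t = image_feat @ text_feat.t() / temp       # [B, B]
sim_t2i = text_feat @ image_feat.t() / temp       # [B, B]
targets = torch.arange(B)                         # matched pairs sit on the diagonal

itc_loss = (F.cross_entropy(sim_i2t, targets) +
            F.cross_entropy(sim_t2i, targets)) / 2
print(itc_loss)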
def extract_feat(
self,
images: torch.Tensor = None,
data_samples: List[ActionDataSample] = None,
return_texts=True,
) -> Dict[str, torch.Tensor]:
"""Extract features from the input dict.
Args:
images (tensor, optional): The images to extract features.
Defaults to None.
data_samples (list, optional): The data samples containing texts
to extract features. Defaults to None.
return_texts (bool): Whether to return the tokenized text and the
corresponding attention masks. Defaults to True.
Returns:
Tuple[torch.Tensor]: The output features.
If multimodal_backbone does not exist, a tuple of torch.Tensor
will be returned.
"""
if data_samples is not None:
texts = self.preprocess_text(data_samples)
else:
texts = None
assert images is not None or texts is not None, \
'At least single modality should be passed as inputs.'
results = {}
if texts is not None and return_texts:
results.update({
'text_ids': texts.input_ids,
'text_attn_mask': texts.attention_mask,
})
# extract image features
if images is not None:
image_embeds, pooled_image_embeds = self.encode_vision(images)
# concat temporal embeds
image_embeds = rearrange(image_embeds,
'b t l c -> b (t l) c').contiguous()
results['image_embeds'] = image_embeds
results['image_feat'] = F.normalize(
self.vision_proj(pooled_image_embeds), dim=-1)
# extract text features
if texts is not None:
texts_output = self.text_encoder(
texts.input_ids,
attention_mask=texts.attention_mask,
return_dict=True,
mode='text')
text_embeds = texts_output.last_hidden_state
pooled_text_feat = text_embeds[:, 0]
results['text_embeds'] = text_embeds
results['text_feat'] = F.normalize(
self.text_proj(pooled_text_feat), dim=-1)
return results | Extract features from the input dict.
Args:
images (tensor, optional): The images to extract features.
Defaults to None.
data_samples (list, optional): The data samples containing texts
to extract features. Defaults to None.
return_texts (bool): Whether to return the tokenized text and the
corresponding attention masks. Defaults to True.
Returns:
Tuple[torch.Tensor]: The output features.
If multimodal_backbone does not exist, a tuple of torch.Tensor
will be returned.
| extract_feat | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu_ret.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py | Apache-2.0 |
def compute_score_matrix_i2t(self, img_feats, img_embeds, text_feats,
text_embeds, text_atts):
"""Compare the score matrix for image-to-text retrieval. Every image
should compare to all the text features.
Args:
img_feats (torch.Tensor): The input img feats tensor with shape
(M, C). M stands for numbers of samples on a single GPU.
img_embeds (torch.Tensor): The input img embeds tensor with shape
(M, C). M stands for numbers of samples on a single GPU.
text_feats (torch.Tensor): The input text feats tensor with shape
(N, C). N stands for numbers of all samples on all GPUs.
text_embeds (torch.Tensor): The input tensor with shape (N, C).
text_atts (torch.Tensor): The input tensor with shape (N, C).
Returns:
torch.Tensor: Score matrix of image-to-text retrieval.
"""
# compute i2t sim matrix
sim_matrix_i2t = torch.einsum('mld,nd->mln', img_feats,
text_feats).mean(1)
if self.fast_match:
return sim_matrix_i2t
score_matrix_i2t = torch.full((img_feats.size(0), text_feats.size(0)),
-100.0).to(self.device)
for i in track_on_main_process(
range(img_feats.size(0)), 'Compute I2T scores...'):
sims = sim_matrix_i2t[i]
topk_sim, topk_idx = sims.topk(k=self.topk, dim=0)
topk_bz = 32
encoder_output = img_embeds[i].repeat(topk_bz, 1, 1)
encoder_att = torch.ones(
encoder_output.size()[:-1], dtype=torch.long).to(self.device)
for j in range(0, self.topk // topk_bz):
batch_topk = topk_idx[j * topk_bz:(j + 1) * topk_bz]
output = self.text_encoder(
encoder_embeds=text_embeds[batch_topk],
attention_mask=text_atts[batch_topk],
encoder_hidden_states=encoder_output,
encoder_attention_mask=encoder_att,
return_dict=True,
mode='fusion')
score = self.itm_head(output.last_hidden_state[:, 0, :])[:, 1]
score_matrix_i2t[i, batch_topk] = score
return score_matrix_i2t | Compute the score matrix for image-to-text retrieval. Every image
is compared against all the text features.
Args:
img_feats (torch.Tensor): The input img feats tensor with shape
(M, C). M stands for numbers of samples on a single GPU.
img_embeds (torch.Tensor): The input img embeds tensor with shape
(M, C). M stands for numbers of samples on a single GPU.
text_feats (torch.Tensor): The input text feats tensor with shape
(N, C). N stands for numbers of all samples on all GPUs.
text_embeds (torch.Tensor): The input tensor with shape (N, C).
text_atts (torch.Tensor): The input tensor with shape (N, C).
Returns:
torch.Tensor: Score matrix of image-to-text retrieval.
| compute_score_matrix_i2t | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu_ret.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py | Apache-2.0 |
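The einsum pattern above drives the coarse retrieval stage: frame-level image features are matched against pooled text features and averaged over the frame/token dimension, and only the top-k candidates go through the expensive fusion re-scoring. A minimal, self-contained sketch with made-up shapes (not taken from the repository):

import torch

M, L, D, N, K = 4, 8, 16, 10, 3          # images, frame tokens, dim, texts, top-k (all assumed)
img_feats = torch.randn(M, L, D)          # frame-level image features
text_feats = torch.randn(N, D)            # pooled text features

# (M, L, N) frame-to-text similarities, averaged over L -> (M, N)
sim_i2t = torch.einsum('mld,nd->mln', img_feats, text_feats).mean(1)

# keep only the top-K text candidates per image for the heavier ITM re-scoring
topk_sim, topk_idx = sim_i2t.topk(k=K, dim=1)
print(sim_i2t.shape, topk_idx.shape)      # torch.Size([4, 10]) torch.Size([4, 3])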
def compute_score_matrix_t2i(self, img_feats, img_embeds, text_feats,
text_embeds, text_atts):
"""Compare the score matrix for text-to-image retrieval. Every text
should compare to all the image features.
Args:
img_feats (torch.Tensor): The input img feats tensor with shape
(M, C). M stands for numbers of samples on a single GPU.
img_embeds (torch.Tensor): The input img embeds tensor with shape
(M, C). M stands for numbers of samples on a single GPU.
text_feats (torch.Tensor): The input text feats tensor with shape
(N, C). N stands for numbers of all samples on all GPUs.
text_embeds (torch.Tensor): The input tensor with shape (M, C).
text_atts (torch.Tensor): The input tensor with shape (M, C).
Returns:
torch.Tensor: Score matrix of text-to-image retrieval.
"""
# compute t2i sim matrix
sim_matrix_t2i = torch.einsum('md,nld->mln', text_feats,
img_feats).mean(1)
if self.fast_match:
return sim_matrix_t2i
score_matrix_t2i = torch.full((text_feats.size(0), img_feats.size(0)),
-100.0).to(self.device)
for i in track_on_main_process(
range(text_feats.size(0)), 'Compute T2I scores...'):
sims = sim_matrix_t2i[i]
topk_sim, topk_idx = sims.topk(k=self.topk, dim=0)
topk_bz = 32
for j in range(0, self.topk // topk_bz):
batch_topk = topk_idx[j * topk_bz:(j + 1) * topk_bz]
encoder_output = img_embeds[batch_topk]
encoder_att = torch.ones(
encoder_output.size()[:-1],
dtype=torch.long).to(self.device)
output = self.text_encoder(
encoder_embeds=text_embeds[i].repeat(topk_bz, 1, 1),
attention_mask=text_atts[i].repeat(topk_bz, 1),
encoder_hidden_states=encoder_output,
encoder_attention_mask=encoder_att,
return_dict=True,
mode='fusion')
score = self.itm_head(output.last_hidden_state[:, 0, :])[:, 1]
score_matrix_t2i[i, batch_topk] = score
return score_matrix_t2i | Compute the score matrix for text-to-image retrieval. Every text
is compared against all the image features.
Args:
img_feats (torch.Tensor): The input img feats tensor with shape
(M, C). M stands for numbers of samples on a single GPU.
img_embeds (torch.Tensor): The input img embeds tensor with shape
(M, C). M stands for numbers of samples on a single GPU.
text_feats (torch.Tensor): The input text feats tensor with shape
(N, C). N stands for numbers of all samples on all GPUs.
text_embeds (torch.Tensor): The input tensor with shape (M, C).
text_atts (torch.Tensor): The input tensor with shape (M, C).
Returns:
torch.Tensor: Score matrix of text-to-image retrieval.
| compute_score_matrix_t2i | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu_ret.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py | Apache-2.0 |
def _get_predictions(self,
result: torch.Tensor,
data_samples: List[ActionDataSample],
mode: str = 'i2t'):
"""Post-process the output of retriever.
Args:
result (torch.Tensor): Score matrix of single retrieve,
either from image or text.
data_samples (List[ActionDataSample], optional): The annotation
data of every sample.
mode (str): Retrieve mode, either `i2t` for image to text, or `t2i`
for text to image. Defaults to `i2t`.
Returns:
List[ActionDataSample]: the raw data_samples with
the predicted results.
"""
# create data sample if not exists
if data_samples is None:
data_samples = [ActionDataSample() for _ in range(result.size(0))]
elif mode == 't2i':
# Process data samples to align with the num of texts.
new_data_samples = []
for sample in data_samples:
if isinstance(sample.text, (list, tuple)):
texts = sample.text
else:
texts = [sample.text]
for i, text in enumerate(texts):
new_sample = ActionDataSample(text=text)
if 'gt_video_id' in sample:
new_sample.gt_label = sample.gt_video_id[i]
new_data_samples.append(new_sample)
assert len(new_data_samples) == result.size(0)
data_samples = new_data_samples
elif mode == 'i2t':
for sample in data_samples:
if 'gt_text_id' in sample:
sample.gt_label = sample.gt_text_id
else:
raise ValueError(f'Type {mode} is not supported.')
for data_sample, score in zip(data_samples, result):
idx = score.argmax(keepdim=True).detach()
data_sample.set_pred_score(score)
data_sample.set_pred_label(idx)
return data_samples | Post-process the output of retriever.
Args:
result (torch.Tensor): Score matrix of single retrieve,
either from image or text.
data_samples (List[ActionDataSample], optional): The annotation
data of every sample.
mode (str): Retrieve mode, either `i2t` for image to text, or `t2i`
for text to image. Defaults to `i2t`.
Returns:
List[ActionDataSample]: the raw data_samples with
the predicted results.
| _get_predictions | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu_ret.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py | Apache-2.0 |
def predict(self, inputs, data_samples, **kwargs):
"""Predict captions from a batch of inputs.
Args:
images (torch.Tensor): The input images tensor with shape
(N, C, ...) in general.
data_samples (List[DataSample], optional): The annotation
data of every sample. Defaults to None.
**kwargs: Other keyword arguments accepted by the ``predict`` method.
Returns:
List[ActionDataSample]: Return list of data samples.
"""
num_options_per_q = len(data_samples[0].caption_options)
for sample in data_samples:
sample.text = sample.caption_options
output = self.extract_feat(inputs, data_samples)
text_embeds = output['text_embeds']
text_attn_mask = output['text_attn_mask']
image_embeds = output['image_embeds']
image_feat = output['image_feat']
text_feat = output['text_feat']
# compute similarity between vision feat and caption feat
text_feat = rearrange(
text_feat, '(b n) c -> b c n', n=num_options_per_q)
sim = torch.matmul(image_feat.mean(1, keepdim=True),
text_feat).squeeze(1) / self.temp
sim = F.softmax(sim, dim=1).flatten()
# cross-modal encode
encoder_output = image_embeds.repeat_interleave(
num_options_per_q, dim=0)
image_atts = torch.ones(
encoder_output.size()[:-1], dtype=torch.long).to(inputs.device)
output = self.text_encoder(
encoder_embeds=text_embeds,
attention_mask=text_attn_mask,
encoder_hidden_states=encoder_output,
encoder_attention_mask=image_atts,
return_dict=True,
mode='fusion',
)
itm_embeds = output.last_hidden_state[:, 0] # [CLS]
itm_score = F.softmax(self.itm_head(itm_embeds), dim=1)[:, 1] # [bs*5]
score = itm_score * self.score_weight + sim * self.similarity_weight
pred_answers = score.view(-1, num_options_per_q).max(1)[1].cpu()
# assemble predictions
ensemble_scores = score.view(-1, num_options_per_q).cpu() # (bsz, 5)
out_data_samples = []
for data_sample, ensemble_score, pred_ans in \
zip(data_samples, ensemble_scores, pred_answers):
data_sample.pred_label = pred_ans.item()
data_sample.score = ensemble_score.numpy()
out_data_samples.append(data_sample)
return out_data_samples | Predict captions from a batch of inputs.
Args:
images (torch.Tensor): The input images tensor with shape
(N, C, ...) in general.
data_samples (List[DataSample], optional): The annotation
data of every sample. Defaults to None.
**kwargs: Other keyword arguments accepted by the ``predict`` method.
Returns:
List[ActionDataSample]: Return list of data samples.
| predict | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu_ret_mc.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret_mc.py | Apache-2.0 |
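The final decision in the multiple-choice predict above mixes the temperature-scaled contrastive similarity with the ITM matching score. A toy numeric sketch; the temperature, weights and scores below are made-up stand-ins for self.temp, self.score_weight, self.similarity_weight and the real model outputs:

import torch
import torch.nn.functional as F

num_options = 5
sim = torch.tensor([[2.0, 0.5, 0.1, -1.0, 0.3]])     # similarity logits per caption option (made up)
itm_score = torch.tensor([0.7, 0.2, 0.4, 0.1, 0.6])  # ITM match probabilities per option (made up)

sim = F.softmax(sim / 0.07, dim=1).flatten()          # temperature-scaled softmax over the options
score = itm_score * 0.7 + sim * 0.3                   # weighted fusion (weights are assumptions)
pred = score.view(-1, num_options).max(1)[1]
print(pred)                                           # index of the selected caption option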
def loss(self, inputs, data_samples):
"""Calculate losses from a batch of inputs and data samples.
Args:
inputs (dict): A batch of inputs. The input tensor of
at least one modality. For image, the value is a tensor
of shape (N, C, ...) in general.
For text, the value is a dict of tokenized text inputs.
data_samples (Optional[List[DataSample]]):
The annotation data of every sample. Defaults to None.
Returns:
Dict[str, torch.tensor]: a dictionary of loss components.
"""
questions, question_output = self.forward_encoder(inputs, data_samples)
weights = torch.cat(
[torch.tensor(sample.gt_answer_weight) for sample in data_samples],
dim=0).to(inputs.device)
raw_answers = []
for sample in data_samples:
raw_answers.extend(sample.gt_answer)
answer_count = torch.tensor([
len(sample.gt_answer) for sample in data_samples
]).to(inputs.device)
answers = [a + ' ' + '[SEP]' for a in raw_answers]
answers = self.tokenizer(
answers,
padding='max_length',
truncation=True,
max_length=self.max_answer_len,
return_tensors='pt').to(inputs.device)
answer_targets = answers.input_ids.masked_fill(
answers.input_ids == self.tokenizer.pad_token_id, -100)
question_states = []
question_atts = []
for b, n in enumerate(answer_count):
question_states += [question_output.last_hidden_state[b]] * n
question_atts += [questions.attention_mask[b]] * n
question_states = torch.stack(question_states, 0).to(inputs.device)
question_atts = torch.stack(question_atts, 0).to(inputs.device)
answer_output = self.text_decoder(
answers.input_ids,
attention_mask=answers.attention_mask,
encoder_hidden_states=question_states,
encoder_attention_mask=question_atts,
labels=answer_targets,
return_dict=True,
reduction='none',
)
loss = weights * answer_output.loss
loss = loss.sum() / inputs.size(0)
return dict(loss=loss) | Calculate losses from a batch of inputs and data samples.
Args:
inputs (dict): A batch of inputs. The input tensor of
at least one modality. For image, the value is a tensor
of shape (N, C, ...) in general.
For text, the value is a dict of tokenized text inputs.
data_samples (Optional[List[DataSample]]):
The annotation data of every sample. Defaults to None.
Returns:
Dict[str, torch.tensor]: a dictionary of loss components.
| loss | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu_vqa.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_vqa.py | Apache-2.0 |
def rank_answer(self, question_states, question_atts, answer_ids,
answer_atts, k):
"""
question_states: (bsz, Lq, d)
answer_ids: answer input id after tokenization, (#answers, La)
"""
num_ques = question_states.size(0)
start_ids = answer_ids[0, 0].repeat(num_ques, 1) # bos token
start_output = self.text_decoder(
start_ids,
encoder_hidden_states=question_states,
encoder_attention_mask=question_atts,
return_dict=True,
reduction='none',
)
logits = start_output.logits[:, 0, :] # first token's logit
# topk_probs: top-k probability
# topk_ids: [num_question, k]
answer_first_token = answer_ids[:, 1]
prob_first_token = F.softmax(
logits, dim=1).index_select(
dim=1, index=answer_first_token)
topk_probs, topk_ids = prob_first_token.topk(k, dim=1)
# answer input: [num_question*k, answer_len]
input_ids = []
input_atts = []
for b, topk_id in enumerate(topk_ids):
input_ids.append(answer_ids.index_select(dim=0, index=topk_id))
input_atts.append(answer_atts.index_select(dim=0, index=topk_id))
input_ids = torch.cat(input_ids, dim=0)
input_atts = torch.cat(input_atts, dim=0)
targets_ids = input_ids.masked_fill(
input_ids == self.tokenizer.pad_token_id, -100)
question_states = question_states.repeat_interleave(k, dim=0)
question_atts = question_atts.repeat_interleave(k, dim=0)
output = self.text_decoder(
input_ids,
attention_mask=input_atts,
encoder_hidden_states=question_states,
encoder_attention_mask=question_atts,
labels=targets_ids,
return_dict=True,
reduction='none',
)
answer_loss = output.loss
answer_loss = answer_loss.view(input_ids.size(0), -1)
# topk_prob: first token probability
topk_probs = topk_probs.view(-1, 1)
log_probs = torch.cat([topk_probs.log(), -answer_loss], dim=1)
# re-calculate log probabilities for the answer sequences
# using chain rule
log_probs_sum = log_probs.sum(1)
log_probs_sum = log_probs_sum.view(num_ques, k)
topk_probs = F.softmax(log_probs_sum, dim=-1)
# get top-k after re-ranking
topk_probs, rerank_id = topk_probs.topk(k, dim=1)
topk_ids = torch.gather(topk_ids, 1, rerank_id)
return topk_ids, topk_probs |
question_states: (bsz, Lq, d)
answer_ids: answer input id after tokenization, (#answers, La)
| rank_answer | python | open-mmlab/mmaction2 | mmaction/models/multimodal/vindlu/vindlu_vqa.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_vqa.py | Apache-2.0 |
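The re-ranking step in rank_answer combines the log-probability of each candidate answer's first token with the negated per-token decoder loss of the remaining tokens (the chain rule for sequence log-likelihood). A tiny sketch with random stand-in values instead of real decoder outputs:

import torch
import torch.nn.functional as F

num_ques, k = 2, 3
topk_probs = torch.rand(num_ques * k, 1)        # first-token probabilities (stand-in values)
answer_loss = torch.rand(num_ques * k, 4)       # per-token NLL of the remaining tokens (stand-in)

# log p(answer) = log p(first token) + sum_t log p(token_t | prefix)
log_probs = torch.cat([topk_probs.log(), -answer_loss], dim=1)
log_probs_sum = log_probs.sum(1).view(num_ques, k)

rerank_probs = F.softmax(log_probs_sum, dim=-1)  # normalize over the k candidates per question
print(rerank_probs.shape, rerank_probs.argmax(dim=1))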
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
if self.downsample_position == 'before':
x = self.pool(x)
x = self.conv(x)
else:
x = self.conv(x)
x = self.pool(x)
return x | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/necks/tpn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py | Apache-2.0 |
def forward(self, x: Tuple[torch.Tensor]) -> torch.Tensor:
"""Defines the computation performed at every call."""
out = [self.downsamples[i](feature) for i, feature in enumerate(x)]
out = torch.cat(out, 1)
out = self.fusion_conv(out)
return out | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/necks/tpn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py | Apache-2.0 |
def forward(self, x: Tuple[torch.Tensor]) -> list:
"""Defines the computation performed at every call."""
out = []
for i, _ in enumerate(x):
if isinstance(self.spatial_modulation[i], nn.ModuleList):
out_ = x[i]
for op in self.spatial_modulation[i]:
out_ = op(out_)
out.append(out_)
else:
out.append(self.spatial_modulation[i](x[i]))
return out | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/necks/tpn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py | Apache-2.0 |
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call."""
x = self.conv(x)
x = self.pool(x)
return x | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/necks/tpn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py | Apache-2.0 |
def init_weights(self) -> None:
"""Default init_weights for conv(msra) and norm in ConvModule."""
for m in self.modules():
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
if self.aux_head is not None:
self.aux_head.init_weights() | Default init_weights for conv(msra) and norm in ConvModule. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/necks/tpn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py | Apache-2.0 |
def forward(self,
x: Tuple[torch.Tensor],
data_samples: Optional[SampleList] = None) -> tuple:
"""Defines the computation performed at every call."""
loss_aux = dict()
# Calculate auxiliary loss if `self.aux_head`
# and `data_samples` are not None.
if self.aux_head is not None and data_samples is not None:
loss_aux = self.aux_head.loss(x[-2], data_samples)
# Spatial Modulation
spatial_modulation_outs = self.spatial_modulation(x)
# Temporal Modulation
temporal_modulation_outs = []
for i, temporal_modulation in enumerate(self.temporal_modulation_ops):
temporal_modulation_outs.append(
temporal_modulation(spatial_modulation_outs[i]))
outs = [out.clone() for out in temporal_modulation_outs]
if len(self.upsample_ops) != 0:
for i in range(self.num_tpn_stages - 1, 0, -1):
outs[i - 1] = outs[i - 1] + self.upsample_ops[i - 1](outs[i])
# Get top-down outs
top_down_outs = self.level_fusion_1(outs)
# Build bottom-up flow using downsample operation
if self.flow_type == 'parallel':
outs = [out.clone() for out in temporal_modulation_outs]
if len(self.downsample_ops) != 0:
for i in range(self.num_tpn_stages - 1):
outs[i + 1] = outs[i + 1] + self.downsample_ops[i](outs[i])
# Get bottom-up outs
bottom_up_outs = self.level_fusion_2(outs)
# fuse two pyramid outs
outs = self.pyramid_fusion(
torch.cat([top_down_outs, bottom_up_outs], 1))
return outs, loss_aux | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/necks/tpn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py | Apache-2.0 |
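The TPN forward above builds two fusion paths over the pyramid levels: a top-down path where deeper (temporally shorter) features are upsampled and added to shallower ones, and, for the 'parallel' flow, a bottom-up path in the opposite direction. A stripped-down sketch with interpolation/pooling standing in for the real modulation and fusion modules; all shapes are assumptions:

import torch
import torch.nn.functional as F

feats = [torch.randn(1, 8, 16), torch.randn(1, 8, 8)]   # two toy levels: (N, C, T)

# top-down: upsample the deeper level and add it to the shallower one
top_down = [f.clone() for f in feats]
top_down[0] = top_down[0] + F.interpolate(top_down[1], size=top_down[0].shape[-1])

# bottom-up ('parallel' flow): downsample the shallower level and add it to the deeper one
bottom_up = [f.clone() for f in feats]
bottom_up[1] = bottom_up[1] + F.max_pool1d(bottom_up[0], kernel_size=2)

# the real TPN fuses each path with conv blocks, then concatenates both paths
fused = torch.cat([F.max_pool1d(top_down[0], 2), top_down[1], bottom_up[1]], dim=1)
print(fused.shape)                                       # torch.Size([1, 24, 8])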
def loss(self, inputs: torch.Tensor, data_samples: SampleList,
**kwargs) -> dict:
"""Calculate losses from a batch of inputs and data samples.
Args:
inputs (torch.Tensor): Raw Inputs of the recognizer.
These should usually be mean centered and std scaled.
data_samples (List[``ActionDataSample``]): The batch
data samples. It usually includes information such
as ``gt_label``.
Returns:
dict: A dictionary of loss components.
"""
feats, loss_kwargs = \
self.extract_feat(inputs,
data_samples=data_samples)
# loss_aux will be an empty dict if `self.with_neck` is False.
loss_aux = loss_kwargs.get('loss_aux', dict())
loss_cls = self.cls_head.loss(feats, data_samples, **loss_kwargs)
losses = merge_dict(loss_cls, loss_aux)
return losses | Calculate losses from a batch of inputs and data samples.
Args:
inputs (torch.Tensor): Raw Inputs of the recognizer.
These should usually be mean centered and std scaled.
data_samples (List[``ActionDataSample``]): The batch
data samples. It usually includes information such
as ``gt_label``.
Returns:
dict: A dictionary of loss components.
| loss | python | open-mmlab/mmaction2 | mmaction/models/recognizers/base.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py | Apache-2.0 |
def predict(self, inputs: torch.Tensor, data_samples: SampleList,
**kwargs) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
inputs (torch.Tensor): Raw Inputs of the recognizer.
These should usually be mean centered and std scaled.
data_samples (List[``ActionDataSample``]): The batch
data samples. It usually includes information such
as ``gt_label``.
Returns:
List[``ActionDataSample``]: Return the recognition results.
The return value is ``ActionDataSample``, which usually contains
``pred_scores``, and ``pred_scores`` usually contains the
following keys.
- item (torch.Tensor): Classification scores, has a shape
(num_classes, )
"""
feats, predict_kwargs = self.extract_feat(inputs, test_mode=True)
predictions = self.cls_head.predict(feats, data_samples,
**predict_kwargs)
return predictions | Predict results from a batch of inputs and data samples with post-
processing.
Args:
inputs (torch.Tensor): Raw Inputs of the recognizer.
These should usually be mean centered and std scaled.
data_samples (List[``ActionDataSample``]): The batch
data samples. It usually includes information such
as ``gt_label``.
Returns:
List[``ActionDataSample``]: Return the recognition results.
The return value is ``ActionDataSample``, which usually contains
``pred_scores``, and ``pred_scores`` usually contains the
following keys.
- item (torch.Tensor): Classification scores, has a shape
(num_classes, )
| predict | python | open-mmlab/mmaction2 | mmaction/models/recognizers/base.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py | Apache-2.0 |
def _forward(self,
inputs: torch.Tensor,
stage: str = 'backbone',
**kwargs) -> ForwardResults:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
inputs (torch.Tensor): Raw Inputs of the recognizer.
stage (str): Which stage to output the features.
Returns:
Union[tuple, torch.Tensor]: Features from ``backbone`` or ``neck``
or ``head`` forward.
"""
feats, _ = self.extract_feat(inputs, stage=stage)
return feats | Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
inputs (torch.Tensor): Raw Inputs of the recognizer.
stage (str): Which stage to output the features.
Returns:
Union[tuple, torch.Tensor]: Features from ``backbone`` or ``neck``
or ``head`` forward.
| _forward | python | open-mmlab/mmaction2 | mmaction/models/recognizers/base.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py | Apache-2.0 |
def forward(self,
inputs: torch.Tensor,
data_samples: OptSampleList = None,
mode: str = 'tensor',
**kwargs) -> ForwardResults:
"""The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method handles neither back propagation nor
optimizer updating, both of which are done in :meth:`train_step`.
Args:
inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (List[``ActionDataSample``], optional): The
annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
"""
if mode == 'tensor':
return self._forward(inputs, **kwargs)
if mode == 'predict':
return self.predict(inputs, data_samples, **kwargs)
elif mode == 'loss':
return self.loss(inputs, data_samples, **kwargs)
else:
raise RuntimeError(f'Invalid mode "{mode}". '
'Only supports loss, predict and tensor mode') | The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method handles neither back propagation nor
optimizer updating, both of which are done in :meth:`train_step`.
Args:
inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (List[``ActionDataSample``], optional): The
annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
| forward | python | open-mmlab/mmaction2 | mmaction/models/recognizers/base.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py | Apache-2.0 |
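The three-mode dispatch above is the calling convention shared by all recognizers. A self-contained miniature (not an MMAction2 model) that mirrors the pattern, useful for seeing what each mode returns:

import torch
import torch.nn.functional as F

class TinyRecognizer(torch.nn.Module):
    """Minimal stand-in mirroring the tensor/predict/loss dispatch."""

    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(4, 2)

    def loss(self, x, y):
        return {'loss_cls': F.cross_entropy(self.fc(x), y)}

    def predict(self, x, y=None):
        return self.fc(x).softmax(dim=-1)

    def forward(self, x, y=None, mode='tensor'):
        if mode == 'tensor':
            return self.fc(x)
        if mode == 'predict':
            return self.predict(x, y)
        if mode == 'loss':
            return self.loss(x, y)
        raise RuntimeError(f'Invalid mode "{mode}".')

m = TinyRecognizer()
x, y = torch.randn(2, 4), torch.tensor([0, 1])
print(m(x, mode='tensor').shape, m(x, y, mode='loss')['loss_cls'].item())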
def extract_feat(self,
inputs: torch.Tensor,
stage: str = 'neck',
data_samples: SampleList = None,
test_mode: bool = False) -> tuple:
"""Extract features of different stages.
Args:
inputs (Tensor): The input data.
stage (str): Which stage to output the feature.
Defaults to ``neck``.
data_samples (List[:obj:`ActionDataSample`]): Action data
samples, which are only needed in training. Defaults to None.
test_mode: (bool): Whether in test mode. Defaults to False.
Returns:
Tensor: The extracted features.
dict: A dict recording the kwargs for downstream
pipeline. These keys are usually included:
``num_segs``, ``fcn_test``, ``loss_aux``.
"""
# Record the kwargs required by `loss` and `predict`.
loss_predict_kwargs = dict()
num_segs = inputs.shape[1]
loss_predict_kwargs['num_segs'] = num_segs
# [N, num_crops * num_segs, C, H, W] ->
# [N * num_crops * num_segs, C, H, W]
# `num_crops` is calculated by:
# 1) `twice_sample` in `SampleFrames`
# 2) `num_sample_positions` in `DenseSampleFrames`
# 3) `ThreeCrop/TenCrop` in `test_pipeline`
# 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
inputs = inputs.view((-1, ) + inputs.shape[2:])
def forward_once(batch_imgs):
# Extract features through backbone.
if (hasattr(self.backbone, 'features')
and self.backbone_from == 'torchvision'):
x = self.backbone.features(batch_imgs)
elif self.backbone_from == 'timm':
x = self.backbone.forward_features(batch_imgs)
elif self.backbone_from in ['mmcls', 'mmpretrain']:
x = self.backbone(batch_imgs)
if isinstance(x, tuple):
assert len(x) == 1
x = x[0]
else:
x = self.backbone(batch_imgs)
if self.backbone_from in ['torchvision', 'timm']:
if not self.feature_shape:
# Transformer-based feature shape: B x L x C.
if len(x.shape) == 3:
self.feature_shape = 'NLC'
# Resnet-based feature shape: B x C x Hs x Ws.
elif len(x.shape) == 4:
self.feature_shape = 'NCHW'
if self.feature_shape == 'NHWC':
x = nn.AdaptiveAvgPool2d(1)(x.permute(0, 3, 1,
2)) # B x C x 1 x 1
elif self.feature_shape == 'NCHW':
x = nn.AdaptiveAvgPool2d(1)(x) # B x C x 1 x 1
elif self.feature_shape == 'NLC':
x = nn.AdaptiveAvgPool1d(1)(x.transpose(1, 2)) # B x C x 1
x = x.reshape((x.shape[0], -1)) # B x C
x = x.reshape(x.shape + (1, 1)) # B x C x 1 x 1
return x
# Check settings of `fcn_test`.
fcn_test = False
if test_mode:
if self.test_cfg is not None and self.test_cfg.get(
'fcn_test', False):
fcn_test = True
num_segs = self.test_cfg.get('num_segs',
self.backbone.num_segments)
loss_predict_kwargs['fcn_test'] = fcn_test
# inference with batch size of `max_testing_views` if set
if self.test_cfg is not None and self.test_cfg.get(
'max_testing_views', False):
max_testing_views = self.test_cfg.get('max_testing_views')
assert isinstance(max_testing_views, int)
# backbone specify num_segments
num_segments = getattr(self.backbone, 'num_segments', None)
if num_segments is not None:
assert max_testing_views % num_segments == 0, \
'make sure that max_testing_views is a multiple of ' \
f'num_segments, but got {max_testing_views} and ' \
f'{num_segments}'
total_views = inputs.shape[0]
view_ptr = 0
feats = []
while view_ptr < total_views:
batch_imgs = inputs[view_ptr:view_ptr + max_testing_views]
feat = forward_once(batch_imgs)
if self.with_neck:
feat, _ = self.neck(feat)
feats.append(feat)
view_ptr += max_testing_views
def recursively_cat(feats):
# recursively traverse feats until it's a tensor,
# then concat
out_feats = []
for e_idx, elem in enumerate(feats[0]):
batch_elem = [feat[e_idx] for feat in feats]
if not isinstance(elem, torch.Tensor):
batch_elem = recursively_cat(batch_elem)
else:
batch_elem = torch.cat(batch_elem)
out_feats.append(batch_elem)
return tuple(out_feats)
if isinstance(feats[0], tuple):
x = recursively_cat(feats)
else:
x = torch.cat(feats)
else:
x = forward_once(inputs)
else:
x = forward_once(inputs)
# Return features extracted through backbone.
if stage == 'backbone':
return x, loss_predict_kwargs
loss_aux = dict()
if self.with_neck:
# x is a tuple with multiple feature maps.
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, loss_aux = self.neck(x, data_samples=data_samples)
if not fcn_test:
x = x.squeeze(2)
loss_predict_kwargs['num_segs'] = 1
elif fcn_test:
# full convolution (fcn) testing when no neck
# [N * num_crops * num_segs, C', H', W'] ->
# [N * num_crops, C', num_segs, H', W']
x = x.reshape((-1, num_segs) +
x.shape[1:]).transpose(1, 2).contiguous()
loss_predict_kwargs['loss_aux'] = loss_aux
# Return features extracted through neck.
if stage == 'neck':
return x, loss_predict_kwargs
# Return raw logits through head.
if self.with_cls_head and stage == 'head':
# [N * num_crops, num_classes]
x = self.cls_head(x, **loss_predict_kwargs)
return x, loss_predict_kwargs | Extract features of different stages.
Args:
inputs (Tensor): The input data.
stage (str): Which stage to output the feature.
Defaults to ``neck``.
data_samples (List[:obj:`ActionDataSample`]): Action data
samples, which are only needed in training. Defaults to None.
test_mode: (bool): Whether in test mode. Defaults to False.
Returns:
Tensor: The extracted features.
dict: A dict recording the kwargs for downstream
pipeline. These keys are usually included:
``num_segs``, ``fcn_test``, ``loss_aux``.
| extract_feat | python | open-mmlab/mmaction2 | mmaction/models/recognizers/recognizer2d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer2d.py | Apache-2.0 |
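The crop/segment bookkeeping in the 2D extract_feat boils down to two reshapes: flatten all views so the 2D backbone sees plain images, then fold the segment dimension back before the consensus/neck step. A toy illustration with assumed sizes:

import torch

N, num_crops, num_segs, C, H, W = 2, 3, 8, 3, 32, 32
inputs = torch.randn(N, num_crops * num_segs, C, H, W)

flat = inputs.view((-1, ) + inputs.shape[2:])       # (N*num_crops*num_segs, C, H, W) for the backbone

feat_dim = 16
feats = torch.randn(flat.shape[0], feat_dim)        # pretend backbone output, one vector per frame

feats = feats.reshape(-1, num_segs, feat_dim)       # (N*num_crops, num_segs, feat_dim) for consensus
print(flat.shape, feats.shape)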
def extract_feat(self,
inputs: Tensor,
stage: str = 'neck',
data_samples: OptSampleList = None,
test_mode: bool = False) -> tuple:
"""Extract features of different stages.
Args:
inputs (torch.Tensor): The input data.
stage (str): Which stage to output the feature.
Defaults to ``'neck'``.
data_samples (list[:obj:`ActionDataSample`], optional): Action data
samples, which are only needed in training. Defaults to None.
test_mode (bool): Whether in test mode. Defaults to False.
Returns:
torch.Tensor: The extracted features.
dict: A dict recording the kwargs for downstream
pipeline. These keys are usually included:
``loss_aux``.
"""
# Record the kwargs required by `loss` and `predict`
loss_predict_kwargs = dict()
num_segs = inputs.shape[1]
# [N, num_crops, C, T, H, W] ->
# [N * num_crops, C, T, H, W]
# `num_crops` is calculated by:
# 1) `twice_sample` in `SampleFrames`
# 2) `num_sample_positions` in `DenseSampleFrames`
# 3) `ThreeCrop/TenCrop` in `test_pipeline`
# 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
inputs = inputs.view((-1, ) + inputs.shape[2:])
# Check settings of test
if test_mode:
if self.test_cfg is not None:
loss_predict_kwargs['fcn_test'] = self.test_cfg.get(
'fcn_test', False)
if self.test_cfg is not None and self.test_cfg.get(
'max_testing_views', False):
max_testing_views = self.test_cfg.get('max_testing_views')
assert isinstance(max_testing_views, int)
total_views = inputs.shape[0]
assert num_segs == total_views, (
'max_testing_views is only compatible '
'with batch_size == 1')
view_ptr = 0
feats = []
while view_ptr < total_views:
batch_imgs = inputs[view_ptr:view_ptr + max_testing_views]
feat = self.backbone(batch_imgs)
if self.with_neck:
feat, _ = self.neck(feat)
feats.append(feat)
view_ptr += max_testing_views
def recursively_cat(feats):
# recursively traverse feats until it's a tensor,
# then concat
out_feats = []
for e_idx, elem in enumerate(feats[0]):
batch_elem = [feat[e_idx] for feat in feats]
if not isinstance(elem, torch.Tensor):
batch_elem = recursively_cat(batch_elem)
else:
batch_elem = torch.cat(batch_elem)
out_feats.append(batch_elem)
return tuple(out_feats)
if isinstance(feats[0], tuple):
x = recursively_cat(feats)
else:
x = torch.cat(feats)
else:
x = self.backbone(inputs)
if self.with_neck:
x, _ = self.neck(x)
return x, loss_predict_kwargs
else:
# Return features extracted through backbone
x = self.backbone(inputs)
if stage == 'backbone':
return x, loss_predict_kwargs
loss_aux = dict()
if self.with_neck:
x, loss_aux = self.neck(x, data_samples=data_samples)
# Return features extracted through neck
loss_predict_kwargs['loss_aux'] = loss_aux
if stage == 'neck':
return x, loss_predict_kwargs
# Return raw logits through head.
if self.with_cls_head and stage == 'head':
x = self.cls_head(x, **loss_predict_kwargs)
return x, loss_predict_kwargs | Extract features of different stages.
Args:
inputs (torch.Tensor): The input data.
stage (str): Which stage to output the feature.
Defaults to ``'neck'``.
data_samples (list[:obj:`ActionDataSample`], optional): Action data
samples, which are only needed in training. Defaults to None.
test_mode (bool): Whether in test mode. Defaults to False.
Returns:
torch.Tensor: The extracted features.
dict: A dict recording the kwargs for downstream
pipeline. These keys are usually included:
``loss_aux``.
| extract_feat | python | open-mmlab/mmaction2 | mmaction/models/recognizers/recognizer3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer3d.py | Apache-2.0 |
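The nested recursively_cat helper used for chunked max_testing_views inference can be exercised on its own: per-chunk outputs that are (possibly nested) tuples of tensors are concatenated element-wise along the batch dimension. A self-contained copy with tiny stand-in tensors:

import torch

def recursively_cat(feats):
    """Concatenate a list of (nested) tuples of tensors element-wise along dim 0."""
    out_feats = []
    for e_idx, elem in enumerate(feats[0]):
        batch_elem = [feat[e_idx] for feat in feats]
        if not isinstance(elem, torch.Tensor):
            batch_elem = recursively_cat(batch_elem)
        else:
            batch_elem = torch.cat(batch_elem)
        out_feats.append(batch_elem)
    return tuple(out_feats)

chunk1 = (torch.randn(2, 4), (torch.randn(2, 8),))
chunk2 = (torch.randn(3, 4), (torch.randn(3, 8),))
merged = recursively_cat([chunk1, chunk2])
print(merged[0].shape, merged[1][0].shape)   # torch.Size([5, 4]) torch.Size([5, 8])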
def extract_feat(self,
inputs: Dict[str, torch.Tensor],
stage: str = 'backbone',
data_samples: OptSampleList = None,
test_mode: bool = False) -> Tuple:
"""Extract features.
Args:
inputs (dict[str, torch.Tensor]): The multi-modal input data.
stage (str): Which stage to output the feature.
Defaults to ``'backbone'``.
data_samples (list[:obj:`ActionDataSample`], optional): Action data
samples, which are only needed in training. Defaults to None.
test_mode (bool): Whether in test mode. Defaults to False.
Returns:
tuple[torch.Tensor]: The extracted features.
dict: A dict recording the kwargs for downstream
pipeline.
"""
# [N, num_views, C, T, H, W] ->
# [N * num_views, C, T, H, W]
for m, m_data in inputs.items():
m_data = m_data.reshape((-1, ) + m_data.shape[2:])
inputs[m] = m_data
# Record the kwargs required by `loss` and `predict`
loss_predict_kwargs = dict()
x = self.backbone(**inputs)
if stage == 'backbone':
return x, loss_predict_kwargs
if self.with_cls_head and stage == 'head':
x = self.cls_head(x, **loss_predict_kwargs)
return x, loss_predict_kwargs | Extract features.
Args:
inputs (dict[str, torch.Tensor]): The multi-modal input data.
stage (str): Which stage to output the feature.
Defaults to ``'backbone'``.
data_samples (list[:obj:`ActionDataSample`], optional): Action data
samples, which are only needed in training. Defaults to None.
test_mode (bool): Whether in test mode. Defaults to False.
Returns:
tuple[torch.Tensor]: The extracted features.
dict: A dict recording the kwargs for downstream
pipeline.
| extract_feat | python | open-mmlab/mmaction2 | mmaction/models/recognizers/recognizer3d_mm.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer3d_mm.py | Apache-2.0 |
def extract_feat(self,
batch_inputs: Tensor,
stage: str = 'backbone',
**kwargs) -> tuple:
"""Extract features of different stages.
Args:
batch_inputs (Tensor): The input data.
stage (str): Which stage to output the feature.
Defaults to ``backbone``.
Returns:
Tensor: The extracted features.
dict: A dict recording the kwargs for downstream
pipeline. This will be an empty dict in audio recognizer.
"""
# Record the kwargs required by `loss` and `predict`
loss_predict_kwargs = dict()
batch_inputs = batch_inputs.view((-1, ) + batch_inputs.shape[2:])
x = self.backbone(batch_inputs)
if stage == 'backbone':
return x, loss_predict_kwargs
if self.with_cls_head and stage == 'head':
x = self.cls_head(x, **loss_predict_kwargs)
return x, loss_predict_kwargs | Extract features of different stages.
Args:
batch_inputs (Tensor): The input data.
stage (str): Which stage to output the feature.
Defaults to ``backbone``.
Returns:
Tensor: The extracted features.
dict: A dict recording the kwargs for downstream
pipeline. This will be an empty dict in audio recognizer.
| extract_feat | python | open-mmlab/mmaction2 | mmaction/models/recognizers/recognizer_audio.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_audio.py | Apache-2.0 |
def extract_feat(self,
inputs: torch.Tensor,
stage: str = 'backbone',
**kwargs) -> Tuple:
"""Extract features at the given stage.
Args:
inputs (torch.Tensor): The input skeleton with shape of
`(B, num_clips, num_person, clip_len, num_joints, 3 or 2)`.
stage (str): The stage to output the features.
Defaults to ``'backbone'``.
Returns:
tuple: The extracted features and a dict recording the kwargs
for downstream pipeline, which is an empty dict for the
GCN-based recognizer.
"""
# Record the kwargs required by `loss` and `predict`
loss_predict_kwargs = dict()
bs, nc = inputs.shape[:2]
inputs = inputs.reshape((bs * nc, ) + inputs.shape[2:])
x = self.backbone(inputs)
if stage == 'backbone':
return x, loss_predict_kwargs
if self.with_cls_head and stage == 'head':
x = self.cls_head(x, **loss_predict_kwargs)
return x, loss_predict_kwargs | Extract features at the given stage.
Args:
inputs (torch.Tensor): The input skeleton with shape of
`(B, num_clips, num_person, clip_len, num_joints, 3 or 2)`.
stage (str): The stage to output the features.
Defaults to ``'backbone'``.
Returns:
tuple: The extracted features and a dict recording the kwargs
for downstream pipeline, which is an empty dict for the
GCN-based recognizer.
| extract_feat | python | open-mmlab/mmaction2 | mmaction/models/recognizers/recognizer_gcn.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_gcn.py | Apache-2.0 |
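The GCN recognizer only needs to merge the batch and clip dimensions before the skeleton backbone sees the data. A one-line illustration with assumed sizes (persons, frames, joints, channels):

import torch

B, num_clips, M, T, V, C = 2, 3, 2, 30, 17, 3
skeletons = torch.randn(B, num_clips, M, T, V, C)

x = skeletons.reshape((B * num_clips, ) + skeletons.shape[2:])   # merge batch and clip dims
print(x.shape)                                                   # torch.Size([6, 2, 30, 17, 3])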
def forward(self, *data_samples, mode: str, **kwargs) -> ForwardResults:
"""The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method handles neither back propagation nor
optimizer updating, both of which are done in :meth:`train_step`.
Args:
data_samples: should be a sequence of ``SampleList`` if
``mode="predict"`` or ``mode="loss"``. Each ``SampleList`` is
the annotation data of one data source.
It should be a single torch tensor if ``mode="tensor"``.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
"""
if mode == 'loss' or mode == 'predict':
if mode == 'loss':
return self.loss(data_samples)
return self.predict(data_samples)
elif mode == 'tensor':
assert isinstance(data_samples, torch.Tensor)
data_ndim = data_samples.ndim
if data_ndim not in [4, 5]:
info = f'Input is a {data_ndim}D tensor. '
info += 'Only 4D (BCHW) or 5D (BCTHW) tensors are supported!'
raise ValueError(info)
return self._forward(data_samples, **kwargs) | The unified entry for a forward process in both training and test.
The method should accept three modes:
- ``tensor``: Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- ``predict``: Forward and return the predictions, which are fully
processed to a list of :obj:`ActionDataSample`.
- ``loss``: Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method handles neither back propagation nor
optimizer updating, both of which are done in :meth:`train_step`.
Args:
data_samples: should be a sequence of ``SampleList`` if
``mode="predict"`` or ``mode="loss"``. Each ``SampleList`` is
the annotation data of one data source.
It should be a single torch tensor if ``mode="tensor"``.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of ``ActionDataSample``.
- If ``mode="loss"``, return a dict of tensor.
| forward | python | open-mmlab/mmaction2 | mmaction/models/recognizers/recognizer_omni.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py | Apache-2.0 |
def loss(self, data_samples: Sequence[SampleList]) -> dict:
"""Calculate losses from a batch of inputs and data samples.
Args:
data_samples (Sequence[SampleList]): a sequence of SampleList. Each
SampleList contains data samples from the same data source.
Returns:
dict: A dictionary of loss components.
"""
loss_dict = {}
for idx, data in enumerate(data_samples):
inputs, batch_samples = data['inputs'], data['data_samples']
feats = self.extract_feat(inputs)
loss_cls = self.cls_head.loss(feats, batch_samples)
for key in loss_cls:
loss_dict[key + f'_{idx}'] = loss_cls[key]
return loss_dict | Calculate losses from a batch of inputs and data samples.
Args:
data_samples (Sequence[SampleList]): a sequence of SampleList. Each
SampleList contains data samples from the same data source.
Returns:
dict: A dictionary of loss components.
| loss | python | open-mmlab/mmaction2 | mmaction/models/recognizers/recognizer_omni.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py | Apache-2.0 |
def predict(self, data_samples: Sequence[SampleList]) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
data_samples (Sequence[SampleList]): a sequence of SampleList. Each
SampleList contains data samples from the same data source.
Returns:
List[``ActionDataSample``]: Return the recognition results.
The return value is ``ActionDataSample``, which usually contains
``pred_scores``, and ``pred_scores`` usually contains the
following keys.
- item (torch.Tensor): Classification scores, has a shape
(num_classes, )
"""
assert len(data_samples) == 1
feats = self.extract_feat(data_samples[0]['inputs'], test_mode=True)
predictions = self.cls_head.predict(feats,
data_samples[0]['data_samples'])
return predictions | Predict results from a batch of inputs and data samples with post-
processing.
Args:
data_samples (Sequence[SampleList]): a sequence of SampleList. Each
SampleList contains data samples from the same data source.
Returns:
List[``ActionDataSample``]: Return the recognition results.
The returns value is ``ActionDataSample``, which usually contains
``pred_scores``. And the ``pred_scores`` usually contains
following keys.
- item (torch.Tensor): Classification scores, has a shape
(num_classes, )
| predict | python | open-mmlab/mmaction2 | mmaction/models/recognizers/recognizer_omni.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py | Apache-2.0 |
def _forward(self,
inputs: torch.Tensor,
stage: str = 'backbone',
**kwargs) -> ForwardResults:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
inputs (torch.Tensor): Raw Inputs of the recognizer.
stage (str): Which stage to output the features.
Returns:
Union[tuple, torch.Tensor]: Features from ``backbone`` or ``head``
forward.
"""
feats, _ = self.extract_feat(inputs, stage=stage)
return feats | Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
inputs (torch.Tensor): Raw Inputs of the recognizer.
stage (str): Which stage to output the features.
Returns:
Union[tuple, torch.Tensor]: Features from ``backbone`` or ``head``
forward.
| _forward | python | open-mmlab/mmaction2 | mmaction/models/recognizers/recognizer_omni.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py | Apache-2.0 |
def _run_forward(self, data: Union[dict, tuple, list],
mode: str) -> Union[Dict[str, torch.Tensor], list]:
"""Unpacks data for :meth:`forward`
Args:
data (dict or tuple or list): Data sampled from dataset.
mode (str): Mode of forward.
Returns:
dict or list: Results of training or testing mode.
"""
if isinstance(data, dict):
data = [data]
results = self(*data, mode=mode)
elif isinstance(data, (list, tuple)):
results = self(*data, mode=mode)
else:
raise TypeError
return results | Unpacks data for :meth:`forward`
Args:
data (dict or tuple or list): Data sampled from dataset.
mode (str): Mode of forward.
Returns:
dict or list: Results of training or testing mode.
| _run_forward | python | open-mmlab/mmaction2 | mmaction/models/recognizers/recognizer_omni.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py | Apache-2.0 |
def extract_feat(self,
inputs: torch.Tensor,
stage: str = 'backbone',
test_mode: bool = False) -> tuple:
"""Extract features of different stages.
Args:
inputs (torch.Tensor): The input data.
stage (str): Which stage to output the feature.
Defaults to ``'backbone'``.
test_mode (bool): Whether in test mode. Defaults to False.
Returns:
torch.Tensor: The extracted features.
dict: A dict recording the kwargs for downstream
pipeline. These keys are usually included:
``loss_aux``.
"""
if len(inputs.shape) == 6:
inputs = inputs.view((-1, ) + inputs.shape[2:])
# Check settings of test
if test_mode:
x = self.backbone(inputs)
return x
else:
# Return features extracted through backbone
x = self.backbone(inputs)
if stage == 'backbone':
return x
x = self.cls_head(x)
return x | Extract features of different stages.
Args:
inputs (torch.Tensor): The input data.
stage (str): Which stage to output the feature.
Defaults to ``'backbone'``.
test_mode (bool): Whether in test mode. Defaults to False.
Returns:
torch.Tensor: The extracted features.
dict: A dict recording the kwargs for downstream
pipeline. These keys are usually included:
``loss_aux``.
| extract_feat | python | open-mmlab/mmaction2 | mmaction/models/recognizers/recognizer_omni.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py | Apache-2.0 |
def loss(self, x: Union[Tensor,
Tuple[Tensor]], rpn_results_list: InstanceList,
data_samples: SampleList, **kwargs) -> dict:
"""Perform forward propagation and loss calculation of the detection
roi on the features of the upstream network.
Args:
x (Tensor or Tuple[Tensor]): The image features extracted by
the upstream network.
rpn_results_list (List[:obj:`InstanceData`]): List of region
proposals.
data_samples (List[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
Dict[str, Tensor]: A dictionary of loss components.
"""
assert len(rpn_results_list) == len(data_samples)
batch_gt_instances = []
for data_sample in data_samples:
batch_gt_instances.append(data_sample.gt_instances)
# assign gts and sample proposals
num_imgs = len(data_samples)
sampling_results = []
for i in range(num_imgs):
# rename rpn_results.bboxes to rpn_results.priors
rpn_results = rpn_results_list[i]
rpn_results.priors = rpn_results.pop('bboxes')
assign_result = self.bbox_assigner.assign(rpn_results,
batch_gt_instances[i],
None)
sampling_result = self.bbox_sampler.sample(assign_result,
rpn_results,
batch_gt_instances[i])
sampling_results.append(sampling_result)
# LFB needs meta_info: 'img_key'
batch_img_metas = [
data_sample.metainfo for data_sample in data_samples
]
losses = dict()
# bbox head forward and loss
bbox_results = self.bbox_loss(x, sampling_results, batch_img_metas)
losses.update(bbox_results['loss_bbox'])
return losses | Perform forward propagation and loss calculation of the detection
roi on the features of the upstream network.
Args:
x (Tensor or Tuple[Tensor]): The image features extracted by
the upstream network.
rpn_results_list (List[:obj:`InstanceData`]): List of region
proposals.
data_samples (List[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
Dict[str, Tensor]: A dictionary of loss components.
| loss | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/roi_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py | Apache-2.0 |
def _bbox_forward(self, x: Union[Tensor, Tuple[Tensor]], rois: Tensor,
batch_img_metas: List[dict], **kwargs) -> dict:
"""Box head forward function used in both training and testing.
Args:
x (Tensor or Tuple[Tensor]): The image features extracted by
the upstream network.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
batch_img_metas (List[dict]): List of image information.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
"""
bbox_feats, global_feat = self.bbox_roi_extractor(x, rois)
if self.with_shared_head:
bbox_feats = self.shared_head(
bbox_feats,
feat=global_feat,
rois=rois,
img_metas=batch_img_metas)
cls_score = self.bbox_head(bbox_feats)
bbox_results = dict(cls_score=cls_score, bbox_feats=bbox_feats)
return bbox_results | Box head forward function used in both training and testing.
Args:
x (Tensor or Tuple[Tensor]): The image features extracted by
the upstream network.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
batch_img_metas (List[dict]): List of image information.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
| _bbox_forward | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/roi_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py | Apache-2.0 |
def bbox_loss(self, x: Union[Tensor, Tuple[Tensor]],
sampling_results: List[SamplingResult],
batch_img_metas: List[dict], **kwargs) -> dict:
"""Perform forward propagation and loss calculation of the bbox head on
the features of the upstream network.
Args:
x (Tensor or Tuple[Tensor]): The image features extracted by
the upstream network.
sampling_results (List[SamplingResult]): Sampling results.
batch_img_metas (List[dict]): List of image information.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
- `loss_bbox` (dict): A dictionary of bbox loss components.
"""
rois = bbox2roi([res.priors for res in sampling_results])
bbox_results = self._bbox_forward(x, rois, batch_img_metas)
bbox_loss_and_target = self.bbox_head.loss_and_target(
cls_score=bbox_results['cls_score'],
rois=rois,
sampling_results=sampling_results,
rcnn_train_cfg=self.train_cfg)
bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox'])
return bbox_results | Perform forward propagation and loss calculation of the bbox head on
the features of the upstream network.
Args:
x (Tensor or Tuple[Tensor]): The image features extracted by
the upstream network.
sampling_results (List[SamplingResult]): Sampling results.
batch_img_metas (List[dict]): List of image information.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
- `loss_bbox` (dict): A dictionary of bbox loss components.
| bbox_loss | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/roi_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py | Apache-2.0 |
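bbox2roi (provided by MMDetection) packs the per-image proposal boxes into one (num_rois, 5) tensor whose first column is the image index within the batch, which is the format the RoI extractor consumes. A stand-in reproducing only that output format, not the library implementation:

import torch

def toy_bbox2roi(bbox_list):
    """Stand-in: rows are [batch_idx, x1, y1, x2, y2]."""
    rois = []
    for img_id, bboxes in enumerate(bbox_list):
        img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
        rois.append(torch.cat([img_inds, bboxes[:, :4]], dim=-1))
    return torch.cat(rois, dim=0)

proposals = [torch.tensor([[10., 10., 50., 50.]]),
             torch.tensor([[0., 0., 20., 20.], [5., 5., 40., 60.]])]
rois = toy_bbox2roi(proposals)
print(rois.shape)   # torch.Size([3, 5]); column 0 holds the image index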
def predict(self, x: Union[Tensor,
Tuple[Tensor]], rpn_results_list: InstanceList,
data_samples: SampleList, **kwargs) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (Tensor or Tuple[Tensor]): The image features extracted by
the upstream network.
rpn_results_list (List[:obj:`InstanceData`]): list of region
proposals.
data_samples (List[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
List[obj:`InstanceData`]: Detection results of each image.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
batch_img_metas = [
data_sample.metainfo for data_sample in data_samples
]
if isinstance(x, tuple):
x_shape = x[0].shape
else:
x_shape = x.shape
assert x_shape[0] == 1, 'only accept 1 sample at test mode'
assert x_shape[0] == len(batch_img_metas) == len(rpn_results_list)
results_list = self.predict_bbox(
x, batch_img_metas, rpn_results_list, rcnn_test_cfg=self.test_cfg)
return results_list | Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (Tensor or Tuple[Tensor]): The image features extracted by
the upstream network.
rpn_results_list (List[:obj:`InstanceData`]): list of region
proposals.
data_samples (List[:obj:`ActionDataSample`]): The batch
data samples.
Returns:
List[obj:`InstanceData`]: Detection results of each image.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
| predict | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/roi_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py | Apache-2.0 |
def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict],
rpn_results_list: InstanceList,
rcnn_test_cfg: ConfigType) -> InstanceList:
"""Perform forward propagation of the bbox head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process. Each item usually contains following
keys:
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
"""
proposals = [res.bboxes for res in rpn_results_list]
rois = bbox2roi(proposals)
bbox_results = self._bbox_forward(x, rois, batch_img_metas)
# split batch bbox prediction back to each image
cls_scores = bbox_results['cls_score']
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = rois.split(num_proposals_per_img, 0)
cls_scores = cls_scores.split(num_proposals_per_img, 0)
result_list = self.bbox_head.predict_by_feat(
rois=rois,
cls_scores=cls_scores,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=rcnn_test_cfg)
return result_list | Perform forward propagation of the bbox head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process. Each item usually contains following
keys:
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
| predict_bbox | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/roi_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py | Apache-2.0 |
def forward(self, x: Tensor) -> Tensor:
"""Computes the classification logits given ROI features."""
if self.dropout_before_pool and self.dropout_ratio > 0:
x = self.dropout(x)
x = self.temporal_pool(x)
x = self.spatial_pool(x)
if not self.dropout_before_pool and self.dropout_ratio > 0:
x = self.dropout(x)
x = x.view(x.size(0), -1)
cls_score = self.fc_cls(x)
return cls_score | Computes the classification logits given ROI features. | forward | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/bbox_heads/bbox_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py | Apache-2.0 |
def get_recall_prec(pred_vec: Tensor, target_vec: Tensor) -> tuple:
"""Computes the Recall/Precision for both multi-label and single label
scenarios.
Note that the computation calculates the micro average.
Note that in both cases, the concept of correct/incorrect is the same.
Args:
pred_vec (tensor[N x C]): each element is either 0 or 1
target_vec (tensor[N x C]): each element is either 0 or 1 - for
single label it is expected that only one element is on (1)
although this is not enforced.
"""
correct = pred_vec & target_vec
recall = correct.sum(1) / target_vec.sum(1).float() # Enforce Float
prec = correct.sum(1) / (pred_vec.sum(1) + 1e-6)
return recall.mean(), prec.mean() | Computes the Recall/Precision for both multi-label and single label
scenarios.
Note that the computation calculates the micro average.
Note that in both cases, the concept of correct/incorrect is the same.
Args:
pred_vec (tensor[N x C]): each element is either 0 or 1
target_vec (tensor[N x C]): each element is either 0 or 1 - for
single label it is expected that only one element is on (1)
although this is not enforced.
| get_recall_prec | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/bbox_heads/bbox_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py | Apache-2.0 |
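A quick numeric check of get_recall_prec above: per-sample recall divides the correct positives by the target positives, per-sample precision divides them by the predicted positives, and both are then averaged over samples. The binary matrices below are made up:

import torch

pred_vec = torch.tensor([[1, 0, 1],
                         [0, 1, 0]], dtype=torch.bool)
target_vec = torch.tensor([[1, 1, 0],
                           [0, 1, 0]], dtype=torch.bool)

correct = pred_vec & target_vec
recall = correct.sum(1) / target_vec.sum(1).float()   # per-sample recall: [0.5, 1.0]
prec = correct.sum(1) / (pred_vec.sum(1) + 1e-6)      # per-sample precision: [0.5, 1.0]
print(recall.mean().item(), prec.mean().item())       # roughly 0.75 0.75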
def topk_accuracy(self,
pred: Tensor,
target: Tensor,
thr: float = 0.5) -> tuple:
"""Computes the Top-K Accuracies for both single and multi-label
scenarios."""
# Define Target vector:
target_bool = target > 0.5
# Branch on Multilabel for computing output classification
if self.multilabel:
pred = pred.sigmoid()
else:
pred = pred.softmax(dim=1)
# Compute at threshold (K=1 for single)
if self.multilabel:
pred_bool = pred > thr
else:
pred_bool = self.topk_to_matrix(pred, 1)
recall_thr, prec_thr = self.get_recall_prec(pred_bool, target_bool)
# Compute at various K
recalls_k, precs_k = [], []
for k in self.topk:
pred_bool = self.topk_to_matrix(pred, k)
recall, prec = self.get_recall_prec(pred_bool, target_bool)
recalls_k.append(recall)
precs_k.append(prec)
# Return all
return recall_thr, prec_thr, recalls_k, precs_k | Computes the Top-K Accuracies for both single and multi-label
scenarios. | topk_accuracy | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/bbox_heads/bbox_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py | Apache-2.0 |
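topk_to_matrix is not included in this row; a plausible stand-in (an assumption, not necessarily the repo's exact helper) marks each sample's top-k classes in a boolean matrix, which is the form the thresholding logic above works with:
import torch

def topk_to_matrix(prob: torch.Tensor, k: int) -> torch.Tensor:
    """Return a bool matrix with True at each sample's top-k classes."""
    topk_idx = prob.topk(k, dim=1).indices
    hits = torch.zeros_like(prob, dtype=torch.bool)
    hits[torch.arange(prob.size(0)).unsqueeze(1), topk_idx] = True
    return hits

prob = torch.softmax(torch.randn(4, 10), dim=1)
print(topk_to_matrix(prob, 3).sum(1))   # tensor([3, 3, 3, 3])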
def loss_and_target(self, cls_score: Tensor, rois: Tensor,
sampling_results: List[SamplingResult],
rcnn_train_cfg: ConfigDict, **kwargs) -> dict:
"""Calculate the loss based on the features extracted by the bbox head.
Args:
cls_score (Tensor): Classification prediction
results of all class, has shape
(batch_size * num_proposals_single_image, num_classes)
rois (Tensor): RoIs with the shape
(batch_size * num_proposals_single_image, 5) where the first
column indicates batch id of each RoI.
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
Returns:
dict: A dictionary of loss components.
"""
cls_targets = self.get_targets(sampling_results, rcnn_train_cfg)
labels, _ = cls_targets
losses = dict()
# Only use the cls_score
if cls_score is not None:
if self.background_class:
labels = labels[:, 1:] # Get valid labels (ignore first one)
cls_score = cls_score[:, 1:]
pos_inds = torch.sum(labels, dim=-1) > 0
cls_score = cls_score[pos_inds]
labels = labels[pos_inds]
# Compute First Recall/Precisions
# This has to be done first before normalising the label-space.
recall_thr, prec_thr, recall_k, prec_k = self.topk_accuracy(
cls_score, labels, thr=0.5)
losses['recall@thr=0.5'] = recall_thr
losses['prec@thr=0.5'] = prec_thr
for i, k in enumerate(self.topk):
losses[f'recall@top{k}'] = recall_k[i]
losses[f'prec@top{k}'] = prec_k[i]
# If Single-label, need to ensure that target labels sum to 1: ie
# that they are valid probabilities.
if not self.multilabel and self.background_class:
labels = labels / labels.sum(dim=1, keepdim=True)
# Select Loss function based on single/multi-label
# NB. Both losses auto-compute sigmoid/softmax on prediction
if self.multilabel:
loss_func = F.binary_cross_entropy_with_logits
else:
loss_func = cross_entropy_loss
# Compute loss
loss = loss_func(cls_score, labels, reduction='none')
pt = torch.exp(-loss)
F_loss = self.focal_alpha * (1 - pt)**self.focal_gamma * loss
losses['loss_action_cls'] = torch.mean(F_loss)
return dict(loss_bbox=losses, bbox_targets=cls_targets) | Calculate the loss based on the features extracted by the bbox head.
Args:
cls_score (Tensor): Classification prediction
results of all class, has shape
(batch_size * num_proposals_single_image, num_classes)
rois (Tensor): RoIs with the shape
(batch_size * num_proposals_single_image, 5) where the first
column indicates batch id of each RoI.
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
Returns:
dict: A dictionary of loss components.
| loss_and_target | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/bbox_heads/bbox_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py | Apache-2.0 |
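The focal re-weighting at the tail of the loss can be checked in isolation. With gamma=2 and alpha=1 (illustrative values; the real ones come from the head's focal_gamma/focal_alpha attributes), a well-classified sample is down-weighted far more than a hard one:
import torch
import torch.nn.functional as F

logits = torch.tensor([[4.0, -4.0], [0.1, -0.1]])   # easy sample, hard sample
labels = torch.tensor([[1.0, 0.0], [1.0, 0.0]])

loss = F.binary_cross_entropy_with_logits(logits, labels, reduction='none')
pt = torch.exp(-loss)
focal = 1.0 * (1 - pt) ** 2 * loss                  # alpha * (1 - pt)^gamma * loss
print(loss.mean(1))    # per-sample BCE: ~0.018 vs ~0.64
print(focal.mean(1))   # the easy sample's loss shrinks by orders of magnitude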
def predict_by_feat(self,
rois: Tuple[Tensor],
cls_scores: Tuple[Tensor],
batch_img_metas: List[dict],
rcnn_test_cfg: Optional[ConfigDict] = None,
**kwargs) -> InstanceList:
"""Transform a batch of output features extracted from the head into
bbox results.
Args:
rois (tuple[Tensor]): Tuple of boxes to be transformed.
Each has shape (num_boxes, 5). The last dimension 5 is arranged as
(batch_index, x1, y1, x2, y2).
cls_scores (tuple[Tensor]): Tuple of box scores, each has shape
(num_boxes, num_classes + 1).
bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each
has shape (num_boxes, num_classes * 4).
batch_img_metas (list[dict]): List of image information.
rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Instance segmentation
results of each image after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 is arranged as (x1, y1, x2, y2).
"""
result_list = []
for img_id in range(len(batch_img_metas)):
img_meta = batch_img_metas[img_id]
results = self._predict_by_feat_single(
roi=rois[img_id],
cls_score=cls_scores[img_id],
img_meta=img_meta,
rcnn_test_cfg=rcnn_test_cfg,
**kwargs)
result_list.append(results)
return result_list | Transform a batch of output features extracted from the head into
bbox results.
Args:
rois (tuple[Tensor]): Tuple of boxes to be transformed.
Each has shape (num_boxes, 5). The last dimension 5 is arranged as
(batch_index, x1, y1, x2, y2).
cls_scores (tuple[Tensor]): Tuple of box scores, each has shape
(num_boxes, num_classes + 1).
bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each
has shape (num_boxes, num_classes * 4).
batch_img_metas (list[dict]): List of image information.
rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Instance segmentation
results of each image after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 is arranged as (x1, y1, x2, y2).
| predict_by_feat | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/bbox_heads/bbox_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py | Apache-2.0 |
def _predict_by_feat_single(self,
roi: Tensor,
cls_score: Tensor,
img_meta: dict,
rcnn_test_cfg: Optional[ConfigDict] = None,
**kwargs) -> InstanceData:
"""Transform a single image's features extracted from the head into
bbox results.
Args:
roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
The last dimension 5 is arranged as (batch_index, x1, y1, x2, y2).
cls_score (Tensor): Box scores, has shape
(num_boxes, num_classes + 1).
bbox_pred (Tensor): Box energies / deltas.
has shape (num_boxes, num_classes * 4).
img_meta (dict): image information.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
Defaults to None
Returns:
:obj:`InstanceData`: Detection results of each image\
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 is arranged as (x1, y1, x2, y2).
"""
results = InstanceData()
# might be used by testing w. augmentation
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
# Handle Multi/Single Label
if cls_score is not None:
if self.multilabel:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(dim=-1)
else:
scores = None
bboxes = roi[:, 1:]
assert bboxes.shape[-1] == 4
# First reverse the flip
img_h, img_w = img_meta['img_shape']
if img_meta.get('flip', False):
bboxes_ = bboxes.clone()
bboxes_[:, 0] = img_w - 1 - bboxes[:, 2]
bboxes_[:, 2] = img_w - 1 - bboxes[:, 0]
bboxes = bboxes_
# Then normalize the bbox to [0, 1]
bboxes[:, 0::2] /= img_w
bboxes[:, 1::2] /= img_h
def _bbox_crop_undo(bboxes, crop_quadruple):
decropped = bboxes.clone()
if crop_quadruple is not None:
x1, y1, tw, th = crop_quadruple
decropped[:, 0::2] = bboxes[..., 0::2] * tw + x1
decropped[:, 1::2] = bboxes[..., 1::2] * th + y1
return decropped
crop_quadruple = img_meta.get('crop_quadruple', np.array([0, 0, 1, 1]))
bboxes = _bbox_crop_undo(bboxes, crop_quadruple)
results.bboxes = bboxes
results.scores = scores
return results | Transform a single image's features extracted from the head into
bbox results.
Args:
roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
The last dimension 5 is arranged as (batch_index, x1, y1, x2, y2).
cls_score (Tensor): Box scores, has shape
(num_boxes, num_classes + 1).
bbox_pred (Tensor): Box energies / deltas.
has shape (num_boxes, num_classes * 4).
img_meta (dict): image information.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
Defaults to None
Returns:
:obj:`InstanceData`: Detection results of each image. Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 is arranged as (x1, y1, x2, y2).
| _predict_by_feat_single | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/bbox_heads/bbox_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py | Apache-2.0 |
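The flip-undo and crop-undo arithmetic can be exercised on its own with made-up numbers; the steps mirror the method above (reverse the horizontal flip in pixel space, normalise to [0, 1], then map through a crop quadruple given as (x1, y1, tw, th) in normalised coordinates):
import torch

img_w, img_h = 200, 100
bboxes = torch.tensor([[20.0, 10.0, 60.0, 50.0]])   # (x1, y1, x2, y2) in pixels

flipped = bboxes.clone()                            # undo a horizontal flip
flipped[:, 0] = img_w - 1 - bboxes[:, 2]
flipped[:, 2] = img_w - 1 - bboxes[:, 0]

flipped[:, 0::2] /= img_w                           # normalise to [0, 1]
flipped[:, 1::2] /= img_h

x1, y1, tw, th = 0.1, 0.0, 0.8, 1.0                 # hypothetical crop quadruple
decropped = flipped.clone()
decropped[:, 0::2] = flipped[:, 0::2] * tw + x1
decropped[:, 1::2] = flipped[:, 1::2] * th + y1
print(decropped)   # boxes back in the un-cropped, un-flipped frame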
def forward(self, feat: Union[Tensor, Tuple[Tensor]],
rois: Tensor) -> tuple:
"""Forward function for extract roi features.
Args:
feat (Tensor or Tuple[Tensor]): The image features extracted by
the upstream network. The shape of feat is N, C, T, H, W.
rois (Tensor): Input RoIs, shape (k, 5).
Returns:
tuple: A tuple of roi features and global features.
- roi_feats (Tensor): Extracted bbox RoI features.
- feat (Tensor): Global features of the video clip.
"""
if not isinstance(feat, tuple):
feat = (feat, )
if len(feat) >= 2:
maxT = max([x.shape[2] for x in feat])
max_shape = (maxT, ) + feat[0].shape[3:]
# resize each feat to the largest shape (w. nearest)
feat = [F.interpolate(x, max_shape).contiguous() for x in feat]
if self.with_temporal_pool:
if self.temporal_pool_mode == 'avg':
feat = [torch.mean(x, 2, keepdim=True) for x in feat]
elif self.temporal_pool_mode == 'max':
feat = [torch.max(x, 2, keepdim=True)[0] for x in feat]
else:
raise NotImplementedError
feat = torch.cat(feat, axis=1).contiguous()
roi_feats = []
for t in range(feat.size(2)):
frame_feat = feat[:, :, t].contiguous()
roi_feat = self.roi_layer(frame_feat, rois)
if self.with_global:
global_feat = self.global_pool(frame_feat.contiguous())
inds = rois[:, 0].type(torch.int64)
global_feat = global_feat[inds]
roi_feat = torch.cat([roi_feat, global_feat], dim=1)
roi_feat = roi_feat.contiguous()
roi_feats.append(roi_feat)
roi_feats = torch.stack(roi_feats, dim=2)
return roi_feats, feat | Forward function for extract roi features.
Args:
feat (Tensor or Tuple[Tensor]): The image features extracted by
the upstream network. The shape of feat is N, C, T, H, W.
rois (Tensor): Input RoIs, shape (k, 5).
Returns:
tuple: A tuple of roi features and global features.
- roi_feats (Tensor): Extracted bbox RoI features.
- feat (Tensor): Global features of the video clip.
| forward | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/roi_extractors/single_straight3d.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_extractors/single_straight3d.py | Apache-2.0 |
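A shape-level sketch of the per-frame RoI pooling loop, using torchvision's roi_align as a stand-in for the configured roi_layer (an assumption; the real layer and its spatial scale come from the extractor's config), with temporal pooling skipped:
import torch
from torchvision.ops import roi_align

feat = torch.rand(2, 256, 4, 16, 16)                # (N, C, T, H, W)
rois = torch.tensor([[0., 1., 1., 10., 10.],        # (batch_idx, x1, y1, x2, y2)
                     [1., 2., 2., 12., 12.]])

roi_feats = []
for t in range(feat.size(2)):
    frame_feat = feat[:, :, t].contiguous()          # (N, C, H, W) for frame t
    roi_feats.append(roi_align(frame_feat, rois, output_size=8, spatial_scale=1.0))
roi_feats = torch.stack(roi_feats, dim=2)            # (num_rois, C, T, 8, 8)
print(roi_feats.shape)                               # torch.Size([2, 256, 4, 8, 8])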
def forward(self, x, feat, rois, **kwargs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The extracted RoI feature.
feat (torch.Tensor): The context feature.
rois (torch.Tensor): The regions of interest.
Returns:
torch.Tensor: The RoI features that have interacted with context
feature.
"""
# We use max pooling by default
x = self.max_pool(x)
h, w = feat.shape[-2:]
x_tile = x.repeat(1, 1, 1, h, w)
roi_inds = rois[:, 0].type(torch.long)
roi_gfeat = feat[roi_inds]
new_feat = torch.cat([x_tile, roi_gfeat], dim=1)
new_feat = self.conv1(new_feat)
new_feat = self.conv2(new_feat)
for conv in self.convs:
new_feat = conv(new_feat)
return new_feat | Defines the computation performed at every call.
Args:
x (torch.Tensor): The extracted RoI feature.
feat (torch.Tensor): The context feature.
rois (torch.Tensor): The regions of interest.
Returns:
torch.Tensor: The RoI features that have interacted with context
feature.
| forward | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/shared_heads/acrn_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/acrn_head.py | Apache-2.0 |
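The tiling-and-concatenation step can be illustrated with toy tensors (shapes invented for the example): each pooled RoI feature is broadcast over the spatial grid of its own clip's context feature before being fed to the convolutions:
import torch

roi_feat = torch.rand(6, 512, 1, 1, 1)    # max-pooled RoI features
context = torch.rand(2, 512, 1, 16, 16)   # global context feature per clip
rois_batch_idx = torch.tensor([0, 0, 0, 1, 1, 1])

h, w = context.shape[-2:]
x_tile = roi_feat.repeat(1, 1, 1, h, w)   # tile each RoI over the grid
roi_gfeat = context[rois_batch_idx]       # pick each RoI's own clip feature
fused = torch.cat([x_tile, roi_gfeat], dim=1)
print(fused.shape)                        # torch.Size([6, 1024, 1, 16, 16])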
def init_weights(self, pretrained=None):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(pretrained, str):
logger = MMLogger.get_current_instance()
logger.info(f'load model from: {pretrained}')
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_out_conv:
constant_init(self.out_conv, 0, bias=0)
else:
raise TypeError('pretrained must be a str or None') | Initiate the parameters either from existing checkpoint or from
scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/shared_heads/fbo_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py | Apache-2.0 |
def forward(self, st_feat, lt_feat):
"""Defines the computation performed at every call."""
n, c = st_feat.size(0), self.latent_channels
num_st_feat, num_lt_feat = self.num_st_feat, self.num_lt_feat
theta = self.st_feat_conv(st_feat)
theta = theta.view(n, c, num_st_feat)
phi = self.lt_feat_conv(lt_feat)
phi = phi.view(n, c, num_lt_feat)
g = self.global_conv(lt_feat)
g = g.view(n, c, num_lt_feat)
# (n, num_st_feat, c), (n, c, num_lt_feat)
# -> (n, num_st_feat, num_lt_feat)
theta_phi = torch.matmul(theta.permute(0, 2, 1), phi)
if self.use_scale:
theta_phi /= c**0.5
p = theta_phi.softmax(dim=-1)
# (n, c, num_lt_feat), (n, num_lt_feat, num_st_feat)
# -> (n, c, num_st_feat, 1, 1)
out = torch.matmul(g, p.permute(0, 2, 1)).view(n, c, num_st_feat, 1, 1)
# If need to activate it before out_conv, use relu here, otherwise
# use relu outside the non local layer.
if self.pre_activate:
if self.pre_activate_with_ln:
out = self.ln(out)
out = self.relu(out)
out = self.out_conv(out)
if not self.pre_activate:
out = self.ln(out)
if self.dropout_ratio > 0:
out = self.dropout(out)
return out | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/shared_heads/fbo_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py | Apache-2.0 |
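Stripped of the convolutions, layer norm and dropout, the core of this layer is scaled dot-product attention from short-term RoI features onto the long-term feature bank; a toy run shows the shape flow (dimensions chosen arbitrarily):
import torch

n, c, num_st, num_lt = 4, 16, 1, 60
theta = torch.rand(n, c, num_st)     # query from short-term features
phi = torch.rand(n, c, num_lt)       # key from the long-term bank
g = torch.rand(n, c, num_lt)         # value from the long-term bank

attn = torch.matmul(theta.permute(0, 2, 1), phi) / c ** 0.5   # (n, num_st, num_lt)
p = attn.softmax(dim=-1)
out = torch.matmul(g, p.permute(0, 2, 1))                      # (n, c, num_st)
print(out.shape)                                               # torch.Size([4, 16, 1])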
def init_weights(self, pretrained=None):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(pretrained, str):
logger = MMLogger.get_current_instance()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
kaiming_init(self.st_feat_conv)
kaiming_init(self.lt_feat_conv)
for layer_name in self.non_local_layers:
non_local_layer = getattr(self, layer_name)
non_local_layer.init_weights(pretrained=pretrained)
else:
raise TypeError('pretrained must be a str or None') | Initiate the parameters either from existing checkpoint or from
scratch. | init_weights | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/shared_heads/fbo_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py | Apache-2.0 |
def forward(self, st_feat, lt_feat):
"""Defines the computation performed at every call."""
# prepare st_feat
st_feat = self.st_feat_conv(st_feat)
if self.st_feat_dropout_ratio > 0:
st_feat = self.st_feat_dropout(st_feat)
# prepare lt_feat
lt_feat = self.lt_feat_conv(lt_feat)
if self.lt_feat_dropout_ratio > 0:
lt_feat = self.lt_feat_dropout(lt_feat)
# fuse short-term and long-term features in NonLocal Layer
for layer_name in self.non_local_layers:
identity = st_feat
non_local_layer = getattr(self, layer_name)
nl_out = non_local_layer(st_feat, lt_feat)
nl_out = identity + nl_out
if not self.pre_activate:
nl_out = self.relu(nl_out)
st_feat = nl_out
return nl_out | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/shared_heads/fbo_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py | Apache-2.0 |
def sample_lfb(self, rois, img_metas):
"""Sample long-term features for each ROI feature."""
inds = rois[:, 0].type(torch.int64)
lt_feat_list = []
for ind in inds:
lt_feat_list.append(self.lfb[img_metas[ind]['img_key']])
lt_feat = torch.stack(lt_feat_list, dim=0)
# [N, lfb_channels, window_size * max_num_feat_per_step]
lt_feat = lt_feat.permute(0, 2, 1).contiguous()
return lt_feat.unsqueeze(-1).unsqueeze(-1) | Sample long-term features for each ROI feature. | sample_lfb | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/shared_heads/fbo_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py | Apache-2.0 |
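A toy illustration of the bank lookup, with a hand-built lfb dict and made-up img_key values (the real bank is loaded from disk and keyed per keyframe); window_size=5 and 4 features per step are assumptions:
import torch

lfb = {'video_a,0902': torch.rand(5 * 4, 512),
       'video_b,0133': torch.rand(5 * 4, 512)}
img_metas = [{'img_key': 'video_a,0902'}, {'img_key': 'video_b,0133'}]
rois = torch.tensor([[0., 1., 1., 5., 5.],
                     [1., 2., 2., 6., 6.]])

inds = rois[:, 0].type(torch.int64)
lt_feat = torch.stack([lfb[img_metas[int(i)]['img_key']] for i in inds], dim=0)
lt_feat = lt_feat.permute(0, 2, 1).contiguous()      # (N, lfb_channels, window * K)
print(lt_feat.unsqueeze(-1).unsqueeze(-1).shape)     # torch.Size([2, 512, 20, 1, 1])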
def forward(self, x, rois, img_metas, **kwargs):
"""Defines the computation performed at every call."""
# [N, C, 1, 1, 1]
st_feat = self.temporal_pool(x)
st_feat = self.spatial_pool(st_feat)
identity = st_feat
# [N, C, window_size * num_feat_per_step, 1, 1]
lt_feat = self.sample_lfb(rois, img_metas).to(st_feat.device)
fbo_feat = self.fbo(st_feat, lt_feat)
out = torch.cat([identity, fbo_feat], dim=1)
return out | Defines the computation performed at every call. | forward | python | open-mmlab/mmaction2 | mmaction/models/roi_heads/shared_heads/fbo_head.py | https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py | Apache-2.0 |
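Since the head's output is just the channel-wise concatenation of the pooled RoI feature and the FBO response, a downstream bbox head has to expect in_channels equal to the sum of the two; the channel counts below are illustrative only:
import torch

st_feat = torch.rand(3, 2048, 1, 1, 1)   # pooled short-term RoI features
fbo_feat = torch.rand(3, 512, 1, 1, 1)   # feature bank operator response
out = torch.cat([st_feat, fbo_feat], dim=1)
print(out.shape)                         # torch.Size([3, 2560, 1, 1, 1])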