Dataset schema (each record below is one row):

    code        string, lengths 66 to 870k
    docstring   string, lengths 19 to 26.7k
    func_name   string, lengths 1 to 138
    language    string, 1 class
    repo        string, lengths 7 to 68
    path        string, lengths 5 to 324
    url         string, lengths 46 to 389
    license     string, 7 classes
def angle_between(self, v1: np.ndarray, v2: np.ndarray) -> float:
    """Returns the angle in radians between vectors 'v1' and 'v2'."""
    if np.abs(v1).sum() < 1e-6 or np.abs(v2).sum() < 1e-6:
        return 0
    v1_u = self.unit_vector(v1)
    v2_u = self.unit_vector(v2)
    return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
Returns the angle in radians between vectors 'v1' and 'v2'.
angle_between
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def rotation_matrix(self, axis: np.ndarray, theta: float) -> np.ndarray:
    """Returns the rotation matrix associated with counterclockwise
    rotation about the given axis by theta radians."""
    if np.abs(axis).sum() < 1e-6 or np.abs(theta) < 1e-6:
        return np.eye(3)
    axis = np.asarray(axis)
    axis = axis / np.sqrt(np.dot(axis, axis))
    a = np.cos(theta / 2.0)
    b, c, d = -axis * np.sin(theta / 2.0)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                     [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                     [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
Returns the rotation matrix associated with counterclockwise rotation about the given axis by theta radians.
rotation_matrix
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
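Taken together, angle_between and rotation_matrix implement the axis-angle alignment used by PreNormalize3D below. A minimal standalone sketch (the module-level copies and sample vectors are mine, added purely so the snippet runs on its own):

import numpy as np

def unit_vector(v):
    # Scale a vector to unit length.
    return v / np.linalg.norm(v)

def angle_between(v1, v2):
    # Same math as the method above, module-level for a runnable snippet.
    if np.abs(v1).sum() < 1e-6 or np.abs(v2).sum() < 1e-6:
        return 0
    return np.arccos(np.clip(np.dot(unit_vector(v1), unit_vector(v2)), -1.0, 1.0))

def rotation_matrix(axis, theta):
    # Euler-Rodrigues formula, as in the method above.
    if np.abs(axis).sum() < 1e-6 or np.abs(theta) < 1e-6:
        return np.eye(3)
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.sqrt(np.dot(axis, axis))
    a = np.cos(theta / 2.0)
    b, c, d = -axis * np.sin(theta / 2.0)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                     [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                     [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])

# Rotating the x-axis 90 degrees counterclockwise about z lands on the y-axis.
v = np.array([1.0, 0.0, 0.0])
R = rotation_matrix(np.array([0.0, 0.0, 1.0]), np.pi / 2)
print(np.round(R @ v, 6))                           # [0. 1. 0.]
print(angle_between(v, np.array([0.0, 1.0, 0.0])))  # 1.5707963... (pi / 2)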
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`PreNormalize3D`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    skeleton = results['keypoint']
    total_frames = results.get('total_frames', skeleton.shape[1])

    M, T, V, C = skeleton.shape
    assert T == total_frames
    if skeleton.sum() == 0:
        return results

    # Frames where the first person has at least one non-zero joint.
    index0 = [
        i for i in range(T) if not np.all(np.isclose(skeleton[0, i], 0))
    ]

    assert M in [1, 2]
    if M == 2:
        index1 = [
            i for i in range(T) if not np.all(np.isclose(skeleton[1, i], 0))
        ]
        if len(index0) < len(index1):
            # Keep the person visible in more frames as person 0.
            skeleton = skeleton[:, np.array(index1)]
            skeleton = skeleton[[1, 0]]
        else:
            skeleton = skeleton[:, np.array(index0)]
    else:
        skeleton = skeleton[:, np.array(index0)]

    T_new = skeleton.shape[1]

    if self.align_center:
        if skeleton.shape[2] == 25:
            main_body_center = skeleton[0, 0, 1].copy()
        else:
            main_body_center = skeleton[0, 0, -1].copy()
        mask = ((skeleton != 0).sum(-1) > 0)[..., None]
        skeleton = (skeleton - main_body_center) * mask

    if self.align_spine:
        joint_bottom = skeleton[0, 0, self.zaxis[0]]
        joint_top = skeleton[0, 0, self.zaxis[1]]
        axis = np.cross(joint_top - joint_bottom, [0, 0, 1])
        angle = self.angle_between(joint_top - joint_bottom, [0, 0, 1])
        matrix_z = self.rotation_matrix(axis, angle)
        skeleton = np.einsum('abcd,kd->abck', skeleton, matrix_z)

    if self.align_shoulder:
        joint_rshoulder = skeleton[0, 0, self.xaxis[0]]
        joint_lshoulder = skeleton[0, 0, self.xaxis[1]]
        axis = np.cross(joint_rshoulder - joint_lshoulder, [1, 0, 0])
        angle = self.angle_between(joint_rshoulder - joint_lshoulder,
                                   [1, 0, 0])
        matrix_x = self.rotation_matrix(axis, angle)
        skeleton = np.einsum('abcd,kd->abck', skeleton, matrix_x)

    results['keypoint'] = skeleton
    results['total_frames'] = T_new
    # Note: `main_body_center` is set in the align_center branch above.
    results['body_center'] = main_body_center
    return results
The transform function of :class:`PreNormalize3D`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`PreNormalize2D`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    h, w = results.get('img_shape', self.img_shape)
    results['keypoint'][..., 0] = \
        (results['keypoint'][..., 0] - (w / 2)) / (w / 2)
    results['keypoint'][..., 1] = \
        (results['keypoint'][..., 1] - (h / 2)) / (h / 2)
    return results
The transform function of :class:`PreNormalize2D`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
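The PreNormalize2D transform above is a plain affine map from pixel coordinates to [-1, 1]. A quick sketch with hypothetical values:

import numpy as np

# Hypothetical keypoints in pixel space: M=1 person, T=1 frame, V=2 joints, C=2.
keypoint = np.array([[[[0.0, 0.0], [960.0, 540.0]]]], dtype=np.float32)
h, w = 1080, 1920  # assumed img_shape

# The same mapping the transform applies in place.
keypoint[..., 0] = (keypoint[..., 0] - (w / 2)) / (w / 2)
keypoint[..., 1] = (keypoint[..., 1] - (h / 2)) / (h / 2)
print(keypoint)  # corner (0, 0) -> (-1, -1); image center -> (0, 0)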
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`JointToBone`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    keypoint = results['keypoint']
    M, T, V, C = keypoint.shape
    bone = np.zeros((M, T, V, C), dtype=np.float32)

    assert C in [2, 3]
    for v1, v2 in self.pairs:
        bone[..., v1, :] = keypoint[..., v1, :] - keypoint[..., v2, :]
        if C == 3 and self.dataset in ['openpose', 'coco']:
            score = (keypoint[..., v1, 2] + keypoint[..., v2, 2]) / 2
            bone[..., v1, 2] = score

    results[self.target] = bone
    return results
The transform function of :class:`JointToBone`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
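The bone representation above is simply each joint minus its paired (parent) joint. A tiny sketch with a hypothetical three-joint skeleton and pair list:

import numpy as np

# M=1 person, T=1 frame, V=3 joints, C=2 coordinates.
keypoint = np.zeros((1, 1, 3, 2), dtype=np.float32)
keypoint[0, 0] = [[0, 0], [1, 0], [1, 1]]
pairs = [(0, 0), (1, 0), (2, 1)]  # hypothetical (joint, parent); root pairs with itself

bone = np.zeros_like(keypoint)
for v1, v2 in pairs:
    bone[..., v1, :] = keypoint[..., v1, :] - keypoint[..., v2, :]
print(bone[0, 0])  # [[0. 0.] [1. 0.] [0. 1.]] -- the root bone is zero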
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`ToMotion`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    data = results[self.source]
    M, T, V, C = data.shape
    motion = np.zeros_like(data)

    assert C in [2, 3]
    motion[:, :T - 1] = np.diff(data, axis=1)
    if C == 3 and self.dataset in ['openpose', 'coco']:
        score = (data[:, :T - 1, :, 2] + data[:, 1:, :, 2]) / 2
        motion[:, :T - 1, :, 2] = score

    results[self.target] = motion
    return results
The transform function of :class:`ToMotion`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
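ToMotion above is a first-order temporal difference with zero padding on the last frame. A small sketch with made-up coordinates:

import numpy as np

# M=1 person, T=3 frames, V=1 joint, C=2 coordinates.
data = np.array([[[[0., 0.]], [[1., 1.]], [[3., 2.]]]], dtype=np.float32)
M, T, V, C = data.shape

motion = np.zeros_like(data)
motion[:, :T - 1] = np.diff(data, axis=1)  # frame-to-frame displacement
print(motion[0, :, 0])  # [[1. 1.] [2. 1.] [0. 0.]] -- the last frame stays zero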
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`MergeSkeFeat`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    feats = []
    for name in self.feat_list:
        feats.append(results.pop(name))
    feats = np.concatenate(feats, axis=self.axis)
    results[self.target] = feats
    return results
The transform function of :class:`MergeSkeFeat`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`GenSkeFeat`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    if 'keypoint_score' in results and 'keypoint' in results:
        assert self.dataset != 'nturgb+d'
        assert results['keypoint'].shape[-1] == 2, \
            'Only 2D keypoints have keypoint_score. '
        keypoint = results.pop('keypoint')
        keypoint_score = results.pop('keypoint_score')
        results['keypoint'] = np.concatenate(
            [keypoint, keypoint_score[..., None]], -1)
    return self.ops(results)
The transform function of :class:`GenSkeFeat`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def _get_train_clips(self, num_frames: int, clip_len: int) -> np.ndarray:
    """Uniformly sample indices for training clips.

    Args:
        num_frames (int): The number of frames.
        clip_len (int): The length of the clip.

    Returns:
        np.ndarray: The sampled indices for training clips.
    """
    all_inds = []
    for clip_idx in range(self.num_clips):
        if num_frames < clip_len:
            # Video shorter than a clip: take a random contiguous window
            # (indices wrap around later via np.mod in the caller).
            start = np.random.randint(0, num_frames)
            inds = np.arange(start, start + clip_len)
        elif clip_len <= num_frames < 2 * clip_len:
            # Between 1x and 2x the clip length: pick a sorted random
            # subset of clip_len frames via a cumulative 0/1 offset trick.
            basic = np.arange(clip_len)
            inds = np.random.choice(
                clip_len + 1, num_frames - clip_len, replace=False)
            offset = np.zeros(clip_len + 1, dtype=np.int32)
            offset[inds] = 1
            offset = np.cumsum(offset)
            inds = basic + offset[:-1]
        else:
            # Long video: split into clip_len bins and draw one frame
            # per bin.
            bids = np.array(
                [i * num_frames // clip_len for i in range(clip_len + 1)])
            bsize = np.diff(bids)
            bst = bids[:clip_len]
            offset = np.random.randint(bsize)
            inds = bst + offset

        all_inds.append(inds)

    return np.concatenate(all_inds)
Uniformly sample indices for training clips. Args: num_frames (int): The number of frames. clip_len (int): The length of the clip. Returns: np.ndarray: The sampled indices for training clips.
_get_train_clips
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
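The dense branch of _get_train_clips (num_frames >= 2 * clip_len) partitions the video into clip_len bins and draws one frame per bin. A standalone rewrite of just that branch (the function name is mine):

import numpy as np

def sample_one_per_bin(num_frames, clip_len):
    # Bin boundaries: clip_len near-equal bins covering [0, num_frames).
    bids = np.array([i * num_frames // clip_len for i in range(clip_len + 1)])
    bsize = np.diff(bids)              # size of each bin
    bst = bids[:clip_len]              # start index of each bin
    offset = np.random.randint(bsize)  # one random offset per bin
    return bst + offset

print(sample_one_per_bin(100, 10))  # 10 indices, one from each of [0,10), [10,20), ...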
def _get_test_clips(self, num_frames: int, clip_len: int) -> np.ndarray:
    """Uniformly sample indices for testing clips.

    Args:
        num_frames (int): The number of frames.
        clip_len (int): The length of the clip.

    Returns:
        np.ndarray: The sampled indices for testing clips.
    """
    np.random.seed(self.seed)
    all_inds = []
    for i in range(self.num_clips):
        if num_frames < clip_len:
            start_ind = i if num_frames < self.num_clips \
                else i * num_frames // self.num_clips
            inds = np.arange(start_ind, start_ind + clip_len)
        elif clip_len <= num_frames < clip_len * 2:
            basic = np.arange(clip_len)
            inds = np.random.choice(
                clip_len + 1, num_frames - clip_len, replace=False)
            offset = np.zeros(clip_len + 1, dtype=np.int64)
            offset[inds] = 1
            offset = np.cumsum(offset)
            inds = basic + offset[:-1]
        else:
            bids = np.array(
                [i * num_frames // clip_len for i in range(clip_len + 1)])
            bsize = np.diff(bids)
            bst = bids[:clip_len]
            offset = np.random.randint(bsize)
            inds = bst + offset
        all_inds.append(inds)

    return np.concatenate(all_inds)
Uniformly sample indices for testing clips. Args: num_frames (int): The number of frames. clip_len (int): The length of the clip. Returns: np.ndarray: The sampled indices for testing clips.
_get_test_clips
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`UniformSampleFrames`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    num_frames = results['total_frames']

    if self.test_mode:
        inds = self._get_test_clips(num_frames, self.clip_len)
    else:
        inds = self._get_train_clips(num_frames, self.clip_len)

    inds = np.mod(inds, num_frames)
    start_index = results.get('start_index', 0)
    inds = inds + start_index

    if 'keypoint' in results:
        kp = results['keypoint']
        assert num_frames == kp.shape[1]
        num_person = kp.shape[0]
        # Count how many persons are actually present in each frame.
        num_persons = [num_person] * num_frames
        for i in range(num_frames):
            j = num_person - 1
            while j >= 0 and np.all(np.abs(kp[j, i]) < 1e-5):
                j -= 1
            num_persons[i] = j + 1
        # Mark frames where the person count changes as transitional.
        transitional = [False] * num_frames
        for i in range(1, num_frames - 1):
            if num_persons[i] != num_persons[i - 1]:
                transitional[i] = transitional[i - 1] = True
            if num_persons[i] != num_persons[i + 1]:
                transitional[i] = transitional[i + 1] = True
        inds_int = inds.astype(np.int64)
        coeff = np.array([transitional[i] for i in inds_int])
        inds = (coeff * inds_int + (1 - coeff) * inds).astype(np.float32)

    results['frame_inds'] = inds.astype(np.int32)
    results['clip_len'] = self.clip_len
    results['frame_interval'] = None
    results['num_clips'] = self.num_clips
    return results
The transform function of :class:`UniformSampleFrames`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`PadTo`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    total_frames = results['total_frames']
    assert total_frames <= self.length

    start_index = results.get('start_index', 0)
    inds = np.arange(start_index, start_index + self.length)
    inds = np.mod(inds, total_frames)

    keypoint = results['keypoint'][:, inds].copy()
    if self.mode == 'zero':
        keypoint[:, total_frames:] = 0

    results['keypoint'] = keypoint
    results['total_frames'] = self.length
    return results
The transform function of :class:`PadTo`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
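PadTo above pads by looping frame indices with np.mod, then optionally blanks the looped tail. The index arithmetic in isolation (sample sizes hypothetical):

import numpy as np

total_frames, length, start_index = 4, 7, 0
inds = np.mod(np.arange(start_index, start_index + length), total_frames)
print(inds)  # [0 1 2 3 0 1 2] -- 'loop' mode wraps back to the first frames

# 'zero' mode blanks the wrapped tail instead of repeating it:
keypoint = np.ones((1, total_frames, 1, 2), dtype=np.float32)[:, inds].copy()
keypoint[:, total_frames:] = 0
print(keypoint[0, :, 0, 0])  # [1. 1. 1. 1. 0. 0. 0.]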
def _load_kpscore(kpscore: np.ndarray,
                  frame_inds: np.ndarray) -> np.ndarray:
    """Load keypoint scores according to sampled indexes."""
    return kpscore[:, frame_inds].astype(np.float32)
Load keypoint scores according to sampled indexes.
_load_kpscore
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`PoseDecode`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    if 'total_frames' not in results:
        results['total_frames'] = results['keypoint'].shape[1]

    if 'frame_inds' not in results:
        results['frame_inds'] = np.arange(results['total_frames'])

    if results['frame_inds'].ndim != 1:
        results['frame_inds'] = np.squeeze(results['frame_inds'])

    offset = results.get('offset', 0)
    frame_inds = results['frame_inds'] + offset

    if 'keypoint_score' in results:
        results['keypoint_score'] = self._load_kpscore(
            results['keypoint_score'], frame_inds)

    results['keypoint'] = self._load_kp(results['keypoint'], frame_inds)

    return results
The transform function of :class:`PoseDecode`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`MMUniformSampleFrames`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    num_frames = results['total_frames']
    modalities = []
    for modality, clip_len in self.clip_len.items():
        if self.test_mode:
            inds = self._get_test_clips(num_frames, clip_len)
        else:
            inds = self._get_train_clips(num_frames, clip_len)
        inds = np.mod(inds, num_frames)
        results[f'{modality}_inds'] = inds.astype(np.int32)
        modalities.append(modality)
    results['clip_len'] = self.clip_len
    results['frame_interval'] = None
    results['num_clips'] = self.num_clips
    if not isinstance(results['modality'], list):
        # should override
        results['modality'] = modalities
    return results
The transform function of :class:`MMUniformSampleFrames`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`MMDecode`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    for mod in results['modality']:
        if results[f'{mod}_inds'].ndim != 1:
            results[f'{mod}_inds'] = np.squeeze(results[f'{mod}_inds'])
        frame_inds = results[f'{mod}_inds']
        if mod == 'RGB':
            if 'filename' not in results:
                results['filename'] = results['frame_dir'] + '.mp4'
            video_reader = self._get_video_reader(results['filename'])
            imgs = self._decord_load_frames(video_reader, frame_inds)
            del video_reader
            results['imgs'] = imgs
        elif mod == 'Pose':
            assert 'keypoint' in results
            if 'keypoint_score' not in results:
                keypoint_score = [
                    np.ones(keypoint.shape[:-1], dtype=np.float32)
                    for keypoint in results['keypoint']
                ]
                results['keypoint_score'] = np.stack(keypoint_score)
            results['keypoint'] = self._load_kp(results['keypoint'],
                                                frame_inds)
            results['keypoint_score'] = self._load_kpscore(
                results['keypoint_score'], frame_inds)
        else:
            raise NotImplementedError(
                f'MMDecode: Modality {mod} not supported')

    # We need to scale human keypoints to the new image size
    if 'imgs' in results and 'keypoint' in results:
        real_img_shape = results['imgs'][0].shape[:2]
        if real_img_shape != results['img_shape']:
            oh, ow = results['img_shape']
            nh, nw = real_img_shape

            assert results['keypoint'].shape[-1] in [2, 3]
            results['keypoint'][..., 0] *= (nw / ow)
            results['keypoint'][..., 1] *= (nh / oh)
            results['img_shape'] = real_img_shape
            results['original_shape'] = real_img_shape

    return results
The transform function of :class:`MMDecode`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
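The tail of MMDecode rescales keypoints when the decoded frames differ from the annotated img_shape. The same arithmetic in isolation (all values hypothetical):

import numpy as np

keypoint = np.array([[[[480.0, 270.0]]]])  # annotated on a 540x960 frame
oh, ow = 540, 960                          # annotated img_shape
nh, nw = 1080, 1920                        # shape of the actually decoded frames

keypoint[..., 0] *= nw / ow
keypoint[..., 1] *= nh / oh
print(keypoint)  # [[[[960. 540.]]]] -- still the center of the (larger) frame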
def _get_box(self, keypoint: np.ndarray, img_shape: Tuple[int]) -> Tuple:
    """Calculate the bounding box surrounding all joints in the frames."""
    h, w = img_shape

    kp_x = keypoint[..., 0]
    kp_y = keypoint[..., 1]

    min_x = np.min(kp_x[kp_x != 0], initial=np.Inf)
    min_y = np.min(kp_y[kp_y != 0], initial=np.Inf)
    max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf)
    max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf)

    # The compact area is too small
    if max_x - min_x < self.threshold or max_y - min_y < self.threshold:
        return 0, 0, w, h

    center = ((max_x + min_x) / 2, (max_y + min_y) / 2)
    half_width = (max_x - min_x) / 2 * (1 + self.padding)
    half_height = (max_y - min_y) / 2 * (1 + self.padding)

    if self.hw_ratio is not None:
        half_height = max(self.hw_ratio[0] * half_width, half_height)
        half_width = max(1 / self.hw_ratio[1] * half_height, half_width)

    min_x, max_x = center[0] - half_width, center[0] + half_width
    min_y, max_y = center[1] - half_height, center[1] + half_height

    # hot update
    if not self.allow_imgpad:
        min_x, min_y = int(max(0, min_x)), int(max(0, min_y))
        max_x, max_y = int(min(w, max_x)), int(min(h, max_y))
    else:
        min_x, min_y = int(min_x), int(min_y)
        max_x, max_y = int(max_x), int(max_y)
    return min_x, min_y, max_x, max_y
Calculate the bounding box surrounding all joints in the frames.
_get_box
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def _compact_images(self, imgs: List[np.ndarray], img_shape: Tuple[int],
                    box: Tuple[int]) -> List:
    """Crop the images according to the bounding box."""
    h, w = img_shape
    min_x, min_y, max_x, max_y = box
    pad_l, pad_u, pad_r, pad_d = 0, 0, 0, 0
    if min_x < 0:
        pad_l = -min_x
        min_x, max_x = 0, max_x + pad_l
        w += pad_l
    if min_y < 0:
        pad_u = -min_y
        min_y, max_y = 0, max_y + pad_u
        h += pad_u
    if max_x > w:
        pad_r = max_x - w
        w = max_x
    if max_y > h:
        pad_d = max_y - h
        h = max_y

    if pad_l > 0 or pad_r > 0 or pad_u > 0 or pad_d > 0:
        imgs = [
            np.pad(img, ((pad_u, pad_d), (pad_l, pad_r), (0, 0)))
            for img in imgs
        ]
    imgs = [img[min_y:max_y, min_x:max_x] for img in imgs]
    return imgs
Crop the images according to the bounding box.
_compact_images
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`MMCompact`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    img_shape = results['img_shape']
    kp = results['keypoint']
    # Make NaN zero
    kp[np.isnan(kp)] = 0.
    min_x, min_y, max_x, max_y = self._get_box(kp, img_shape)

    kp_x, kp_y = kp[..., 0], kp[..., 1]
    kp_x[kp_x != 0] -= min_x
    kp_y[kp_y != 0] -= min_y

    new_shape = (max_y - min_y, max_x - min_x)
    results['img_shape'] = new_shape
    results['imgs'] = self._compact_images(results['imgs'], img_shape,
                                           (min_x, min_y, max_x, max_y))
    return results
The transform function of :class:`MMCompact`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def _init_lazy_if_proper(results, lazy):
    """Initialize lazy operation properly.

    Make sure that a lazy operation is properly initialized, and avoid a
    non-lazy operation accidentally getting mixed in.

    Required keys in results are "imgs" if "img_shape" is not in results;
    otherwise the required key is "img_shape". Added or modified keys are
    "img_shape" and "lazy". Added or modified keys in "lazy" are
    "original_shape", "crop_bbox", "flip", "flip_direction" and
    "interpolation".

    Args:
        results (dict): A dict stores data pipeline result.
        lazy (bool): Determine whether to apply lazy operation.
            Default: False.
    """
    if 'img_shape' not in results:
        results['img_shape'] = results['imgs'][0].shape[:2]
    if lazy:
        if 'lazy' not in results:
            img_h, img_w = results['img_shape']
            lazyop = dict()
            lazyop['original_shape'] = results['img_shape']
            lazyop['crop_bbox'] = np.array([0, 0, img_w, img_h],
                                           dtype=np.float32)
            lazyop['flip'] = False
            lazyop['flip_direction'] = None
            lazyop['interpolation'] = None
            results['lazy'] = lazyop
    else:
        assert 'lazy' not in results, 'Use Fuse after lazy operations'
Initialize lazy operation properly. Make sure that a lazy operation is properly initialized, and avoid a non-lazy operation accidentally getting mixed in. Required keys in results are "imgs" if "img_shape" is not in results; otherwise the required key is "img_shape". Added or modified keys are "img_shape" and "lazy". Added or modified keys in "lazy" are "original_shape", "crop_bbox", "flip", "flip_direction", "interpolation". Args: results (dict): A dict stores data pipeline result. lazy (bool): Determine whether to apply lazy operation. Default: False.
_init_lazy_if_proper
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def transform(self, results):
    """Fuse lazy operations.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    if 'lazy' not in results:
        raise ValueError('No lazy operation detected')
    lazyop = results['lazy']
    imgs = results['imgs']

    # crop
    left, top, right, bottom = lazyop['crop_bbox'].round().astype(int)
    imgs = [img[top:bottom, left:right] for img in imgs]

    # resize
    img_h, img_w = results['img_shape']
    if lazyop['interpolation'] is None:
        interpolation = 'bilinear'
    else:
        interpolation = lazyop['interpolation']
    imgs = [
        mmcv.imresize(img, (img_w, img_h), interpolation=interpolation)
        for img in imgs
    ]

    # flip
    if lazyop['flip']:
        for img in imgs:
            mmcv.imflip_(img, lazyop['flip_direction'])

    results['imgs'] = imgs
    del results['lazy']

    return results
Fuse lazy operations. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def _box_crop(box, crop_bbox):
    """Crop the bounding boxes according to the crop_bbox.

    Args:
        box (np.ndarray): The bounding boxes.
        crop_bbox (np.ndarray): The bbox used to crop the original image.
    """
    x1, y1, x2, y2 = crop_bbox
    img_w, img_h = x2 - x1, y2 - y1

    box_ = box.copy()
    box_[..., 0::2] = np.clip(box[..., 0::2] - x1, 0, img_w - 1)
    box_[..., 1::2] = np.clip(box[..., 1::2] - y1, 0, img_h - 1)
    return box_
Crop the bounding boxes according to the crop_bbox. Args: box (np.ndarray): The bounding boxes. crop_bbox(np.ndarray): The bbox used to crop the original image.
_box_crop
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
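_box_crop above shifts boxes into the crop's coordinate frame and clips them to its bounds. A module-level copy with a worked example (values hypothetical):

import numpy as np

def box_crop(box, crop_bbox):
    # Same logic as _box_crop above, module-level for a runnable snippet.
    x1, y1, x2, y2 = crop_bbox
    img_w, img_h = x2 - x1, y2 - y1
    box_ = box.copy()
    box_[..., 0::2] = np.clip(box[..., 0::2] - x1, 0, img_w - 1)
    box_[..., 1::2] = np.clip(box[..., 1::2] - y1, 0, img_h - 1)
    return box_

boxes = np.array([[30., 40., 120., 160.]])
print(box_crop(boxes, np.array([50., 50., 150., 150.])))
# [[ 0.  0. 70. 99.]] -- shifted by the crop origin, then clipped to the 100x100 crop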
def _all_box_crop(self, results, crop_bbox):
    """Crop the gt_bboxes and proposals in results according to crop_bbox.

    Args:
        results (dict): All information about the sample, which contains
            'gt_bboxes' and 'proposals' (optional).
        crop_bbox (np.ndarray): The bbox used to crop the original image.
    """
    results['gt_bboxes'] = self._box_crop(results['gt_bboxes'], crop_bbox)
    if 'proposals' in results and results['proposals'] is not None:
        assert results['proposals'].shape[1] == 4
        results['proposals'] = self._box_crop(results['proposals'],
                                              crop_bbox)
    return results
Crop the gt_bboxes and proposals in results according to crop_bbox. Args: results (dict): All information about the sample, which contains 'gt_bboxes' and 'proposals' (optional). crop_bbox (np.ndarray): The bbox used to crop the original image.
_all_box_crop
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def transform(self, results):
    """Performs the RandomCrop augmentation.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    _init_lazy_if_proper(results, self.lazy)
    if 'keypoint' in results:
        assert not self.lazy, ('Keypoint Augmentations are not compatible '
                               'with lazy == True')

    img_h, img_w = results['img_shape']
    assert self.size <= img_h and self.size <= img_w

    y_offset = 0
    x_offset = 0
    if img_h > self.size:
        y_offset = int(np.random.randint(0, img_h - self.size))
    if img_w > self.size:
        x_offset = int(np.random.randint(0, img_w - self.size))

    if 'crop_quadruple' not in results:
        results['crop_quadruple'] = np.array(
            [0, 0, 1, 1],  # x, y, w, h
            dtype=np.float32)

    x_ratio, y_ratio = x_offset / img_w, y_offset / img_h
    w_ratio, h_ratio = self.size / img_w, self.size / img_h

    old_crop_quadruple = results['crop_quadruple']
    old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
    old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
    new_crop_quadruple = [
        old_x_ratio + x_ratio * old_w_ratio,
        old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
        h_ratio * old_h_ratio
    ]
    results['crop_quadruple'] = np.array(
        new_crop_quadruple, dtype=np.float32)

    new_h, new_w = self.size, self.size

    crop_bbox = np.array(
        [x_offset, y_offset, x_offset + new_w, y_offset + new_h])
    results['crop_bbox'] = crop_bbox

    results['img_shape'] = (new_h, new_w)

    if not self.lazy:
        if 'keypoint' in results:
            results['keypoint'] = self._crop_kps(results['keypoint'],
                                                 crop_bbox)
        if 'imgs' in results:
            results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
    else:
        lazyop = results['lazy']
        if lazyop['flip']:
            raise NotImplementedError('Put Flip at last for now')

        # record crop_bbox in lazyop dict to ensure only crop once in Fuse
        lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
        left = x_offset * (lazy_right - lazy_left) / img_w
        right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
        top = y_offset * (lazy_bottom - lazy_top) / img_h
        bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
        lazyop['crop_bbox'] = np.array([(lazy_left + left),
                                        (lazy_top + top),
                                        (lazy_left + right),
                                        (lazy_top + bottom)],
                                       dtype=np.float32)

    # Process entity boxes
    if 'gt_bboxes' in results:
        assert not self.lazy
        results = self._all_box_crop(results, results['crop_bbox'])

    return results
Performs the RandomCrop augmentation. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def get_crop_bbox(img_shape,
                  area_range,
                  aspect_ratio_range,
                  max_attempts=10):
    """Get a crop bbox given the area range and aspect ratio range.

    Args:
        img_shape (Tuple[int]): Image shape.
        area_range (Tuple[float]): The candidate area scales range of
            output cropped images. Default: (0.08, 1.0).
        aspect_ratio_range (Tuple[float]): The candidate aspect ratio
            range of output cropped images. Default: (3 / 4, 4 / 3).
        max_attempts (int): The maximum number of attempts to generate a
            random candidate bounding box. If none qualifies, the center
            bounding box is used. Default: 10.

    Returns:
        (list[int]) A random crop bbox within the area range and aspect
        ratio range.
    """
    assert 0 < area_range[0] <= area_range[1] <= 1
    assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1]

    img_h, img_w = img_shape
    area = img_h * img_w

    min_ar, max_ar = aspect_ratio_range
    aspect_ratios = np.exp(
        np.random.uniform(
            np.log(min_ar), np.log(max_ar), size=max_attempts))
    target_areas = np.random.uniform(*area_range, size=max_attempts) * area
    candidate_crop_w = np.round(np.sqrt(target_areas *
                                        aspect_ratios)).astype(np.int32)
    candidate_crop_h = np.round(np.sqrt(target_areas /
                                        aspect_ratios)).astype(np.int32)

    for i in range(max_attempts):
        crop_w = candidate_crop_w[i]
        crop_h = candidate_crop_h[i]
        if crop_h <= img_h and crop_w <= img_w:
            x_offset = random.randint(0, img_w - crop_w)
            y_offset = random.randint(0, img_h - crop_h)
            return x_offset, y_offset, x_offset + crop_w, y_offset + crop_h

    # Fallback
    crop_size = min(img_h, img_w)
    x_offset = (img_w - crop_size) // 2
    y_offset = (img_h - crop_size) // 2
    return x_offset, y_offset, x_offset + crop_size, y_offset + crop_size
Get a crop bbox given the area range and aspect ratio range. Args: img_shape (Tuple[int]): Image shape. area_range (Tuple[float]): The candidate area scales range of output cropped images. Default: (0.08, 1.0). aspect_ratio_range (Tuple[float]): The candidate aspect ratio range of output cropped images. Default: (3 / 4, 4 / 3). max_attempts (int): The maximum number of attempts to generate a random candidate bounding box; if none qualifies, the center bounding box is used. Default: 10. Returns: (list[int]) A random crop bbox within the area range and aspect ratio range.
get_crop_bbox
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
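A quick sanity check of get_crop_bbox: the sampled crop should respect the requested area fraction and stay near the requested aspect-ratio range (integer rounding of the width and height nudges both slightly). This assumes the function above is in scope:

import random
import numpy as np

random.seed(0)
np.random.seed(0)

left, top, right, bottom = get_crop_bbox((240, 320), (0.08, 1.0), (3 / 4, 4 / 3))
crop_w, crop_h = right - left, bottom - top
print(crop_w * crop_h / (320 * 240))  # area fraction, close to a sample from (0.08, 1.0)
print(crop_w / crop_h)                # aspect ratio, approximately in (0.75, 1.33)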
def transform(self, results):
    """Performs the RandomResizeCrop augmentation.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    _init_lazy_if_proper(results, self.lazy)
    if 'keypoint' in results:
        assert not self.lazy, ('Keypoint Augmentations are not compatible '
                               'with lazy == True')

    img_h, img_w = results['img_shape']

    left, top, right, bottom = self.get_crop_bbox(
        (img_h, img_w), self.area_range, self.aspect_ratio_range)
    new_h, new_w = bottom - top, right - left

    if 'crop_quadruple' not in results:
        results['crop_quadruple'] = np.array(
            [0, 0, 1, 1],  # x, y, w, h
            dtype=np.float32)

    x_ratio, y_ratio = left / img_w, top / img_h
    w_ratio, h_ratio = new_w / img_w, new_h / img_h

    old_crop_quadruple = results['crop_quadruple']
    old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
    old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
    new_crop_quadruple = [
        old_x_ratio + x_ratio * old_w_ratio,
        old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
        h_ratio * old_h_ratio
    ]
    results['crop_quadruple'] = np.array(
        new_crop_quadruple, dtype=np.float32)

    crop_bbox = np.array([left, top, right, bottom])
    results['crop_bbox'] = crop_bbox
    results['img_shape'] = (new_h, new_w)

    if not self.lazy:
        if 'keypoint' in results:
            results['keypoint'] = self._crop_kps(results['keypoint'],
                                                 crop_bbox)
        if 'imgs' in results:
            results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
    else:
        lazyop = results['lazy']
        if lazyop['flip']:
            raise NotImplementedError('Put Flip at last for now')

        # record crop_bbox in lazyop dict to ensure only crop once in Fuse
        lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
        left = left * (lazy_right - lazy_left) / img_w
        right = right * (lazy_right - lazy_left) / img_w
        top = top * (lazy_bottom - lazy_top) / img_h
        bottom = bottom * (lazy_bottom - lazy_top) / img_h
        lazyop['crop_bbox'] = np.array([(lazy_left + left),
                                        (lazy_top + top),
                                        (lazy_left + right),
                                        (lazy_top + bottom)],
                                       dtype=np.float32)

    if 'gt_bboxes' in results:
        assert not self.lazy
        results = self._all_box_crop(results, results['crop_bbox'])

    return results
Performs the RandomResizeCrop augmentation. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def transform(self, results):
    """Performs the MultiScaleCrop augmentation.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    _init_lazy_if_proper(results, self.lazy)
    if 'keypoint' in results:
        assert not self.lazy, ('Keypoint Augmentations are not compatible '
                               'with lazy == True')

    img_h, img_w = results['img_shape']
    base_size = min(img_h, img_w)
    crop_sizes = [int(base_size * s) for s in self.scales]

    candidate_sizes = []
    for i, h in enumerate(crop_sizes):
        for j, w in enumerate(crop_sizes):
            if abs(i - j) <= self.max_wh_scale_gap:
                candidate_sizes.append([w, h])

    crop_size = random.choice(candidate_sizes)
    for i in range(2):
        if abs(crop_size[i] - self.input_size[i]) < 3:
            crop_size[i] = self.input_size[i]

    crop_w, crop_h = crop_size

    if self.random_crop:
        x_offset = random.randint(0, img_w - crop_w)
        y_offset = random.randint(0, img_h - crop_h)
    else:
        w_step = (img_w - crop_w) // 4
        h_step = (img_h - crop_h) // 4
        candidate_offsets = [
            (0, 0),  # upper left
            (4 * w_step, 0),  # upper right
            (0, 4 * h_step),  # lower left
            (4 * w_step, 4 * h_step),  # lower right
            (2 * w_step, 2 * h_step),  # center
        ]
        if self.num_fixed_crops == 13:
            extra_candidate_offsets = [
                (0, 2 * h_step),  # center left
                (4 * w_step, 2 * h_step),  # center right
                (2 * w_step, 4 * h_step),  # lower center
                (2 * w_step, 0 * h_step),  # upper center
                (1 * w_step, 1 * h_step),  # upper left quarter
                (3 * w_step, 1 * h_step),  # upper right quarter
                (1 * w_step, 3 * h_step),  # lower left quarter
                (3 * w_step, 3 * h_step)  # lower right quarter
            ]
            candidate_offsets.extend(extra_candidate_offsets)
        x_offset, y_offset = random.choice(candidate_offsets)

    new_h, new_w = crop_h, crop_w

    crop_bbox = np.array(
        [x_offset, y_offset, x_offset + new_w, y_offset + new_h])
    results['crop_bbox'] = crop_bbox
    results['img_shape'] = (new_h, new_w)
    results['scales'] = self.scales

    if 'crop_quadruple' not in results:
        results['crop_quadruple'] = np.array(
            [0, 0, 1, 1],  # x, y, w, h
            dtype=np.float32)

    x_ratio, y_ratio = x_offset / img_w, y_offset / img_h
    w_ratio, h_ratio = new_w / img_w, new_h / img_h

    old_crop_quadruple = results['crop_quadruple']
    old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
    old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
    new_crop_quadruple = [
        old_x_ratio + x_ratio * old_w_ratio,
        old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
        h_ratio * old_h_ratio
    ]
    results['crop_quadruple'] = np.array(
        new_crop_quadruple, dtype=np.float32)

    if not self.lazy:
        if 'keypoint' in results:
            results['keypoint'] = self._crop_kps(results['keypoint'],
                                                 crop_bbox)
        if 'imgs' in results:
            results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
    else:
        lazyop = results['lazy']
        if lazyop['flip']:
            raise NotImplementedError('Put Flip at last for now')

        # record crop_bbox in lazyop dict to ensure only crop once in Fuse
        lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
        left = x_offset * (lazy_right - lazy_left) / img_w
        right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
        top = y_offset * (lazy_bottom - lazy_top) / img_h
        bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
        lazyop['crop_bbox'] = np.array([(lazy_left + left),
                                        (lazy_top + top),
                                        (lazy_left + right),
                                        (lazy_top + bottom)],
                                       dtype=np.float32)

    if 'gt_bboxes' in results:
        assert not self.lazy
        results = self._all_box_crop(results, results['crop_bbox'])

    return results
Performs the MultiScaleCrop augmentation. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def _box_resize(box, scale_factor):
    """Rescale the bounding boxes according to the scale_factor.

    Args:
        box (np.ndarray): The bounding boxes.
        scale_factor (np.ndarray): The scale factor used for rescaling.
    """
    assert len(scale_factor) == 2
    scale_factor = np.concatenate([scale_factor, scale_factor])
    return box * scale_factor
Rescale the bounding boxes according to the scale_factor. Args: box (np.ndarray): The bounding boxes. scale_factor (np.ndarray): The scale factor used for rescaling.
_box_resize
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
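_box_resize above scales (x1, y1, x2, y2) by a per-axis factor duplicated across both corners. In isolation (values hypothetical):

import numpy as np

box = np.array([[10., 20., 50., 60.]])
scale_factor = np.array([2.0, 0.5])  # (w_scale, h_scale)
print(box * np.concatenate([scale_factor, scale_factor]))
# [[ 20.  10. 100.  30.]] -- x coordinates doubled, y coordinates halved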
def transform(self, results):
    """Performs the Resize augmentation.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    _init_lazy_if_proper(results, self.lazy)
    if 'keypoint' in results:
        assert not self.lazy, ('Keypoint Augmentations are not compatible '
                               'with lazy == True')

    if 'scale_factor' not in results:
        results['scale_factor'] = np.array([1, 1], dtype=np.float32)
    img_h, img_w = results['img_shape']

    if self.keep_ratio:
        new_w, new_h = mmcv.rescale_size((img_w, img_h), self.scale)
    else:
        new_w, new_h = self.scale

    self.scale_factor = np.array([new_w / img_w, new_h / img_h],
                                 dtype=np.float32)

    results['img_shape'] = (new_h, new_w)
    results['keep_ratio'] = self.keep_ratio
    results['scale_factor'] = results['scale_factor'] * self.scale_factor

    if not self.lazy:
        if 'imgs' in results:
            results['imgs'] = self._resize_imgs(results['imgs'], new_w,
                                                new_h)
        if 'keypoint' in results:
            results['keypoint'] = self._resize_kps(results['keypoint'],
                                                   self.scale_factor)
    else:
        lazyop = results['lazy']
        if lazyop['flip']:
            raise NotImplementedError('Put Flip at last for now')
        lazyop['interpolation'] = self.interpolation

    if 'gt_bboxes' in results:
        assert not self.lazy
        results['gt_bboxes'] = self._box_resize(results['gt_bboxes'],
                                                self.scale_factor)
        if 'proposals' in results and results['proposals'] is not None:
            assert results['proposals'].shape[1] == 4
            results['proposals'] = self._box_resize(
                results['proposals'], self.scale_factor)

    return results
Performs the Resize augmentation. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def transform(self, results):
    """Performs the RandomRescale augmentation.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    short_edge = np.random.randint(self.scale_range[0],
                                   self.scale_range[1] + 1)
    resize = Resize((-1, short_edge),
                    keep_ratio=True,
                    interpolation=self.interpolation,
                    lazy=False)
    results = resize(results)

    results['short_edge'] = short_edge
    return results
Performs the RandomRescale augmentation. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def _box_flip(box, img_width):
    """Flip the bounding boxes given the width of the image.

    Args:
        box (np.ndarray): The bounding boxes.
        img_width (int): The img width.
    """
    box_ = box.copy()
    box_[..., 0::4] = img_width - box[..., 2::4]
    box_[..., 2::4] = img_width - box[..., 0::4]
    return box_
Flip the bounding boxes given the width of the image. Args: box (np.ndarray): The bounding boxes. img_width (int): The img width.
_box_flip
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
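_box_flip above mirrors boxes about the vertical axis; note x1 and x2 swap roles so the box stays well-formed. A module-level copy with a worked example:

import numpy as np

def box_flip(box, img_width):
    # Same logic as _box_flip above, module-level for a runnable snippet.
    box_ = box.copy()
    box_[..., 0::4] = img_width - box[..., 2::4]
    box_[..., 2::4] = img_width - box[..., 0::4]
    return box_

print(box_flip(np.array([[10., 20., 50., 60.]]), 100))
# [[50. 20. 90. 60.]] -- new x1 = 100 - old x2, new x2 = 100 - old x1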
def transform(self, results):
    """Performs the Flip augmentation.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    _init_lazy_if_proper(results, self.lazy)
    if 'keypoint' in results:
        assert not self.lazy, ('Keypoint Augmentations are not compatible '
                               'with lazy == True')
        assert self.direction == 'horizontal', (
            'Only horizontal flips are supported for human keypoints')

    modality = results['modality']
    if modality == 'Flow':
        assert self.direction == 'horizontal'

    flip = np.random.rand() < self.flip_ratio

    results['flip'] = flip
    results['flip_direction'] = self.direction
    img_width = results['img_shape'][1]

    if self.flip_label_map is not None and flip:
        results['label'] = self.flip_label_map.get(results['label'],
                                                   results['label'])

    if not self.lazy:
        if flip:
            if 'imgs' in results:
                results['imgs'] = self._flip_imgs(results['imgs'],
                                                  modality)
            if 'keypoint' in results:
                kp = results['keypoint']
                kpscore = results.get('keypoint_score', None)
                kp, kpscore = self._flip_kps(kp, kpscore, img_width)
                results['keypoint'] = kp
                if 'keypoint_score' in results:
                    results['keypoint_score'] = kpscore
    else:
        lazyop = results['lazy']
        if lazyop['flip']:
            raise NotImplementedError('Use one Flip please')
        lazyop['flip'] = flip
        lazyop['flip_direction'] = self.direction

    if 'gt_bboxes' in results and flip:
        assert not self.lazy and self.direction == 'horizontal'
        width = results['img_shape'][1]
        results['gt_bboxes'] = self._box_flip(results['gt_bboxes'], width)
        if 'proposals' in results and results['proposals'] is not None:
            assert results['proposals'].shape[1] == 4
            results['proposals'] = self._box_flip(results['proposals'],
                                                  width)

    return results
Performs the Flip augmentation. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def transform(self, results):
    """Perform ColorJitter.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    imgs = results['imgs']
    num_clips, clip_len = 1, len(imgs)

    new_imgs = []
    for i in range(num_clips):
        b = np.random.uniform(
            low=self.brightness[0], high=self.brightness[1])
        c = np.random.uniform(low=self.contrast[0], high=self.contrast[1])
        s = np.random.uniform(
            low=self.saturation[0], high=self.saturation[1])
        h = np.random.uniform(low=self.hue[0], high=self.hue[1])
        start, end = i * clip_len, (i + 1) * clip_len

        for img in imgs[start:end]:
            img = img.astype(np.float32)
            for fn_id in self.fn_idx:
                if fn_id == 0 and b != 1:
                    img *= b
                if fn_id == 1 and c != 1:
                    img = self.adjust_contrast(img, c)
                if fn_id == 2 and s != 1:
                    img = self.adjust_saturation(img, s)
                if fn_id == 3 and h != 0:
                    img = self.adjust_hue(img, h)
            img = np.clip(img, 0, 255).astype(np.uint8)
            new_imgs.append(img)
    results['imgs'] = new_imgs
    return results
Perform ColorJitter. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def transform(self, results):
    """Performs the CenterCrop augmentation.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    _init_lazy_if_proper(results, self.lazy)
    if 'keypoint' in results:
        assert not self.lazy, ('Keypoint Augmentations are not compatible '
                               'with lazy == True')

    img_h, img_w = results['img_shape']
    crop_w, crop_h = self.crop_size

    left = (img_w - crop_w) // 2
    top = (img_h - crop_h) // 2
    right = left + crop_w
    bottom = top + crop_h
    new_h, new_w = bottom - top, right - left

    crop_bbox = np.array([left, top, right, bottom])
    results['crop_bbox'] = crop_bbox
    results['img_shape'] = (new_h, new_w)

    if 'crop_quadruple' not in results:
        results['crop_quadruple'] = np.array(
            [0, 0, 1, 1],  # x, y, w, h
            dtype=np.float32)

    x_ratio, y_ratio = left / img_w, top / img_h
    w_ratio, h_ratio = new_w / img_w, new_h / img_h

    old_crop_quadruple = results['crop_quadruple']
    old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
    old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
    new_crop_quadruple = [
        old_x_ratio + x_ratio * old_w_ratio,
        old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
        h_ratio * old_h_ratio
    ]
    results['crop_quadruple'] = np.array(
        new_crop_quadruple, dtype=np.float32)

    if not self.lazy:
        if 'keypoint' in results:
            results['keypoint'] = self._crop_kps(results['keypoint'],
                                                 crop_bbox)
        if 'imgs' in results:
            results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
    else:
        lazyop = results['lazy']
        if lazyop['flip']:
            raise NotImplementedError('Put Flip at last for now')

        # record crop_bbox in lazyop dict to ensure only crop once in Fuse
        lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
        left = left * (lazy_right - lazy_left) / img_w
        right = right * (lazy_right - lazy_left) / img_w
        top = top * (lazy_bottom - lazy_top) / img_h
        bottom = bottom * (lazy_bottom - lazy_top) / img_h
        lazyop['crop_bbox'] = np.array([(lazy_left + left),
                                        (lazy_top + top),
                                        (lazy_left + right),
                                        (lazy_top + bottom)],
                                       dtype=np.float32)

    if 'gt_bboxes' in results:
        assert not self.lazy
        results = self._all_box_crop(results, results['crop_bbox'])

    return results
Performs the CenterCrop augmentation. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def transform(self, results):
    """Performs the ThreeCrop augmentation.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    _init_lazy_if_proper(results, False)
    if 'gt_bboxes' in results or 'proposals' in results:
        warnings.warn('ThreeCrop cannot process bounding boxes')

    imgs = results['imgs']
    img_h, img_w = results['imgs'][0].shape[:2]
    crop_w, crop_h = self.crop_size
    assert crop_h == img_h or crop_w == img_w

    if crop_h == img_h:
        w_step = (img_w - crop_w) // 2
        offsets = [
            (0, 0),  # left
            (2 * w_step, 0),  # right
            (w_step, 0),  # middle
        ]
    elif crop_w == img_w:
        h_step = (img_h - crop_h) // 2
        offsets = [
            (0, 0),  # top
            (0, 2 * h_step),  # down
            (0, h_step),  # middle
        ]

    cropped = []
    crop_bboxes = []
    for x_offset, y_offset in offsets:
        bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]
        crop = [
            img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]
            for img in imgs
        ]
        cropped.extend(crop)
        crop_bboxes.extend([bbox for _ in range(len(imgs))])

    crop_bboxes = np.array(crop_bboxes)
    results['imgs'] = cropped
    results['crop_bbox'] = crop_bboxes
    results['img_shape'] = results['imgs'][0].shape[:2]

    return results
Performs the ThreeCrop augmentation. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def transform(self, results):
    """Performs the TenCrop augmentation.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    _init_lazy_if_proper(results, False)

    if 'gt_bboxes' in results or 'proposals' in results:
        warnings.warn('TenCrop cannot process bounding boxes')

    imgs = results['imgs']

    img_h, img_w = results['imgs'][0].shape[:2]
    crop_w, crop_h = self.crop_size

    w_step = (img_w - crop_w) // 4
    h_step = (img_h - crop_h) // 4

    offsets = [
        (0, 0),  # upper left
        (4 * w_step, 0),  # upper right
        (0, 4 * h_step),  # lower left
        (4 * w_step, 4 * h_step),  # lower right
        (2 * w_step, 2 * h_step),  # center
    ]

    img_crops = list()
    crop_bboxes = list()
    for x_offset, y_offsets in offsets:
        crop = [
            img[y_offsets:y_offsets + crop_h, x_offset:x_offset + crop_w]
            for img in imgs
        ]
        flip_crop = [np.flip(c, axis=1).copy() for c in crop]
        bbox = [x_offset, y_offsets, x_offset + crop_w, y_offsets + crop_h]
        img_crops.extend(crop)
        img_crops.extend(flip_crop)
        crop_bboxes.extend([bbox for _ in range(len(imgs) * 2)])

    crop_bboxes = np.array(crop_bboxes)
    results['imgs'] = img_crops
    results['crop_bbox'] = crop_bboxes
    results['img_shape'] = results['imgs'][0].shape[:2]

    return results
Performs the TenCrop augmentation. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def _img_fill_pixels(self, img, top, left, h, w):
    """Fill pixels to the patch of image."""
    if self.mode == 'const':
        patch = np.empty((h, w, 3), dtype=np.uint8)
        patch[:, :] = np.array(self.fill_color, dtype=np.uint8)
    elif self.fill_std is None:
        # Uniform distribution
        patch = np.random.uniform(0, 256, (h, w, 3)).astype(np.uint8)
    else:
        # Normal distribution
        patch = np.random.normal(self.fill_color, self.fill_std, (h, w, 3))
        patch = np.clip(patch.astype(np.int32), 0, 255).astype(np.uint8)

    img[top:top + h, left:left + w] = patch
    return img
Fill pixels to the patch of image.
_img_fill_pixels
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def transform(self, results):
    """
    Args:
        results (dict): Results dict from pipeline.

    Returns:
        dict: Results after the transformation.
    """
    if self.random_disable():
        return results

    imgs = results['imgs']
    img_h, img_w = imgs[0].shape[:2]

    imgs = self._fill_pixels(imgs, *self.random_patch(img_h, img_w))

    results['imgs'] = imgs

    return results
Args: results (dict): Results dict from pipeline Returns: dict: Results after the transformation.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/processing.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
Apache-2.0
def transform(self, results: Dict) -> Dict:
    """The transform function of :class:`CLIPTokenize`.

    Args:
        results (dict): The result dict.

    Returns:
        dict: The result dict.
    """
    try:
        import clip
    except ImportError:
        raise ImportError('Please run `pip install '
                          'git+https://github.com/openai/CLIP.git` '
                          'to install clip first. ')

    text = results['text']
    text_tokenized = clip.tokenize(text)[0]
    results['text'] = text_tokenized
    return results
The transform function of :class:`CLIPTokenize`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/text_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/text_transforms.py
Apache-2.0
def transform(self, results):
    """Perform Torchvision augmentations.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    assert 'imgs' in results

    imgs = [x.transpose(2, 0, 1) for x in results['imgs']]
    imgs = to_tensor(np.stack(imgs))

    imgs = self.trans(imgs).data.numpy()
    imgs[imgs > 255] = 255
    imgs[imgs < 0] = 0
    imgs = imgs.astype(np.uint8)
    imgs = [x.transpose(1, 2, 0) for x in imgs]
    results['imgs'] = imgs
    return results
Perform Torchvision augmentations. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/wrappers.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py
Apache-2.0
def transform(self, results):
    """Perform PytorchVideoTrans augmentations.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in pipeline.
    """
    assert 'imgs' in results

    assert 'gt_bboxes' not in results, \
        f'PytorchVideo {self.op} doesn\'t support bboxes yet.'
    assert 'proposals' not in results, \
        f'PytorchVideo {self.op} doesn\'t support bboxes yet.'

    if self.op in ('AugMix', 'RandAugment'):
        # list[ndarray(h, w, 3)] -> torch.tensor(t, c, h, w)
        imgs = [x.transpose(2, 0, 1) for x in results['imgs']]
        imgs = to_tensor(np.stack(imgs))
    else:
        # list[ndarray(h, w, 3)] -> torch.tensor(c, t, h, w)
        # uint8 -> float32
        imgs = to_tensor((np.stack(results['imgs']).transpose(3, 0, 1, 2) /
                          255.).astype(np.float32))

    imgs = self.trans(imgs).data.numpy()

    if self.op in ('AugMix', 'RandAugment'):
        imgs[imgs > 255] = 255
        imgs[imgs < 0] = 0
        imgs = imgs.astype(np.uint8)

        # torch.tensor(t, c, h, w) -> list[ndarray(h, w, 3)]
        imgs = [x.transpose(1, 2, 0) for x in imgs]
    else:
        # float32 -> uint8
        imgs = imgs * 255
        imgs[imgs > 255] = 255
        imgs[imgs < 0] = 0
        imgs = imgs.astype(np.uint8)

        # torch.tensor(c, t, h, w) -> list[ndarray(h, w, 3)]
        imgs = [x for x in imgs.transpose(1, 2, 3, 0)]

    results['imgs'] = imgs

    return results
Perform PytorchVideoTrans augmentations. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/wrappers.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py
Apache-2.0
def default_transforms():
    """Default transforms for imgaug.

    Implement RandAugment by imgaug. Please visit
    `https://arxiv.org/abs/1909.13719` for more information.

    Augmenters and hyper parameters are borrowed from the following repo:
    https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py # noqa

    One augmenter, ``SolarizeAdd``, is missing since imgaug doesn't
    support it.

    Returns:
        list[dict]: The constructed RandAugment transforms.
    """
    # RandAugment hyper params
    num_augmenters = 2
    cur_magnitude, max_magnitude = 9, 10
    cur_level = 1.0 * cur_magnitude / max_magnitude

    return [
        dict(
            type='SomeOf',
            n=num_augmenters,
            children=[
                dict(
                    type='ShearX',
                    shear=17.19 * cur_level * random.choice([-1, 1])),
                dict(
                    type='ShearY',
                    shear=17.19 * cur_level * random.choice([-1, 1])),
                dict(
                    type='TranslateX',
                    percent=.2 * cur_level * random.choice([-1, 1])),
                dict(
                    type='TranslateY',
                    percent=.2 * cur_level * random.choice([-1, 1])),
                dict(
                    type='Rotate',
                    rotate=30 * cur_level * random.choice([-1, 1])),
                dict(type='Posterize', nb_bits=max(1, int(4 * cur_level))),
                dict(type='Solarize', threshold=256 * cur_level),
                dict(type='EnhanceColor', factor=1.8 * cur_level + .1),
                dict(type='EnhanceContrast', factor=1.8 * cur_level + .1),
                dict(
                    type='EnhanceBrightness', factor=1.8 * cur_level + .1),
                dict(type='EnhanceSharpness', factor=1.8 * cur_level + .1),
                dict(type='Autocontrast', cutoff=0),
                dict(type='Equalize'),
                dict(type='Invert', p=1.),
                dict(
                    type='Cutout',
                    nb_iterations=1,
                    size=0.2 * cur_level,
                    squared=True)
            ])
    ]
Default transforms for imgaug. Implement RandAugment by imgaug. Please visit `https://arxiv.org/abs/1909.13719` for more information. Augmenters and hyper parameters are borrowed from the following repo: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py # noqa One augmenter, ``SolarizeAdd``, is missing since imgaug doesn't support it. Returns: list[dict]: The constructed RandAugment transforms.
default_transforms
python
open-mmlab/mmaction2
mmaction/datasets/transforms/wrappers.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py
Apache-2.0
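The magnitude arithmetic in default_transforms is worth seeing on its own: every augmenter's strength is a fixed maximum scaled by cur_level = cur_magnitude / max_magnitude. A few of the resulting values at the default magnitude of 9:

cur_magnitude, max_magnitude = 9, 10
cur_level = 1.0 * cur_magnitude / max_magnitude
print(30 * cur_level)              # Rotate: up to 27 degrees
print(17.19 * cur_level)           # ShearX/ShearY: up to ~15.47 degrees
print(max(1, int(4 * cur_level)))  # Posterize: 3 bits kept (never below 1)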
def imgaug_builder(self, cfg):
    """Import a module from imgaug.

    It follows the logic of :func:`build_from_cfg`. Use a dict object to
    create an iaa.Augmenter object.

    Args:
        cfg (dict): Config dict. It should at least contain the key "type".

    Returns:
        obj:`iaa.Augmenter`: The constructed imgaug augmenter.
    """
    import imgaug.augmenters as iaa

    assert isinstance(cfg, dict) and 'type' in cfg
    args = cfg.copy()

    obj_type = args.pop('type')
    if mmengine.is_str(obj_type):
        obj_cls = getattr(iaa, obj_type) if hasattr(iaa, obj_type) \
            else getattr(iaa.pillike, obj_type)
    elif issubclass(obj_type, iaa.Augmenter):
        obj_cls = obj_type
    else:
        raise TypeError(
            f'type must be a str or valid type, but got {type(obj_type)}')

    for aug_list_key in ['children', 'then_list', 'else_list']:
        if aug_list_key in args:
            args[aug_list_key] = [
                self.imgaug_builder(child) for child in args[aug_list_key]
            ]

    return obj_cls(**args)
Import a module from imgaug. It follows the logic of :func:`build_from_cfg`. Use a dict object to create an iaa.Augmenter object. Args: cfg (dict): Config dict. It should at least contain the key "type". Returns: :obj:`iaa.Augmenter`: The constructed imgaug augmenter.
imgaug_builder
python
open-mmlab/mmaction2
mmaction/datasets/transforms/wrappers.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py
Apache-2.0
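As a hedged illustration of the config-to-augmenter mapping (the keyword names below come from imgaug's own signatures, not from this repo), a nested dict such as the following is what imgaug_builder would expand, recursing into then_list:

import imgaug.augmenters as iaa

cfg = dict(
    type='Sometimes', p=0.5,
    then_list=[
        dict(type='Rotate', rotate=(-20, 20)),
        dict(type='EnhanceBrightness', factor=(0.5, 1.5)),
    ])
# imgaug_builder(cfg) resolves to the equivalent of:
aug = iaa.Sometimes(
    p=0.5,
    then_list=[
        iaa.Rotate(rotate=(-20, 20)),
        # EnhanceBrightness lives in iaa.pillike, hence the fallback lookup
        iaa.pillike.EnhanceBrightness(factor=(0.5, 1.5)),
    ])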
def transform(self, results):
    """Perform Imgaug augmentations.

    Args:
        results (dict): The resulting dict to be modified and passed
            to the next transform in the pipeline.
    """
    assert results['modality'] == 'RGB', 'Imgaug only supports RGB images.'
    in_type = results['imgs'][0].dtype

    cur_aug = self.aug.to_deterministic()

    results['imgs'] = [
        cur_aug.augment_image(frame) for frame in results['imgs']
    ]
    img_h, img_w, _ = results['imgs'][0].shape

    out_type = results['imgs'][0].dtype
    assert in_type == out_type, \
        ('Imgaug input dtype and output dtype are not the same. '
         f'Convert from {in_type} to {out_type}')

    if 'gt_bboxes' in results or 'proposals' in results:
        from imgaug.augmentables import bbs

    if 'gt_bboxes' in results:
        bbox_list = [
            bbs.BoundingBox(
                x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
            for bbox in results['gt_bboxes']
        ]
        bboxes = bbs.BoundingBoxesOnImage(
            bbox_list, shape=results['img_shape'])
        bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
        results['gt_bboxes'] = [[
            max(bbox.x1, 0),
            max(bbox.y1, 0),
            min(bbox.x2, img_w),
            min(bbox.y2, img_h)
        ] for bbox in bbox_aug.items]

    if 'proposals' in results:
        bbox_list = [
            bbs.BoundingBox(
                x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
            for bbox in results['proposals']
        ]
        bboxes = bbs.BoundingBoxesOnImage(
            bbox_list, shape=results['img_shape'])
        bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
        results['proposals'] = [[
            max(bbox.x1, 0),
            max(bbox.y1, 0),
            min(bbox.x2, img_w),
            min(bbox.y2, img_h)
        ] for bbox in bbox_aug.items]

    results['img_shape'] = (img_h, img_w)

    return results
Perform Imgaug augmentations. Args: results (dict): The resulting dict to be modified and passed to the next transform in the pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/wrappers.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py
Apache-2.0
def _draw_samples(self,
                  batch_idx: int,
                  data_batch: dict,
                  data_samples: Sequence[ActionDataSample],
                  step: int = 0) -> None:
    """Visualize every ``self.interval`` samples from a data batch.

    Args:
        batch_idx (int): The index of the current batch in the val loop.
        data_batch (dict): Data from dataloader.
        data_samples (Sequence[:obj:`ActionDataSample`]): Outputs from
            the model.
        step (int): Global step value to record. Defaults to 0.
    """
    if self.enable is False:
        return

    batch_size = len(data_samples)
    videos = data_batch['inputs']
    start_idx = batch_size * batch_idx
    end_idx = start_idx + batch_size

    # The first index divisible by the interval, after the start index
    first_sample_id = math.ceil(start_idx / self.interval) * self.interval

    for sample_id in range(first_sample_id, end_idx, self.interval):
        video = videos[sample_id - start_idx]
        # move channel to the last
        video = video.permute(1, 2, 3, 0).numpy().astype('uint8')

        data_sample = data_samples[sample_id - start_idx]
        if 'filename' in data_sample:
            # osp.basename works across platforms, even for file clients.
            sample_name = osp.basename(data_sample.get('filename'))
        elif 'frame_dir' in data_sample:
            sample_name = osp.basename(data_sample.get('frame_dir'))
        else:
            sample_name = str(sample_id)

        draw_args = self.draw_args.copy()
        if self.out_dir is not None:
            draw_args['out_path'] = self.file_client.join_path(
                self.out_dir, f'{sample_name}_{step}')

        self._visualizer.add_datasample(
            sample_name,
            video=video,
            data_sample=data_sample,
            step=step,
            **draw_args,
        )
Visualize every ``self.interval`` samples from a data batch. Args: batch_idx (int): The index of the current batch in the val loop. data_batch (dict): Data from dataloader. data_samples (Sequence[:obj:`ActionDataSample`]): Outputs from the model. step (int): Global step value to record. Defaults to 0.
_draw_samples
python
open-mmlab/mmaction2
mmaction/engine/hooks/visualization_hook.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/hooks/visualization_hook.py
Apache-2.0
def after_val_iter(self, runner: Runner, batch_idx: int, data_batch: dict, outputs: Sequence[ActionDataSample]) -> None: """Visualize every ``self.interval`` samples during validation. Args: runner (:obj:`Runner`): The runner of the validation process. batch_idx (int): The index of the current batch in the val loop. data_batch (dict): Data from dataloader. outputs (Sequence[:obj:`ActionDataSample`]): Outputs from model. """ if isinstance(runner.train_loop, EpochBasedTrainLoop): step = runner.epoch else: step = runner.iter self._draw_samples(batch_idx, data_batch, outputs, step=step)
Visualize every ``self.interval`` samples during validation. Args: runner (:obj:`Runner`): The runner of the validation process. batch_idx (int): The index of the current batch in the val loop. data_batch (dict): Data from dataloader. outputs (Sequence[:obj:`ActionDataSample`]): Outputs from model.
after_val_iter
python
open-mmlab/mmaction2
mmaction/engine/hooks/visualization_hook.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/hooks/visualization_hook.py
Apache-2.0
def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict,
                    outputs: Sequence[ActionDataSample]) -> None:
    """Visualize every ``self.interval`` samples during test.

    Args:
        runner (:obj:`Runner`): The runner of the testing process.
        batch_idx (int): The index of the current batch in the test loop.
        data_batch (dict): Data from dataloader.
        outputs (Sequence[:obj:`ActionDataSample`]): Outputs from model.
    """
    self._draw_samples(batch_idx, data_batch, outputs, step=0)
Visualize every ``self.interval`` samples during test. Args: runner (:obj:`Runner`): The runner of the testing process. batch_idx (int): The index of the current batch in the test loop. data_batch (dict): Data from dataloader. outputs (Sequence[:obj:`ActionDataSample`]): Outputs from model.
after_test_iter
python
open-mmlab/mmaction2
mmaction/engine/hooks/visualization_hook.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/hooks/visualization_hook.py
Apache-2.0
def conv_branch_init(conv: nn.Module, branches: int) -> None: """Perform initialization for a conv branch. Args: conv (nn.Module): The conv module of a branch. branches (int): The number of branches. """ weight = conv.weight n = weight.size(0) k1 = weight.size(1) k2 = weight.size(2) nn.init.normal_(weight, 0, math.sqrt(2. / (n * k1 * k2 * branches))) nn.init.constant_(conv.bias, 0)
Perform initialization for a conv branch. Args: conv (nn.Module): The conv module of a branch. branches (int): The number of branches.
conv_branch_init
python
open-mmlab/mmaction2
mmaction/engine/model/weight_init.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/model/weight_init.py
Apache-2.0
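A minimal usage sketch with conv_branch_init in scope (the layer shapes are made up for illustration): each branch of a multi-branch block is initialized so the fan-in scaling accounts for the number of branches.

import torch.nn as nn

# four parallel branches of a hypothetical multi-branch block
branches = [nn.Conv2d(64, 64, kernel_size=3, padding=1) for _ in range(4)]
for conv in branches:
    conv_branch_init(conv, branches=len(branches))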
def get_layer_id_for_vit(var_name: str, max_layer_id: int) -> int:
    """Get the layer id to set the different learning rates for ViT.

    Args:
        var_name (str): The key of the model.
        max_layer_id (int): Maximum number of backbone layers.

    Returns:
        int: Returns the layer id of the key.
    """
    if var_name in ('backbone.cls_token', 'backbone.mask_token',
                    'backbone.pos_embed'):
        return 0
    elif var_name.startswith('backbone.patch_embed'):
        return 0
    elif var_name.startswith('backbone.blocks'):
        layer_id = int(var_name.split('.')[2])
        return layer_id + 1
    else:
        return max_layer_id + 1
Get the layer id to set the different learning rates for ViT. Args: var_name (str): The key of the model. max_layer_id (int): Maximum number of backbone layers. Returns: int: Returns the layer id of the key.
get_layer_id_for_vit
python
open-mmlab/mmaction2
mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py
Apache-2.0
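For a 12-block ViT backbone the mapping behaves as follows (a hedged sketch; the parameter names are typical mmaction keys, not an exhaustive list):

for name in ('backbone.cls_token',
             'backbone.patch_embed.projection.weight',
             'backbone.blocks.0.attn.qkv.weight',
             'backbone.blocks.11.mlp.fc2.bias',
             'cls_head.fc_cls.weight'):
    print(name, '->', get_layer_id_for_vit(name, max_layer_id=12))
# -> 0, 0, 1, 12, and 13 (head params land one past the last backbone layer)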
def get_layer_id_for_mvit(var_name, max_layer_id): """Get the layer id to set the different learning rates in ``layer_wise`` decay_type. Args: var_name (str): The key of the model. max_layer_id (int): Maximum layer id. Returns: int: The id number corresponding to different learning rate in ``LearningRateDecayOptimizerConstructor``. """ if var_name in ('backbone.cls_token', 'backbone.mask_token', 'backbone.pos_embed'): return 0 elif var_name.startswith('backbone.patch_embed'): return 0 elif var_name.startswith('backbone.blocks'): layer_id = int(var_name.split('.')[2]) + 1 return layer_id else: return max_layer_id + 1
Get the layer id to set the different learning rates in ``layer_wise`` decay_type. Args: var_name (str): The key of the model. max_layer_id (int): Maximum layer id. Returns: int: The id number corresponding to different learning rate in ``LearningRateDecayOptimizerConstructor``.
get_layer_id_for_mvit
python
open-mmlab/mmaction2
mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py
Apache-2.0
def add_params(self, params: List[dict], module: nn.Module,
               **kwargs) -> None:
    """Add all parameters of module to the params list.

    The parameters of the given module will be added to the list of param
    groups, with specific rules defined by paramwise_cfg.

    Args:
        params (list[dict]): A list of param groups, it will be modified
            in place.
        module (nn.Module): The module to be added.
    """
    logger = MMLogger.get_current_instance()

    parameter_groups = {}
    logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}')
    num_layers = self.paramwise_cfg.get('num_layers')
    decay_rate = self.paramwise_cfg.get('decay_rate')
    decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise')
    logger.info('Build LearningRateDecayOptimizerConstructor '
                f'{decay_type} {decay_rate} - {num_layers}')
    weight_decay = self.base_wd

    for m in module.modules():
        assert not isinstance(m, nn.modules.batchnorm._NormBase
                              ), 'BN is not supported with layer decay'

    for name, param in module.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        if len(param.shape) == 1 or name.endswith('.bias'):
            group_name = 'no_decay'
            this_weight_decay = 0.
        else:
            group_name = 'decay'
            this_weight_decay = weight_decay

        if 'layer_wise' in decay_type:
            if 'MViT' in module.backbone.__class__.__name__:
                layer_id = get_layer_id_for_mvit(
                    name, self.paramwise_cfg.get('num_layers'))
                logger.info(f'set param {name} as id {layer_id}')
            elif 'VisionTransformer' in module.backbone.__class__.__name__:
                layer_id = get_layer_id_for_vit(name, num_layers)
                logger.info(f'set param {name} as id {layer_id}')
            else:
                raise NotImplementedError(
                    'Layer-wise decay only supports MViT and '
                    'VisionTransformer backbones, but got '
                    f'{module.backbone.__class__.__name__}')
        else:
            raise NotImplementedError('Only support layer wise decay, '
                                      f'but got {decay_type}')

        group_name = f'layer_{layer_id}_{group_name}'

        if group_name not in parameter_groups:
            scale = decay_rate**(num_layers - layer_id + 1)

            parameter_groups[group_name] = {
                'weight_decay': this_weight_decay,
                'params': [],
                'param_names': [],
                'lr_scale': scale,
                'group_name': group_name,
                'lr': scale * self.base_lr,
            }

        parameter_groups[group_name]['params'].append(param)
        parameter_groups[group_name]['param_names'].append(name)

    rank, _ = get_dist_info()
    if rank == 0:
        to_display = {}
        for key in parameter_groups:
            to_display[key] = {
                'param_names': parameter_groups[key]['param_names'],
                'lr_scale': parameter_groups[key]['lr_scale'],
                'lr': parameter_groups[key]['lr'],
                'weight_decay': parameter_groups[key]['weight_decay'],
            }
        logger.info(f'Param groups = {json.dumps(to_display, indent=2)}')
    params.extend(parameter_groups.values())
Add all parameters of module to the params list. The parameters of the given module will be added to the list of param groups, with specific rules defined by paramwise_cfg. Args: params (list[dict]): A list of param groups, it will be modified in place. module (nn.Module): The module to be added.
add_params
python
open-mmlab/mmaction2
mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/layer_decay_optim_wrapper_constructor.py
Apache-2.0
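The per-group learning-rate scale is decay_rate ** (num_layers - layer_id + 1), so shallower layers get geometrically smaller rates; a quick numeric check (values illustrative):

decay_rate, num_layers = 0.75, 12
for layer_id in (0, 1, 6, 12, 13):
    print(layer_id, round(decay_rate ** (num_layers - layer_id + 1), 4))
# layer 13 (the head) keeps scale 0.75**0 = 1.0; layer 0 is scaled by 0.75**13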
def add_params(self, params: List[dict], module: nn.Module, prefix: str = 'base', **kwargs) -> None: """Add all parameters of module to the params list. The parameters of the given module will be added to the list of param groups, with specific rules defined by paramwise_cfg. Args: params (list[dict]): A list of param groups, it will be modified in place. module (nn.Module): The module to be added. prefix (str): The prefix of the module. Defaults to ``'base'``. """ for name, param in module.named_parameters(recurse=False): param_group = {'params': [param]} if not param.requires_grad: params.append(param_group) continue param_group['lr'] = self.base_lr if self.base_wd is not None: param_group['weight_decay'] = self.base_wd processing_keys = [ key for key in self.paramwise_cfg if key in f'{prefix}.{name}' ] if processing_keys: param_group['lr'] *= \ reduce(mul, [self.paramwise_cfg[key].get('lr_mult', 1.) for key in processing_keys]) if self.base_wd is not None: param_group['weight_decay'] *= \ reduce(mul, [self.paramwise_cfg[key]. get('decay_mult', 1.) for key in processing_keys]) params.append(param_group) for key, value in param_group.items(): if key == 'params': continue full_name = f'{prefix}.{name}' if prefix else name print_log( f'paramwise_options -- ' f'{full_name}: {key} = {round(value, 8)}', logger='current') for child_name, child_mod in module.named_children(): child_prefix = f'{prefix}.{child_name}' if prefix else child_name self.add_params(params, child_mod, prefix=child_prefix)
Add all parameters of module to the params list. The parameters of the given module will be added to the list of param groups, with specific rules defined by paramwise_cfg. Args: params (list[dict]): A list of param groups, it will be modified in place. module (nn.Module): The module to be added. prefix (str): The prefix of the module. Defaults to ``'base'``.
add_params
python
open-mmlab/mmaction2
mmaction/engine/optimizers/swin_optim_wrapper_constructor.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/swin_optim_wrapper_constructor.py
Apache-2.0
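Because matching is a substring test against f'{prefix}.{name}', a paramwise_cfg along the lines of the Video Swin configs (shown here as a hedged sketch, not a verified config file) scales every parameter whose qualified name contains a key:

optim_wrapper = dict(
    optimizer=dict(type='AdamW', lr=1e-3, weight_decay=0.02),
    constructor='SwinOptimWrapperConstructor',
    paramwise_cfg=dict(
        absolute_pos_embed=dict(decay_mult=0.),  # no weight decay on pos embeds
        norm=dict(decay_mult=0.),                # nor on norm layers
        backbone=dict(lr_mult=0.1)))             # backbone at a tenth of the base lr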
def add_params(self, params, model, **kwargs): """Add parameters and their corresponding lr and wd to the params. Args: params (list): The list to be modified, containing all parameter groups and their corresponding lr and wd configurations. model (nn.Module): The model to be trained with the optimizer. """ # use fc_lr5 to determine whether to specify higher multi-factor # for fc layer weights and bias. fc_lr5 = self.paramwise_cfg['fc_lr5'] first_conv_weight = [] first_conv_bias = [] normal_weight = [] normal_bias = [] lr5_weight = [] lr10_bias = [] bn = [] conv_cnt = 0 for m in model.modules(): if isinstance(m, _ConvNd): m_params = list(m.parameters()) conv_cnt += 1 if conv_cnt == 1: first_conv_weight.append(m_params[0]) if len(m_params) == 2: first_conv_bias.append(m_params[1]) else: normal_weight.append(m_params[0]) if len(m_params) == 2: normal_bias.append(m_params[1]) elif isinstance(m, torch.nn.Linear): m_params = list(m.parameters()) normal_weight.append(m_params[0]) if len(m_params) == 2: normal_bias.append(m_params[1]) elif isinstance(m, (_BatchNorm, SyncBatchNorm_, torch.nn.GroupNorm)): for param in list(m.parameters()): if param.requires_grad: bn.append(param) elif len(m._modules) == 0: if len(list(m.parameters())) > 0: raise ValueError(f'New atomic module type: {type(m)}. ' 'Need to give it a learning policy') # pop the cls_head fc layer params last_fc_weight = normal_weight.pop() last_fc_bias = normal_bias.pop() if fc_lr5: lr5_weight.append(last_fc_weight) lr10_bias.append(last_fc_bias) else: normal_weight.append(last_fc_weight) normal_bias.append(last_fc_bias) params.append({ 'params': first_conv_weight, 'lr': self.base_lr, 'weight_decay': self.base_wd }) params.append({ 'params': first_conv_bias, 'lr': self.base_lr * 2, 'weight_decay': 0 }) params.append({ 'params': normal_weight, 'lr': self.base_lr, 'weight_decay': self.base_wd }) params.append({ 'params': normal_bias, 'lr': self.base_lr * 2, 'weight_decay': 0 }) params.append({'params': bn, 'lr': self.base_lr, 'weight_decay': 0}) params.append({ 'params': lr5_weight, 'lr': self.base_lr * 5, 'weight_decay': self.base_wd }) params.append({ 'params': lr10_bias, 'lr': self.base_lr * 10, 'weight_decay': 0 })
Add parameters and their corresponding lr and wd to the params. Args: params (list): The list to be modified, containing all parameter groups and their corresponding lr and wd configurations. model (nn.Module): The model to be trained with the optimizer.
add_params
python
open-mmlab/mmaction2
mmaction/engine/optimizers/tsm_optim_wrapper_constructor.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/tsm_optim_wrapper_constructor.py
Apache-2.0
def confusion_matrix(y_pred, y_real, normalize=None): """Compute confusion matrix. Args: y_pred (list[int] | np.ndarray[int]): Prediction labels. y_real (list[int] | np.ndarray[int]): Ground truth labels. normalize (str | None): Normalizes confusion matrix over the true (rows), predicted (columns) conditions or all the population. If None, confusion matrix will not be normalized. Options are "true", "pred", "all", None. Default: None. Returns: np.ndarray: Confusion matrix. """ if normalize not in ['true', 'pred', 'all', None]: raise ValueError("normalize must be one of {'true', 'pred', " "'all', None}") if isinstance(y_pred, list): y_pred = np.array(y_pred) if y_pred.dtype == np.int32: y_pred = y_pred.astype(np.int64) if not isinstance(y_pred, np.ndarray): raise TypeError( f'y_pred must be list or np.ndarray, but got {type(y_pred)}') if not y_pred.dtype == np.int64: raise TypeError( f'y_pred dtype must be np.int64, but got {y_pred.dtype}') if isinstance(y_real, list): y_real = np.array(y_real) if y_real.dtype == np.int32: y_real = y_real.astype(np.int64) if not isinstance(y_real, np.ndarray): raise TypeError( f'y_real must be list or np.ndarray, but got {type(y_real)}') if not y_real.dtype == np.int64: raise TypeError( f'y_real dtype must be np.int64, but got {y_real.dtype}') label_set = np.unique(np.concatenate((y_pred, y_real))) num_labels = len(label_set) max_label = label_set[-1] label_map = np.zeros(max_label + 1, dtype=np.int64) for i, label in enumerate(label_set): label_map[label] = i y_pred_mapped = label_map[y_pred] y_real_mapped = label_map[y_real] confusion_mat = np.bincount( num_labels * y_real_mapped + y_pred_mapped, minlength=num_labels**2).reshape(num_labels, num_labels) with np.errstate(all='ignore'): if normalize == 'true': confusion_mat = ( confusion_mat / confusion_mat.sum(axis=1, keepdims=True)) elif normalize == 'pred': confusion_mat = ( confusion_mat / confusion_mat.sum(axis=0, keepdims=True)) elif normalize == 'all': confusion_mat = (confusion_mat / confusion_mat.sum()) confusion_mat = np.nan_to_num(confusion_mat) return confusion_mat
Compute confusion matrix. Args: y_pred (list[int] | np.ndarray[int]): Prediction labels. y_real (list[int] | np.ndarray[int]): Ground truth labels. normalize (str | None): Normalizes confusion matrix over the true (rows), predicted (columns) conditions or all the population. If None, confusion matrix will not be normalized. Options are "true", "pred", "all", None. Default: None. Returns: np.ndarray: Confusion matrix.
confusion_matrix
python
open-mmlab/mmaction2
mmaction/evaluation/functional/accuracy.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
Apache-2.0
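A quick usage example on toy labels (note the convention: rows are ground truth, columns are predictions):

import numpy as np

y_pred = [0, 1, 1, 2, 2, 2]
y_real = [0, 1, 2, 2, 1, 2]
print(confusion_matrix(y_pred, y_real))
# [[1 0 0]
#  [0 1 1]
#  [0 1 2]]
print(confusion_matrix(y_pred, y_real, normalize='true'))  # row-normalized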
def mean_class_accuracy(scores, labels):
    """Calculate mean class accuracy.

    Args:
        scores (list[np.ndarray]): Prediction scores for each class.
        labels (list[int]): Ground truth labels.

    Returns:
        float: Mean class accuracy.
    """
    pred = np.argmax(scores, axis=1)
    cf_mat = confusion_matrix(pred, labels).astype(float)

    cls_cnt = cf_mat.sum(axis=1)
    cls_hit = np.diag(cf_mat)

    mean_class_acc = np.mean(
        [hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)])

    return mean_class_acc
Calculate mean class accuracy. Args: scores (list[np.ndarray]): Prediction scores for each class. labels (list[int]): Ground truth labels. Returns: float: Mean class accuracy.
mean_class_accuracy
python
open-mmlab/mmaction2
mmaction/evaluation/functional/accuracy.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
Apache-2.0
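A worked toy example: class 0 is always hit, class 1 is hit once out of twice, so the mean class accuracy is (1.0 + 0.5) / 2:

import numpy as np

scores = np.array([[0.9, 0.1], [0.6, 0.4], [0.2, 0.8]])
labels = [0, 1, 1]
print(mean_class_accuracy(scores, labels))  # 0.75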
def top_k_classes(scores, labels, k=10, mode='accurate'):
    """Calculate the most K accurate (inaccurate) classes.

    Given the prediction scores, ground truth labels and top-k value,
    compute the top K accurate (inaccurate) classes.

    Args:
        scores (list[np.ndarray]): Prediction scores for each class.
        labels (list[int] | np.ndarray): Ground truth labels.
        k (int): Top-k value. Default: 10.
        mode (str): Comparison mode for Top-k. Options are 'accurate'
            and 'inaccurate'. Default: 'accurate'.

    Returns:
        list: List of sorted (from high accuracy to low accuracy for
            'accurate' mode, and from low accuracy to high accuracy for
            'inaccurate' mode) top K classes in format of
            (label_id, acc_ratio).
    """
    assert mode in ['accurate', 'inaccurate']
    pred = np.argmax(scores, axis=1)
    cf_mat = confusion_matrix(pred, labels).astype(float)

    cls_cnt = cf_mat.sum(axis=1)
    cls_hit = np.diag(cf_mat)
    hit_ratio = np.array(
        [hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)])

    if mode == 'accurate':
        max_index = np.argsort(hit_ratio)[-k:][::-1]
        max_value = hit_ratio[max_index]
        results = list(zip(max_index, max_value))
    else:
        min_index = np.argsort(hit_ratio)[:k]
        min_value = hit_ratio[min_index]
        results = list(zip(min_index, min_value))
    return results
Calculate the most K accurate (inaccurate) classes. Given the prediction scores, ground truth labels and top-k value, compute the top K accurate (inaccurate) classes. Args: scores (list[np.ndarray]): Prediction scores for each class. labels (list[int] | np.ndarray): Ground truth labels. k (int): Top-k value. Default: 10. mode (str): Comparison mode for Top-k. Options are 'accurate' and 'inaccurate'. Default: 'accurate'. Returns: list: List of sorted (from high accuracy to low accuracy for 'accurate' mode, and from low accuracy to high accuracy for 'inaccurate' mode) top K classes in format of (label_id, acc_ratio).
top_k_classes
python
open-mmlab/mmaction2
mmaction/evaluation/functional/accuracy.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
Apache-2.0
def top_k_accuracy(scores, labels, topk=(1, )): """Calculate top k accuracy score. Args: scores (list[np.ndarray]): Prediction scores for each class. labels (list[int]): Ground truth labels. topk (tuple[int]): K value for top_k_accuracy. Default: (1, ). Returns: list[float]: Top k accuracy score for each k. """ res = [] labels = np.array(labels)[:, np.newaxis] for k in topk: max_k_preds = np.argsort(scores, axis=1)[:, -k:][:, ::-1] match_array = np.logical_or.reduce(max_k_preds == labels, axis=1) topk_acc_score = match_array.sum() / match_array.shape[0] res.append(topk_acc_score) return res
Calculate top k accuracy score. Args: scores (list[np.ndarray]): Prediction scores for each class. labels (list[int]): Ground truth labels. topk (tuple[int]): K value for top_k_accuracy. Default: (1, ). Returns: list[float]: Top k accuracy score for each k.
top_k_accuracy
python
open-mmlab/mmaction2
mmaction/evaluation/functional/accuracy.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
Apache-2.0
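A short usage example; the third sample is missed at top-1 but recovered at top-2:

import numpy as np

scores = np.array([[0.2, 0.5, 0.3],
                   [0.1, 0.1, 0.8],
                   [0.6, 0.3, 0.1]])
labels = [1, 2, 1]
print(top_k_accuracy(scores, labels, topk=(1, 2)))  # [0.666..., 1.0]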
def mean_average_precision(scores, labels): """Mean average precision for multi-label recognition. Args: scores (list[np.ndarray]): Prediction scores of different classes for each sample. labels (list[np.ndarray]): Ground truth many-hot vector for each sample. Returns: np.float64: The mean average precision. """ results = [] scores = np.stack(scores).T labels = np.stack(labels).T for score, label in zip(scores, labels): precision, recall, _ = binary_precision_recall_curve(score, label) ap = -np.sum(np.diff(recall) * np.array(precision)[:-1]) results.append(ap) results = [x for x in results if not np.isnan(x)] if results == []: return np.nan return np.mean(results)
Mean average precision for multi-label recognition. Args: scores (list[np.ndarray]): Prediction scores of different classes for each sample. labels (list[np.ndarray]): Ground truth many-hot vector for each sample. Returns: np.float64: The mean average precision.
mean_average_precision
python
open-mmlab/mmaction2
mmaction/evaluation/functional/accuracy.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
Apache-2.0
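A minimal multi-label example, assuming binary_precision_recall_curve (defined next in this module) is in scope; each inner array carries per-class scores for one sample:

import numpy as np

scores = [np.array([0.9, 0.2, 0.7]), np.array([0.1, 0.8, 0.3])]
labels = [np.array([1, 0, 1]), np.array([0, 1, 0])]
print(mean_average_precision(scores, labels))  # 1.0: every class is ranked perfectly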
def binary_precision_recall_curve(y_score, y_true): """Calculate the binary precision recall curve at step thresholds. Args: y_score (np.ndarray): Prediction scores for each class. Shape should be (num_classes, ). y_true (np.ndarray): Ground truth many-hot vector. Shape should be (num_classes, ). Returns: precision (np.ndarray): The precision of different thresholds. recall (np.ndarray): The recall of different thresholds. thresholds (np.ndarray): Different thresholds at which precision and recall are tested. """ assert isinstance(y_score, np.ndarray) assert isinstance(y_true, np.ndarray) assert y_score.shape == y_true.shape # make y_true a boolean vector y_true = (y_true == 1) # sort scores and corresponding truth values desc_score_indices = np.argsort(y_score, kind='mergesort')[::-1] y_score = y_score[desc_score_indices] y_true = y_true[desc_score_indices] # There may be ties in values, therefore find the `distinct_value_inds` distinct_value_inds = np.where(np.diff(y_score))[0] threshold_inds = np.r_[distinct_value_inds, y_true.size - 1] # accumulate the true positives with decreasing threshold tps = np.cumsum(y_true)[threshold_inds] fps = 1 + threshold_inds - tps thresholds = y_score[threshold_inds] precision = tps / (tps + fps) precision[np.isnan(precision)] = 0 recall = tps / tps[-1] # stop when full recall attained # and reverse the outputs so recall is decreasing last_ind = tps.searchsorted(tps[-1]) sl = slice(last_ind, None, -1) return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
Calculate the binary precision recall curve at step thresholds. Args: y_score (np.ndarray): Prediction scores for each class. Shape should be (num_classes, ). y_true (np.ndarray): Ground truth many-hot vector. Shape should be (num_classes, ). Returns: precision (np.ndarray): The precision of different thresholds. recall (np.ndarray): The recall of different thresholds. thresholds (np.ndarray): Different thresholds at which precision and recall are tested.
binary_precision_recall_curve
python
open-mmlab/mmaction2
mmaction/evaluation/functional/accuracy.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
Apache-2.0
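A small sanity check; the curve stops once full recall is reached and is returned with recall decreasing:

import numpy as np

y_score = np.array([0.8, 0.4, 0.6, 0.1])
y_true = np.array([1, 0, 1, 0])
precision, recall, thresholds = binary_precision_recall_curve(y_score, y_true)
print(precision)   # [1. 1. 1.]
print(recall)      # [1.  0.5 0. ]
print(thresholds)  # [0.6 0.8]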
def pairwise_temporal_iou(candidate_segments, target_segments, calculate_overlap_self=False): """Compute intersection over union between segments. Args: candidate_segments (np.ndarray): 1-dim/2-dim array in format ``[init, end]/[m x 2:=[init, end]]``. target_segments (np.ndarray): 2-dim array in format ``[n x 2:=[init, end]]``. calculate_overlap_self (bool): Whether to calculate overlap_self (union / candidate_length) or not. Default: False. Returns: t_iou (np.ndarray): 1-dim array [n] / 2-dim array [n x m] with IoU ratio. t_overlap_self (np.ndarray, optional): 1-dim array [n] / 2-dim array [n x m] with overlap_self, returns when calculate_overlap_self is True. """ candidate_segments_ndim = candidate_segments.ndim if target_segments.ndim != 2 or candidate_segments_ndim not in [1, 2]: raise ValueError('Dimension of arguments is incorrect') if candidate_segments_ndim == 1: candidate_segments = candidate_segments[np.newaxis, :] n, m = target_segments.shape[0], candidate_segments.shape[0] t_iou = np.empty((n, m), dtype=np.float32) if calculate_overlap_self: t_overlap_self = np.empty((n, m), dtype=np.float32) for i in range(m): candidate_segment = candidate_segments[i, :] tt1 = np.maximum(candidate_segment[0], target_segments[:, 0]) tt2 = np.minimum(candidate_segment[1], target_segments[:, 1]) # Intersection including Non-negative overlap score. segments_intersection = (tt2 - tt1).clip(0) # Segment union. segments_union = ((target_segments[:, 1] - target_segments[:, 0]) + (candidate_segment[1] - candidate_segment[0]) - segments_intersection) # Compute overlap as the ratio of the intersection # over union of two segments. t_iou[:, i] = (segments_intersection.astype(float) / segments_union) if calculate_overlap_self: candidate_length = candidate_segment[1] - candidate_segment[0] t_overlap_self[:, i] = ( segments_intersection.astype(float) / candidate_length) if candidate_segments_ndim == 1: t_iou = np.squeeze(t_iou, axis=1) if calculate_overlap_self: if candidate_segments_ndim == 1: t_overlap_self = np.squeeze(t_overlap_self, axis=1) return t_iou, t_overlap_self return t_iou
Compute intersection over union between segments. Args: candidate_segments (np.ndarray): 1-dim/2-dim array in format ``[init, end]/[m x 2:=[init, end]]``. target_segments (np.ndarray): 2-dim array in format ``[n x 2:=[init, end]]``. calculate_overlap_self (bool): Whether to calculate overlap_self (union / candidate_length) or not. Default: False. Returns: t_iou (np.ndarray): 1-dim array [n] / 2-dim array [n x m] with IoU ratio. t_overlap_self (np.ndarray, optional): 1-dim array [n] / 2-dim array [n x m] with overlap_self, returns when calculate_overlap_self is True.
pairwise_temporal_iou
python
open-mmlab/mmaction2
mmaction/evaluation/functional/accuracy.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
Apache-2.0
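A worked example with two candidate segments against one target; the returned array is indexed [target, candidate]:

import numpy as np

candidates = np.array([[0., 5.], [4., 10.]])
target = np.array([[2., 6.]])
print(pairwise_temporal_iou(candidates, target))  # [[0.5  0.25]]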
def average_recall_at_avg_proposals(ground_truth,
                                    proposals,
                                    total_num_proposals,
                                    max_avg_proposals=None,
                                    temporal_iou_thresholds=np.linspace(
                                        0.5, 0.95, 10)):
    """Computes the average recall given an average number (percentile) of
    proposals per video.

    Args:
        ground_truth (dict): Dict containing the ground truth instances.
        proposals (dict): Dict containing the proposal instances.
        total_num_proposals (int): Total number of proposals in the
            proposal dict.
        max_avg_proposals (int | None): Max number of proposals for one
            video. Default: None.
        temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
            thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.

    Returns:
        tuple([np.ndarray, np.ndarray, np.ndarray, float]):
            (recall, average_recall, proposals_per_video, auc)
        In recall, ``recall[i,j]`` is recall at i-th temporal_iou threshold
        at the j-th average number (percentile) of average number of
        proposals per video. The average_recall is recall averaged over a
        list of temporal_iou threshold (1D array). This is equivalent to
        ``recall.mean(axis=0)``. The ``proposals_per_video`` is the average
        number of proposals per video. The auc is the area under
        ``AR@AN`` curve.
    """
    total_num_videos = len(ground_truth)

    if not max_avg_proposals:
        max_avg_proposals = float(total_num_proposals) / total_num_videos

    ratio = (max_avg_proposals * float(total_num_videos) /
             total_num_proposals)

    # For each video, compute temporal_iou scores among the retrieved
    # proposals
    score_list = []
    total_num_retrieved_proposals = 0
    for video_id in ground_truth:
        # Get proposals for this video.
        proposals_video_id = proposals[video_id]
        this_video_proposals = proposals_video_id[:, :2]
        # Sort proposals by score.
        sort_idx = proposals_video_id[:, 2].argsort()[::-1]
        this_video_proposals = this_video_proposals[sort_idx, :].astype(
            np.float32)

        # Get ground-truth instances associated to this video.
        ground_truth_video_id = ground_truth[video_id]
        this_video_ground_truth = ground_truth_video_id[:, :2].astype(
            np.float32)
        if this_video_proposals.shape[0] == 0:
            n = this_video_ground_truth.shape[0]
            score_list.append(np.zeros((n, 1)))
            continue

        if this_video_proposals.ndim != 2:
            this_video_proposals = np.expand_dims(this_video_proposals,
                                                  axis=0)
        if this_video_ground_truth.ndim != 2:
            this_video_ground_truth = np.expand_dims(
                this_video_ground_truth, axis=0)

        num_retrieved_proposals = np.minimum(
            int(this_video_proposals.shape[0] * ratio),
            this_video_proposals.shape[0])
        total_num_retrieved_proposals += num_retrieved_proposals
        this_video_proposals = this_video_proposals[:
                                                    num_retrieved_proposals, :]

        # Compute temporal_iou scores.
        t_iou = pairwise_temporal_iou(this_video_proposals,
                                      this_video_ground_truth)
        score_list.append(t_iou)

    # Given that the length of the videos is really varied, we
    # compute the number of proposals in terms of a ratio of the total
    # proposals retrieved, i.e. average recall at a percentage of proposals
    # retrieved per video.

    # Computes average recall.
    pcn_list = np.arange(1, 101) / 100.0 * (
        max_avg_proposals * float(total_num_videos) /
        total_num_retrieved_proposals)
    matches = np.empty((total_num_videos, pcn_list.shape[0]))
    positives = np.empty(total_num_videos)
    recall = np.empty((temporal_iou_thresholds.shape[0], pcn_list.shape[0]))
    # Iterates over each temporal_iou threshold.
    for ridx, temporal_iou in enumerate(temporal_iou_thresholds):
        # Inspect positives retrieved per video at different
        # number of proposals (percentage of the total retrieved).
        for i, score in enumerate(score_list):
            # Total positives per video.
            positives[i] = score.shape[0]
            # Find proposals that satisfy the minimum temporal_iou threshold.
            true_positives_temporal_iou = score >= temporal_iou
            # Get number of proposals as a percentage of total retrieved.
            pcn_proposals = np.minimum(
                (score.shape[1] * pcn_list).astype(np.int32), score.shape[1])

            for j, num_retrieved_proposals in enumerate(pcn_proposals):
                # Compute the number of matches
                # for each percentage of the proposals
                matches[i, j] = np.count_nonzero(
                    (true_positives_temporal_iou[:, :num_retrieved_proposals]
                     ).sum(axis=1))

        # Computes recall given the set of matches per video.
        recall[ridx, :] = matches.sum(axis=0) / positives.sum()

    # Recall is averaged.
    avg_recall = recall.mean(axis=0)

    # Get the average number of proposals per video.
    proposals_per_video = pcn_list * (
        float(total_num_retrieved_proposals) / total_num_videos)
    # Get AUC
    area_under_curve = np.trapz(avg_recall, proposals_per_video)
    auc = 100. * float(area_under_curve) / proposals_per_video[-1]
    return recall, avg_recall, proposals_per_video, auc
Computes the average recall given an average number (percentile) of proposals per video. Args: ground_truth (dict): Dict containing the ground truth instances. proposals (dict): Dict containing the proposal instances. total_num_proposals (int): Total number of proposals in the proposal dict. max_avg_proposals (int | None): Max number of proposals for one video. Default: None. temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou thresholds. Default: ``np.linspace(0.5, 0.95, 10)``. Returns: tuple([np.ndarray, np.ndarray, np.ndarray, float]): (recall, average_recall, proposals_per_video, auc) In recall, ``recall[i,j]`` is recall at i-th temporal_iou threshold at the j-th average number (percentile) of average number of proposals per video. The average_recall is recall averaged over a list of temporal_iou threshold (1D array). This is equivalent to ``recall.mean(axis=0)``. The ``proposals_per_video`` is the average number of proposals per video. The auc is the area under ``AR@AN`` curve.
average_recall_at_avg_proposals
python
open-mmlab/mmaction2
mmaction/evaluation/functional/accuracy.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
Apache-2.0
def get_weighted_score(score_list, coeff_list): """Get weighted score with given scores and coefficients. Given n predictions by different classifier: [score_1, score_2, ..., score_n] (score_list) and their coefficients: [coeff_1, coeff_2, ..., coeff_n] (coeff_list), return weighted score: weighted_score = score_1 * coeff_1 + score_2 * coeff_2 + ... + score_n * coeff_n Args: score_list (list[list[np.ndarray]]): List of list of scores, with shape n(number of predictions) X num_samples X num_classes coeff_list (list[float]): List of coefficients, with shape n. Returns: list[np.ndarray]: List of weighted scores. """ assert len(score_list) == len(coeff_list) num_samples = len(score_list[0]) for i in range(1, len(score_list)): assert len(score_list[i]) == num_samples scores = np.array(score_list) # (num_coeff, num_samples, num_classes) coeff = np.array(coeff_list) # (num_coeff, ) weighted_scores = list(np.dot(scores.T, coeff).T) return weighted_scores
Get weighted score with given scores and coefficients. Given n predictions by different classifier: [score_1, score_2, ..., score_n] (score_list) and their coefficients: [coeff_1, coeff_2, ..., coeff_n] (coeff_list), return weighted score: weighted_score = score_1 * coeff_1 + score_2 * coeff_2 + ... + score_n * coeff_n Args: score_list (list[list[np.ndarray]]): List of list of scores, with shape n(number of predictions) X num_samples X num_classes coeff_list (list[float]): List of coefficients, with shape n. Returns: list[np.ndarray]: List of weighted scores.
get_weighted_score
python
open-mmlab/mmaction2
mmaction/evaluation/functional/accuracy.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
Apache-2.0
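Typical use is late fusion of two-stream predictions; a hedged sketch with made-up scores:

import numpy as np

rgb_scores = [np.array([0.7, 0.3]), np.array([0.2, 0.8])]
flow_scores = [np.array([0.5, 0.5]), np.array([0.4, 0.6])]
fused = get_weighted_score([rgb_scores, flow_scores], [1.0, 0.5])
print(fused[0])  # [0.95 0.55], i.e. rgb + 0.5 * flow, sample by sample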
def softmax(x, dim=1):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x - np.max(x, axis=dim, keepdims=True))
    return e_x / e_x.sum(axis=dim, keepdims=True)
Compute softmax values for each set of scores in x.
softmax
python
open-mmlab/mmaction2
mmaction/evaluation/functional/accuracy.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
Apache-2.0
def average_precision_at_temporal_iou(ground_truth,
                                      prediction,
                                      temporal_iou_thresholds=(np.linspace(
                                          0.5, 0.95, 10))):
    """Compute average precision (in detection task) between ground truth and
    predicted data frames. If multiple predictions match the same ground
    truth segment, only the one with the highest score is counted as a true
    positive. This code is greatly inspired by Pascal VOC devkit.

    Args:
        ground_truth (dict): Dict containing the ground truth instances.
            Key: 'video_id'
            Value (np.ndarray): 1D array of 't-start' and 't-end'.
        prediction (np.ndarray): 2D array containing the information of
            proposal instances, including 'video_id', 'class_id', 't-start',
            't-end' and 'score'.
        temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
            thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.

    Returns:
        np.ndarray: 1D array of average precision score.
    """
    ap = np.zeros(len(temporal_iou_thresholds), dtype=np.float32)
    if len(prediction) < 1:
        return ap

    num_gts = 0.
    lock_gt = dict()
    for key in ground_truth:
        lock_gt[key] = np.ones(
            (len(temporal_iou_thresholds), len(ground_truth[key]))) * -1
        num_gts += len(ground_truth[key])

    # Sort predictions by decreasing score order.
    prediction = np.array(prediction)
    scores = prediction[:, 4].astype(float)
    sort_idx = np.argsort(scores)[::-1]
    prediction = prediction[sort_idx]

    # Initialize true positive and false positive vectors.
    tp = np.zeros((len(temporal_iou_thresholds), len(prediction)),
                  dtype=np.int32)
    fp = np.zeros((len(temporal_iou_thresholds), len(prediction)),
                  dtype=np.int32)

    # Assign true positives to matching ground truth instances.
    for idx, this_pred in enumerate(prediction):

        # Check if there is at least one ground truth in the video.
        if this_pred[0] in ground_truth:
            this_gt = np.array(ground_truth[this_pred[0]], dtype=float)
        else:
            fp[:, idx] = 1
            continue

        t_iou = pairwise_temporal_iou(this_pred[2:4].astype(float), this_gt)
        # We would like to retrieve the predictions with highest t_iou score.
        t_iou_sorted_idx = t_iou.argsort()[::-1]
        for t_idx, t_iou_threshold in enumerate(temporal_iou_thresholds):
            for jdx in t_iou_sorted_idx:
                if t_iou[jdx] < t_iou_threshold:
                    fp[t_idx, idx] = 1
                    break
                if lock_gt[this_pred[0]][t_idx, jdx] >= 0:
                    continue
                # Assign as true positive after the filters above.
                tp[t_idx, idx] = 1
                lock_gt[this_pred[0]][t_idx, jdx] = idx
                break

            if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:
                fp[t_idx, idx] = 1

    tp_cumsum = np.cumsum(tp, axis=1).astype(np.float32)
    fp_cumsum = np.cumsum(fp, axis=1).astype(np.float32)
    recall_cumsum = tp_cumsum / num_gts

    precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)

    for t_idx in range(len(temporal_iou_thresholds)):
        ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],
                                                  recall_cumsum[t_idx, :])
    return ap
Compute average precision (in detection task) between ground truth and predicted data frames. If multiple predictions match the same ground truth segment, only the one with the highest score is counted as a true positive. This code is greatly inspired by Pascal VOC devkit. Args: ground_truth (dict): Dict containing the ground truth instances. Key: 'video_id' Value (np.ndarray): 1D array of 't-start' and 't-end'. prediction (np.ndarray): 2D array containing the information of proposal instances, including 'video_id', 'class_id', 't-start', 't-end' and 'score'. temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou thresholds. Default: ``np.linspace(0.5, 0.95, 10)``. Returns: np.ndarray: 1D array of average precision score.
average_precision_at_temporal_iou
python
open-mmlab/mmaction2
mmaction/evaluation/functional/accuracy.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
Apache-2.0
def det2csv(results, custom_classes): """Convert detection results to csv file.""" csv_results = [] for idx in range(len(results)): video_id = results[idx]['video_id'] timestamp = results[idx]['timestamp'] result = results[idx]['outputs'] for label, _ in enumerate(result): for bbox in result[label]: bbox_ = tuple(bbox.tolist()) if custom_classes is not None: actual_label = custom_classes[label + 1] else: actual_label = label + 1 csv_results.append(( video_id, timestamp, ) + bbox_[:4] + (actual_label, ) + bbox_[4:]) return csv_results
Convert detection results to csv file.
det2csv
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py
Apache-2.0
def results2csv(results, out_file, custom_classes=None): """Convert detection results to csv file.""" csv_results = det2csv(results, custom_classes) # save space for float def to_str(item): if isinstance(item, float): return f'{item:.4f}' return str(item) with open(out_file, 'w') as f: for csv_result in csv_results: f.write(','.join(map(to_str, csv_result))) f.write('\n')
Convert detection results to csv file.
results2csv
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py
Apache-2.0
def read_csv(csv_file, class_whitelist=None):
    """Loads boxes and class labels from a CSV file in the AVA format.

    CSV file format described at https://research.google.com/ava/download.html.

    Args:
        csv_file: A file object.
        class_whitelist: If provided, boxes corresponding to (integer) class
            labels not in this set are skipped.

    Returns:
        boxes: A dictionary mapping each unique image key (string) to a list
            of boxes, given as coordinates [y1, x1, y2, x2].
        labels: A dictionary mapping each unique image key (string) to a
            list of integer class labels, matching the corresponding box in
            `boxes`.
        scores: A dictionary mapping each unique image key (string) to a
            list of score values, matching the corresponding label in
            `labels`. If scores are not provided in the csv, then they will
            default to 1.0.
    """
    entries = defaultdict(list)
    boxes = defaultdict(list)
    labels = defaultdict(list)
    scores = defaultdict(list)
    reader = csv.reader(csv_file)
    for row in reader:
        assert len(row) in [7, 8], f'Wrong number of columns: {row}'
        image_key = make_image_key(row[0], row[1])
        x1, y1, x2, y2 = [float(n) for n in row[2:6]]
        action_id = int(row[6])
        if class_whitelist and action_id not in class_whitelist:
            continue

        score = 1.0
        if len(row) == 8:
            score = float(row[7])

        entries[image_key].append((score, action_id, y1, x1, y2, x2))

    for image_key in entries:
        # Evaluation API assumes boxes with descending scores
        entry = sorted(entries[image_key], key=lambda tup: -tup[0])
        boxes[image_key] = [x[2:] for x in entry]
        labels[image_key] = [x[1] for x in entry]
        scores[image_key] = [x[0] for x in entry]

    return boxes, labels, scores
Loads boxes and class labels from a CSV file in the AVA format. CSV file format described at https://research.google.com/ava/download.html. Args: csv_file: A file object. class_whitelist: If provided, boxes corresponding to (integer) class labels not in this set are skipped. Returns: boxes: A dictionary mapping each unique image key (string) to a list of boxes, given as coordinates [y1, x1, y2, x2]. labels: A dictionary mapping each unique image key (string) to a list of integer class labels, matching the corresponding box in `boxes`. scores: A dictionary mapping each unique image key (string) to a list of score values, matching the corresponding label in `labels`. If scores are not provided in the csv, then they will default to 1.0.
read_csv
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py
Apache-2.0
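Since the function takes any file object, feeding it an in-memory AVA-style CSV is enough for a smoke test (this assumes make_image_key, used above, joins the video id and timestamp into a single key):

import io

csv_text = ('vid0001,0902,0.077,0.151,0.283,0.811,80,0.95\n'
            'vid0001,0902,0.332,0.194,0.751,0.923,17,0.40\n')
boxes, labels, scores = read_csv(io.StringIO(csv_text))
# one image key with two boxes, already sorted by descending score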
def read_exclusions(exclusions_file): """Reads a CSV file of excluded timestamps. Args: exclusions_file: A file object containing a csv of video-id,timestamp. Returns: A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904", or an empty set if exclusions file is None. """ excluded = set() if exclusions_file: reader = csv.reader(exclusions_file) for row in reader: assert len(row) == 2, f'Expected only 2 columns, got: {row}' excluded.add(make_image_key(row[0], row[1])) return excluded
Reads a CSV file of excluded timestamps. Args: exclusions_file: A file object containing a csv of video-id,timestamp. Returns: A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904", or an empty set if exclusions file is None.
read_exclusions
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py
Apache-2.0
def read_labelmap(labelmap_file): """Reads a labelmap without the dependency on protocol buffers. Args: labelmap_file: A file object containing a label map protocol buffer. Returns: labelmap: The label map in the form used by the object_detection_evaluation module - a list of {"id": integer, "name": classname } dicts. class_ids: A set containing all of the valid class id integers. """ labelmap = [] class_ids = set() name = '' class_id = '' for line in labelmap_file: if line.startswith(' name:'): name = line.split('"')[1] elif line.startswith(' id:') or line.startswith(' label_id:'): class_id = int(line.strip().split(' ')[-1]) labelmap.append({'id': class_id, 'name': name}) class_ids.add(class_id) return labelmap, class_ids
Reads a labelmap without the dependency on protocol buffers. Args: labelmap_file: A file object containing a label map protocol buffer. Returns: labelmap: The label map in the form used by the object_detection_evaluation module - a list of {"id": integer, "name": classname } dicts. class_ids: A set containing all of the valid class id integers.
read_labelmap
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py
Apache-2.0
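A minimal pbtxt snippet, parsed without protobuf (note the parser keys off the two-space indentation of 'name:' and 'id:' lines):

import io

pbtxt = ('item {\n'
         '  name: "bend/bow (at the waist)"\n'
         '  id: 1\n'
         '}\n')
labelmap, class_ids = read_labelmap(io.StringIO(pbtxt))
print(labelmap)   # [{'id': 1, 'name': 'bend/bow (at the waist)'}]
print(class_ids)  # {1}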
def _import_ground_truth(ground_truth_filename): """Read ground truth file and return the ground truth instances and the activity classes. Args: ground_truth_filename (str): Full path to the ground truth json file. Returns: tuple[list, dict]: (ground_truth, activity_index). ground_truth contains the ground truth instances, which is in a dict format. activity_index contains classes index. """ with open(ground_truth_filename, 'r') as f: data = json.load(f) # Checking format activity_index, class_idx = {}, 0 ground_truth = [] for video_id, video_info in data.items(): for anno in video_info['annotations']: if anno['label'] not in activity_index: activity_index[anno['label']] = class_idx class_idx += 1 # old video_anno ground_truth_item = {} ground_truth_item['video-id'] = video_id[2:] ground_truth_item['t-start'] = float(anno['segment'][0]) ground_truth_item['t-end'] = float(anno['segment'][1]) ground_truth_item['label'] = activity_index[anno['label']] ground_truth.append(ground_truth_item) return ground_truth, activity_index
Read ground truth file and return the ground truth instances and the activity classes. Args: ground_truth_filename (str): Full path to the ground truth json file. Returns: tuple[list, dict]: (ground_truth, activity_index). ground_truth contains the ground truth instances, which is in a dict format. activity_index contains classes index.
_import_ground_truth
python
open-mmlab/mmaction2
mmaction/evaluation/functional/eval_detection.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py
Apache-2.0
def _import_prediction(self, prediction_filename): """Read prediction file and return the prediction instances. Args: prediction_filename (str): Full path to the prediction json file. Returns: List: List containing the prediction instances (dictionaries). """ with open(prediction_filename, 'r') as f: data = json.load(f) # Read predictions. prediction = [] for video_id, video_info in data['results'].items(): for result in video_info: prediction_item = dict() prediction_item['video-id'] = video_id prediction_item['label'] = self.activity_index[result['label']] prediction_item['t-start'] = float(result['segment'][0]) prediction_item['t-end'] = float(result['segment'][1]) prediction_item['score'] = result['score'] prediction.append(prediction_item) return prediction
Read prediction file and return the prediction instances. Args: prediction_filename (str): Full path to the prediction json file. Returns: List: List containing the prediction instances (dictionaries).
_import_prediction
python
open-mmlab/mmaction2
mmaction/evaluation/functional/eval_detection.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py
Apache-2.0
def wrapper_compute_average_precision(self): """Computes average precision for each class.""" ap = np.zeros((len(self.tiou_thresholds), len(self.activity_index))) # Adaptation to query faster ground_truth_by_label = [] prediction_by_label = [] for i in range(len(self.activity_index)): ground_truth_by_label.append([]) prediction_by_label.append([]) for gt in self.ground_truth: ground_truth_by_label[gt['label']].append(gt) for pred in self.prediction: prediction_by_label[pred['label']].append(pred) for i in range(len(self.activity_index)): ap_result = compute_average_precision_detection( ground_truth_by_label[i], prediction_by_label[i], self.tiou_thresholds) ap[:, i] = ap_result return ap
Computes average precision for each class.
wrapper_compute_average_precision
python
open-mmlab/mmaction2
mmaction/evaluation/functional/eval_detection.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py
Apache-2.0
def evaluate(self):
    """Evaluates a prediction file.

    For the detection task we report the interpolated mean average
    precision as the performance measure.
    """
    self.ap = self.wrapper_compute_average_precision()

    self.mAP = self.ap.mean(axis=1)
    self.average_mAP = self.mAP.mean()

    return self.mAP, self.average_mAP
Evaluates a prediction file. For the detection task we report the interpolated mean average precision as the performance measure.
evaluate
python
open-mmlab/mmaction2
mmaction/evaluation/functional/eval_detection.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py
Apache-2.0
def compute_average_precision_detection(ground_truth,
                                        prediction,
                                        tiou_thresholds=np.linspace(
                                            0.5, 0.95, 10)):
    """Compute average precision (detection task) between ground truth and
    prediction data frames. If multiple predictions match the same ground
    truth segment, only the one with the highest score is counted as a true
    positive. This code is greatly inspired by Pascal VOC devkit.

    Args:
        ground_truth (list[dict]): List containing the ground truth instances
            (dictionaries). Required keys are 'video-id', 't-start' and
            't-end'.
        prediction (list[dict]): List containing the prediction instances
            (dictionaries). Required keys are: 'video-id', 't-start', 't-end'
            and 'score'.
        tiou_thresholds (np.ndarray): A 1-D array of temporal intersection
            over union thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.

    Returns:
        np.ndarray: Average precision scores, one per tIoU threshold.
    """
    num_thresholds = len(tiou_thresholds)
    num_gts = len(ground_truth)
    num_preds = len(prediction)
    ap = np.zeros(num_thresholds)
    if len(prediction) == 0:
        return ap

    num_positive = float(num_gts)
    lock_gt = np.ones((num_thresholds, num_gts)) * -1
    # Sort predictions by decreasing score order.
    prediction.sort(key=lambda x: -x['score'])
    # Initialize true positive and false positive vectors.
    tp = np.zeros((num_thresholds, num_preds))
    fp = np.zeros((num_thresholds, num_preds))

    # Adaptation to query faster
    ground_truth_by_videoid = {}
    for i, item in enumerate(ground_truth):
        item['index'] = i
        ground_truth_by_videoid.setdefault(item['video-id'], []).append(item)

    # Assign true positives to matching ground truth instances.
    for idx, pred in enumerate(prediction):
        if pred['video-id'] in ground_truth_by_videoid:
            gts = ground_truth_by_videoid[pred['video-id']]
        else:
            fp[:, idx] = 1
            continue

        tiou_arr = pairwise_temporal_iou(
            np.array([pred['t-start'], pred['t-end']]),
            np.array([np.array([gt['t-start'], gt['t-end']]) for gt in gts]))
        tiou_arr = tiou_arr.reshape(-1)
        # We would like to retrieve the predictions with highest tiou score.
        tiou_sorted_idx = tiou_arr.argsort()[::-1]
        for t_idx, tiou_threshold in enumerate(tiou_thresholds):
            for j_idx in tiou_sorted_idx:
                if tiou_arr[j_idx] < tiou_threshold:
                    fp[t_idx, idx] = 1
                    break
                if lock_gt[t_idx, gts[j_idx]['index']] >= 0:
                    continue
                # Assign as true positive after the filters above.
                tp[t_idx, idx] = 1
                lock_gt[t_idx, gts[j_idx]['index']] = idx
                break

            if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:
                fp[t_idx, idx] = 1

    tp_cumsum = np.cumsum(tp, axis=1).astype(np.float64)
    fp_cumsum = np.cumsum(fp, axis=1).astype(np.float64)
    recall_cumsum = tp_cumsum / num_positive

    precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)

    for t_idx in range(len(tiou_thresholds)):
        ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],
                                                  recall_cumsum[t_idx, :])

    return ap
Compute average precision (detection task) between ground truth and prediction data frames. If multiple predictions match the same ground truth segment, only the one with the highest score is counted as a true positive. This code is greatly inspired by Pascal VOC devkit. Args: ground_truth (list[dict]): List containing the ground truth instances (dictionaries). Required keys are 'video-id', 't-start' and 't-end'. prediction (list[dict]): List containing the prediction instances (dictionaries). Required keys are: 'video-id', 't-start', 't-end' and 'score'. tiou_thresholds (np.ndarray): A 1-D array of temporal intersection over union thresholds. Default: ``np.linspace(0.5, 0.95, 10)``. Returns: np.ndarray: Average precision scores, one per tIoU threshold.
compute_average_precision_detection
python
open-mmlab/mmaction2
mmaction/evaluation/functional/eval_detection.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py
Apache-2.0
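A toy run at a single tIoU threshold (this assumes pairwise_temporal_iou and interpolated_precision_recall from this module are importable); the first prediction overlaps the ground truth at tIoU 0.8 and counts as the only true positive:

import numpy as np

ground_truth = [{'video-id': 'v1', 't-start': 0., 't-end': 5.}]
prediction = [{'video-id': 'v1', 't-start': 0.5, 't-end': 4.5, 'score': 0.9},
              {'video-id': 'v1', 't-start': 6., 't-end': 9., 'score': 0.8}]
ap = compute_average_precision_detection(
    ground_truth, prediction, tiou_thresholds=np.array([0.5]))
print(ap)  # [1.]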
def overlap2d_voc(b1, b2): """Compute the overlaps between a set of boxes b1 and one box b2.""" xmin = np.maximum(b1[:, 0], b2[:, 0]) ymin = np.maximum(b1[:, 1], b2[:, 1]) xmax = np.minimum(b1[:, 2], b2[:, 2]) ymax = np.minimum(b1[:, 3], b2[:, 3]) width = np.maximum(0, xmax - xmin) height = np.maximum(0, ymax - ymin) return width * height
Compute the overlaps between a set of boxes b1 and one box b2.
overlap2d_voc
python
open-mmlab/mmaction2
mmaction/evaluation/functional/multisports_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py
Apache-2.0
def iou2d_voc(b1, b2):
    """Compute the IoU between a set of boxes b1 and one box b2."""
    if b1.ndim == 1:
        b1 = b1[None, :]
    if b2.ndim == 1:
        b2 = b2[None, :]

    assert b2.shape[0] == 1

    ov = overlap2d_voc(b1, b2)

    return ov / (area2d_voc(b1) + area2d_voc(b2) - ov)
Compute the IoU between a set of boxes b1 and one box b2.
iou2d_voc
python
open-mmlab/mmaction2
mmaction/evaluation/functional/multisports_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py
Apache-2.0
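A quick sanity check for ``iou2d_voc`` with hypothetical boxes; it assumes ``area2d_voc`` from the same module computes plain width times height:

import numpy as np

b1 = np.array([[0., 0., 10., 10.],
               [5., 5., 15., 15.]])
b2 = np.array([0., 0., 10., 10.])
# The first box is identical to b2 (IoU 1.0); the second shares a
# 5 x 5 patch, giving 25 / (100 + 100 - 25) ~ 0.143.
print(iou2d_voc(b1, b2))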
def iou3d_voc(b1, b2):
    """Compute the IoU between two tubes with the same temporal extent."""
    assert b1.shape[0] == b2.shape[0]
    assert np.all(b1[:, 0] == b2[:, 0])

    ov = overlap2d_voc(b1[:, 1:5], b2[:, 1:5])

    return np.mean(
        ov / (area2d_voc(b1[:, 1:5]) + area2d_voc(b2[:, 1:5]) - ov))
Compute the IoU between two tubes with the same temporal extent.
iou3d_voc
python
open-mmlab/mmaction2
mmaction/evaluation/functional/multisports_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py
Apache-2.0
def iou3dt_voc(b1, b2, spatialonly=False, temporalonly=False): """Compute the spatio-temporal IoU between two tubes.""" tmin = max(b1[0, 0], b2[0, 0]) tmax = min(b1[-1, 0], b2[-1, 0]) if tmax < tmin: return 0.0 temporal_inter = tmax - tmin temporal_union = max(b1[-1, 0], b2[-1, 0]) - min(b1[0, 0], b2[0, 0]) tube1 = b1[int(np.where( b1[:, 0] == tmin)[0]):int(np.where(b1[:, 0] == tmax)[0]) + 1, :] tube2 = b2[int(np.where( b2[:, 0] == tmin)[0]):int(np.where(b2[:, 0] == tmax)[0]) + 1, :] if temporalonly: return temporal_inter / temporal_union return iou3d_voc(tube1, tube2) * (1. if spatialonly else temporal_inter / temporal_union)
Compute the spatio-temporal IoU between two tubes.
iou3dt_voc
python
open-mmlab/mmaction2
mmaction/evaluation/functional/multisports_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py
Apache-2.0
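A sketch of ``iou3dt_voc`` on two hypothetical tubes whose rows are (frame, x1, y1, x2, y2); the boxes are identical on the shared frames, so only the temporal ratio matters:

import numpy as np

# Frames 1-4 vs frames 2-5: temporal intersection (4 - 2) over
# temporal union (5 - 1) is 0.5; spatial IoU on the shared frames is 1.0.
tube1 = np.array([[f, 0., 0., 10., 10.] for f in range(1, 5)])
tube2 = np.array([[f, 0., 0., 10., 10.] for f in range(2, 6)])
print(iou3dt_voc(tube1, tube2))                     # 0.5
print(iou3dt_voc(tube1, tube2, spatialonly=True))   # 1.0
print(iou3dt_voc(tube1, tube2, temporalonly=True))  # 0.5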
def nms_tubelets(dets, overlapThresh=0.3, top_k=None):
    """Perform NMS on a set of scored tubelets.

    Scored tubelets are numpy arrays with 4K+1 columns, the last one being
    the score. Overlapping tubelets are down-weighted (linear soft-NMS
    style) rather than discarded; the rescored tubelets are returned sorted
    by decreasing score and truncated to ``top_k``.
    """
    # If there are no detections, return the input as-is.
    if len(dets) == 0:
        return dets
    if top_k is None:
        top_k = len(dets)

    K = int((dets.shape[1] - 1) / 4)

    # Coordinates of bounding boxes
    x1 = [dets[:, 4 * k] for k in range(K)]
    y1 = [dets[:, 4 * k + 1] for k in range(K)]
    x2 = [dets[:, 4 * k + 2] for k in range(K)]
    y2 = [dets[:, 4 * k + 3] for k in range(K)]

    # Compute the per-frame box areas and sort tubelets by decreasing score.
    scores = dets[:, -1]
    area = [(x2[k] - x1[k] + 1) * (y2[k] - y1[k] + 1) for k in range(K)]
    order = np.argsort(scores)[::-1]
    weight = np.zeros_like(scores) + 1
    counter = 0

    while order.size > 0:
        i = order[0]
        counter += 1

        # Compute overlap with the remaining tubelets.
        xx1 = [np.maximum(x1[k][i], x1[k][order[1:]]) for k in range(K)]
        yy1 = [np.maximum(y1[k][i], y1[k][order[1:]]) for k in range(K)]
        xx2 = [np.minimum(x2[k][i], x2[k][order[1:]]) for k in range(K)]
        yy2 = [np.minimum(y2[k][i], y2[k][order[1:]]) for k in range(K)]

        w = [np.maximum(0, xx2[k] - xx1[k] + 1) for k in range(K)]
        h = [np.maximum(0, yy2[k] - yy1[k] + 1) for k in range(K)]

        inter_area = [w[k] * h[k] for k in range(K)]
        ious = sum([
            inter_area[k] / (area[k][order[1:]] + area[k][i] - inter_area[k])
            for k in range(K)
        ])
        index = np.where(ious > overlapThresh * K)[0]
        weight[order[index + 1]] = 1 - ious[index]

        index2 = np.where(ious <= overlapThresh * K)[0]
        order = order[index2 + 1]

    dets[:, -1] = dets[:, -1] * weight

    new_scores = dets[:, -1]
    new_order = np.argsort(new_scores)[::-1]
    dets = dets[new_order, :]

    return dets[:top_k, :]
Perform NMS on a set of scored tubelets. Scored tubelets are numpy arrays with 4K+1 columns, the last one being the score. Overlapping tubelets are down-weighted (linear soft-NMS style) rather than discarded; the rescored tubelets are returned sorted by decreasing score and truncated to ``top_k``.
nms_tubelets
python
open-mmlab/mmaction2
mmaction/evaluation/functional/multisports_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py
Apache-2.0
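A hypothetical single-frame (K = 1) run of ``nms_tubelets``; each row is (x1, y1, x2, y2, score). The function rescores ``dets`` in place, so a copy is passed:

import numpy as np

dets = np.array([
    [0., 0., 10., 10., 0.9],
    [1., 1., 11., 11., 0.8],    # heavy overlap: score is down-weighted
    [50., 50., 60., 60., 0.7],  # disjoint: score is left untouched
])
kept = nms_tubelets(dets.copy(), overlapThresh=0.3, top_k=2)
print(kept)  # the two rows with the highest rescored confidence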
def compute_precision_recall(scores, labels, num_gt):
    """Compute precision and recall.

    Args:
        scores: A float numpy array representing detection scores.
        labels: A boolean numpy array representing true/false positive
            labels.
        num_gt: Number of ground truth instances.

    Raises:
        ValueError: if the input is not of the correct format.

    Returns:
        precision: Fraction of positive instances over detected ones. This
            value is None if no ground truth labels are present.
        recall: Fraction of detected positive instances over all positive
            instances. This value is None if no ground truth labels are
            present.
    """
    if (not isinstance(labels, np.ndarray) or labels.dtype != bool
            or len(labels.shape) != 1):
        raise ValueError('labels must be single dimension bool numpy array')

    if not isinstance(scores, np.ndarray) or len(scores.shape) != 1:
        raise ValueError('scores must be single dimension numpy array')

    if num_gt < np.sum(labels):
        raise ValueError(
            'Number of true positives must be smaller than num_gt.')

    if len(scores) != len(labels):
        raise ValueError('scores and labels must be of the same size.')

    if num_gt == 0:
        return None, None

    sorted_indices = np.argsort(scores)
    sorted_indices = sorted_indices[::-1]
    labels = labels.astype(int)
    true_positive_labels = labels[sorted_indices]
    false_positive_labels = 1 - true_positive_labels
    cum_true_positives = np.cumsum(true_positive_labels)
    cum_false_positives = np.cumsum(false_positive_labels)
    precision = cum_true_positives.astype(float) / (
        cum_true_positives + cum_false_positives)
    recall = cum_true_positives.astype(float) / num_gt
    return precision, recall
Compute precision and recall. Args: scores: A float numpy array representing detection scores. labels: A boolean numpy array representing true/false positive labels. num_gt: Number of ground truth instances. Raises: ValueError: if the input is not of the correct format. Returns: precision: Fraction of positive instances over detected ones. This value is None if no ground truth labels are present. recall: Fraction of detected positive instances over all positive instances. This value is None if no ground truth labels are present.
compute_precision_recall
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/metrics.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/metrics.py
Apache-2.0
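A small worked example for ``compute_precision_recall`` with hypothetical detection scores and true/false-positive labels:

import numpy as np

scores = np.array([0.9, 0.8, 0.6, 0.4])
labels = np.array([True, False, True, False])
precision, recall = compute_precision_recall(scores, labels, num_gt=3)
print(precision)  # ~ [1.0, 0.5, 0.667, 0.5]
print(recall)     # ~ [0.333, 0.333, 0.667, 0.667]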
def compute_average_precision(precision, recall):
    """Compute Average Precision according to the definition in VOCdevkit.

    Precision is modified to ensure that it does not decrease as recall
    decreases.

    Args:
        precision: A float [N, 1] numpy array of precisions.
        recall: A float [N, 1] numpy array of recalls.

    Raises:
        ValueError: if the input is not of the correct format.

    Returns:
        average_precision: The area under the precision-recall curve. NaN if
            precision and recall are None.
    """
    if precision is None:
        if recall is not None:
            raise ValueError('If precision is None, recall must also be None')
        return np.nan

    if not isinstance(precision, np.ndarray) or not isinstance(
            recall, np.ndarray):
        raise ValueError('precision and recall must be numpy array')
    if precision.dtype != np.float64 or recall.dtype != np.float64:
        raise ValueError('input must be float numpy array.')
    if len(precision) != len(recall):
        raise ValueError('precision and recall must be of the same size.')
    if not precision.size:
        return 0.0
    if np.amin(precision) < 0 or np.amax(precision) > 1:
        raise ValueError('Precision must be in the range of [0, 1].')
    if np.amin(recall) < 0 or np.amax(recall) > 1:
        raise ValueError('recall must be in the range of [0, 1].')
    if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
        raise ValueError('recall must be a non-decreasing array')

    recall = np.concatenate([[0], recall, [1]])
    precision = np.concatenate([[0], precision, [0]])

    # Smooth precision so it is non-increasing along increasing recall.
    for i in range(len(precision) - 2, -1, -1):
        precision[i] = np.maximum(precision[i], precision[i + 1])

    indices = np.where(recall[1:] != recall[:-1])[0] + 1
    average_precision = np.sum(
        (recall[indices] - recall[indices - 1]) * precision[indices])
    return average_precision
Compute Average Precision according to the definition in VOCdevkit. Precision is modified to ensure that it does not decrease as recall decreases. Args: precision: A float [N, 1] numpy array of precisions. recall: A float [N, 1] numpy array of recalls. Raises: ValueError: if the input is not of the correct format. Returns: average_precision: The area under the precision-recall curve. NaN if precision and recall are None.
compute_average_precision
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/metrics.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/metrics.py
Apache-2.0
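A worked example for ``compute_average_precision``; both arrays must be float64 and recall must be non-decreasing:

import numpy as np

precision = np.array([1.0, 0.5, 2 / 3], dtype=np.float64)
recall = np.array([0.25, 0.25, 0.5], dtype=np.float64)
# After the monotonic fixup the curve is 1.0 up to recall 0.25 and 2/3
# up to recall 0.5, so AP = 0.25 * 1.0 + 0.25 * (2 / 3) ~ 0.417.
print(compute_average_precision(precision, recall))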
def compute_cor_loc(num_gt_imgs_per_class,
                    num_images_correctly_detected_per_class):
    """Compute CorLoc according to the definition in the following paper.

    https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf

    Returns NaNs if there are no ground truth images for a class.

    Args:
        num_gt_imgs_per_class: 1D array, representing the number of images
            containing at least one object instance of a particular class.
        num_images_correctly_detected_per_class: 1D array, representing the
            number of images in which at least one object instance of a
            particular class was correctly detected.

    Returns:
        corloc_per_class: A float numpy array representing the CorLoc score
            of each class.
    """
    # Divide by zero expected for classes with no gt examples.
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.where(
            num_gt_imgs_per_class == 0, np.nan,
            num_images_correctly_detected_per_class / num_gt_imgs_per_class)
Compute CorLoc according to the definition in the following paper. https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf Returns NaNs if there are no ground truth images for a class. Args: num_gt_imgs_per_class: 1D array, representing the number of images containing at least one object instance of a particular class. num_images_correctly_detected_per_class: 1D array, representing the number of images in which at least one object instance of a particular class was correctly detected. Returns: corloc_per_class: A float numpy array representing the CorLoc score of each class.
compute_cor_loc
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/metrics.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/metrics.py
Apache-2.0
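A quick sketch of ``compute_cor_loc``; the class with zero ground truth images comes back as NaN:

import numpy as np

num_gt_imgs = np.array([10, 0, 4])
num_correct = np.array([7, 0, 2])
print(compute_cor_loc(num_gt_imgs, num_correct))  # [0.7, nan, 0.5]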
def __init__(self, data): """Constructs box collection. Args: data: a numpy array of shape [N, 4] representing box coordinates Raises: ValueError: if bbox data is not a numpy array ValueError: if invalid dimensions for bbox data """ if not isinstance(data, np.ndarray): raise ValueError('data must be a numpy array.') if len(data.shape) != 2 or data.shape[1] != 4: raise ValueError('Invalid dimensions for box data.') if data.dtype != np.float32 and data.dtype != np.float64: raise ValueError( 'Invalid data type for box data: float is required.') if not self._is_valid_boxes(data): raise ValueError('Invalid box data. data must be a numpy array of ' 'N*[y_min, x_min, y_max, x_max]') self.data = {'boxes': data}
Constructs box collection. Args: data: a numpy array of shape [N, 4] representing box coordinates Raises: ValueError: if bbox data is not a numpy array ValueError: if invalid dimensions for bbox data
__init__
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_list.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py
Apache-2.0
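A construction sketch for the box collection above. The class name is not shown in this excerpt; ``BoxList`` (the class defined in ``np_box_list.py``) is assumed here:

import numpy as np

# Boxes are [y_min, x_min, y_max, x_max], float32 or float64.
boxes = np.array([[0., 0., 10., 10.],
                  [2., 2., 8., 12.]], dtype=np.float32)
box_list = BoxList(boxes)  # hypothetical class name, see note above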
def add_field(self, field, field_data):
    """Add data to a specified field.

    Args:
        field: a string parameter used to specify a related field to be
            accessed.
        field_data: a numpy array of [N, ...] representing the data
            associated with the field.

    Raises:
        ValueError: if the field already exists or the dimensions of the
            field data do not match the number of boxes.
    """
    if self.has_field(field):
        raise ValueError('Field ' + field + ' already exists')
    if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes():
        raise ValueError('Invalid dimensions for field data')
    self.data[field] = field_data
Add data to a specified field. Args: field: a string parameter used to specify a related field to be accessed. field_data: a numpy array of [N, ...] representing the data associated with the field. Raises: ValueError: if the field already exists or the dimensions of the field data do not match the number of boxes.
add_field
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_list.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py
Apache-2.0
def get_field(self, field): """Accesses data associated with the specified field in the box collection. Args: field: a string parameter used to specify a related field to be accessed. Returns: a numpy 1-d array representing data of an associated field Raises: ValueError: if invalid field """ if not self.has_field(field): raise ValueError(f'field {field} does not exist') return self.data[field]
Accesses data associated with the specified field in the box collection. Args: field: a string parameter used to specify a related field to be accessed. Returns: a numpy 1-d array representing data of an associated field Raises: ValueError: if invalid field
get_field
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_list.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py
Apache-2.0
def get_coordinates(self): """Get corner coordinates of boxes. Returns: a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max] """ box_coordinates = self.get() y_min = box_coordinates[:, 0] x_min = box_coordinates[:, 1] y_max = box_coordinates[:, 2] x_max = box_coordinates[:, 3] return [y_min, x_min, y_max, x_max]
Get corner coordinates of boxes. Returns: a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]
get_coordinates
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_list.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py
Apache-2.0
def _is_valid_boxes(data):
    """Check whether data fulfills the format of N*[ymin, xmin, ymax, xmax].

    Args:
        data: a numpy array of shape [N, 4] representing box coordinates

    Returns:
        a boolean indicating whether all ymax of boxes are equal or greater
        than ymin, and all xmax of boxes are equal or greater than xmin.
    """
    if len(data) != 0:
        for v in data:
            if v[0] > v[2] or v[1] > v[3]:
                return False
    return True
Check whether data fulfills the format of N*[ymin, xmin, ymax, xmax]. Args: data: a numpy array of shape [N, 4] representing box coordinates Returns: a boolean indicating whether all ymax of boxes are equal or greater than ymin, and all xmax of boxes are equal or greater than xmin.
_is_valid_boxes
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_list.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py
Apache-2.0
def intersection(boxes1, boxes2):
    """Compute pairwise intersection areas between boxes.

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes
        boxes2: a numpy array with shape [M, 4] holding M boxes

    Returns:
        a numpy array with shape [N, M] representing pairwise intersection
        areas
    """
    [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
    [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)

    all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
    all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
    intersect_heights = np.maximum(
        np.zeros(all_pairs_max_ymin.shape),
        all_pairs_min_ymax - all_pairs_max_ymin)
    all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
    all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
    intersect_widths = np.maximum(
        np.zeros(all_pairs_max_xmin.shape),
        all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths
Compute pairwise intersection areas between boxes. Args: boxes1: a numpy array with shape [N, 4] holding N boxes boxes2: a numpy array with shape [M, 4] holding M boxes Returns: a numpy array with shape [N, M] representing pairwise intersection areas
intersection
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_ops.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_ops.py
Apache-2.0
def iou(boxes1, boxes2):
    """Computes pairwise intersection-over-union between box collections.

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.

    Returns:
        a numpy array with shape [N, M] representing pairwise iou scores.
    """
    intersect = intersection(boxes1, boxes2)
    area1 = area(boxes1)
    area2 = area(boxes2)
    union = (
        np.expand_dims(area1, axis=1) + np.expand_dims(area2, axis=0) -
        intersect)
    return intersect / union
Computes pairwise intersection-over-union between box collections. Args: boxes1: a numpy array with shape [N, 4] holding N boxes. boxes2: a numpy array with shape [M, 4] holding M boxes. Returns: a numpy array with shape [N, M] representing pairwise iou scores.
iou
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_ops.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_ops.py
Apache-2.0
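A pairwise ``iou`` sketch with hypothetical boxes in [y_min, x_min, y_max, x_max] order; it assumes the module's ``area`` helper computes plain height times width:

import numpy as np

boxes1 = np.array([[0., 0., 10., 10.]])
boxes2 = np.array([[0., 0., 10., 10.],
                   [5., 5., 15., 15.]])
# Shape [1, 2]: the identical box gives 1.0; the offset box shares a
# 5 x 5 corner, giving 25 / (100 + 100 - 25) ~ 0.143.
print(iou(boxes1, boxes2))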
def ioa(boxes1, boxes2):
    """Computes pairwise intersection-over-area between box collections.

    Intersection-over-area (ioa) between two boxes box1 and box2 is defined
    as their intersection area over box2's area. Note that ioa is not
    symmetric, that is, IOA(box1, box2) != IOA(box2, box1).

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.

    Returns:
        a numpy array with shape [N, M] representing pairwise ioa scores.
    """
    intersect = intersection(boxes1, boxes2)
    areas = np.expand_dims(area(boxes2), axis=0)
    return intersect / areas
Computes pairwise intersection-over-area between box collections. Intersection-over-area (ioa) between two boxes box1 and box2 is defined as their intersection area over box2's area. Note that ioa is not symmetric, that is, IOA(box1, box2) != IOA(box2, box1). Args: boxes1: a numpy array with shape [N, 4] holding N boxes. boxes2: a numpy array with shape [M, 4] holding M boxes. Returns: a numpy array with shape [N, M] representing pairwise ioa scores.
ioa
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_ops.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_ops.py
Apache-2.0
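The asymmetry noted in the ``ioa`` docstring, demonstrated on two hypothetical boxes (same ``area`` helper assumption as above):

import numpy as np

small = np.array([[0., 0., 5., 5.]])
big = np.array([[0., 0., 10., 10.]])
print(ioa(small, big))  # [[0.25]]: intersection over big's area
print(ioa(big, small))  # [[1.0]]:  intersection over small's area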
def process(self, data_batch: Sequence[Tuple[Any, Dict]],
            data_samples: Sequence[Dict]) -> None:
    """Process one batch of data samples and model predictions.

    The processed results should be stored in ``self.results``, which will
    be used to compute the metrics when all batches have been processed.

    Args:
        data_batch (Sequence[dict]): A batch of data from the dataloader.
        data_samples (Sequence[dict]): A batch of outputs from the model.
    """
    data_samples = copy.deepcopy(data_samples)
    for data_sample in data_samples:
        result = dict()
        pred = data_sample['pred_score']
        label = data_sample['gt_label']

        # Ad-hoc for RGBPoseConv3D
        if isinstance(pred, dict):
            for item_name, score in pred.items():
                pred[item_name] = score.cpu().numpy()
        else:
            pred = pred.cpu().numpy()

        result['pred'] = pred
        if label.size(0) == 1:
            # single-label
            result['label'] = label.item()
        else:
            # multi-label
            result['label'] = label.cpu().numpy()
        self.results.append(result)
Process one batch of data samples and model predictions. The processed results should be stored in ``self.results``, which will be used to compute the metrics when all batches have been processed. Args: data_batch (Sequence[dict]): A batch of data from the dataloader. data_samples (Sequence[dict]): A batch of outputs from the model.
process
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/acc_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py
Apache-2.0
def compute_metrics(self, results: List) -> Dict: """Compute the metrics from processed results. Args: results (list): The processed results of each batch. Returns: dict: The computed metrics. The keys are the names of the metrics, and the values are corresponding results. """ labels = [x['label'] for x in results] eval_results = dict() # Ad-hoc for RGBPoseConv3D if isinstance(results[0]['pred'], dict): for item_name in results[0]['pred'].keys(): preds = [x['pred'][item_name] for x in results] eval_result = self.calculate(preds, labels) eval_results.update( {f'{item_name}_{k}': v for k, v in eval_result.items()}) if len(results[0]['pred']) == 2 and \ 'rgb' in results[0]['pred'] and \ 'pose' in results[0]['pred']: rgb = [x['pred']['rgb'] for x in results] pose = [x['pred']['pose'] for x in results] preds = { '1:1': get_weighted_score([rgb, pose], [1, 1]), '2:1': get_weighted_score([rgb, pose], [2, 1]), '1:2': get_weighted_score([rgb, pose], [1, 2]) } for k in preds: eval_result = self.calculate(preds[k], labels) eval_results.update({ f'RGBPose_{k}_{key}': v for key, v in eval_result.items() }) return eval_results # Simple Acc Calculation else: preds = [x['pred'] for x in results] return self.calculate(preds, labels)
Compute the metrics from processed results. Args: results (list): The processed results of each batch. Returns: dict: The computed metrics. The keys are the names of the metrics, and the values are corresponding results.
compute_metrics
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/acc_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py
Apache-2.0
def calculate(self, preds: List[np.ndarray], labels: List[Union[int, np.ndarray]]) -> Dict: """Compute the metrics from processed results. Args: preds (list[np.ndarray]): List of the prediction scores. labels (list[int | np.ndarray]): List of the labels. Returns: dict: The computed metrics. The keys are the names of the metrics, and the values are corresponding results. """ eval_results = OrderedDict() metric_options = copy.deepcopy(self.metric_options) for metric in self.metrics: if metric == 'top_k_accuracy': topk = metric_options.setdefault('top_k_accuracy', {}).setdefault( 'topk', (1, 5)) if not isinstance(topk, (int, tuple)): raise TypeError('topk must be int or tuple of int, ' f'but got {type(topk)}') if isinstance(topk, int): topk = (topk, ) top_k_acc = top_k_accuracy(preds, labels, topk) for k, acc in zip(topk, top_k_acc): eval_results[f'top{k}'] = acc if metric == 'mean_class_accuracy': mean1 = mean_class_accuracy(preds, labels) eval_results['mean1'] = mean1 if metric in [ 'mean_average_precision', 'mmit_mean_average_precision', ]: if metric == 'mean_average_precision': mAP = mean_average_precision(preds, labels) eval_results['mean_average_precision'] = mAP elif metric == 'mmit_mean_average_precision': mAP = mmit_mean_average_precision(preds, labels) eval_results['mmit_mean_average_precision'] = mAP return eval_results
Compute the metrics from processed results. Args: preds (list[np.ndarray]): List of the prediction scores. labels (list[int | np.ndarray]): List of the labels. Returns: dict: The computed metrics. The keys are the names of the metrics, and the values are corresponding results.
calculate
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/acc_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py
Apache-2.0
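A sketch of what the ``calculate`` method above delegates to, with hypothetical scores. The import path is an assumption; ``top_k_accuracy`` and ``mean_class_accuracy`` are the helpers referenced in the code:

import numpy as np

from mmaction.evaluation import mean_class_accuracy, top_k_accuracy

preds = [np.array([0.1, 0.2, 0.7]),
         np.array([0.6, 0.3, 0.1]),
         np.array([0.2, 0.5, 0.3])]
labels = [2, 0, 2]
top1, top2 = top_k_accuracy(preds, labels, topk=(1, 2))
print(top1, top2)                          # ~ 0.667 1.0
print(mean_class_accuracy(preds, labels))  # ~ 0.75, mean per-class recall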
def calculate(pred, target, num_classes=None) -> dict:
    """Calculate the confusion matrix for a single-label task.

    Args:
        pred (torch.Tensor | np.ndarray | Sequence): The prediction
            results. It can be labels (N, ), or scores of every
            class (N, C).
        target (torch.Tensor | np.ndarray | Sequence): The target of
            each prediction with shape (N, ).
        num_classes (Optional, int): The number of classes. If ``pred``
            contains labels instead of scores, this argument is required.
            Defaults to None.

    Returns:
        torch.Tensor: The confusion matrix.
    """
    pred = to_tensor(pred)
    target_label = to_tensor(target).int()

    assert pred.size(0) == target_label.size(0), \
        f"The size of pred ({pred.size(0)}) doesn't match "\
        f'the target ({target_label.size(0)}).'
    assert target_label.ndim == 1

    if pred.ndim == 1:
        assert num_classes is not None, \
            'Please specify the `num_classes` if the `pred` is labels ' \
            'instead of scores.'
        pred_label = pred
    else:
        num_classes = num_classes or pred.size(1)
        pred_label = torch.argmax(pred, dim=1).flatten()

    with torch.no_grad():
        indices = num_classes * target_label + pred_label
        matrix = torch.bincount(indices, minlength=num_classes**2)
        matrix = matrix.reshape(num_classes, num_classes)
    return matrix
Calculate the confusion matrix for a single-label task. Args: pred (torch.Tensor | np.ndarray | Sequence): The prediction results. It can be labels (N, ), or scores of every class (N, C). target (torch.Tensor | np.ndarray | Sequence): The target of each prediction with shape (N, ). num_classes (Optional, int): The number of classes. If ``pred`` contains labels instead of scores, this argument is required. Defaults to None. Returns: torch.Tensor: The confusion matrix.
calculate
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/acc_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py
Apache-2.0
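A sketch with hypothetical scores, written as a direct call on the assumption that ``calculate`` is reachable as a static helper and that ``to_tensor`` passes torch tensors through unchanged:

import torch

pred = torch.tensor([[0.7, 0.2, 0.1],
                     [0.1, 0.8, 0.1],
                     [0.3, 0.3, 0.4]])
target = torch.tensor([0, 1, 1])
# Rows index the true class, columns the predicted class.
print(calculate(pred, target))
# tensor([[1, 0, 0],
#         [0, 1, 1],
#         [0, 0, 0]])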
def plot(confusion_matrix: torch.Tensor, include_values: bool = False, cmap: str = 'viridis', classes: Optional[List[str]] = None, colorbar: bool = True, show: bool = True): """Draw a confusion matrix by matplotlib. Modified from `Scikit-Learn <https://github.com/scikit-learn/scikit-learn/blob/dc580a8ef/sklearn/metrics/_plot/confusion_matrix.py#L81>`_ Args: confusion_matrix (torch.Tensor): The confusion matrix to draw. include_values (bool): Whether to draw the values in the figure. Defaults to False. cmap (str): The color map to use. Defaults to use "viridis". classes (list[str], optional): The names of categories. Defaults to None, which means to use index number. colorbar (bool): Whether to show the colorbar. Defaults to True. show (bool): Whether to show the figure immediately. Defaults to True. """ # noqa: E501 import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(10, 10)) num_classes = confusion_matrix.size(0) im_ = ax.imshow(confusion_matrix, interpolation='nearest', cmap=cmap) text_ = None cmap_min, cmap_max = im_.cmap(0), im_.cmap(1.0) if include_values: text_ = np.empty_like(confusion_matrix, dtype=object) # print text with appropriate color depending on background thresh = (confusion_matrix.max() + confusion_matrix.min()) / 2.0 for i, j in product(range(num_classes), range(num_classes)): color = cmap_max if confusion_matrix[i, j] < thresh else cmap_min text_cm = format(confusion_matrix[i, j], '.2g') text_d = format(confusion_matrix[i, j], 'd') if len(text_d) < len(text_cm): text_cm = text_d text_[i, j] = ax.text( j, i, text_cm, ha='center', va='center', color=color) display_labels = classes or np.arange(num_classes) if colorbar: fig.colorbar(im_, ax=ax) ax.set( xticks=np.arange(num_classes), yticks=np.arange(num_classes), xticklabels=display_labels, yticklabels=display_labels, ylabel='True label', xlabel='Predicted label', ) ax.invert_yaxis() ax.xaxis.tick_top() ax.set_ylim((num_classes - 0.5, -0.5)) # Automatically rotate the x labels. fig.autofmt_xdate(ha='center') if show: plt.show() return fig
Draw a confusion matrix by matplotlib. Modified from `Scikit-Learn <https://github.com/scikit-learn/scikit-learn/blob/dc580a8ef/sklearn/metrics/_plot/confusion_matrix.py#L81>`_ Args: confusion_matrix (torch.Tensor): The confusion matrix to draw. include_values (bool): Whether to draw the values in the figure. Defaults to False. cmap (str): The color map to use. Defaults to use "viridis". classes (list[str], optional): The names of categories. Defaults to None, which means to use index number. colorbar (bool): Whether to show the colorbar. Defaults to True. show (bool): Whether to show the figure immediately. Defaults to True.
plot
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/acc_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py
Apache-2.0
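A usage sketch for ``plot``; passing ``show=False`` returns the figure so it can be saved instead of displayed (the output path is hypothetical):

import torch

cm = torch.tensor([[50, 3],
                   [5, 42]])
fig = plot(cm, include_values=True, classes=['negative', 'positive'],
           show=False)
fig.savefig('confusion_matrix.png')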
def process(self, data_batch: Sequence[Tuple[Any, dict]], predictions: Sequence[dict]) -> None: """Process one batch of data samples and predictions. The processed results should be stored in ``self.results``, which will be used to compute the metrics when all batches have been processed. Args: data_batch (Sequence[Tuple[Any, dict]]): A batch of data from the dataloader. predictions (Sequence[dict]): A batch of outputs from the model. """ for pred in predictions: self.results.append(pred) if self.metric_type == 'AR@AN': data_batch = data_batch['data_samples'] for data_sample in data_batch: video_info = data_sample.metainfo video_id = video_info['video_name'][2:] this_video_gt = [] for ann in video_info['annotations']: t_start, t_end = ann['segment'] label = ann['label'] this_video_gt.append([t_start, t_end, label]) self.ground_truth[video_id] = np.array(this_video_gt)
Process one batch of data samples and predictions. The processed results should be stored in ``self.results``, which will be used to compute the metrics when all batches have been processed. Args: data_batch (Sequence[Tuple[Any, dict]]): A batch of data from the dataloader. predictions (Sequence[dict]): A batch of outputs from the model.
process
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/anet_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py
Apache-2.0
def compute_metrics(self, results: list) -> dict: """Compute the metrics from processed results. If `metric_type` is 'TEM', only dump middle results and do not compute any metrics. Args: results (list): The processed results of each batch. Returns: dict: The computed metrics. The keys are the names of the metrics, and the values are corresponding results. """ self.dump_results(results) if self.metric_type == 'AR@AN': return self.compute_ARAN(results) return OrderedDict()
Compute the metrics from processed results. If `metric_type` is 'TEM', only dump middle results and do not compute any metrics. Args: results (list): The processed results of each batch. Returns: dict: The computed metrics. The keys are the names of the metrics, and the values are corresponding results.
compute_metrics
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/anet_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py
Apache-2.0
def dump_results(self, results, version='VERSION 1.3'): """Save middle or final results to disk.""" if self.output_format == 'json': result_dict = self.proposals2json(results) output_dict = { 'version': version, 'results': result_dict, 'external_data': {} } mmengine.dump(output_dict, self.out) elif self.output_format == 'csv': os.makedirs(self.out, exist_ok=True) header = 'action,start,end,tmin,tmax' for result in results: video_name, outputs = result output_path = osp.join(self.out, video_name + '.csv') np.savetxt( output_path, outputs, header=header, delimiter=',', comments='') else: raise ValueError( f'The output format {self.output_format} is not supported.')
Save middle or final results to disk.
dump_results
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/anet_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py
Apache-2.0
def proposals2json(results, show_progress=False):
    """Convert all proposals to a final dict(json) format.

    Args:
        results (list[dict]): All proposals.
        show_progress (bool): Whether to show the progress bar.
            Defaults to False.

    Returns:
        dict: The final result dict. E.g.

        .. code-block:: Python

            dict(video-1=[dict(segment=[1.1, 2.0], score=0.9),
                          dict(segment=[50.1, 129.3], score=0.6)])
    """
    result_dict = {}
    print('Convert proposals to json format')
    if show_progress:
        prog_bar = mmcv.ProgressBar(len(results))
    for result in results:
        video_name = result['video_name']
        result_dict[video_name[2:]] = result['proposal_list']
        if show_progress:
            prog_bar.update()
    return result_dict
Convert all proposals to a final dict(json) format. Args: results (list[dict]): All proposals. show_progress (bool): Whether to show the progress bar. Defaults to False. Returns: dict: The final result dict. E.g. .. code-block:: Python dict(video-1=[dict(segment=[1.1, 2.0], score=0.9), dict(segment=[50.1, 129.3], score=0.6)])
proposals2json
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/anet_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py
Apache-2.0
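A sketch of ``proposals2json`` on a hypothetical result list; note that the first two characters of ``video_name`` (e.g. a ``v_`` prefix) are stripped to form the key:

results = [{
    'video_name': 'v_test_video',
    'proposal_list': [dict(segment=[1.1, 2.0], score=0.9),
                      dict(segment=[50.1, 129.3], score=0.6)],
}]
print(proposals2json(results))
# {'test_video': [{'segment': [1.1, 2.0], 'score': 0.9},
#                 {'segment': [50.1, 129.3], 'score': 0.6}]}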
def process(self, data_batch: Sequence[Tuple[Any, dict]], data_samples: Sequence[dict]) -> None: """Process one batch of data samples and predictions. The processed results should be stored in ``self.results``, which will be used to compute the metrics when all batches have been processed. Args: data_batch (Sequence[Tuple[Any, dict]]): A batch of data from the dataloader. data_samples (Sequence[dict]): A batch of outputs from the model. """ for data_sample in data_samples: result = dict() pred = data_sample['pred_instances'] result['video_id'] = data_sample['video_id'] result['timestamp'] = data_sample['timestamp'] outputs = bbox2result( pred['bboxes'], pred['scores'], num_classes=self.num_classes, thr=self.action_thr) result['outputs'] = outputs self.results.append(result)
Process one batch of data samples and predictions. The processed results should be stored in ``self.results``, which will be used to compute the metrics when all batches have been processed. Args: data_batch (Sequence[Tuple[Any, dict]]): A batch of data from the dataloader. data_samples (Sequence[dict]): A batch of outputs from the model.
process
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/ava_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/ava_metric.py
Apache-2.0