Dataset columns:
  code: string, lengths 66 to 870k
  docstring: string, lengths 19 to 26.7k
  func_name: string, lengths 1 to 138
  language: string, 1 distinct value
  repo: string, lengths 7 to 68
  path: string, lengths 5 to 324
  url: string, lengths 46 to 389
  license: string, 7 distinct values
def abbrev(name):
    """Get the abbreviation of label name:

    'take (an object) from (a person)' -> 'take ... from ...'
    """
    while name.find('(') != -1:
        st, ed = name.find('('), name.find(')')
        name = name[:st] + '...' + name[ed + 1:]
    return name
Get the abbreviation of label name: 'take (an object) from (a person)' -> 'take ... from ...'
abbrev
python
open-mmlab/mmaction2
demo/demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det.py
Apache-2.0
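For reference, a minimal usage sketch of the abbrev helper above; the example strings are illustrative only and assume the definition is in scope.

# Minimal usage sketch for `abbrev` (assumes the definition above is in scope).
print(abbrev('take (an object) from (a person)'))  # -> 'take ... from ...'
print(abbrev('watch (e.g., TV)'))                  # -> 'watch ...'
print(abbrev('stand'))                             # no parentheses: 'stand'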
def pack_result(human_detection, result, img_h, img_w):
    """Pack the human detection results and the predicted action labels.

    Args:
        human_detection (np.ndarray): Human detection result.
        result (type): The predicted label of each human proposal.
        img_h (int): The image height.
        img_w (int): The image width.

    Returns:
        tuple: Tuple of human proposal, label name and label score.
    """
    human_detection[:, 0::2] /= img_w
    human_detection[:, 1::2] /= img_h
    results = []
    if result is None:
        return None
    for prop, res in zip(human_detection, result):
        res.sort(key=lambda x: -x[1])
        results.append(
            (prop.data.cpu().numpy(), [x[0] for x in res],
             [x[1] for x in res]))
    return results
Pack the human detection results and the predicted action labels. Args: human_detection (np.ndarray): Human detection result. result (type): The predicted label of each human proposal. img_h (int): The image height. img_w (int): The image width. Returns: tuple: Tuple of human proposal, label name and label score.
pack_result
python
open-mmlab/mmaction2
demo/demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det.py
Apache-2.0
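A hedged sketch of how pack_result might be fed in the demo, assuming the function above is in scope; the boxes, labels and scores below are made up for illustration, and the detections are a float torch tensor as in the demo script.

import torch

# Two hypothetical person boxes in a 640x480 frame (pixel xyxy format).
human_detection = torch.tensor([[10., 20., 200., 400.],
                                [300., 40., 600., 460.]])
# Per-person (label, score) pairs, as produced by the stdet step.
result = [[('stand', 0.9), ('talk to (a person)', 0.4)],
          [('sit', 0.8)]]

packed = pack_result(human_detection, result, img_h=480, img_w=640)
# Each entry is (box normalized to [0, 1], [label names], [scores]),
# with labels sorted by descending score.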
def visualize(frames, annotations, plate=plate_blue, max_num=5):
    """Visualize frames with predicted annotations.

    Args:
        frames (list[np.ndarray]): Frames for visualization, note that
            len(frames) % len(annotations) should be 0.
        annotations (list[list[tuple]]): The predicted results.
        plate (str): The plate used for visualization. Default: plate_blue.
        max_num (int): Max number of labels to visualize for a person box.
            Default: 5.

    Returns:
        list[np.ndarray]: Visualized frames.
    """
    assert max_num + 1 <= len(plate)
    plate = [x[::-1] for x in plate]
    frames_out = cp.deepcopy(frames)
    nf, na = len(frames), len(annotations)
    assert nf % na == 0
    nfpa = len(frames) // len(annotations)
    anno = None
    h, w, _ = frames[0].shape
    scale_ratio = np.array([w, h, w, h])
    for i in range(na):
        anno = annotations[i]
        if anno is None:
            continue
        for j in range(nfpa):
            ind = i * nfpa + j
            frame = frames_out[ind]
            for ann in anno:
                box = ann[0]
                label = ann[1]
                if not len(label):
                    continue
                score = ann[2]
                box = (box * scale_ratio).astype(np.int64)
                st, ed = tuple(box[:2]), tuple(box[2:])
                cv2.rectangle(frame, st, ed, plate[0], 2)
                for k, lb in enumerate(label):
                    if k >= max_num:
                        break
                    text = abbrev(lb)
                    text = ': '.join([text, str(score[k])])
                    location = (0 + st[0], 18 + k * 18 + st[1])
                    textsize = cv2.getTextSize(text, FONTFACE, FONTSCALE,
                                               THICKNESS)[0]
                    textwidth = textsize[0]
                    diag0 = (location[0] + textwidth, location[1] - 14)
                    diag1 = (location[0], location[1] + 2)
                    cv2.rectangle(frame, diag0, diag1, plate[k + 1], -1)
                    cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
                                FONTCOLOR, THICKNESS, LINETYPE)
    return frames_out
Visualize frames with predicted annotations. Args: frames (list[np.ndarray]): Frames for visualization, note that len(frames) % len(annotations) should be 0. annotations (list[list[tuple]]): The predicted results. plate (str): The plate used for visualization. Default: plate_blue. max_num (int): Max number of labels to visualize for a person box. Default: 5. Returns: list[np.ndarray]: Visualized frames.
visualize
python
open-mmlab/mmaction2
demo/demo_spatiotemporal_det_onnx.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det_onnx.py
Apache-2.0
def load_label_map(file_path):
    """Load Label Map.

    Args:
        file_path (str): The file path of label map.

    Returns:
        dict: The label map (int -> label name).
    """
    lines = open(file_path).readlines()
    lines = [x.strip().split(': ') for x in lines]
    return {int(x[0]): x[1] for x in lines}
Load Label Map. Args: file_path (str): The file path of label map. Returns: dict: The label map (int -> label name).
load_label_map
python
open-mmlab/mmaction2
demo/demo_spatiotemporal_det_onnx.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det_onnx.py
Apache-2.0
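The label map loaded above is a plain text file with one '<int>: <name>' entry per line. A sketch under that assumption; the file name and entries are hypothetical, and load_label_map is assumed to be in scope.

from pathlib import Path

# Hypothetical label-map file in the '<int>: <name>' format expected above.
Path('label_map.txt').write_text(
    '1: bend/bow (at the waist)\n2: crouch/kneel\n3: dance\n')

label_map = load_label_map('label_map.txt')
# -> {1: 'bend/bow (at the waist)', 2: 'crouch/kneel', 3: 'dance'}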
def abbrev(name):
    """Get the abbreviation of label name:

    'take (an object) from (a person)' -> 'take ... from ...'
    """
    while name.find('(') != -1:
        st, ed = name.find('('), name.find(')')
        name = name[:st] + '...' + name[ed + 1:]
    return name
Get the abbreviation of label name: 'take (an object) from (a person)' -> 'take ... from ...'
abbrev
python
open-mmlab/mmaction2
demo/demo_spatiotemporal_det_onnx.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det_onnx.py
Apache-2.0
def pack_result(human_detection, result, img_h, img_w):
    """Pack the human detection results and the predicted action labels.

    Args:
        human_detection (np.ndarray): Human detection result.
        result (type): The predicted label of each human proposal.
        img_h (int): The image height.
        img_w (int): The image width.

    Returns:
        tuple: Tuple of human proposal, label name and label score.
    """
    human_detection[:, 0::2] /= img_w
    human_detection[:, 1::2] /= img_h
    results = []
    if result is None:
        return None
    for prop, res in zip(human_detection, result):
        res.sort(key=lambda x: -x[1])
        results.append(
            (prop.data.cpu().numpy(), [x[0] for x in res],
             [x[1] for x in res]))
    return results
Pack the human detection results and the predicted action labels. Args: human_detection (np.ndarray): Human detection result. result (type): The predicted label of each human proposal. img_h (int): The image height. img_w (int): The image width. Returns: tuple: Tuple of human proposal, label name and label score.
pack_result
python
open-mmlab/mmaction2
demo/demo_spatiotemporal_det_onnx.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_spatiotemporal_det_onnx.py
Apache-2.0
def visualize(args,
              frames,
              annotations,
              pose_data_samples,
              action_result,
              plate=PLATEBLUE,
              max_num=5):
    """Visualize frames with predicted annotations.

    Args:
        frames (list[np.ndarray]): Frames for visualization, note that
            len(frames) % len(annotations) should be 0.
        annotations (list[list[tuple]]): The predicted spatio-temporal
            detection results.
        pose_data_samples (list[list[PoseDataSample]): The pose results.
        action_result (str): The predicted action recognition results.
        pose_model (nn.Module): The constructed pose model.
        plate (str): The plate used for visualization. Default: PLATEBLUE.
        max_num (int): Max number of labels to visualize for a person box.
            Default: 5.

    Returns:
        list[np.ndarray]: Visualized frames.
    """
    assert max_num + 1 <= len(plate)
    frames_ = cp.deepcopy(frames)
    frames_ = [mmcv.imconvert(f, 'bgr', 'rgb') for f in frames_]
    nf, na = len(frames), len(annotations)
    assert nf % na == 0
    nfpa = len(frames) // len(annotations)
    anno = None
    h, w, _ = frames[0].shape
    scale_ratio = np.array([w, h, w, h])

    # add pose results
    if pose_data_samples:
        pose_config = mmengine.Config.fromfile(args.pose_config)
        visualizer = VISUALIZERS.build(pose_config.visualizer)
        visualizer.set_dataset_meta(pose_data_samples[0].dataset_meta)
        for i, (d, f) in enumerate(zip(pose_data_samples, frames_)):
            visualizer.add_datasample(
                'result',
                f,
                data_sample=d,
                draw_gt=False,
                draw_heatmap=False,
                draw_bbox=True,
                show=False,
                wait_time=0,
                out_file=None,
                kpt_thr=0.3)
            frames_[i] = visualizer.get_image()
            cv2.putText(frames_[i], action_result, (10, 30), FONTFACE,
                        FONTSCALE, FONTCOLOR, THICKNESS, LINETYPE)

    for i in range(na):
        anno = annotations[i]
        if anno is None:
            continue
        for j in range(nfpa):
            ind = i * nfpa + j
            frame = frames_[ind]

            # add action result for whole video
            cv2.putText(frame, action_result, (10, 30), FONTFACE, FONTSCALE,
                        FONTCOLOR, THICKNESS, LINETYPE)

            # add spatio-temporal action detection results
            for ann in anno:
                box = ann[0]
                label = ann[1]
                if not len(label):
                    continue
                score = ann[2]
                box = (box * scale_ratio).astype(np.int64)
                st, ed = tuple(box[:2]), tuple(box[2:])
                if not pose_data_samples:
                    cv2.rectangle(frame, st, ed, plate[0], 2)

                for k, lb in enumerate(label):
                    if k >= max_num:
                        break
                    text = abbrev(lb)
                    text = ': '.join([text, f'{score[k]:.3f}'])
                    location = (0 + st[0], 18 + k * 18 + st[1])
                    textsize = cv2.getTextSize(text, FONTFACE, FONTSCALE,
                                               THICKNESS)[0]
                    textwidth = textsize[0]
                    diag0 = (location[0] + textwidth, location[1] - 14)
                    diag1 = (location[0], location[1] + 2)
                    cv2.rectangle(frame, diag0, diag1, plate[k + 1], -1)
                    cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
                                FONTCOLOR, THICKNESS, LINETYPE)

    return frames_
Visualize frames with predicted annotations. Args: frames (list[np.ndarray]): Frames for visualization, note that len(frames) % len(annotations) should be 0. annotations (list[list[tuple]]): The predicted spatio-temporal detection results. pose_data_samples (list[list[PoseDataSample]): The pose results. action_result (str): The predicted action recognition results. pose_model (nn.Module): The constructed pose model. plate (str): The plate used for visualization. Default: PLATEBLUE. max_num (int): Max number of labels to visualize for a person box. Default: 5. Returns: list[np.ndarray]: Visualized frames.
visualize
python
open-mmlab/mmaction2
demo/demo_video_structuralize.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_video_structuralize.py
Apache-2.0
def load_label_map(file_path):
    """Load Label Map.

    Args:
        file_path (str): The file path of label map.

    Returns:
        dict: The label map (int -> label name).
    """
    lines = open(file_path).readlines()
    lines = [x.strip().split(': ') for x in lines]
    return {int(x[0]): x[1] for x in lines}
Load Label Map. Args: file_path (str): The file path of label map. Returns: dict: The label map (int -> label name).
load_label_map
python
open-mmlab/mmaction2
demo/demo_video_structuralize.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_video_structuralize.py
Apache-2.0
def abbrev(name):
    """Get the abbreviation of label name:

    'take (an object) from (a person)' -> 'take ... from ...'
    """
    while name.find('(') != -1:
        st, ed = name.find('('), name.find(')')
        name = name[:st] + '...' + name[ed + 1:]
    return name
Get the abbreviation of label name: 'take (an object) from (a person)' -> 'take ... from ...'
abbrev
python
open-mmlab/mmaction2
demo/demo_video_structuralize.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_video_structuralize.py
Apache-2.0
def pack_result(human_detection, result, img_h, img_w):
    """Pack the human detection results and the predicted action labels.

    Args:
        human_detection (np.ndarray): Human detection result.
        result (type): The predicted label of each human proposal.
        img_h (int): The image height.
        img_w (int): The image width.

    Returns:
        tuple: Tuple of human proposal, label name and label score.
    """
    human_detection[:, 0::2] /= img_w
    human_detection[:, 1::2] /= img_h
    results = []
    if result is None:
        return None
    for prop, res in zip(human_detection, result):
        res.sort(key=lambda x: -x[1])
        results.append(
            (prop.data.cpu().numpy(), [x[0] for x in res],
             [x[1] for x in res]))
    return results
Pack the human detection results and the predicted action labels. Args: human_detection (np.ndarray): Human detection result. result (type): The predicted label of each human proposal. img_h (int): The image height. img_w (int): The image width. Returns: tuple: Tuple of human proposal, label name and label score.
pack_result
python
open-mmlab/mmaction2
demo/demo_video_structuralize.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_video_structuralize.py
Apache-2.0
def add_frames(self, idx, frames, processed_frames):
    """Add the clip and corresponding id.

    Args:
        idx (int): the current index of the clip.
        frames (list[ndarray]): list of images in "BGR" format.
        processed_frames (list[ndarray]): list of resize and normed images
            in "BGR" format.
    """
    self.frames = frames
    self.processed_frames = processed_frames
    self.id = idx
    self.img_shape = processed_frames[0].shape[:2]
Add the clip and corresponding id. Args: idx (int): the current index of the clip. frames (list[ndarray]): list of images in "BGR" format. processed_frames (list[ndarray]): list of resize and normed images in "BGR" format.
add_frames
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def get_model_inputs(self, device):
    """Convert preprocessed images to MMAction2 STDet model inputs."""
    cur_frames = [self.processed_frames[idx] for idx in self.frames_inds]
    input_array = np.stack(cur_frames).transpose((3, 0, 1, 2))[np.newaxis]
    input_tensor = torch.from_numpy(input_array).to(device)
    datasample = ActionDataSample()
    datasample.proposals = InstanceData(bboxes=self.stdet_bboxes)
    datasample.set_metainfo(dict(img_shape=self.img_shape))
    return dict(
        inputs=input_tensor, data_samples=[datasample], mode='predict')
Convert preprocessed images to MMAction2 STDet model inputs.
get_model_inputs
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def _do_detect(self, image):
    """Get human bboxes with shape [n, 4].

    The format of bboxes is (xmin, ymin, xmax, ymax) in pixels.
    """
Get human bboxes with shape [n, 4]. The format of bboxes is (xmin, ymin, xmax, ymax) in pixels.
_do_detect
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def _do_detect(self, image):
    """Get bboxes in shape [n, 4] and values in pixels."""
    det_data_sample = inference_detector(self.model, image)
    pred_instance = det_data_sample.pred_instances.cpu().numpy()
    # We only keep human detection bboxes with score larger
    # than `det_score_thr` and category id equal to `det_cat_id`.
    valid_idx = np.logical_and(pred_instance.labels == self.person_classid,
                               pred_instance.scores > self.score_thr)
    bboxes = pred_instance.bboxes[valid_idx]
    # result = result[result[:, 4] >= self.score_thr][:, :4]
    return bboxes
Get bboxes in shape [n, 4] and values in pixels.
_do_detect
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def predict(self, task):
    """Spatio-temporal Action Detection model inference."""
    # No need to do inference if no one in keyframe
    if len(task.stdet_bboxes) == 0:
        return task

    with torch.no_grad():
        result = self.model(**task.get_model_inputs(self.device))
        scores = result[0].pred_instances.scores

    # pack results of human detector and stdet
    preds = []
    for _ in range(task.stdet_bboxes.shape[0]):
        preds.append([])
    for class_id in range(scores.shape[1]):
        if class_id not in self.label_map:
            continue
        for bbox_id in range(task.stdet_bboxes.shape[0]):
            if scores[bbox_id][class_id] > self.score_thr:
                preds[bbox_id].append((self.label_map[class_id],
                                       scores[bbox_id][class_id].item()))

    # update task
    # `preds` is `list[list[tuple]]`. The outer brackets indicate
    # different bboxes and the inner brackets indicate different action
    # results for the same bbox. tuple contains `class_name` and `score`.
    task.add_action_preds(preds)

    return task
Spatio-temporal Action Detection model inference.
predict
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def read_fn(self):
    """Main function for read thread.

    Contains three steps:

    1) Read and preprocess (resize + norm) frames from source.
    2) Create task by frames from previous step and buffer.
    3) Put task into read queue.
    """
    was_read = True
    start_time = time.time()
    while was_read and not self.stopped:
        # init task
        task = TaskInfo()
        task.clip_vis_length = self.clip_vis_length
        task.frames_inds = self.frames_inds
        task.ratio = self.ratio

        # read buffer
        frames = []
        processed_frames = []
        if len(self.buffer) != 0:
            frames = self.buffer
        if len(self.processed_buffer) != 0:
            processed_frames = self.processed_buffer

        # read and preprocess frames from source and update task
        with self.read_lock:
            before_read = time.time()
            read_frame_cnt = self.window_size - len(frames)
            while was_read and len(frames) < self.window_size:
                was_read, frame = self.cap.read()
                if not self.webcam:
                    # Reading frames too fast may lead to unexpected
                    # performance degradation. If you have enough
                    # resource, this line could be commented.
                    time.sleep(1 / self.output_fps)
                if was_read:
                    frames.append(mmcv.imresize(frame, self.display_size))
                    processed_frame = mmcv.imresize(
                        frame, self.stdet_input_size).astype(np.float32)
                    _ = mmcv.imnormalize_(processed_frame,
                                          **self.img_norm_cfg)
                    processed_frames.append(processed_frame)
        task.add_frames(self.read_id + 1, frames, processed_frames)

        # update buffer
        if was_read:
            self.buffer = frames[-self.buffer_size:]
            self.processed_buffer = processed_frames[-self.buffer_size:]

        # update read state
        with self.read_id_lock:
            self.read_id += 1
            self.not_end = was_read

        self.read_queue.put((was_read, copy.deepcopy(task)))
        cur_time = time.time()
        logger.debug(
            f'Read thread: {1000*(cur_time - start_time):.0f} ms, '
            f'{read_frame_cnt / (cur_time - before_read):.0f} fps')
        start_time = cur_time
Main function for read thread. Contains three steps: 1) Read and preprocess (resize + norm) frames from source. 2) Create task by frames from previous step and buffer. 3) Put task into read queue.
read_fn
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def display_fn(self):
    """Main function for display thread.

    Read data from display queue and display predictions.
    """
    start_time = time.time()
    while not self.stopped:
        # get the state of the read thread
        with self.read_id_lock:
            read_id = self.read_id
            not_end = self.not_end

        with self.display_lock:
            # If the video ended and we have displayed all frames.
            if not not_end and self.display_id == read_id:
                break

            # If the next task is not available, wait.
            if (len(self.display_queue) == 0 or
                    self.display_queue.get(self.display_id + 1) is None):
                time.sleep(0.02)
                continue

            # get display data and update state
            self.display_id += 1
            was_read, task = self.display_queue[self.display_id]
            del self.display_queue[self.display_id]
            display_id = self.display_id

        # do display predictions
        with self.output_lock:
            if was_read and task.id == 0:
                # the first task
                cur_display_inds = range(self.display_inds[-1] + 1)
            elif not was_read:
                # the last task
                cur_display_inds = range(self.display_inds[0],
                                         len(task.frames))
            else:
                cur_display_inds = self.display_inds

            for frame_id in cur_display_inds:
                frame = task.frames[frame_id]
                if self.show:
                    cv2.imshow('Demo', frame)
                    cv2.waitKey(int(1000 / self.output_fps))
                if self.video_writer:
                    self.video_writer.write(frame)

        cur_time = time.time()
        logger.debug(
            f'Display thread: {1000*(cur_time - start_time):.0f} ms, '
            f'read id {read_id}, display id {display_id}')
        start_time = cur_time
Main function for display thread. Read data from display queue and display predictions.
display_fn
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def __next__(self):
    """Get data from read queue.

    This function is part of the main thread.
    """
    if self.read_queue.qsize() == 0:
        time.sleep(0.02)
        return not self.stopped, None

    was_read, task = self.read_queue.get()
    if not was_read:
        # If we reach the end of the video, there aren't enough frames
        # in task.processed_frames, so there is no need to run model
        # inference and draw predictions. Put the task into the display
        # queue directly.
        with self.read_id_lock:
            read_id = self.read_id
        with self.display_lock:
            self.display_queue[read_id] = was_read, copy.deepcopy(task)

        # main thread doesn't need to handle this task again
        task = None
    return was_read, task
Get data from read queue. This function is part of the main thread.
__next__
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def start(self):
    """Start read thread and display thread."""
    self.read_thread = threading.Thread(
        target=self.read_fn, args=(), name='VidRead-Thread', daemon=True)
    self.read_thread.start()
    self.display_thread = threading.Thread(
        target=self.display_fn,
        args=(),
        name='VidDisplay-Thread',
        daemon=True)
    self.display_thread.start()

    return self
Start read thread and display thread.
start
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def clean(self):
    """Close all threads and release all resources."""
    self.stopped = True
    self.read_lock.acquire()
    self.cap.release()
    self.read_lock.release()
    self.output_lock.acquire()
    cv2.destroyAllWindows()
    if self.video_writer:
        self.video_writer.release()
    self.output_lock.release()
Close all threads and release all resources.
clean
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def display(self, task):
    """Add the visualized task to the display queue.

    Args:
        task (TaskInfo object): task object that contain the necessary
            information for prediction visualization.
    """
    with self.display_lock:
        self.display_queue[task.id] = (True, task)
Add the visualized task to the display queue. Args: task (TaskInfo object): task object that contain the necessary information for prediction visualization.
display
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def get_output_video_writer(self, path):
    """Return a video writer object.

    Args:
        path (str): path to the output video file.
    """
    return cv2.VideoWriter(
        filename=path,
        fourcc=cv2.VideoWriter_fourcc(*'mp4v'),
        fps=float(self.output_fps),
        frameSize=self.display_size,
        isColor=True)
Return a video writer object. Args: path (str): path to the output video file.
get_output_video_writer
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
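For context, a standalone sketch of the same cv2.VideoWriter setup outside the class; the output file name, frame size and frame rate are placeholders.

import cv2

# Hypothetical writer for 640x480 BGR frames at 30 fps.
writer = cv2.VideoWriter(
    filename='out.mp4',
    fourcc=cv2.VideoWriter_fourcc(*'mp4v'),
    fps=30.0,
    frameSize=(640, 480),
    isColor=True)
# Call writer.write(frame) for each BGR frame, then writer.release() when done.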
def draw_predictions(self, task):
    """Visualize stdet predictions on raw frames."""
    # read bboxes from task
    bboxes = task.display_bboxes.cpu().numpy()

    # draw predictions and update task
    keyframe_idx = len(task.frames) // 2
    draw_range = [
        keyframe_idx - task.clip_vis_length // 2,
        keyframe_idx + (task.clip_vis_length - 1) // 2
    ]
    assert draw_range[0] >= 0 and draw_range[1] < len(task.frames)
    task.frames = self.draw_clip_range(task.frames, task.action_preds,
                                       bboxes, draw_range)

    return task
Visualize stdet predictions on raw frames.
draw_predictions
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def draw_clip_range(self, frames, preds, bboxes, draw_range):
    """Draw a range of frames with the same bboxes and predictions."""
    # no predictions to be drawn
    if bboxes is None or len(bboxes) == 0:
        return frames

    # draw frames in `draw_range`
    left_frames = frames[:draw_range[0]]
    right_frames = frames[draw_range[1] + 1:]
    draw_frames = frames[draw_range[0]:draw_range[1] + 1]

    # get labels(texts) and draw predictions
    draw_frames = [
        self.draw_one_image(frame, bboxes, preds) for frame in draw_frames
    ]

    return list(left_frames) + draw_frames + list(right_frames)
Draw a range of frames with the same bboxes and predictions.
draw_clip_range
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def abbrev(name):
    """Get the abbreviation of label name:

    'take (an object) from (a person)' -> 'take ... from ...'
    """
    while name.find('(') != -1:
        st, ed = name.find('('), name.find(')')
        name = name[:st] + '...' + name[ed + 1:]
    return name
Get the abbreviation of label name: 'take (an object) from (a person)' -> 'take ... from ...'
abbrev
python
open-mmlab/mmaction2
demo/webcam_demo_spatiotemporal_det.py
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
Apache-2.0
def parse_version_info(version_str: str):
    """Parse a version string into a tuple.

    Args:
        version_str (str): The version string.

    Returns:
        tuple[int or str]: The version info, e.g., "1.3.0" is parsed into
            (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
    """
    version_info = []
    for x in version_str.split('.'):
        if x.isdigit():
            version_info.append(int(x))
        elif x.find('rc') != -1:
            patch_version = x.split('rc')
            version_info.append(int(patch_version[0]))
            version_info.append(f'rc{patch_version[1]}')
    return tuple(version_info)
Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int or str]: The version info, e.g., "1.3.0" is parsed into (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
parse_version_info
python
open-mmlab/mmaction2
mmaction/version.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/version.py
Apache-2.0
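A quick usage sketch of parse_version_info, assuming the definition above is in scope; the version strings mirror the docstring examples.

# Doctest-style checks for parse_version_info (definition above).
assert parse_version_info('1.3.0') == (1, 3, 0)
assert parse_version_info('2.0.0rc1') == (2, 0, 0, 'rc1')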
def init_recognizer(config: Union[str, Path, mmengine.Config],
                    checkpoint: Optional[str] = None,
                    device: Union[str, torch.device] = 'cuda:0') -> nn.Module:
    """Initialize a recognizer from config file.

    Args:
        config (str or :obj:`Path` or :obj:`mmengine.Config`): Config file
            path, :obj:`Path` or the config object.
        checkpoint (str, optional): Checkpoint path/url. If set to None,
            the model will not load any weights. Defaults to None.
        device (str | torch.device): The desired device of returned
            tensor. Defaults to ``'cuda:0'``.

    Returns:
        nn.Module: The constructed recognizer.
    """
    if isinstance(config, (str, Path)):
        config = mmengine.Config.fromfile(config)
    elif not isinstance(config, mmengine.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    init_default_scope(config.get('default_scope', 'mmaction'))

    if hasattr(config.model, 'backbone') and config.model.backbone.get(
            'pretrained', None):
        config.model.backbone.pretrained = None
    model = MODELS.build(config.model)

    if checkpoint is not None:
        load_checkpoint(model, checkpoint, map_location='cpu')
    model.cfg = config
    model.to(device)
    model.eval()
    return model
Initialize a recognizer from config file. Args: config (str or :obj:`Path` or :obj:`mmengine.Config`): Config file path, :obj:`Path` or the config object. checkpoint (str, optional): Checkpoint path/url. If set to None, the model will not load any weights. Defaults to None. device (str | torch.device): The desired device of returned tensor. Defaults to ``'cuda:0'``. Returns: nn.Module: The constructed recognizer.
init_recognizer
python
open-mmlab/mmaction2
mmaction/apis/inference.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py
Apache-2.0
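A minimal sketch of calling init_recognizer; the config and checkpoint paths below are placeholders and should be replaced with a real pair from the MMAction2 model zoo.

import torch
from mmaction.apis import init_recognizer

# Placeholder paths; substitute a real config/checkpoint pair.
config_file = 'configs/recognition/tsn/some_tsn_config.py'
checkpoint_file = 'checkpoints/some_tsn_checkpoint.pth'

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = init_recognizer(config_file, checkpoint_file, device=device)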
def inference_recognizer(model: nn.Module,
                         video: Union[str, dict],
                         test_pipeline: Optional[Compose] = None
                         ) -> ActionDataSample:
    """Inference a video with the recognizer.

    Args:
        model (nn.Module): The loaded recognizer.
        video (Union[str, dict]): The video file path or the results
            dictionary (the input of pipeline).
        test_pipeline (:obj:`Compose`, optional): The test pipeline.
            If not specified, the test pipeline in the config will be
            used. Defaults to None.

    Returns:
        :obj:`ActionDataSample`: The inference results. Specifically, the
        predicted scores are saved at ``result.pred_score``.
    """
    if test_pipeline is None:
        cfg = model.cfg
        init_default_scope(cfg.get('default_scope', 'mmaction'))
        test_pipeline_cfg = cfg.test_pipeline
        test_pipeline = Compose(test_pipeline_cfg)

    input_flag = None
    if isinstance(video, dict):
        input_flag = 'dict'
    elif isinstance(video, str) and osp.exists(video):
        if video.endswith('.npy'):
            input_flag = 'audio'
        else:
            input_flag = 'video'
    else:
        raise RuntimeError(f'The type of argument `video` is not supported: '
                           f'{type(video)}')

    if input_flag == 'dict':
        data = video
    if input_flag == 'video':
        data = dict(filename=video, label=-1, start_index=0, modality='RGB')
    if input_flag == 'audio':
        data = dict(
            audio_path=video,
            total_frames=len(np.load(video)),
            start_index=0,
            label=-1)

    data = test_pipeline(data)
    data = pseudo_collate([data])

    # Forward the model
    with torch.no_grad():
        result = model.test_step(data)[0]

    return result
Inference a video with the recognizer. Args: model (nn.Module): The loaded recognizer. video (Union[str, dict]): The video file path or the results dictionary (the input of pipeline). test_pipeline (:obj:`Compose`, optional): The test pipeline. If not specified, the test pipeline in the config will be used. Defaults to None. Returns: :obj:`ActionDataSample`: The inference results. Specifically, the predicted scores are saved at ``result.pred_score``.
inference_recognizer
python
open-mmlab/mmaction2
mmaction/apis/inference.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py
Apache-2.0
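A short usage sketch, assuming model was built with init_recognizer above; 'demo.mp4' is a placeholder video path.

from mmaction.apis import inference_recognizer

result = inference_recognizer(model, 'demo.mp4')  # `model` from init_recognizer
pred_scores = result.pred_score           # per-class scores (ActionDataSample)
top_class = int(pred_scores.argmax())     # index of the highest-scoring class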
def inference_skeleton(model: nn.Module,
                       pose_results: List[dict],
                       img_shape: Tuple[int],
                       test_pipeline: Optional[Compose] = None
                       ) -> ActionDataSample:
    """Inference a pose results with the skeleton recognizer.

    Args:
        model (nn.Module): The loaded recognizer.
        pose_results (List[dict]): The pose estimation results dictionary
            (the results of `pose_inference`)
        img_shape (Tuple[int]): The original image shape used for inference
            skeleton recognizer.
        test_pipeline (:obj:`Compose`, optional): The test pipeline.
            If not specified, the test pipeline in the config will be
            used. Defaults to None.

    Returns:
        :obj:`ActionDataSample`: The inference results. Specifically, the
        predicted scores are saved at ``result.pred_score``.
    """
    if test_pipeline is None:
        cfg = model.cfg
        init_default_scope(cfg.get('default_scope', 'mmaction'))
        test_pipeline_cfg = cfg.test_pipeline
        test_pipeline = Compose(test_pipeline_cfg)

    h, w = img_shape
    num_keypoint = pose_results[0]['keypoints'].shape[1]
    num_frame = len(pose_results)
    num_person = max([len(x['keypoints']) for x in pose_results])
    fake_anno = dict(
        frame_dict='',
        label=-1,
        img_shape=(h, w),
        origin_shape=(h, w),
        start_index=0,
        modality='Pose',
        total_frames=num_frame)

    keypoint = np.zeros((num_frame, num_person, num_keypoint, 2),
                        dtype=np.float16)
    keypoint_score = np.zeros((num_frame, num_person, num_keypoint),
                              dtype=np.float16)

    for f_idx, frm_pose in enumerate(pose_results):
        frm_num_persons = frm_pose['keypoints'].shape[0]
        for p_idx in range(frm_num_persons):
            keypoint[f_idx, p_idx] = frm_pose['keypoints'][p_idx]
            keypoint_score[f_idx, p_idx] = frm_pose['keypoint_scores'][p_idx]

    fake_anno['keypoint'] = keypoint.transpose((1, 0, 2, 3))
    fake_anno['keypoint_score'] = keypoint_score.transpose((1, 0, 2))
    return inference_recognizer(model, fake_anno, test_pipeline)
Inference a pose results with the skeleton recognizer. Args: model (nn.Module): The loaded recognizer. pose_results (List[dict]): The pose estimation results dictionary (the results of `pose_inference`) img_shape (Tuple[int]): The original image shape used for inference skeleton recognizer. test_pipeline (:obj:`Compose`, optional): The test pipeline. If not specified, the test pipeline in the config will be used. Defaults to None. Returns: :obj:`ActionDataSample`: The inference results. Specifically, the predicted scores are saved at ``result.pred_score``.
inference_skeleton
python
open-mmlab/mmaction2
mmaction/apis/inference.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py
Apache-2.0
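A hedged sketch of calling inference_skeleton; skeleton_model and pose_results are placeholder names, with pose_results expected in the format produced by pose_inference below and img_shape given as (height, width).

from mmaction.apis import inference_skeleton

# `skeleton_model`: a skeleton-based recognizer built with init_recognizer;
# `pose_results`: the first return value of pose_inference (see below).
result = inference_skeleton(skeleton_model, pose_results, img_shape=(480, 640))
action_class = int(result.pred_score.argmax())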
def detection_inference(det_config: Union[str, Path, mmengine.Config,
                                          nn.Module],
                        det_checkpoint: str,
                        frame_paths: List[str],
                        det_score_thr: float = 0.9,
                        det_cat_id: int = 0,
                        device: Union[str, torch.device] = 'cuda:0',
                        with_score: bool = False) -> tuple:
    """Detect human boxes given frame paths.

    Args:
        det_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`,
            :obj:`torch.nn.Module`]): Det config file path or Detection
            model object. It can be a :obj:`Path`, a config object, or a
            module object.
        det_checkpoint: Checkpoint path/url.
        frame_paths (List[str]): The paths of frames to do detection
            inference.
        det_score_thr (float): The threshold of human detection score.
            Defaults to 0.9.
        det_cat_id (int): The category id for human detection.
            Defaults to 0.
        device (Union[str, torch.device]): The desired device of returned
            tensor. Defaults to ``'cuda:0'``.
        with_score (bool): Whether to append detection score after box.
            Defaults to None.

    Returns:
        List[np.ndarray]: List of detected human boxes.
        List[:obj:`DetDataSample`]: List of data samples, generally used
            to visualize data.
    """
    try:
        from mmdet.apis import inference_detector, init_detector
        from mmdet.structures import DetDataSample
    except (ImportError, ModuleNotFoundError):
        raise ImportError('Failed to import `inference_detector` and '
                          '`init_detector` from `mmdet.apis`. These apis are '
                          'required in this inference api! ')
    if isinstance(det_config, nn.Module):
        model = det_config
    else:
        model = init_detector(
            config=det_config, checkpoint=det_checkpoint, device=device)

    results = []
    data_samples = []
    print('Performing Human Detection for each frame')
    for frame_path in track_iter_progress(frame_paths):
        det_data_sample: DetDataSample = inference_detector(model, frame_path)
        pred_instance = det_data_sample.pred_instances.cpu().numpy()
        bboxes = pred_instance.bboxes
        scores = pred_instance.scores
        # We only keep human detection bboxes with score larger
        # than `det_score_thr` and category id equal to `det_cat_id`.
        valid_idx = np.logical_and(pred_instance.labels == det_cat_id,
                                   pred_instance.scores > det_score_thr)
        bboxes = bboxes[valid_idx]
        scores = scores[valid_idx]

        if with_score:
            bboxes = np.concatenate((bboxes, scores[:, None]), axis=-1)
        results.append(bboxes)
        data_samples.append(det_data_sample)

    return results, data_samples
Detect human boxes given frame paths. Args: det_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`, :obj:`torch.nn.Module`]): Det config file path or Detection model object. It can be a :obj:`Path`, a config object, or a module object. det_checkpoint: Checkpoint path/url. frame_paths (List[str]): The paths of frames to do detection inference. det_score_thr (float): The threshold of human detection score. Defaults to 0.9. det_cat_id (int): The category id for human detection. Defaults to 0. device (Union[str, torch.device]): The desired device of returned tensor. Defaults to ``'cuda:0'``. with_score (bool): Whether to append detection score after box. Defaults to None. Returns: List[np.ndarray]: List of detected human boxes. List[:obj:`DetDataSample`]: List of data samples, generally used to visualize data.
detection_inference
python
open-mmlab/mmaction2
mmaction/apis/inference.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py
Apache-2.0
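A usage sketch for detection_inference; the config path, checkpoint path and frame paths are placeholders, not confirmed files.

from mmaction.apis import detection_inference

frame_paths = ['frames/img_00001.jpg', 'frames/img_00002.jpg']  # placeholders
det_results, det_data_samples = detection_inference(
    det_config='path/to/faster_rcnn_det_config.py',      # placeholder config
    det_checkpoint='checkpoints/faster_rcnn_person.pth',  # placeholder weights
    frame_paths=frame_paths,
    det_score_thr=0.9,
    device='cuda:0')
# det_results[i] is an (n, 4) array of person boxes for frame i.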
def pose_inference(pose_config: Union[str, Path, mmengine.Config, nn.Module],
                   pose_checkpoint: str,
                   frame_paths: List[str],
                   det_results: List[np.ndarray],
                   device: Union[str, torch.device] = 'cuda:0') -> tuple:
    """Perform Top-Down pose estimation.

    Args:
        pose_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`,
            :obj:`torch.nn.Module`]): Pose config file path or pose model
            object. It can be a :obj:`Path`, a config object, or a module
            object.
        pose_checkpoint: Checkpoint path/url.
        frame_paths (List[str]): The paths of frames to do pose inference.
        det_results (List[np.ndarray]): List of detected human boxes.
        device (Union[str, torch.device]): The desired device of returned
            tensor. Defaults to ``'cuda:0'``.

    Returns:
        List[List[Dict[str, np.ndarray]]]: List of pose estimation results.
        List[:obj:`PoseDataSample`]: List of data samples, generally used
            to visualize data.
    """
    try:
        from mmpose.apis import inference_topdown, init_model
        from mmpose.structures import PoseDataSample, merge_data_samples
    except (ImportError, ModuleNotFoundError):
        raise ImportError('Failed to import `inference_topdown` and '
                          '`init_model` from `mmpose.apis`. These apis '
                          'are required in this inference api! ')
    if isinstance(pose_config, nn.Module):
        model = pose_config
    else:
        model = init_model(pose_config, pose_checkpoint, device)

    results = []
    data_samples = []
    print('Performing Human Pose Estimation for each frame')
    for f, d in track_iter_progress(list(zip(frame_paths, det_results))):
        pose_data_samples: List[PoseDataSample] \
            = inference_topdown(model, f, d[..., :4], bbox_format='xyxy')
        pose_data_sample = merge_data_samples(pose_data_samples)
        pose_data_sample.dataset_meta = model.dataset_meta
        # make fake pred_instances
        if not hasattr(pose_data_sample, 'pred_instances'):
            num_keypoints = model.dataset_meta['num_keypoints']
            pred_instances_data = dict(
                keypoints=np.empty(shape=(0, num_keypoints, 2)),
                keypoints_scores=np.empty(shape=(0, 17), dtype=np.float32),
                bboxes=np.empty(shape=(0, 4), dtype=np.float32),
                bbox_scores=np.empty(shape=(0), dtype=np.float32))
            pose_data_sample.pred_instances = InstanceData(
                **pred_instances_data)

        poses = pose_data_sample.pred_instances.to_dict()
        results.append(poses)
        data_samples.append(pose_data_sample)

    return results, data_samples
Perform Top-Down pose estimation. Args: pose_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`, :obj:`torch.nn.Module`]): Pose config file path or pose model object. It can be a :obj:`Path`, a config object, or a module object. pose_checkpoint: Checkpoint path/url. frame_paths (List[str]): The paths of frames to do pose inference. det_results (List[np.ndarray]): List of detected human boxes. device (Union[str, torch.device]): The desired device of returned tensor. Defaults to ``'cuda:0'``. Returns: List[List[Dict[str, np.ndarray]]]: List of pose estimation results. List[:obj:`PoseDataSample`]: List of data samples, generally used to visualize data.
pose_inference
python
open-mmlab/mmaction2
mmaction/apis/inference.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py
Apache-2.0
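A sketch chaining the helpers above into the skeleton pipeline; all paths are placeholders, and frame_paths and det_results come from the detection_inference sketch above.

from mmaction.apis import pose_inference

pose_results, pose_data_samples = pose_inference(
    pose_config='path/to/hrnet_pose_config.py',        # placeholder config
    pose_checkpoint='checkpoints/hrnet_coco.pth',      # placeholder weights
    frame_paths=frame_paths,
    det_results=det_results,
    device='cuda:0')
# pose_results[i] is a dict with per-person 'keypoints' and 'keypoint_scores',
# ready to be passed to inference_skeleton together with the frame img_shape.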
def __call__(self,
             inputs: InputsType,
             return_datasamples: bool = False,
             batch_size: int = 1,
             return_vis: bool = False,
             show: bool = False,
             wait_time: int = 0,
             draw_pred: bool = True,
             vid_out_dir: str = '',
             out_type: str = 'video',
             print_result: bool = False,
             pred_out_file: str = '',
             target_resolution: Optional[Tuple[int]] = None,
             **kwargs) -> dict:
    """Call the inferencer.

    Args:
        inputs (InputsType): Inputs for the inferencer.
        return_datasamples (bool): Whether to return results as
            :obj:`BaseDataElement`. Defaults to False.
        batch_size (int): Inference batch size. Defaults to 1.
        show (bool): Whether to display the visualization results in a
            popup window. Defaults to False.
        wait_time (float): The interval of show (s). Defaults to 0.
        draw_pred (bool): Whether to draw predicted bounding boxes.
            Defaults to True.
        vid_out_dir (str): Output directory of visualization results.
            If left as empty, no file will be saved. Defaults to ''.
        out_type (str): Output type of visualization results.
            Defaults to 'video'.
        print_result (bool): Whether to print the inference result w/o
            visualization to the console. Defaults to False.
        pred_out_file: File to save the inference results w/o
            visualization. If left as empty, no file will be saved.
            Defaults to ''.
        **kwargs: Other keyword arguments passed to :meth:`preprocess`,
            :meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
            Each key in kwargs should be in the corresponding set of
            ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
            and ``postprocess_kwargs``.

    Returns:
        dict: Inference and visualization results.
    """
    return super().__call__(
        inputs,
        return_datasamples,
        batch_size,
        return_vis=return_vis,
        show=show,
        wait_time=wait_time,
        draw_pred=draw_pred,
        vid_out_dir=vid_out_dir,
        print_result=print_result,
        pred_out_file=pred_out_file,
        out_type=out_type,
        target_resolution=target_resolution,
        **kwargs)
Call the inferencer. Args: inputs (InputsType): Inputs for the inferencer. return_datasamples (bool): Whether to return results as :obj:`BaseDataElement`. Defaults to False. batch_size (int): Inference batch size. Defaults to 1. show (bool): Whether to display the visualization results in a popup window. Defaults to False. wait_time (float): The interval of show (s). Defaults to 0. draw_pred (bool): Whether to draw predicted bounding boxes. Defaults to True. vid_out_dir (str): Output directory of visualization results. If left as empty, no file will be saved. Defaults to ''. out_type (str): Output type of visualization results. Defaults to 'video'. print_result (bool): Whether to print the inference result w/o visualization to the console. Defaults to False. pred_out_file: File to save the inference results w/o visualization. If left as empty, no file will be saved. Defaults to ''. **kwargs: Other keyword arguments passed to :meth:`preprocess`, :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. Each key in kwargs should be in the corresponding set of ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs`` and ``postprocess_kwargs``. Returns: dict: Inference and visualization results.
__call__
python
open-mmlab/mmaction2
mmaction/apis/inferencers/actionrecog_inferencer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py
Apache-2.0
def _inputs_to_list(self, inputs: InputsType) -> list:
    """Preprocess the inputs to a list.

    The main difference from mmengine version is that we don't list a
    directory cause input could be a frame folder.

    Preprocess inputs to a list according to its type:

    - list or tuple: return inputs
    - str: return a list containing the string. The string could be a
      path to file, a url or other types of string according to the task.

    Args:
        inputs (InputsType): Inputs for the inferencer.

    Returns:
        list: List of input for the :meth:`preprocess`.
    """
    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]
    return list(inputs)
Preprocess the inputs to a list. The main difference from mmengine version is that we don't list a directory cause input could be a frame folder. Preprocess inputs to a list according to its type: - list or tuple: return inputs - str: return a list containing the string. The string could be a path to file, a url or other types of string according to the task. Args: inputs (InputsType): Inputs for the inferencer. Returns: list: List of input for the :meth:`preprocess`.
_inputs_to_list
python
open-mmlab/mmaction2
mmaction/apis/inferencers/actionrecog_inferencer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py
Apache-2.0
def visualize(
    self,
    inputs: InputsType,
    preds: PredType,
    return_vis: bool = False,
    show: bool = False,
    wait_time: int = 0,
    draw_pred: bool = True,
    fps: int = 30,
    out_type: str = 'video',
    target_resolution: Optional[Tuple[int]] = None,
    vid_out_dir: str = '',
) -> Union[List[np.ndarray], None]:
    """Visualize predictions.

    Args:
        inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
        preds (List[Dict]): Predictions of the model.
        return_vis (bool): Whether to return the visualization result.
            Defaults to False.
        show (bool): Whether to display the image in a popup window.
            Defaults to False.
        wait_time (float): The interval of show (s). Defaults to 0.
        draw_pred (bool): Whether to draw prediction labels.
            Defaults to True.
        fps (int): Frames per second for saving video. Defaults to 4.
        out_type (str): Output format type, choose from 'img', 'gif',
            'video'. Defaults to ``'img'``.
        target_resolution (Tuple[int], optional): Set to
            (desired_width desired_height) to have resized frames. If
            either dimension is None, the frames are resized by keeping
            the existing aspect ratio. Defaults to None.
        vid_out_dir (str): Output directory of visualization results.
            If left as empty, no file will be saved. Defaults to ''.

    Returns:
        List[np.ndarray] or None: Returns visualization results only if
        applicable.
    """
    if self.visualizer is None or (not show and vid_out_dir == ''
                                   and not return_vis):
        return None

    if getattr(self, 'visualizer') is None:
        raise ValueError('Visualization needs the "visualizer" term'
                         'defined in the config, but got None.')

    results = []

    for single_input, pred in zip(inputs, preds):
        if isinstance(single_input, str):
            frames = single_input
            video_name = osp.basename(single_input)
        elif isinstance(single_input, np.ndarray):
            frames = single_input.copy()
            video_num = str(self.num_visualized_vids).zfill(8)
            video_name = f'{video_num}.mp4'
        else:
            raise ValueError('Unsupported input type: '
                             f'{type(single_input)}')

        out_path = osp.join(vid_out_dir, video_name) if vid_out_dir != '' \
            else None

        visualization = self.visualizer.add_datasample(
            video_name,
            frames,
            pred,
            show_frames=show,
            wait_time=wait_time,
            draw_gt=False,
            draw_pred=draw_pred,
            fps=fps,
            out_type=out_type,
            out_path=out_path,
            target_resolution=target_resolution,
        )
        results.append(visualization)
        self.num_visualized_vids += 1

    return results
Visualize predictions. Args: inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer. preds (List[Dict]): Predictions of the model. return_vis (bool): Whether to return the visualization result. Defaults to False. show (bool): Whether to display the image in a popup window. Defaults to False. wait_time (float): The interval of show (s). Defaults to 0. draw_pred (bool): Whether to draw prediction labels. Defaults to True. fps (int): Frames per second for saving video. Defaults to 4. out_type (str): Output format type, choose from 'img', 'gif', 'video'. Defaults to ``'img'``. target_resolution (Tuple[int], optional): Set to (desired_width desired_height) to have resized frames. If either dimension is None, the frames are resized by keeping the existing aspect ratio. Defaults to None. vid_out_dir (str): Output directory of visualization results. If left as empty, no file will be saved. Defaults to ''. Returns: List[np.ndarray] or None: Returns visualization results only if applicable.
visualize
python
open-mmlab/mmaction2
mmaction/apis/inferencers/actionrecog_inferencer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py
Apache-2.0
def postprocess(
    self,
    preds: PredType,
    visualization: Optional[List[np.ndarray]] = None,
    return_datasample: bool = False,
    print_result: bool = False,
    pred_out_file: str = '',
) -> Union[ResType, Tuple[ResType, np.ndarray]]:
    """Process the predictions and visualization results from ``forward``
    and ``visualize``.

    This method should be responsible for the following tasks:

    1. Convert datasamples into a json-serializable dict if needed.
    2. Pack the predictions and visualization results and return them.
    3. Dump or log the predictions.

    Args:
        preds (List[Dict]): Predictions of the model.
        visualization (Optional[np.ndarray]): Visualized predictions.
        return_datasample (bool): Whether to use Datasample to store
            inference results. If False, dict will be used.
        print_result (bool): Whether to print the inference result w/o
            visualization to the console. Defaults to False.
        pred_out_file: File to save the inference results w/o
            visualization. If left as empty, no file will be saved.
            Defaults to ''.

    Returns:
        dict: Inference and visualization results with key ``predictions``
        and ``visualization``.

        - ``visualization`` (Any): Returned by :meth:`visualize`.
        - ``predictions`` (dict or DataSample): Returned by
          :meth:`forward` and processed in :meth:`postprocess`.
          If ``return_datasample=False``, it usually should be a
          json-serializable dict containing only basic data elements such
          as strings and numbers.
    """
    result_dict = {}
    results = preds
    if not return_datasample:
        results = []
        for pred in preds:
            result = self.pred2dict(pred)
            results.append(result)
    # Add video to the results after printing and dumping
    result_dict['predictions'] = results
    if print_result:
        print(result_dict)
    if pred_out_file != '':
        mmengine.dump(result_dict, pred_out_file)
    result_dict['visualization'] = visualization
    return result_dict
Process the predictions and visualization results from ``forward`` and ``visualize``. This method should be responsible for the following tasks: 1. Convert datasamples into a json-serializable dict if needed. 2. Pack the predictions and visualization results and return them. 3. Dump or log the predictions. Args: preds (List[Dict]): Predictions of the model. visualization (Optional[np.ndarray]): Visualized predictions. return_datasample (bool): Whether to use Datasample to store inference results. If False, dict will be used. print_result (bool): Whether to print the inference result w/o visualization to the console. Defaults to False. pred_out_file: File to save the inference results w/o visualization. If left as empty, no file will be saved. Defaults to ''. Returns: dict: Inference and visualization results with key ``predictions`` and ``visualization``. - ``visualization`` (Any): Returned by :meth:`visualize`. - ``predictions`` (dict or DataSample): Returned by :meth:`forward` and processed in :meth:`postprocess`. If ``return_datasample=False``, it usually should be a json-serializable dict containing only basic data elements such as strings and numbers.
postprocess
python
open-mmlab/mmaction2
mmaction/apis/inferencers/actionrecog_inferencer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py
Apache-2.0
def pred2dict(self, data_sample: ActionDataSample) -> Dict:
    """Extract elements necessary to represent a prediction into a
    dictionary.

    It's better to contain only basic data elements such as strings and
    numbers in order to guarantee it's json-serializable.

    Args:
        data_sample (ActionDataSample): The data sample to be converted.

    Returns:
        dict: The output dictionary.
    """
    result = {}
    result['pred_labels'] = data_sample.pred_label.tolist()
    result['pred_scores'] = data_sample.pred_score.tolist()
    return result
Extract elements necessary to represent a prediction into a dictionary. It's better to contain only basic data elements such as strings and numbers in order to guarantee it's json-serializable. Args: data_sample (ActionDataSample): The data sample to be converted. Returns: dict: The output dictionary.
pred2dict
python
open-mmlab/mmaction2
mmaction/apis/inferencers/actionrecog_inferencer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py
Apache-2.0
def forward(self, inputs: InputType, batch_size: int,
            **forward_kwargs) -> PredType:
    """Forward the inputs to the model.

    Args:
        inputs (InputsType): The inputs to be forwarded.
        batch_size (int): Batch size. Defaults to 1.

    Returns:
        Dict: The prediction results. Possibly with keys "rec".
    """
    result = {}
    if self.mode == 'rec':
        predictions = self.actionrecog_inferencer(
            inputs,
            return_datasamples=True,
            batch_size=batch_size,
            **forward_kwargs)['predictions']
        result['rec'] = [[p] for p in predictions]

    return result
Forward the inputs to the model. Args: inputs (InputsType): The inputs to be forwarded. batch_size (int): Batch size. Defaults to 1. Returns: Dict: The prediction results. Possibly with keys "rec".
forward
python
open-mmlab/mmaction2
mmaction/apis/inferencers/mmaction2_inferencer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/mmaction2_inferencer.py
Apache-2.0
def visualize(self, inputs: InputsType, preds: PredType,
              **kwargs) -> List[np.ndarray]:
    """Visualize predictions.

    Args:
        inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
        preds (List[Dict]): Predictions of the model.
        show (bool): Whether to display the image in a popup window.
            Defaults to False.
        wait_time (float): The interval of show (s). Defaults to 0.
        draw_pred (bool): Whether to draw predicted bounding boxes.
            Defaults to True.
        fps (int): Frames per second for saving video. Defaults to 4.
        out_type (str): Output format type, choose from 'img', 'gif',
            'video'. Defaults to ``'img'``.
        target_resolution (Tuple[int], optional): Set to
            (desired_width desired_height) to have resized frames. If
            either dimension is None, the frames are resized by keeping
            the existing aspect ratio. Defaults to None.
        vid_out_dir (str): Output directory of visualization results.
            If left as empty, no file will be saved. Defaults to ''.
    """
    if 'rec' in self.mode:
        return self.actionrecog_inferencer.visualize(
            inputs, preds['rec'][0], **kwargs)
Visualize predictions. Args: inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer. preds (List[Dict]): Predictions of the model. show (bool): Whether to display the image in a popup window. Defaults to False. wait_time (float): The interval of show (s). Defaults to 0. draw_pred (bool): Whether to draw predicted bounding boxes. Defaults to True. fps (int): Frames per second for saving video. Defaults to 4. out_type (str): Output format type, choose from 'img', 'gif', 'video'. Defaults to ``'img'``. target_resolution (Tuple[int], optional): Set to (desired_width desired_height) to have resized frames. If either dimension is None, the frames are resized by keeping the existing aspect ratio. Defaults to None. vid_out_dir (str): Output directory of visualization results. If left as empty, no file will be saved. Defaults to ''.
visualize
python
open-mmlab/mmaction2
mmaction/apis/inferencers/mmaction2_inferencer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/mmaction2_inferencer.py
Apache-2.0
def __call__(
    self,
    inputs: InputsType,
    batch_size: int = 1,
    **kwargs,
) -> dict:
    """Call the inferencer.

    Args:
        inputs (InputsType): Inputs for the inferencer. It can be a path
            to image / image directory, or an array, or a list of these.
        return_datasamples (bool): Whether to return results as
            :obj:`BaseDataElement`. Defaults to False.
        batch_size (int): Batch size. Defaults to 1.
        **kwargs: Key words arguments passed to :meth:`preprocess`,
            :meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
            Each key in kwargs should be in the corresponding set of
            ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
            and ``postprocess_kwargs``.

    Returns:
        dict: Inference and visualization results.
    """
    (
        preprocess_kwargs,
        forward_kwargs,
        visualize_kwargs,
        postprocess_kwargs,
    ) = self._dispatch_kwargs(**kwargs)

    ori_inputs = self._inputs_to_list(inputs)

    preds = self.forward(ori_inputs, batch_size, **forward_kwargs)

    visualization = self.visualize(
        ori_inputs, preds, **visualize_kwargs)  # type: ignore  # noqa: E501
    results = self.postprocess(preds, visualization, **postprocess_kwargs)
    return results
Call the inferencer. Args: inputs (InputsType): Inputs for the inferencer. It can be a path to image / image directory, or an array, or a list of these. return_datasamples (bool): Whether to return results as :obj:`BaseDataElement`. Defaults to False. batch_size (int): Batch size. Defaults to 1. **kwargs: Key words arguments passed to :meth:`preprocess`, :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. Each key in kwargs should be in the corresponding set of ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs`` and ``postprocess_kwargs``. Returns: dict: Inference and visualization results.
__call__
python
open-mmlab/mmaction2
mmaction/apis/inferencers/mmaction2_inferencer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/mmaction2_inferencer.py
Apache-2.0
def _inputs_to_list(self, inputs: InputsType) -> list:
    """Preprocess the inputs to a list.

    The main difference from mmengine version is that we don't list a
    directory cause input could be a frame folder.

    Preprocess inputs to a list according to its type:

    - list or tuple: return inputs
    - str: return a list containing the string. The string could be a
      path to file, a url or other types of string according to the task.

    Args:
        inputs (InputsType): Inputs for the inferencer.

    Returns:
        list: List of input for the :meth:`preprocess`.
    """
    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]
    return list(inputs)
Preprocess the inputs to a list. The main difference from mmengine version is that we don't list a directory cause input could be a frame folder. Preprocess inputs to a list according to its type: - list or tuple: return inputs - str: return a list containing the string. The string could be a path to file, a url or other types of string according to the task. Args: inputs (InputsType): Inputs for the inferencer. Returns: list: List of input for the :meth:`preprocess`.
_inputs_to_list
python
open-mmlab/mmaction2
mmaction/apis/inferencers/mmaction2_inferencer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/mmaction2_inferencer.py
Apache-2.0
def load_data_list(self) -> List[dict]:
    """Load annotation file to get video information."""
    exists(self.ann_file)
    data_list = []
    anno_database = mmengine.load(self.ann_file)
    for video_name in anno_database:
        video_info = anno_database[video_name]
        feature_path = video_name + '.csv'
        feature_path = '%s/%s' % (self.data_prefix['video'], feature_path)
        video_info['feature_path'] = feature_path
        video_info['video_name'] = video_name
        data_list.append(video_info)
    return data_list
Load annotation file to get video information.
load_data_list
python
open-mmlab/mmaction2
mmaction/datasets/activitynet_dataset.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/activitynet_dataset.py
Apache-2.0
def load_data_list(self) -> List[Dict]: """Load annotation file to get audio information.""" check_file_exist(self.ann_file) data_list = [] with open(self.ann_file, 'r') as fin: for line in fin: line_split = line.strip().split() video_info = {} idx = 0 filename = line_split[idx] if self.data_prefix['audio'] is not None: filename = osp.join(self.data_prefix['audio'], filename) video_info['audio_path'] = filename idx += 1 # idx for total_frames video_info['total_frames'] = int(line_split[idx]) idx += 1 # idx for label label = [int(x) for x in line_split[idx:]] assert label, f'missing label in line: {line}' if self.multi_class: assert self.num_classes is not None video_info['label'] = label else: assert len(label) == 1 video_info['label'] = label[0] data_list.append(video_info) return data_list
Load annotation file to get audio information.
load_data_list
python
open-mmlab/mmaction2
mmaction/datasets/audio_dataset.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/audio_dataset.py
Apache-2.0
def parse_img_record(self, img_records: List[dict]) -> tuple: """Merge image records of the same entity at the same time. Args: img_records (List[dict]): List of img_records (lines in AVA annotations). Returns: Tuple(list): A tuple consists of lists of bboxes, action labels and entity_ids. """ bboxes, labels, entity_ids = [], [], [] while len(img_records) > 0: img_record = img_records[0] num_img_records = len(img_records) selected_records = [ x for x in img_records if np.array_equal(x['entity_box'], img_record['entity_box']) ] num_selected_records = len(selected_records) img_records = [ x for x in img_records if not np.array_equal(x['entity_box'], img_record['entity_box']) ] assert len(img_records) + num_selected_records == num_img_records bboxes.append(img_record['entity_box']) valid_labels = np.array([ selected_record['label'] for selected_record in selected_records ]) # The format can be directly used by BCELossWithLogits if self.multilabel: label = np.zeros(self.num_classes, dtype=np.float32) label[valid_labels] = 1. else: label = valid_labels labels.append(label) entity_ids.append(img_record['entity_id']) bboxes = np.stack(bboxes) labels = np.stack(labels) entity_ids = np.stack(entity_ids) return bboxes, labels, entity_ids
Merge image records of the same entity at the same time. Args: img_records (List[dict]): List of img_records (lines in AVA annotations). Returns: Tuple(list): A tuple consisting of lists of bboxes, action labels and entity_ids.
parse_img_record
python
open-mmlab/mmaction2
mmaction/datasets/ava_dataset.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/ava_dataset.py
Apache-2.0
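To make the merging step above concrete, here is a minimal standalone sketch (not the dataset class itself) that collapses two hypothetical AVA-style records sharing the same entity box into one multi-hot label vector, mirroring the multilabel branch; the class count and record values are made up.

import numpy as np

num_classes = 81  # assumed class count, for illustration only
records = [
    {'entity_box': np.array([0.1, 0.2, 0.5, 0.9]), 'label': 12, 'entity_id': 3},
    {'entity_box': np.array([0.1, 0.2, 0.5, 0.9]), 'label': 17, 'entity_id': 3},
]

# Both records describe the same box, so their labels are merged into one
# multi-hot vector, which is the format expected by BCELossWithLogits.
valid_labels = np.array([r['label'] for r in records])
label = np.zeros(num_classes, dtype=np.float32)
label[valid_labels] = 1.
print(label[12], label[17])  # 1.0 1.0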
def parse_img_record(self, img_records: List[dict]) -> tuple: """Merge image records of the same entity at the same time. Args: img_records (List[dict]): List of img_records (lines in AVA annotations). Returns: Tuple(list): A tuple consists of lists of bboxes, action labels and entity_ids. """ bboxes, labels, entity_ids = [], [], [] while len(img_records) > 0: img_record = img_records[0] num_img_records = len(img_records) selected_records = [ x for x in img_records if np.array_equal(x['entity_box'], img_record['entity_box']) ] num_selected_records = len(selected_records) img_records = [ x for x in img_records if not np.array_equal(x['entity_box'], img_record['entity_box']) ] assert len(img_records) + num_selected_records == num_img_records bboxes.append(img_record['entity_box']) valid_labels = np.array([ selected_record['label'] for selected_record in selected_records ]) # The format can be directly used by BCELossWithLogits label = np.zeros(self.num_classes, dtype=np.float32) label[valid_labels] = 1. labels.append(label) entity_ids.append(img_record['entity_id']) bboxes = np.stack(bboxes) labels = np.stack(labels) entity_ids = np.stack(entity_ids) return bboxes, labels, entity_ids
Merge image records of the same entity at the same time. Args: img_records (List[dict]): List of img_records (lines in AVA annotations). Returns: Tuple(list): A tuple consisting of lists of bboxes, action labels and entity_ids.
parse_img_record
python
open-mmlab/mmaction2
mmaction/datasets/ava_dataset.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/ava_dataset.py
Apache-2.0
def load_data_list(self) -> List[dict]: """Load annotation file to get video information.""" exists(self.ann_file) data_list = [] with open(self.ann_file) as f: anno_database = f.readlines() for item in anno_database: first_part, query_sentence = item.strip().split('##') query_sentence = query_sentence.replace('.', '') query_words = nltk.word_tokenize(query_sentence) query_tokens = [self.word2id[word] for word in query_words] query_length = len(query_tokens) query_tokens = torch.from_numpy(np.array(query_tokens)) vid_name, start_time, end_time = first_part.split() duration = float(self.duration_info[vid_name]) fps = float(self.fps_info[vid_name]) gt_start_time = float(start_time) gt_end_time = float(end_time) gt_bbox = (gt_start_time / duration, min(gt_end_time / duration, 1)) num_frames = int(self.num_frames[vid_name]) proposal_frames = self.get_proposals(num_frames) proposals = proposal_frames / num_frames proposals = torch.from_numpy(proposals) proposal_indexes = proposal_frames / self.ft_interval proposal_indexes = proposal_indexes.astype(np.int32) info = dict( vid_name=vid_name, fps=fps, num_frames=num_frames, duration=duration, query_tokens=query_tokens, query_length=query_length, gt_start_time=gt_start_time, gt_end_time=gt_end_time, gt_bbox=gt_bbox, proposals=proposals, num_proposals=proposals.shape[0], proposal_indexes=proposal_indexes) data_list.append(info) return data_list
Load annotation file to get video information.
load_data_list
python
open-mmlab/mmaction2
mmaction/datasets/charades_sta_dataset.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/charades_sta_dataset.py
Apache-2.0
def load_data_list(self) -> List[Dict]: """Load annotation file to get video information.""" exists(self.ann_file) data_list = [] with open(self.ann_file) as f: data_lines = json.load(f) for data in data_lines: answers = data['answer'] if isinstance(answers, str): answers = [answers] count = Counter(answers) answer_weight = [i / len(answers) for i in count.values()] data_item = dict( question_id=data['question_id'], filename=osp.join(self.data_prefix['video'], data['video']), question=pre_text(data['question']), gt_answer=list(count.keys()), gt_answer_weight=answer_weight) data_list.append(data_item) return data_list
Load annotation file to get video information.
load_data_list
python
open-mmlab/mmaction2
mmaction/datasets/msrvtt_datasets.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/msrvtt_datasets.py
Apache-2.0
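As a quick illustration of the answer-weight computation above, the following sketch (with made-up answers) shows how repeated answers receive proportionally larger weights via collections.Counter.

from collections import Counter

answers = ['cat', 'cat', 'dog']           # hypothetical ground-truth answers
count = Counter(answers)                  # Counter({'cat': 2, 'dog': 1})
answer_weight = [i / len(answers) for i in count.values()]
print(list(count.keys()), answer_weight)  # ['cat', 'dog'] [~0.67, ~0.33]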
def load_data_list(self) -> List[Dict]: """Load annotation file to get video information.""" exists(self.ann_file) data_list = [] with open(self.ann_file) as f: data_lines = json.load(f) for data in data_lines: data_item = dict( filename=osp.join(self.data_prefix['video'], data['video']), label=data['answer'], caption_options=[pre_text(c) for c in data['caption']]) data_list.append(data_item) return data_list
Load annotation file to get video information.
load_data_list
python
open-mmlab/mmaction2
mmaction/datasets/msrvtt_datasets.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/msrvtt_datasets.py
Apache-2.0
def load_data_list(self) -> List[Dict]: """Load annotation file to get video information.""" exists(self.ann_file) data_list = [] with open(self.ann_file) as f: data_lines = json.load(f) video_idx = 0 text_idx = 0 for data in data_lines: # don't consider multiple videos or multiple captions video_path = osp.join(self.data_prefix['video'], data['video']) data_item = dict( filename=video_path, text=[], gt_video_id=[], gt_text_id=[]) if isinstance(data['caption'], str): data['caption'] = [data['caption']] for text in data['caption']: text = pre_text(text) data_item['text'].append(text) data_item['gt_video_id'].append(video_idx) data_item['gt_text_id'].append(text_idx) text_idx += 1 video_idx += 1 data_list.append(data_item) self.num_videos = video_idx self.num_texts = text_idx return data_list
Load annotation file to get video information.
load_data_list
python
open-mmlab/mmaction2
mmaction/datasets/msrvtt_datasets.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/msrvtt_datasets.py
Apache-2.0
def load_data_list(self) -> List[Dict]: """Load annotation file to get skeleton information.""" assert self.ann_file.endswith('.pkl') mmengine.exists(self.ann_file) data_list = mmengine.load(self.ann_file) if self.split is not None: split, annos = data_list['split'], data_list['annotations'] identifier = 'filename' if 'filename' in annos[0] else 'frame_dir' split = set(split[self.split]) data_list = [x for x in annos if x[identifier] in split] # Sometimes we may need to load video from the file if 'video' in self.data_prefix: for item in data_list: if 'filename' in item: item['filename'] = osp.join(self.data_prefix['video'], item['filename']) if 'frame_dir' in item: item['frame_dir'] = osp.join(self.data_prefix['video'], item['frame_dir']) return data_list
Load annotation file to get skeleton information.
load_data_list
python
open-mmlab/mmaction2
mmaction/datasets/pose_dataset.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/pose_dataset.py
Apache-2.0
def load_data_list(self) -> List[dict]: """Load annotation file to get video information.""" exists(self.ann_file) data_list = [] fin = list_from_file(self.ann_file) for line in fin: line_split = line.strip().split() video_info = {} idx = 0 # idx for frame_dir frame_dir = line_split[idx] if self.data_prefix['img'] is not None: frame_dir = osp.join(self.data_prefix['img'], frame_dir) video_info['frame_dir'] = frame_dir idx += 1 if self.with_offset: # idx for offset and total_frames video_info['offset'] = int(line_split[idx]) video_info['total_frames'] = int(line_split[idx + 1]) idx += 2 else: # idx for total_frames video_info['total_frames'] = int(line_split[idx]) idx += 1 # idx for label[s] label = [int(x) for x in line_split[idx:]] # add fake label for inference datalist without label if not label: label = [-1] if self.multi_class: assert self.num_classes is not None video_info['label'] = label else: assert len(label) == 1 video_info['label'] = label[0] data_list.append(video_info) return data_list
Load annotation file to get video information.
load_data_list
python
open-mmlab/mmaction2
mmaction/datasets/rawframe_dataset.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/rawframe_dataset.py
Apache-2.0
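For reference, a minimal sketch of parsing one annotation line in the plain format handled above (no offset, single label); the directory name, frame count and label are made up.

line = 'some_video_dir 300 7'             # hypothetical: <frame_dir> <total_frames> <label>
frame_dir, total_frames, label = line.strip().split()
video_info = dict(frame_dir=frame_dir,
                  total_frames=int(total_frames),
                  label=int(label))
print(video_info)   # {'frame_dir': 'some_video_dir', 'total_frames': 300, 'label': 7}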
def get_type(transform: Union[dict, Callable]) -> str: """get the type of the transform.""" if isinstance(transform, dict) and 'type' in transform: return transform['type'] elif callable(transform): return transform.__repr__().split('(')[0] else: raise TypeError
get the type of the transform.
get_type
python
open-mmlab/mmaction2
mmaction/datasets/repeat_aug_dataset.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/repeat_aug_dataset.py
Apache-2.0
def prepare_data(self, idx) -> List[dict]: """Get data processed by ``self.pipeline``. Reduce the video loading and decompressing. Args: idx (int): The index of ``data_info``. Returns: List[dict]: A list of length num_repeats. """ transforms = self.pipeline.transforms data_info = self.get_data_info(idx) data_info = transforms[0](data_info) # DecordInit frame_inds_list, frame_inds_length = [], [0] fake_data_info = dict( total_frames=data_info['total_frames'], start_index=data_info['start_index']) if not self.sample_once: for repeat in range(self.num_repeats): data_info_ = transforms[1](fake_data_info) # SampleFrames frame_inds = data_info_['frame_inds'] frame_inds_list.append(frame_inds.reshape(-1)) frame_inds_length.append(frame_inds.size + frame_inds_length[-1]) else: data_info_ = transforms[1](fake_data_info) # SampleFrames frame_inds = data_info_['frame_inds'] for repeat in range(self.num_repeats): frame_inds_list.append(frame_inds.reshape(-1)) frame_inds_length.append(frame_inds.size + frame_inds_length[-1]) for key in data_info_: data_info[key] = data_info_[key] data_info['frame_inds'] = np.concatenate(frame_inds_list) data_info = transforms[2](data_info) # DecordDecode imgs = data_info.pop('imgs') data_info_list = [] for repeat in range(self.num_repeats): data_info_ = deepcopy(data_info) start = frame_inds_length[repeat] end = frame_inds_length[repeat + 1] data_info_['imgs'] = imgs[start:end] for transform in transforms[3:]: data_info_ = transform(data_info_) data_info_list.append(data_info_) del imgs return data_info_list
Get data processed by ``self.pipeline``. This reduces redundant video loading and decoding. Args: idx (int): The index of ``data_info``. Returns: List[dict]: A list of length num_repeats.
prepare_data
python
open-mmlab/mmaction2
mmaction/datasets/repeat_aug_dataset.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/repeat_aug_dataset.py
Apache-2.0
def load_data_list(self) -> List[dict]: """Load annotation file to get video information.""" exists(self.ann_file) data_list = [] fin = list_from_file(self.ann_file) for line in fin: line_split = line.strip().split(self.delimiter) if self.multi_class: assert self.num_classes is not None filename, label = line_split[0], line_split[1:] label = list(map(int, label)) # add fake label for inference datalist without label elif len(line_split) == 1: filename, label = line_split[0], -1 else: filename, label = line_split label = int(label) if self.data_prefix['video'] is not None: filename = osp.join(self.data_prefix['video'], filename) data_list.append(dict(filename=filename, label=label)) return data_list
Load annotation file to get video information.
load_data_list
python
open-mmlab/mmaction2
mmaction/datasets/video_dataset.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/video_dataset.py
Apache-2.0
def load_data_list(self) -> List[Dict]: """Load annotation file to get video information.""" exists(self.ann_file) data_list = [] with open(self.ann_file) as f: video_dict = json.load(f) for filename, texts in video_dict.items(): filename = osp.join(self.data_prefix['video'], filename) video_text_pairs = [] for text in texts: data_item = dict(filename=filename, text=text) video_text_pairs.append(data_item) data_list.extend(video_text_pairs) return data_list
Load annotation file to get video information.
load_data_list
python
open-mmlab/mmaction2
mmaction/datasets/video_text_dataset.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/video_text_dataset.py
Apache-2.0
def transform(self, results: Dict) -> Dict: """The transform function of :class:`PackActionInputs`. Args: results (dict): The result dict. Returns: dict: The result dict. """ packed_results = dict() if self.collect_keys is not None: packed_results['inputs'] = dict() for key in self.collect_keys: packed_results['inputs'][key] = to_tensor(results[key]) else: if 'imgs' in results: imgs = results['imgs'] packed_results['inputs'] = to_tensor(imgs) elif 'heatmap_imgs' in results: heatmap_imgs = results['heatmap_imgs'] packed_results['inputs'] = to_tensor(heatmap_imgs) elif 'keypoint' in results: keypoint = results['keypoint'] packed_results['inputs'] = to_tensor(keypoint) elif 'audios' in results: audios = results['audios'] packed_results['inputs'] = to_tensor(audios) elif 'text' in results: text = results['text'] packed_results['inputs'] = to_tensor(text) else: raise ValueError( 'Cannot get `imgs`, `keypoint`, `heatmap_imgs`, ' '`audios` or `text` in the input dict of ' '`PackActionInputs`.') data_sample = ActionDataSample() if 'gt_bboxes' in results: instance_data = InstanceData() for key in self.mapping_table.keys(): instance_data[self.mapping_table[key]] = to_tensor( results[key]) data_sample.gt_instances = instance_data if 'proposals' in results: data_sample.proposals = InstanceData( bboxes=to_tensor(results['proposals'])) if 'label' in results: data_sample.set_gt_label(results['label']) # Set custom algorithm keys for key in self.algorithm_keys: if key in results: data_sample.set_field(results[key], key) # Set meta keys img_meta = {k: results[k] for k in self.meta_keys if k in results} data_sample.set_metainfo(img_meta) packed_results['data_samples'] = data_sample return packed_results
The transform function of :class:`PackActionInputs`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/formatting.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py
Apache-2.0
def transform(self, results): """Method to pack the input data. Args: results (dict): Result dict from the data pipeline. Returns: dict: - 'inputs' (obj:`torch.Tensor`): The forward data of models. - 'data_samples' (obj:`DetDataSample`): The annotation info of the sample. """ packed_results = dict() if 'raw_feature' in results: raw_feature = results['raw_feature'] packed_results['inputs'] = to_tensor(raw_feature) elif 'bsp_feature' in results: packed_results['inputs'] = torch.tensor(0.) else: raise ValueError( 'Cannot get "raw_feature" or "bsp_feature" in the input ' 'dict of `PackActionInputs`.') data_sample = ActionDataSample() for key in self.keys: if key not in results: continue elif key == 'proposals': instance_data = InstanceData() instance_data[key] = to_tensor(results[key]) data_sample.proposals = instance_data else: if hasattr(data_sample, 'gt_instances'): data_sample.gt_instances[key] = to_tensor(results[key]) else: instance_data = InstanceData() instance_data[key] = to_tensor(results[key]) data_sample.gt_instances = instance_data img_meta = {k: results[k] for k in self.meta_keys if k in results} data_sample.set_metainfo(img_meta) packed_results['data_samples'] = data_sample return packed_results
Method to pack the input data. Args: results (dict): Result dict from the data pipeline. Returns: dict: - 'inputs' (obj:`torch.Tensor`): The forward data of models. - 'data_samples' (obj:`ActionDataSample`): The annotation info of the sample.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/formatting.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py
Apache-2.0
def transform(self, results): """Performs the Transpose formatting. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ for key in self.keys: results[key] = results[key].transpose(self.order) return results
Performs the Transpose formatting. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/formatting.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py
Apache-2.0
def transform(self, results: Dict) -> Dict: """Performs the FormatShape formatting. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ if not isinstance(results['imgs'], np.ndarray): results['imgs'] = np.array(results['imgs']) # [M x H x W x C] # M = 1 * N_crops * N_clips * T if self.collapse: assert results['num_clips'] == 1 if self.input_format == 'NCTHW': if 'imgs' in results: imgs = results['imgs'] num_clips = results['num_clips'] clip_len = results['clip_len'] if isinstance(clip_len, dict): clip_len = clip_len['RGB'] imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:]) # N_crops x N_clips x T x H x W x C imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4)) # N_crops x N_clips x C x T x H x W imgs = imgs.reshape((-1, ) + imgs.shape[2:]) # M' x C x T x H x W # M' = N_crops x N_clips results['imgs'] = imgs results['input_shape'] = imgs.shape if 'heatmap_imgs' in results: imgs = results['heatmap_imgs'] num_clips = results['num_clips'] clip_len = results['clip_len'] # clip_len must be a dict clip_len = clip_len['Pose'] imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:]) # N_crops x N_clips x T x C x H x W imgs = np.transpose(imgs, (0, 1, 3, 2, 4, 5)) # N_crops x N_clips x C x T x H x W imgs = imgs.reshape((-1, ) + imgs.shape[2:]) # M' x C x T x H x W # M' = N_crops x N_clips results['heatmap_imgs'] = imgs results['heatmap_input_shape'] = imgs.shape elif self.input_format == 'NCTHW_Heatmap': num_clips = results['num_clips'] clip_len = results['clip_len'] imgs = results['imgs'] imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:]) # N_crops x N_clips x T x C x H x W imgs = np.transpose(imgs, (0, 1, 3, 2, 4, 5)) # N_crops x N_clips x C x T x H x W imgs = imgs.reshape((-1, ) + imgs.shape[2:]) # M' x C x T x H x W # M' = N_crops x N_clips results['imgs'] = imgs results['input_shape'] = imgs.shape elif self.input_format == 'NCHW': imgs = results['imgs'] imgs = np.transpose(imgs, (0, 3, 1, 2)) if 'modality' in results and results['modality'] == 'Flow': clip_len = results['clip_len'] imgs = imgs.reshape((-1, clip_len * imgs.shape[1]) + imgs.shape[2:]) # M x C x H x W results['imgs'] = imgs results['input_shape'] = imgs.shape elif self.input_format == 'NPTCHW': num_proposals = results['num_proposals'] num_clips = results['num_clips'] clip_len = results['clip_len'] imgs = results['imgs'] imgs = imgs.reshape((num_proposals, num_clips * clip_len) + imgs.shape[1:]) # P x M x H x W x C # M = N_clips x T imgs = np.transpose(imgs, (0, 1, 4, 2, 3)) # P x M x C x H x W results['imgs'] = imgs results['input_shape'] = imgs.shape if self.collapse: assert results['imgs'].shape[0] == 1 results['imgs'] = results['imgs'].squeeze(0) results['input_shape'] = results['imgs'].shape return results
Performs the FormatShape formatting. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/formatting.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py
Apache-2.0
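The NCTHW branch above is easiest to follow on a dummy batch; this minimal sketch (synthetic shapes only) reproduces the reshape/transpose chain for 2 crops x 3 clips of 8 RGB frames.

import numpy as np

num_crops, num_clips, clip_len, h, w, c = 2, 3, 8, 4, 4, 3
imgs = np.random.rand(num_crops * num_clips * clip_len, h, w, c)   # M x H x W x C

imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x T x H x W x C
imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4))
# N_crops x N_clips x C x T x H x W
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
print(imgs.shape)   # (6, 3, 8, 4, 4) -> M' x C x T x H x W, with M' = N_crops x N_clips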
def transform(self, results: Dict) -> Dict: """Performs the FormatShape formatting. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ audios = results['audios'] # clip x sample x freq -> clip x channel x sample x freq clip, sample, freq = audios.shape audios = audios.reshape(clip, 1, sample, freq) results['audios'] = audios results['input_shape'] = audios.shape return results
Performs the FormatShape formatting. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/formatting.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py
Apache-2.0
def transform(self, results: Dict) -> Dict: """The transform function of :class:`FormatGCNInput`. Args: results (dict): The result dict. Returns: dict: The result dict. """ keypoint = results['keypoint'] if 'keypoint_score' in results: keypoint = np.concatenate( (keypoint, results['keypoint_score'][..., None]), axis=-1) cur_num_person = keypoint.shape[0] if cur_num_person < self.num_person: pad_dim = self.num_person - cur_num_person pad = np.zeros( (pad_dim, ) + keypoint.shape[1:], dtype=keypoint.dtype) keypoint = np.concatenate((keypoint, pad), axis=0) if self.mode == 'loop' and cur_num_person == 1: for i in range(1, self.num_person): keypoint[i] = keypoint[0] elif cur_num_person > self.num_person: keypoint = keypoint[:self.num_person] M, T, V, C = keypoint.shape nc = results.get('num_clips', 1) assert T % nc == 0 keypoint = keypoint.reshape( (M, nc, T // nc, V, C)).transpose(1, 0, 2, 3, 4) results['keypoint'] = np.ascontiguousarray(keypoint) return results
The transform function of :class:`FormatGCNInput`. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/formatting.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py
Apache-2.0
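A minimal sketch of the person-padding and clip reshaping above, using synthetic shapes: one tracked person padded with zeros up to num_person = 2, then reshaped for num_clips = 1.

import numpy as np

num_person, num_clips = 2, 1
keypoint = np.random.rand(1, 16, 17, 3)      # M x T x V x C: 1 person, 16 frames, 17 joints

cur_num_person = keypoint.shape[0]
if cur_num_person < num_person:
    pad = np.zeros((num_person - cur_num_person, ) + keypoint.shape[1:],
                   dtype=keypoint.dtype)
    keypoint = np.concatenate((keypoint, pad), axis=0)

M, T, V, C = keypoint.shape
keypoint = keypoint.reshape((M, num_clips, T // num_clips, V, C)).transpose(1, 0, 2, 3, 4)
print(keypoint.shape)   # (1, 2, 16, 17, 3) -> num_clips x M x T x V x C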
def transform(self, results: dict) -> dict: """Functions to load image. Args: results (dict): Result dict from :obj:``mmcv.BaseDataset``. Returns: dict: The dict contains loaded image and meta information. """ filename = results['img_path'] try: img_bytes = self.file_client.get(filename) img = mmcv.imfrombytes( img_bytes, flag=self.color_type, channel_order='rgb', backend=self.imdecode_backend) except Exception as e: if self.ignore_empty: return None else: raise e if self.to_float32: img = img.astype(np.float32) results['img'] = img results['img_shape'] = img.shape[:2] results['ori_shape'] = img.shape[:2] return results
Function to load an image. Args: results (dict): Result dict from :obj:``mmcv.BaseDataset``. Returns: dict: The dict containing the loaded image and meta information.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results): """Convert the label dictionary to 3 tensors: "label", "mask" and "category_mask". Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ if not self.hvu_initialized: self.init_hvu_info(results['categories'], results['category_nums']) onehot = torch.zeros(self.num_tags) onehot_mask = torch.zeros(self.num_tags) category_mask = torch.zeros(self.num_categories) for category, tags in results['label'].items(): # skip if not training on this category if category not in self.categories: continue category_mask[self.categories.index(category)] = 1. start_idx = self.category2startidx[category] category_num = self.category2num[category] tags = [idx + start_idx for idx in tags] onehot[tags] = 1. onehot_mask[start_idx:category_num + start_idx] = 1. results['label'] = onehot results['mask'] = onehot_mask results['category_mask'] = category_mask return results
Convert the label dictionary to 3 tensors: "label", "mask" and "category_mask". Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
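The three tensors built above are easier to see with toy numbers; the following sketch uses two made-up categories ('action' with 3 tags, 'scene' with 2 tags) and a sample labelled only for 'action'.

import torch

categories = ['action', 'scene']              # hypothetical categories
category_nums = [3, 2]
category2num = dict(zip(categories, category_nums))
category2startidx = {'action': 0, 'scene': 3}
num_tags, num_categories = sum(category_nums), len(categories)

label = {'action': [1, 2]}                    # tag indices within the 'action' category

onehot = torch.zeros(num_tags)
onehot_mask = torch.zeros(num_tags)
category_mask = torch.zeros(num_categories)
for category, tags in label.items():
    category_mask[categories.index(category)] = 1.
    start_idx = category2startidx[category]
    onehot[[t + start_idx for t in tags]] = 1.
    onehot_mask[start_idx:start_idx + category2num[category]] = 1.

print(onehot)          # tensor([0., 1., 1., 0., 0.])
print(onehot_mask)     # tensor([1., 1., 1., 0., 0.])
print(category_mask)   # tensor([1., 0.])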
def _get_train_clips(self, num_frames: int, ori_clip_len: float) -> np.array: """Get clip offsets in train mode. It will calculate the average interval for selected frames, and randomly shift them within offsets between [0, avg_interval]. If the total number of frames is smaller than clips num or origin frames length, it will return all zero indices. Args: num_frames (int): Total number of frame in the video. ori_clip_len (float): length of original sample clip. Returns: np.ndarray: Sampled frame indices in train mode. """ if self.keep_tail_frames: avg_interval = (num_frames - ori_clip_len + 1) / float( self.num_clips) if num_frames > ori_clip_len - 1: base_offsets = np.arange(self.num_clips) * avg_interval clip_offsets = (base_offsets + np.random.uniform( 0, avg_interval, self.num_clips)).astype(np.int32) else: clip_offsets = np.zeros((self.num_clips, ), dtype=np.int32) else: avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips if avg_interval > 0: base_offsets = np.arange(self.num_clips) * avg_interval clip_offsets = base_offsets + np.random.randint( avg_interval, size=self.num_clips) elif num_frames > max(self.num_clips, ori_clip_len): clip_offsets = np.sort( np.random.randint( num_frames - ori_clip_len + 1, size=self.num_clips)) elif avg_interval == 0: ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips clip_offsets = np.around(np.arange(self.num_clips) * ratio) else: clip_offsets = np.zeros((self.num_clips, ), dtype=np.int32) return clip_offsets
Get clip offsets in train mode. It will calculate the average interval for selected frames, and randomly shift them within offsets between [0, avg_interval]. If the total number of frames is smaller than the number of clips or the original clip length, it will return all-zero indices. Args: num_frames (int): Total number of frames in the video. ori_clip_len (float): Length of the original sample clip. Returns: np.ndarray: Sampled frame indices in train mode.
_get_train_clips
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
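To ground the default training branch above (keep_tail_frames=False), here is a worked numpy sketch with synthetic numbers: 100 frames, 3 clips and an original clip length of 32.

import numpy as np

num_frames, num_clips, ori_clip_len = 100, 3, 32
avg_interval = (num_frames - ori_clip_len + 1) // num_clips          # 23
base_offsets = np.arange(num_clips) * avg_interval                   # [ 0 23 46]
clip_offsets = base_offsets + np.random.randint(avg_interval, size=num_clips)
print(clip_offsets)   # e.g. [ 7 30 55]: each clip start is shifted within its own interval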
def _get_test_clips(self, num_frames: int, ori_clip_len: float) -> np.array: """Get clip offsets in test mode. If the total number of frames is not enough, it will return all zero indices. Args: num_frames (int): Total number of frame in the video. ori_clip_len (float): length of original sample clip. Returns: np.ndarray: Sampled frame indices in test mode. """ if self.clip_len == 1: # 2D recognizer # assert self.frame_interval == 1 avg_interval = num_frames / float(self.num_clips) base_offsets = np.arange(self.num_clips) * avg_interval clip_offsets = base_offsets + avg_interval / 2.0 if self.twice_sample: clip_offsets = np.concatenate([clip_offsets, base_offsets]) else: # 3D recognizer max_offset = max(num_frames - ori_clip_len, 0) if self.twice_sample: num_clips = self.num_clips * 2 else: num_clips = self.num_clips if num_clips > 1: num_segments = self.num_clips - 1 # align test sample strategy with `PySlowFast` repo if self.target_fps is not None: offset_between = np.floor(max_offset / float(num_segments)) clip_offsets = np.arange(num_clips) * offset_between else: offset_between = max_offset / float(num_segments) clip_offsets = np.arange(num_clips) * offset_between clip_offsets = np.round(clip_offsets) else: clip_offsets = np.array([max_offset // 2]) return clip_offsets
Get clip offsets in test mode. If the total number of frames is not enough, it will return all-zero indices. Args: num_frames (int): Total number of frames in the video. ori_clip_len (float): Length of the original sample clip. Returns: np.ndarray: Sampled frame indices in test mode.
_get_test_clips
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def _sample_clips(self, num_frames: int, ori_clip_len: float) -> np.array: """Choose clip offsets for the video in a given mode. Args: num_frames (int): Total number of frame in the video. Returns: np.ndarray: Sampled frame indices. """ if self.test_mode: clip_offsets = self._get_test_clips(num_frames, ori_clip_len) else: clip_offsets = self._get_train_clips(num_frames, ori_clip_len) return clip_offsets
Choose clip offsets for the video in a given mode. Args: num_frames (int): Total number of frames in the video. ori_clip_len (float): Length of the original sample clip. Returns: np.ndarray: Sampled frame indices.
_sample_clips
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def _get_ori_clip_len(self, fps_scale_ratio: float) -> float: """calculate length of clip segment for different strategy. Args: fps_scale_ratio (float): Scale ratio to adjust fps. """ if self.target_fps is not None: # align test sample strategy with `PySlowFast` repo ori_clip_len = self.clip_len * self.frame_interval ori_clip_len = np.maximum(1, ori_clip_len * fps_scale_ratio) elif self.test_mode: ori_clip_len = (self.clip_len - 1) * self.frame_interval + 1 else: ori_clip_len = self.clip_len * self.frame_interval return ori_clip_len
Calculate the length of the clip segment for the different sampling strategies. Args: fps_scale_ratio (float): Scale ratio to adjust fps.
_get_ori_clip_len
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
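A small sketch of the three branches above for clip_len = 8 and frame_interval = 4 (the fps values are made up):

clip_len, frame_interval = 8, 4

# target_fps set, e.g. a 60 fps video resampled to a 30 fps target:
fps_scale_ratio = 60 / 30
print(max(1, clip_len * frame_interval * fps_scale_ratio))   # 64.0

# test mode without target_fps:
print((clip_len - 1) * frame_interval + 1)                   # 29

# train mode without target_fps:
print(clip_len * frame_interval)                             # 32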
def transform(self, results: dict) -> dict: """Perform the SampleFrames loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ total_frames = results['total_frames'] # if can't get fps, same value of `fps` and `target_fps` # will perform nothing fps = results.get('avg_fps') if self.target_fps is None or not fps: fps_scale_ratio = 1.0 else: fps_scale_ratio = fps / self.target_fps ori_clip_len = self._get_ori_clip_len(fps_scale_ratio) clip_offsets = self._sample_clips(total_frames, ori_clip_len) if self.target_fps: frame_inds = clip_offsets[:, None] + np.linspace( 0, ori_clip_len - 1, self.clip_len).astype(np.int32) else: frame_inds = clip_offsets[:, None] + np.arange( self.clip_len)[None, :] * self.frame_interval frame_inds = np.concatenate(frame_inds) if self.temporal_jitter: perframe_offsets = np.random.randint( self.frame_interval, size=len(frame_inds)) frame_inds += perframe_offsets frame_inds = frame_inds.reshape((-1, self.clip_len)) if self.out_of_bound_opt == 'loop': frame_inds = np.mod(frame_inds, total_frames) elif self.out_of_bound_opt == 'repeat_last': safe_inds = frame_inds < total_frames unsafe_inds = 1 - safe_inds last_ind = np.max(safe_inds * frame_inds, axis=1) new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T) frame_inds = new_inds else: raise ValueError('Illegal out_of_bound option.') start_index = results['start_index'] frame_inds = np.concatenate(frame_inds) + start_index results['frame_inds'] = frame_inds.astype(np.int32) results['clip_len'] = self.clip_len results['frame_interval'] = self.frame_interval results['num_clips'] = self.num_clips return results
Perform the SampleFrames loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
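The two out_of_bound_opt policies in the transform above behave as follows on a 10-frame video whose sampled indices run past the end (a standalone numpy sketch, not the transform itself):

import numpy as np

total_frames = 10
frame_inds = np.array([[6, 8, 10, 12]])      # (num_clips, clip_len), last two out of range

# 'loop': wrap around to the beginning of the video
looped = np.mod(frame_inds, total_frames)    # [[6 8 0 2]]

# 'repeat_last': clamp to the last valid index of each clip
safe_inds = frame_inds < total_frames
unsafe_inds = 1 - safe_inds
last_ind = np.max(safe_inds * frame_inds, axis=1)
repeated = safe_inds * frame_inds + (unsafe_inds.T * last_ind).T
print(looped, repeated)                      # [[6 8 0 2]] [[6 8 8 8]]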
def _get_sample_clips(self, num_frames: int) -> np.ndarray: """To sample an n-frame clip from the video. UniformSample basically divides the video into n segments of equal length and randomly samples one frame from each segment. When the duration of video frames is shorter than the desired length of the target clip, this approach will duplicate the sampled frame instead of looping the sample in "loop" mode. In the test mode, when we need to sample multiple clips, specifically 'n' clips, this method will further divide the segments based on the number of clips to be sampled. The 'i-th' clip will. sample the frame located at the position 'i * len(segment) / n' within the segment. Args: num_frames (int): Total number of frame in the video. Returns: seq (np.ndarray): the indexes of frames of sampled from the video. """ seg_size = float(num_frames - 1) / self.clip_len inds = [] if not self.test_mode: for i in range(self.clip_len): start = int(np.round(seg_size * i)) end = int(np.round(seg_size * (i + 1))) inds.append(np.random.randint(start, end + 1)) else: duration = seg_size / (self.num_clips + 1) for k in range(self.num_clips): for i in range(self.clip_len): start = int(np.round(seg_size * i)) frame_index = start + int(duration * (k + 1)) inds.append(frame_index) return np.array(inds)
To sample an n-frame clip from the video. UniformSample basically divides the video into n segments of equal length and randomly samples one frame from each segment. When the duration of video frames is shorter than the desired length of the target clip, this approach will duplicate the sampled frame instead of looping the sample in "loop" mode. In the test mode, when we need to sample multiple clips, specifically 'n' clips, this method will further divide the segments based on the number of clips to be sampled. The 'i-th' clip will sample the frame located at the position 'i * len(segment) / n' within the segment. Args: num_frames (int): Total number of frames in the video. Returns: seq (np.ndarray): The indices of the frames sampled from the video.
_get_sample_clips
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
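A worked example of the test-mode branch above, sampling a 4-frame clip from a 20-frame video with num_clips = 1 (numbers are synthetic):

import numpy as np

num_frames, clip_len, num_clips = 20, 4, 1
seg_size = float(num_frames - 1) / clip_len       # 4.75
duration = seg_size / (num_clips + 1)             # 2.375

inds = []
for k in range(num_clips):
    for i in range(clip_len):
        start = int(np.round(seg_size * i))
        inds.append(start + int(duration * (k + 1)))
print(np.array(inds))   # [ 2  7 12 16]: one frame per segment, shifted by the clip index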
def transform(self, results: Dict) -> Dict: """Perform the Uniform Sampling. Args: results (dict): The result dict. Returns: dict: The result dict. """ num_frames = results['total_frames'] inds = self._get_sample_clips(num_frames) start_index = results['start_index'] inds = inds + start_index results['frame_inds'] = inds.astype(np.int32) results['clip_len'] = self.clip_len results['frame_interval'] = None results['num_clips'] = self.num_clips return results
Perform the Uniform Sampling. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results): """Perform the SampleFrames loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ total_frames = results['total_frames'] start_index = results['start_index'] clip_centers = np.arange(self.clip_interval // 2, total_frames, self.clip_interval) num_clips = clip_centers.shape[0] frame_inds = clip_centers[:, None] + np.arange( -(self.clip_len // 2 * self.frame_interval), self.frame_interval * (self.clip_len - (self.clip_len // 2)), self.frame_interval)[None, :] # clip frame_inds to legal range frame_inds = np.clip(frame_inds, 0, total_frames - 1) frame_inds = np.concatenate(frame_inds) + start_index results['frame_inds'] = frame_inds.astype(np.int32) results['clip_len'] = self.clip_len results['clip_interval'] = self.clip_interval results['frame_interval'] = self.frame_interval results['num_clips'] = num_clips return results
Perform the SampleFrames loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
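The clip-center arithmetic above can be checked with a small sketch: a 50-frame untrimmed video, clip_interval = 16, clip_len = 4 and frame_interval = 2 (all values made up).

import numpy as np

total_frames, clip_interval, clip_len, frame_interval = 50, 16, 4, 2
clip_centers = np.arange(clip_interval // 2, total_frames, clip_interval)   # [ 8 24 40]
offsets = np.arange(-(clip_len // 2 * frame_interval),
                    frame_interval * (clip_len - clip_len // 2),
                    frame_interval)                                         # [-4 -2  0  2]
frame_inds = np.clip(clip_centers[:, None] + offsets[None, :], 0, total_frames - 1)
print(frame_inds)   # [[ 4  6  8 10] [20 22 24 26] [36 38 40 42]]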
def _get_train_clips(self, num_frames: int) -> np.array: """Get clip offsets by dense sample strategy in train mode. It will calculate a sample position and sample interval and set start index 0 when sample_pos == 1 or randomly choose from [0, sample_pos - 1]. Then it will shift the start index by each base offset. Args: num_frames (int): Total number of frame in the video. Returns: np.ndarray: Sampled frame indices in train mode. """ sample_position = max(1, 1 + num_frames - self.sample_range) interval = self.sample_range // self.num_clips start_idx = 0 if sample_position == 1 else np.random.randint( 0, sample_position - 1) base_offsets = np.arange(self.num_clips) * interval clip_offsets = (base_offsets + start_idx) % num_frames return clip_offsets
Get clip offsets by the dense sample strategy in train mode. It will calculate a sample position and a sample interval, set the start index to 0 when sample_pos == 1, or randomly choose it from [0, sample_pos - 1]. Then it will shift the start index by each base offset. Args: num_frames (int): Total number of frames in the video. Returns: np.ndarray: Sampled frame indices in train mode.
_get_train_clips
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
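To illustrate the dense training strategy above with synthetic numbers: 200 frames, a 64-frame sample range and 8 clips, so every clip starts inside one randomly placed 64-frame window.

import numpy as np

num_frames, sample_range, num_clips = 200, 64, 8
sample_position = max(1, 1 + num_frames - sample_range)     # 137
interval = sample_range // num_clips                        # 8
start_idx = 0 if sample_position == 1 else np.random.randint(0, sample_position - 1)
clip_offsets = (np.arange(num_clips) * interval + start_idx) % num_frames
print(clip_offsets)   # e.g. [40 48 56 64 72 80 88 96]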
def _get_test_clips(self, num_frames: int) -> np.array: """Get clip offsets by dense sample strategy in test mode. It will calculate a sample position and sample interval and evenly sample several start indexes as start positions between [0, sample_position-1]. Then it will shift each start index by the base offsets. Args: num_frames (int): Total number of frame in the video. Returns: np.ndarray: Sampled frame indices in train mode. """ sample_position = max(1, 1 + num_frames - self.sample_range) interval = self.sample_range // self.num_clips start_list = np.linspace( 0, sample_position - 1, num=self.num_sample_positions, dtype=int) base_offsets = np.arange(self.num_clips) * interval clip_offsets = list() for start_idx in start_list: clip_offsets.extend((base_offsets + start_idx) % num_frames) clip_offsets = np.array(clip_offsets) return clip_offsets
Get clip offsets by the dense sample strategy in test mode. It will calculate a sample position and a sample interval, and evenly sample several start indices as start positions between [0, sample_position - 1]. Then it will shift each start index by the base offsets. Args: num_frames (int): Total number of frames in the video. Returns: np.ndarray: Sampled frame indices in test mode.
_get_test_clips
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def _sample_clips(self, num_frames: int) -> np.array: """Choose clip offsets for the video in a given mode. Args: num_frames (int): Total number of frame in the video. Returns: np.ndarray: Sampled frame indices. """ if self.test_mode: clip_offsets = self._get_test_clips(num_frames) else: clip_offsets = self._get_train_clips(num_frames) return clip_offsets
Choose clip offsets for the video in a given mode. Args: num_frames (int): Total number of frames in the video. Returns: np.ndarray: Sampled frame indices.
_sample_clips
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results: dict) -> dict: """Perform the SampleFrames loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ total_frames = results['total_frames'] clip_offsets = self._sample_clips(total_frames) frame_inds = clip_offsets[:, None] + np.arange( self.clip_len)[None, :] * self.frame_interval frame_inds = np.concatenate(frame_inds) if self.temporal_jitter: perframe_offsets = np.random.randint( self.frame_interval, size=len(frame_inds)) frame_inds += perframe_offsets frame_inds = frame_inds.reshape((-1, self.clip_len)) if self.out_of_bound_opt == 'loop': frame_inds = np.mod(frame_inds, total_frames) elif self.out_of_bound_opt == 'repeat_last': safe_inds = frame_inds < total_frames unsafe_inds = 1 - safe_inds last_ind = np.max(safe_inds * frame_inds, axis=1) new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T) frame_inds = new_inds else: raise ValueError('Illegal out_of_bound option.') start_index = results['start_index'] frame_inds = np.concatenate(frame_inds) + start_index results['frame_inds'] = frame_inds.astype(np.int32) results['clip_len'] = self.clip_len results['frame_interval'] = self.frame_interval results['num_clips'] = self.num_clips return results
Perform the SampleFrames loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results): """Perform the SampleFrames loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ fps = results['fps'] timestamp = results['timestamp'] timestamp_start = results['timestamp_start'] start_index = results.get('start_index', 0) if results.get('total_frames') is not None: shot_info = (0, results['total_frames']) else: shot_info = results['shot_info'] center_index = fps * (timestamp - timestamp_start) + start_index skip_offsets = np.random.randint( -self.frame_interval // 2, (self.frame_interval + 1) // 2, size=self.clip_len) frame_inds = self._get_clips(center_index, skip_offsets, shot_info) frame_inds = np.array(frame_inds, dtype=np.int32) + start_index results['frame_inds'] = frame_inds results['clip_len'] = self.clip_len results['frame_interval'] = self.frame_interval results['num_clips'] = 1 results['crop_quadruple'] = np.array([0, 0, 1, 1], dtype=np.float32) return results
Perform the SampleFrames loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results): """Perform the PyAV initialization. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ try: import av except ImportError: raise ImportError('Please run "conda install av -c conda-forge" ' 'or "pip install av" to install PyAV first.') if self.file_client is None: self.file_client = FileClient(self.io_backend, **self.kwargs) file_obj = io.BytesIO(self.file_client.get(results['filename'])) container = av.open(file_obj) results['video_reader'] = container results['total_frames'] = container.streams.video[0].frames return results
Perform the PyAV initialization. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results): """Perform the PyAV decoding. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ container = results['video_reader'] imgs = list() if self.multi_thread: container.streams.video[0].thread_type = 'AUTO' if results['frame_inds'].ndim != 1: results['frame_inds'] = np.squeeze(results['frame_inds']) if self.mode == 'accurate': # set max indice to make early stop max_inds = max(results['frame_inds']) i = 0 for frame in container.decode(video=0): if i > max_inds + 1: break imgs.append(frame.to_rgb().to_ndarray()) i += 1 # the available frame in pyav may be less than its length, # which may raise error results['imgs'] = [ imgs[i % len(imgs)] for i in results['frame_inds'] ] elif self.mode == 'efficient': for frame in container.decode(video=0): backup_frame = frame break stream = container.streams.video[0] for idx in results['frame_inds']: pts_scale = stream.average_rate * stream.time_base frame_pts = int(idx / pts_scale) container.seek( frame_pts, any_frame=False, backward=True, stream=stream) frame = self.frame_generator(container, stream) if frame is not None: imgs.append(frame) backup_frame = frame else: imgs.append(backup_frame) results['imgs'] = imgs results['original_shape'] = imgs[0].shape[:2] results['img_shape'] = imgs[0].shape[:2] results['video_reader'] = None del container return results
Perform the PyAV decoding. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results): """Perform the PIMS initialization. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ try: import pims except ImportError: raise ImportError('Please run "conda install pims -c conda-forge" ' 'or "pip install pims" to install pims first.') if self.file_client is None: self.file_client = FileClient(self.io_backend, **self.kwargs) file_obj = io.BytesIO(self.file_client.get(results['filename'])) if self.mode == 'accurate': container = pims.PyAVReaderIndexed(file_obj) else: container = pims.PyAVReaderTimed(file_obj) results['video_reader'] = container results['total_frames'] = len(container) return results
Perform the PIMS initialization. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results): """Perform the PIMS decoding. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ container = results['video_reader'] if results['frame_inds'].ndim != 1: results['frame_inds'] = np.squeeze(results['frame_inds']) frame_inds = results['frame_inds'] imgs = [container[idx] for idx in frame_inds] results['video_reader'] = None del container results['imgs'] = imgs results['original_shape'] = imgs[0].shape[:2] results['img_shape'] = imgs[0].shape[:2] return results
Perform the PIMS decoding. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results): """Perform the PyAV motion vector decoding. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ container = results['video_reader'] imgs = list() if self.multi_thread: container.streams.video[0].thread_type = 'AUTO' if results['frame_inds'].ndim != 1: results['frame_inds'] = np.squeeze(results['frame_inds']) # set max index to make early stop max_idx = max(results['frame_inds']) i = 0 stream = container.streams.video[0] codec_context = stream.codec_context codec_context.options = {'flags2': '+export_mvs'} for packet in container.demux(stream): for frame in packet.decode(): if i > max_idx + 1: break i += 1 height = frame.height width = frame.width mv = np.zeros((height, width, 2), dtype=np.int8) vectors = frame.side_data.get('MOTION_VECTORS') if frame.key_frame: # Key frame don't have motion vectors assert vectors is None if vectors is not None and len(vectors) > 0: mv = self._parse_vectors(mv, vectors.to_ndarray(), height, width) imgs.append(mv) results['video_reader'] = None del container # the available frame in pyav may be less than its length, # which may raise error results['motion_vectors'] = np.array( [imgs[i % len(imgs)] for i in results['frame_inds']]) return results
Perform the PyAV motion vector decoding. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results: Dict) -> Dict: """Perform the Decord initialization. Args: results (dict): The result dict. Returns: dict: The result dict. """ container = self._get_video_reader(results['filename']) results['total_frames'] = len(container) results['video_reader'] = container results['avg_fps'] = container.get_avg_fps() return results
Perform the Decord initialization. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results: Dict) -> Dict: """Perform the Decord decoding. Args: results (dict): The result dict. Returns: dict: The result dict. """ container = results['video_reader'] if results['frame_inds'].ndim != 1: results['frame_inds'] = np.squeeze(results['frame_inds']) frame_inds = results['frame_inds'] imgs = self._decord_load_frames(container, frame_inds) results['video_reader'] = None del container results['imgs'] = imgs results['original_shape'] = imgs[0].shape[:2] results['img_shape'] = imgs[0].shape[:2] # we resize the gt_bboxes and proposals to their real scale if 'gt_bboxes' in results: h, w = results['img_shape'] scale_factor = np.array([w, h, w, h]) gt_bboxes = results['gt_bboxes'] gt_bboxes = (gt_bboxes * scale_factor).astype(np.float32) results['gt_bboxes'] = gt_bboxes if 'proposals' in results and results['proposals'] is not None: proposals = results['proposals'] proposals = (proposals * scale_factor).astype(np.float32) results['proposals'] = proposals return results
Perform the Decord decoding. Args: results (dict): The result dict. Returns: dict: The result dict.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results: dict) -> dict: """Perform the OpenCV initialization. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ if self.io_backend == 'disk': new_path = results['filename'] else: if self.file_client is None: self.file_client = FileClient(self.io_backend, **self.kwargs) thread_id = get_thread_id() # save the file of same thread at the same place new_path = osp.join(self.tmp_folder, f'tmp_{thread_id}.mp4') with open(new_path, 'wb') as f: f.write(self.file_client.get(results['filename'])) container = mmcv.VideoReader(new_path) results['new_path'] = new_path results['video_reader'] = container results['total_frames'] = len(container) return results
Perform the OpenCV initialization. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results: dict) -> dict: """Perform the OpenCV decoding. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ container = results['video_reader'] imgs = list() if results['frame_inds'].ndim != 1: results['frame_inds'] = np.squeeze(results['frame_inds']) for frame_ind in results['frame_inds']: cur_frame = container[frame_ind] # last frame may be None in OpenCV while isinstance(cur_frame, type(None)): frame_ind -= 1 cur_frame = container[frame_ind] imgs.append(cur_frame) results['video_reader'] = None del container imgs = np.array(imgs) # The default channel order of OpenCV is BGR, thus we change it to RGB imgs = imgs[:, :, :, ::-1] results['imgs'] = list(imgs) results['original_shape'] = imgs[0].shape[:2] results['img_shape'] = imgs[0].shape[:2] return results
Perform the OpenCV decoding. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
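A minimal sketch of the channel flip at the end of the transform above: OpenCV stores frames in BGR order, and reversing the last axis turns them into RGB (values are synthetic).

import numpy as np

frames = np.zeros((2, 4, 4, 3), dtype=np.uint8)   # T x H x W x C, BGR order
frames[..., 0] = 255                              # pure blue in BGR
rgb = frames[:, :, :, ::-1]
print(rgb[0, 0, 0])                               # [  0   0 255] -> blue now sits in the B channel of RGB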
def transform(self, results: dict) -> dict: """Perform the ``RawFrameDecode`` to pick frames given indices. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ mmcv.use_backend(self.decoding_backend) directory = results['frame_dir'] filename_tmpl = results['filename_tmpl'] modality = results['modality'] if self.file_client is None: self.file_client = FileClient(self.io_backend, **self.kwargs) imgs = list() if results['frame_inds'].ndim != 1: results['frame_inds'] = np.squeeze(results['frame_inds']) offset = results.get('offset', 0) cache = {} for i, frame_idx in enumerate(results['frame_inds']): # Avoid loading duplicated frames if frame_idx in cache: imgs.append(cp.deepcopy(imgs[cache[frame_idx]])) continue else: cache[frame_idx] = i frame_idx += offset if modality == 'RGB': filepath = osp.join(directory, filename_tmpl.format(frame_idx)) img_bytes = self.file_client.get(filepath) # Get frame with channel order RGB directly. cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb') imgs.append(cur_frame) elif modality == 'Flow': x_filepath = osp.join(directory, filename_tmpl.format('x', frame_idx)) y_filepath = osp.join(directory, filename_tmpl.format('y', frame_idx)) x_img_bytes = self.file_client.get(x_filepath) x_frame = mmcv.imfrombytes(x_img_bytes, flag='grayscale') y_img_bytes = self.file_client.get(y_filepath) y_frame = mmcv.imfrombytes(y_img_bytes, flag='grayscale') imgs.append(np.stack([x_frame, y_frame], axis=-1)) else: raise NotImplementedError results['imgs'] = imgs results['original_shape'] = imgs[0].shape[:2] results['img_shape'] = imgs[0].shape[:2] # we resize the gt_bboxes and proposals to their real scale if 'gt_bboxes' in results: h, w = results['img_shape'] scale_factor = np.array([w, h, w, h]) gt_bboxes = results['gt_bboxes'] gt_bboxes = (gt_bboxes * scale_factor).astype(np.float32) results['gt_bboxes'] = gt_bboxes if 'proposals' in results and results['proposals'] is not None: proposals = results['proposals'] proposals = (proposals * scale_factor).astype(np.float32) results['proposals'] = proposals return results
Perform the ``RawFrameDecode`` to pick frames given indices. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
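One step in the ``RawFrameDecode`` snippet above that is easy to miss is the rescaling of normalized ``gt_bboxes``/``proposals`` to pixel coordinates once the frame size is known. A hedged, standalone illustration of that arithmetic (the box values and image size below are invented):

import numpy as np

h, w = 240, 320                       # decoded frame size
scale_factor = np.array([w, h, w, h])

# Normalized (x1, y1, x2, y2) boxes in [0, 1], as an annotation might store them.
gt_bboxes = np.array([[0.25, 0.5, 0.75, 1.0]])

gt_bboxes_px = (gt_bboxes * scale_factor).astype(np.float32)
print(gt_bboxes_px)  # [[ 80. 120. 240. 240.]]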
def transform(self, results): """Perform the ``RawFrameDecode`` to pick frames given indices. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ modality = results['modality'] array = results['array'] imgs = list() if results['frame_inds'].ndim != 1: results['frame_inds'] = np.squeeze(results['frame_inds']) offset = results.get('offset', 0) for i, frame_idx in enumerate(results['frame_inds']): frame_idx += offset if modality == 'RGB': imgs.append(array[frame_idx]) elif modality == 'Flow': imgs.extend( [array[frame_idx, ..., 0], array[frame_idx, ..., 1]]) else: raise NotImplementedError results['imgs'] = imgs results['original_shape'] = imgs[0].shape[:2] results['img_shape'] = imgs[0].shape[:2] return results
Perform the ``RawFrameDecode`` to pick frames given indices. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results): """Perform the ``ImageDecode`` to load image given the file path. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ mmcv.use_backend(self.decoding_backend) filename = results['filename'] if self.file_client is None: self.file_client = FileClient(self.io_backend, **self.kwargs) imgs = list() img_bytes = self.file_client.get(filename) img = mmcv.imfrombytes(img_bytes, channel_order='rgb') imgs.append(img) results['imgs'] = imgs results['original_shape'] = imgs[0].shape[:2] results['img_shape'] = imgs[0].shape[:2] return results
Perform the ``ImageDecode`` to load image given the file path. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results: Dict) -> Dict: """Perform the numpy loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ if osp.exists(results['audio_path']): feature_map = np.load(results['audio_path']) else: # Generate a random dummy 10s input # Some videos do not have audio stream pad_func = getattr(self, f'_{self.pad_method}_pad') feature_map = pad_func((640, 80)) results['length'] = feature_map.shape[0] results['audios'] = feature_map return results
Perform the numpy loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results): """Perform the building of pseudo clips. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ # the input should be one single image assert len(results['imgs']) == 1 im = results['imgs'][0] for _ in range(1, self.clip_len): results['imgs'].append(np.copy(im)) results['clip_len'] = self.clip_len results['num_clips'] = 1 return results
Perform the building of pseudo clips. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results: Dict) -> Dict: """Perform the ``AudioFeatureSelector`` to pick audio feature clips. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ audio = results['audios'] frame_inds = results['frame_inds'] num_clips = results['num_clips'] resampled_clips = list() frame_inds = frame_inds.reshape(num_clips, -1) for clip_idx in range(num_clips): clip_frame_inds = frame_inds[clip_idx] start_idx = max( 0, int( round((clip_frame_inds[0] + 1) / results['total_frames'] * results['length']))) end_idx = min( results['length'], int( round((clip_frame_inds[-1] + 1) / results['total_frames'] * results['length']))) cropped_audio = audio[start_idx:end_idx, :] if cropped_audio.shape[0] >= self.fixed_length: truncated_audio = cropped_audio[:self.fixed_length, :] else: truncated_audio = np.pad( cropped_audio, ((0, self.fixed_length - cropped_audio.shape[0]), (0, 0)), mode='constant') resampled_clips.append(truncated_audio) results['audios'] = np.array(resampled_clips) results['audios_shape'] = results['audios'].shape return results
Perform the ``AudioFeatureSelector`` to pick audio feature clips. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
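To see what the start/end index arithmetic in the ``AudioFeatureSelector`` snippet above does, here is a small worked sketch that maps one clip's frame indices onto rows of an audio feature map and pads to the fixed length; all numbers are illustrative, not from any real sample:

import numpy as np

total_frames = 300        # video frames
length = 640              # rows in the audio feature map
fixed_length = 128        # target rows per clip

clip_frame_inds = np.arange(100, 132)   # one sampled RGB clip

start_idx = max(0, int(round((clip_frame_inds[0] + 1) / total_frames * length)))
end_idx = min(length, int(round((clip_frame_inds[-1] + 1) / total_frames * length)))
print(start_idx, end_idx)   # 215 282 -> 67 rows of audio features

cropped = np.zeros((end_idx - start_idx, 80), dtype=np.float32)
# Shorter than fixed_length, so it gets zero-padded at the end.
padded = np.pad(cropped, ((0, fixed_length - cropped.shape[0]), (0, 0)), mode='constant')
print(padded.shape)         # (128, 80)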
def transform(self, results): """Perform the LoadLocalizationFeature loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ data_path = results['feature_path'] raw_feature = np.loadtxt( data_path, dtype=np.float32, delimiter=',', skiprows=1) results['raw_feature'] = np.transpose(raw_feature, (1, 0)) return results
Perform the LoadLocalizationFeature loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results): """Perform the GenerateLocalizationLabels loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ video_frame = results['duration_frame'] video_second = results['duration_second'] feature_frame = results['feature_frame'] corrected_second = float(feature_frame) / video_frame * video_second annotations = results['annotations'] gt_bbox = [] for annotation in annotations: current_start = max( min(1, annotation['segment'][0] / corrected_second), 0) current_end = max( min(1, annotation['segment'][1] / corrected_second), 0) gt_bbox.append([current_start, current_end]) gt_bbox = np.array(gt_bbox) results['gt_bbox'] = gt_bbox return results
Perform the GenerateLocalizationLabels loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
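The ``GenerateLocalizationLabels`` snippet above normalizes each annotated segment by a feature-corrected duration so that ground-truth boundaries land in [0, 1]. A small worked example with made-up numbers:

duration_frame = 1000     # frames in the raw video
duration_second = 40.0    # video length in seconds
feature_frame = 992       # frames actually covered by the extracted features

corrected_second = float(feature_frame) / duration_frame * duration_second
print(corrected_second)   # 39.68

segment = [4.0, 12.5]     # one annotated action, in seconds
start = max(min(1, segment[0] / corrected_second), 0)
end = max(min(1, segment[1] / corrected_second), 0)
print(round(start, 4), round(end, 4))   # 0.1008 0.315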
def transform(self, results): """Perform the LoadProposals loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ video_name = results['video_name'] proposal_path = osp.join(self.pgm_proposals_dir, video_name + self.proposal_ext) if self.proposal_ext == '.csv': pgm_proposals = np.loadtxt( proposal_path, dtype=np.float32, delimiter=',', skiprows=1) pgm_proposals = np.array(pgm_proposals[:self.top_k]) tmin = pgm_proposals[:, 0] tmax = pgm_proposals[:, 1] tmin_score = pgm_proposals[:, 2] tmax_score = pgm_proposals[:, 3] reference_temporal_iou = pgm_proposals[:, 5] feature_path = osp.join(self.pgm_features_dir, video_name + self.feature_ext) if self.feature_ext == '.npy': bsp_feature = np.load(feature_path).astype(np.float32) bsp_feature = bsp_feature[:self.top_k, :] results['bsp_feature'] = bsp_feature results['tmin'] = tmin results['tmax'] = tmax results['tmin_score'] = tmin_score results['tmax_score'] = tmax_score results['reference_temporal_iou'] = reference_temporal_iou return results
Perform the LoadProposals loading. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/loading.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
Apache-2.0
def transform(self, results: Dict) -> Dict: """Perform the pose decoding. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ required_keys = ['total_frames', 'frame_inds', 'keypoint'] for k in required_keys: assert k in results total_frames = results['total_frames'] frame_inds = results.pop('frame_inds') keypoint = results['keypoint'] if 'anno_inds' in results: frame_inds = frame_inds[results['anno_inds']] keypoint = keypoint[results['anno_inds']] assert np.all(np.diff(frame_inds) >= 0), \ 'frame_inds should be monotonical increasing' def mapinds(inds): uni = np.unique(inds) map_ = {x: i for i, x in enumerate(uni)} inds = [map_[x] for x in inds] return np.array(inds, dtype=np.int16) if self.squeeze: frame_inds = mapinds(frame_inds) total_frames = np.max(frame_inds) + 1 results['total_frames'] = total_frames num_joints = keypoint.shape[1] num_person = get_mode(frame_inds)[-1][0] new_kp = np.zeros([num_person, total_frames, num_joints, 2], dtype=np.float16) new_kpscore = np.zeros([num_person, total_frames, num_joints], dtype=np.float16) nperson_per_frame = np.zeros([total_frames], dtype=np.int16) for frame_ind, kp in zip(frame_inds, keypoint): person_ind = nperson_per_frame[frame_ind] new_kp[person_ind, frame_ind] = kp[:, :2] new_kpscore[person_ind, frame_ind] = kp[:, 2] nperson_per_frame[frame_ind] += 1 if num_person > self.max_person: for i in range(total_frames): nperson = nperson_per_frame[i] val = new_kpscore[:nperson, i] score_sum = val.sum(-1) inds = sorted(range(nperson), key=lambda x: -score_sum[x]) new_kpscore[:nperson, i] = new_kpscore[inds, i] new_kp[:nperson, i] = new_kp[inds, i] num_person = self.max_person results['num_person'] = num_person results['keypoint'] = new_kp[:num_person] results['keypoint_score'] = new_kpscore[:num_person] return results
Perform the pose decoding. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
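The core of the pose decoding above is re-packing a flat list of per-frame detections into fixed arrays indexed by (person, frame). A compact, self-contained sketch of that re-packing with toy data (3 detections over 2 frames, 2 joints; all values invented):

import numpy as np

total_frames, num_joints, num_person = 2, 2, 2
frame_inds = np.array([0, 0, 1])                 # two people in frame 0, one in frame 1
keypoint = np.random.rand(3, num_joints, 3)      # (x, y, score) per joint, toy values

new_kp = np.zeros([num_person, total_frames, num_joints, 2], dtype=np.float16)
new_kpscore = np.zeros([num_person, total_frames, num_joints], dtype=np.float16)
nperson_per_frame = np.zeros([total_frames], dtype=np.int16)

for frame_ind, kp in zip(frame_inds, keypoint):
    person_ind = nperson_per_frame[frame_ind]
    new_kp[person_ind, frame_ind] = kp[:, :2]
    new_kpscore[person_ind, frame_ind] = kp[:, 2]
    nperson_per_frame[frame_ind] += 1

print(nperson_per_frame)   # [2 1]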
def generate_a_heatmap(self, arr: np.ndarray, centers: np.ndarray,
                       max_values: np.ndarray) -> None:
    """Generate pseudo heatmap for one keypoint in one frame.

    Args:
        arr (np.ndarray): The array to store the generated heatmaps.
            Shape: img_h * img_w.
        centers (np.ndarray): The coordinates of corresponding keypoints
            (of multiple persons). Shape: M * 2.
        max_values (np.ndarray): The max values of each keypoint. Shape: M.
    """
    sigma = self.sigma
    img_h, img_w = arr.shape

    for center, max_value in zip(centers, max_values):
        if max_value < self.eps:
            continue

        mu_x, mu_y = center[0], center[1]
        st_x = max(int(mu_x - 3 * sigma), 0)
        ed_x = min(int(mu_x + 3 * sigma) + 1, img_w)
        st_y = max(int(mu_y - 3 * sigma), 0)
        ed_y = min(int(mu_y + 3 * sigma) + 1, img_h)
        x = np.arange(st_x, ed_x, 1, np.float32)
        y = np.arange(st_y, ed_y, 1, np.float32)

        # if the keypoint not in the heatmap coordinate system
        if not (len(x) and len(y)):
            continue
        y = y[:, None]

        patch = np.exp(-((x - mu_x)**2 + (y - mu_y)**2) / 2 / sigma**2)
        patch = patch * max_value
        arr[st_y:ed_y, st_x:ed_x] = \
            np.maximum(arr[st_y:ed_y, st_x:ed_x], patch)
Generate pseudo heatmap for one keypoint in one frame. Args: arr (np.ndarray): The array to store the generated heatmaps. Shape: img_h * img_w. centers (np.ndarray): The coordinates of corresponding keypoints (of multiple persons). Shape: M * 2. max_values (np.ndarray): The max values of each keypoint. Shape: M.
generate_a_heatmap
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
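To make the Gaussian patch construction in ``generate_a_heatmap`` concrete, here is a self-contained sketch that paints one keypoint onto a small canvas; sigma, the coordinates and the confidence value are arbitrary example numbers:

import numpy as np

img_h, img_w, sigma = 16, 16, 2.0
arr = np.zeros((img_h, img_w), dtype=np.float32)
mu_x, mu_y, max_value = 8.0, 5.0, 0.9

# Only evaluate the Gaussian inside a 3-sigma window around the keypoint.
st_x, ed_x = max(int(mu_x - 3 * sigma), 0), min(int(mu_x + 3 * sigma) + 1, img_w)
st_y, ed_y = max(int(mu_y - 3 * sigma), 0), min(int(mu_y + 3 * sigma) + 1, img_h)
x = np.arange(st_x, ed_x, 1, np.float32)
y = np.arange(st_y, ed_y, 1, np.float32)[:, None]

patch = np.exp(-((x - mu_x) ** 2 + (y - mu_y) ** 2) / 2 / sigma ** 2) * max_value
arr[st_y:ed_y, st_x:ed_x] = np.maximum(arr[st_y:ed_y, st_x:ed_x], patch)

peak = int(arr.argmax())
print(arr.max(), (peak % img_w, peak // img_w))   # 0.9 at pixel (8, 5)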
def generate_a_limb_heatmap(self, arr: np.ndarray, starts: np.ndarray,
                            ends: np.ndarray, start_values: np.ndarray,
                            end_values: np.ndarray) -> None:
    """Generate pseudo heatmap for one limb in one frame.

    Args:
        arr (np.ndarray): The array to store the generated heatmaps.
            Shape: img_h * img_w.
        starts (np.ndarray): The coordinates of one keypoint in the
            corresponding limbs. Shape: M * 2.
        ends (np.ndarray): The coordinates of the other keypoint in the
            corresponding limbs. Shape: M * 2.
        start_values (np.ndarray): The max values of one keypoint in the
            corresponding limbs. Shape: M.
        end_values (np.ndarray): The max values of the other keypoint
            in the corresponding limbs. Shape: M.
    """
    sigma = self.sigma
    img_h, img_w = arr.shape

    for start, end, start_value, end_value in zip(starts, ends,
                                                  start_values, end_values):
        value_coeff = min(start_value, end_value)
        if value_coeff < self.eps:
            continue

        min_x, max_x = min(start[0], end[0]), max(start[0], end[0])
        min_y, max_y = min(start[1], end[1]), max(start[1], end[1])

        min_x = max(int(min_x - 3 * sigma), 0)
        max_x = min(int(max_x + 3 * sigma) + 1, img_w)
        min_y = max(int(min_y - 3 * sigma), 0)
        max_y = min(int(max_y + 3 * sigma) + 1, img_h)

        x = np.arange(min_x, max_x, 1, np.float32)
        y = np.arange(min_y, max_y, 1, np.float32)

        if not (len(x) and len(y)):
            continue

        y = y[:, None]
        x_0 = np.zeros_like(x)
        y_0 = np.zeros_like(y)

        # distance to start keypoints
        d2_start = ((x - start[0])**2 + (y - start[1])**2)

        # distance to end keypoints
        d2_end = ((x - end[0])**2 + (y - end[1])**2)

        # the distance between start and end keypoints.
        d2_ab = ((start[0] - end[0])**2 + (start[1] - end[1])**2)

        if d2_ab < 1:
            self.generate_a_heatmap(arr, start[None], start_value[None])
            continue

        coeff = (d2_start - d2_end + d2_ab) / 2. / d2_ab

        a_dominate = coeff <= 0
        b_dominate = coeff >= 1
        seg_dominate = 1 - a_dominate - b_dominate

        position = np.stack([x + y_0, y + x_0], axis=-1)
        projection = start + np.stack([coeff, coeff], axis=-1) * (
            end - start)
        d2_line = position - projection
        d2_line = d2_line[:, :, 0]**2 + d2_line[:, :, 1]**2
        d2_seg = (
            a_dominate * d2_start + b_dominate * d2_end +
            seg_dominate * d2_line)

        patch = np.exp(-d2_seg / 2. / sigma**2)
        patch = patch * value_coeff

        arr[min_y:max_y, min_x:max_x] = \
            np.maximum(arr[min_y:max_y, min_x:max_x], patch)
Generate pseudo heatmap for one limb in one frame. Args: arr (np.ndarray): The array to store the generated heatmaps. Shape: img_h * img_w. starts (np.ndarray): The coordinates of one keypoint in the corresponding limbs. Shape: M * 2. ends (np.ndarray): The coordinates of the other keypoint in the corresponding limbs. Shape: M * 2. start_values (np.ndarray): The max values of one keypoint in the corresponding limbs. Shape: M. end_values (np.ndarray): The max values of the other keypoint in the corresponding limbs. Shape: M.
generate_a_limb_heatmap
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
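The limb heatmap above hinges on the squared distance from each pixel to a line segment, computed via the projection coefficient ``coeff``. A tiny numeric check of that formula for a single pixel and a horizontal limb (all values chosen by hand):

import numpy as np

start = np.array([2.0, 2.0])
end = np.array([6.0, 2.0])
p = np.array([4.0, 5.0])                           # pixel to evaluate

d2_start = np.sum((p - start) ** 2)                # 13
d2_end = np.sum((p - end) ** 2)                    # 13
d2_ab = np.sum((start - end) ** 2)                 # 16

coeff = (d2_start - d2_end + d2_ab) / 2. / d2_ab   # 0.5 -> projection lies inside the segment
projection = start + coeff * (end - start)         # [4., 2.]
d2_line = np.sum((p - projection) ** 2)            # 9.0 == (5 - 2)^2
print(coeff, projection, d2_line)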
def generate_heatmap(self, arr: np.ndarray, kps: np.ndarray,
                     max_values: np.ndarray) -> None:
    """Generate pseudo heatmap for all keypoints and limbs in one frame
    (if needed).

    Args:
        arr (np.ndarray): The array to store the generated heatmaps.
            Shape: V * img_h * img_w.
        kps (np.ndarray): The coordinates of keypoints in this frame.
            Shape: M * V * 2.
        max_values (np.ndarray): The confidence score of each keypoint.
            Shape: M * V.
    """
    if self.with_kp:
        num_kp = kps.shape[1]
        for i in range(num_kp):
            self.generate_a_heatmap(arr[i], kps[:, i], max_values[:, i])

    if self.with_limb:
        for i, limb in enumerate(self.skeletons):
            start_idx, end_idx = limb
            starts = kps[:, start_idx]
            ends = kps[:, end_idx]

            start_values = max_values[:, start_idx]
            end_values = max_values[:, end_idx]
            self.generate_a_limb_heatmap(arr[i], starts, ends,
                                         start_values, end_values)
Generate pseudo heatmap for all keypoints and limbs in one frame (if needed). Args: arr (np.ndarray): The array to store the generated heatmaps. Shape: V * img_h * img_w. kps (np.ndarray): The coordinates of keypoints in this frame. Shape: M * V * 2. max_values (np.ndarray): The confidence score of each keypoint. Shape: M * V.
generate_heatmap
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
def gen_an_aug(self, results: Dict) -> np.ndarray:
    """Generate pseudo heatmaps for all frames.

    Args:
        results (dict): The dictionary that contains all info of a sample.

    Returns:
        np.ndarray: The generated pseudo heatmaps.
    """
    all_kps = results['keypoint'].astype(np.float32)
    kp_shape = all_kps.shape

    if 'keypoint_score' in results:
        all_kpscores = results['keypoint_score']
    else:
        all_kpscores = np.ones(kp_shape[:-1], dtype=np.float32)

    img_h, img_w = results['img_shape']

    # scale img_h, img_w and kps
    img_h = int(img_h * self.scaling + 0.5)
    img_w = int(img_w * self.scaling + 0.5)
    all_kps[..., :2] *= self.scaling

    num_frame = kp_shape[1]
    num_c = 0
    if self.with_kp:
        num_c += all_kps.shape[2]
    if self.with_limb:
        num_c += len(self.skeletons)
    ret = np.zeros([num_frame, num_c, img_h, img_w], dtype=np.float32)

    for i in range(num_frame):
        # M, V, C
        kps = all_kps[:, i]
        # M, C
        kpscores = all_kpscores[:, i] if self.use_score else \
            np.ones_like(all_kpscores[:, i])

        self.generate_heatmap(ret[i], kps, kpscores)
    return ret
Generate pseudo heatmaps for all frames. Args: results (dict): The dictionary that contains all info of a sample. Returns: np.ndarray: The generated pseudo heatmaps.
gen_an_aug
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
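The output shape in ``gen_an_aug`` follows directly from the configuration: one channel per keypoint when keypoint heatmaps are enabled, plus one per limb when limb heatmaps are enabled. A short sketch of that bookkeeping with placeholder numbers (17 keypoints and 19 limbs are only an example layout, not a claim about any specific skeleton):

import numpy as np

num_frame, img_h, img_w = 48, 64, 64
num_keypoints, num_limbs = 17, 19
with_kp, with_limb = True, False

num_c = (num_keypoints if with_kp else 0) + (num_limbs if with_limb else 0)
ret = np.zeros([num_frame, num_c, img_h, img_w], dtype=np.float32)
print(ret.shape)   # (48, 17, 64, 64)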
def transform(self, results: Dict) -> Dict: """Generate pseudo heatmaps based on joint coordinates and confidence. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ heatmap = self.gen_an_aug(results) key = 'heatmap_imgs' if 'imgs' in results else 'imgs' if self.double: indices = np.arange(heatmap.shape[1], dtype=np.int64) left, right = (self.left_kp, self.right_kp) if self.with_kp else ( self.left_limb, self.right_limb) for l, r in zip(left, right): # noqa: E741 indices[l] = r indices[r] = l heatmap_flip = heatmap[..., ::-1][:, indices] heatmap = np.concatenate([heatmap, heatmap_flip]) results[key] = heatmap return results
Generate pseudo heatmaps based on joint coordinates and confidence. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
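The ``double`` branch above mirrors the heatmap horizontally and swaps the channels of left/right body parts so the flipped copy stays semantically consistent. A minimal sketch of the index swap, using a hypothetical 5-channel layout in which channels 1/2 and 3/4 are left/right pairs:

import numpy as np

num_channels = 5
left_kp, right_kp = (1, 3), (2, 4)        # hypothetical left/right channel pairs

indices = np.arange(num_channels, dtype=np.int64)
for l, r in zip(left_kp, right_kp):
    indices[l] = r
    indices[r] = l
print(indices)   # [0 2 1 4 3]

heatmap = np.random.rand(8, num_channels, 16, 16).astype(np.float32)
heatmap_flip = heatmap[..., ::-1][:, indices]    # mirror width, then swap paired channels
doubled = np.concatenate([heatmap, heatmap_flip])
print(doubled.shape)   # (16, 5, 16, 16)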
def transform(self, results: Dict) -> Dict: """Convert the coordinates of keypoints to make it more compact. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ img_shape = results['img_shape'] h, w = img_shape kp = results['keypoint'] # Make NaN zero kp[np.isnan(kp)] = 0. kp_x = kp[..., 0] kp_y = kp[..., 1] min_x = np.min(kp_x[kp_x != 0], initial=np.Inf) min_y = np.min(kp_y[kp_y != 0], initial=np.Inf) max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf) max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf) # The compact area is too small if max_x - min_x < self.threshold or max_y - min_y < self.threshold: return results center = ((max_x + min_x) / 2, (max_y + min_y) / 2) half_width = (max_x - min_x) / 2 * (1 + self.padding) half_height = (max_y - min_y) / 2 * (1 + self.padding) if self.hw_ratio is not None: half_height = max(self.hw_ratio[0] * half_width, half_height) half_width = max(1 / self.hw_ratio[1] * half_height, half_width) min_x, max_x = center[0] - half_width, center[0] + half_width min_y, max_y = center[1] - half_height, center[1] + half_height # hot update if not self.allow_imgpad: min_x, min_y = int(max(0, min_x)), int(max(0, min_y)) max_x, max_y = int(min(w, max_x)), int(min(h, max_y)) else: min_x, min_y = int(min_x), int(min_y) max_x, max_y = int(max_x), int(max_y) kp_x[kp_x != 0] -= min_x kp_y[kp_y != 0] -= min_y new_shape = (max_y - min_y, max_x - min_x) results['img_shape'] = new_shape # the order is x, y, w, h (in [0, 1]), a tuple crop_quadruple = results.get('crop_quadruple', (0., 0., 1., 1.)) new_crop_quadruple = (min_x / w, min_y / h, (max_x - min_x) / w, (max_y - min_y) / h) crop_quadruple = _combine_quadruple(crop_quadruple, new_crop_quadruple) results['crop_quadruple'] = crop_quadruple return results
Convert the coordinates of keypoints to make it more compact. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline.
transform
python
open-mmlab/mmaction2
mmaction/datasets/transforms/pose_transforms.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
Apache-2.0
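The ``crop_quadruple`` bookkeeping above records the compact crop as a normalized (x, y, w, h) box relative to the original image, composed with any crop already applied earlier in the pipeline. A hedged sketch of that composition; the ``combine`` function below is an illustrative stand-in for the library's ``_combine_quadruple`` helper, not a copy of it:

def combine(old, new):
    # Express `new` (given relative to the current crop) in the original
    # image's normalized coordinates: offset by the old origin, scale by
    # the old width/height.
    return (old[0] + old[2] * new[0], old[1] + old[3] * new[1],
            old[2] * new[2], old[3] * new[3])

old_crop = (0.0, 0.0, 1.0, 1.0)          # no crop applied yet
new_crop = (0.25, 0.1, 0.5, 0.8)         # compact box found by the transform
print(combine(old_crop, new_crop))       # (0.25, 0.1, 0.5, 0.8)

# A second, nested crop keeps composing in the same coordinate frame.
print(combine((0.25, 0.1, 0.5, 0.8), (0.5, 0.5, 0.5, 0.5)))  # (0.5, 0.5, 0.25, 0.4)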