"""Gradio demo for 2D/3D human and hand pose estimation with MMPose.

Builds three MMPose inferencers at import time (checkpoints are
downloaded on first construction) and serves a tabbed Gradio UI
(file upload / webcam) that runs a selected inferencer over a video
and returns the rendered visualization.

Reference:
    https://github.com/open-mmlab/mmpose/blob/main/docs/en/user_guides/inference.md
    https://github.com/open-mmlab/mmpose/tree/dev-1.x/configs/body_3d_keypoint/pose_lift
"""

import glob
import os

import cv2  # noqa: F401  (kept: original file imports it)
import gradio as gr
import mmpose  # noqa: F401  (kept: original file imports it)
import numpy as np  # noqa: F401  (kept: original file imports it)
from mmpose.apis import MMPoseInferencer

print("[INFO]: Imported modules!")

# Constructing an inferencer downloads its model weights if missing.
human = MMPoseInferencer("human")
hand = MMPoseInferencer("hand")
human3d = MMPoseInferencer(pose3d="human3d")

# Maps the UI checkbox label to the inferencer implementing it.
inferencers = {
    "Estimate human 2d poses": human,
    "Estimate human 2d hand poses": hand,
    "Estimate human 3d poses": human3d,
}

print("[INFO]: Downloaded models!")


def poses(photo, check):
    """Run pose estimation on a video and return the output video path.

    Args:
        photo: Path to the input video (supplied by ``gr.Video``).
        check: List of method labels selected in the CheckboxGroup;
            only the first selection is used.

    Returns:
        Path to the most recently written ``.mp4`` visualization in the
        current directory.

    Raises:
        ValueError: If ``check`` is empty (no method selected).
        FileNotFoundError: If inference produced no ``.mp4`` output.
    """
    if not check:
        raise ValueError("Select at least one estimation method.")
    inferencer = inferencers[check[0]]

    print("[INFO]: Running inference!")
    result_generator = inferencer(
        photo,
        vis_out_dir=".",
        return_vis=True,
        thickness=2,
        rebase_keypoint_height=True,
    )
    # The generator is lazy: exhaust it so every frame is rendered and
    # flushed to disk before we go looking for the output file.
    for result in result_generator:
        print("[INFO] Result type:", type(result))

    # glob order is filesystem-dependent, so returning output_files[0]
    # could hand back a stale video; pick the newest by mtime instead.
    output_files = glob.glob(os.path.join(".", "*.mp4"))
    print(output_files)
    if not output_files:
        raise FileNotFoundError("Inference produced no .mp4 visualization.")
    return max(output_files, key=os.path.getmtime)


def run():
    """Build the tabbed Gradio UI and serve it on 0.0.0.0:7860 (blocking)."""
    # Derive the choices from the dict so UI and dispatch cannot drift apart.
    available_methods = list(inferencers)

    # Each gr.Interface needs its own component instance.
    check_web = gr.CheckboxGroup(
        choices=available_methods,
        label="Methods",
        type="value",
        info="Select the model(s) you want",
    )
    check_file = gr.CheckboxGroup(
        choices=available_methods,
        label="Methods",
        type="value",
        info="Select the model(s) you want",
    )

    webcam = gr.Interface(
        fn=poses,
        inputs=[gr.Video(source="webcam"), check_web],
        outputs=gr.PlayableVideo(),
        title="Pose estimation",
        description="Pose estimation on video",
        allow_flagging="never",  # bool False is deprecated in Gradio
    )
    file = gr.Interface(
        fn=poses,
        inputs=[gr.Video(source="upload"), check_file],
        outputs=gr.PlayableVideo(),
        allow_flagging="never",
    )

    demo = gr.TabbedInterface(
        interface_list=[file, webcam],
        tab_names=["From a File", "From your Webcam"],
    )
    demo.launch(server_name="0.0.0.0", server_port=7860)


if __name__ == "__main__":
    run()