# NOTE: stripped non-Python extraction residue (file-size header, git-blame
# hash gutter, and line-number gutter) that would be a syntax error if run.
import mmpose
import os
import glob
from mmpose.apis import MMPoseInferencer
import gradio as gr
import numpy as np
import cv2
print("[INFO]: Imported modules!")
# Build one inferencer per supported task at import time. The "[INFO]" print
# below suggests model weights are downloaded during construction — so this
# heavy work happens once at startup, not per request (TODO confirm).
human = MMPoseInferencer("human")
hand = MMPoseInferencer("hand")
human3d = MMPoseInferencer(pose3d="human3d")
# Maps the UI checkbox labels used by the gradio CheckboxGroups to the
# inferencer that serves them; keys must match `available_methods` in run().
inferencers = {"Estimate human 2d poses":human, "Estimate human 2d hand poses":hand, "Estimate human 3d poses":human3d}
# inferencer = MMPoseInferencer('human')
# inferencer = MMPoseInferencer(pose3d="human3d")
# Candidate 3D pose-lift checkpoints:
# https://github.com/open-mmlab/mmpose/tree/dev-1.x/configs/body_3d_keypoint/pose_lift
# motionbert_ft_h36m-d80af323_20230531.pth
# simple3Dbaseline_h36m-f0ad73a4_20210419.pth
# videopose_h36m_243frames_fullconv_supervised_cpn_ft-88f5abbb_20210527.pth
# videopose_h36m_81frames_fullconv_supervised-1f2d1104_20210527.pth
# videopose_h36m_27frames_fullconv_supervised-fe8fbba9_20210527.pth
# videopose_h36m_1frame_fullconv_supervised_cpn_ft-5c3afaed_20210527.pth
# https://github.com/open-mmlab/mmpose/blob/main/mmpose/apis/inferencers/pose3d_inferencer.py
print("[INFO]: Downloaded models!")
def poses(photo, check):
    """Run pose estimation on a video and return the rendered visualization.

    Args:
        photo: Path to the input video supplied by the gradio Video component.
        check: List of method labels selected in the CheckboxGroup. Only the
            first selection is used (keys of the module-level ``inferencers``).

    Returns:
        Path to the most recently written ``.mp4`` visualization in the
        working directory.

    Raises:
        ValueError: If no method was selected.
        FileNotFoundError: If the inferencer produced no ``.mp4`` output.
    """
    if not check:
        # Previously this crashed with a bare IndexError on check[0].
        raise ValueError("Select at least one estimation method.")

    # Only the first selected method is applied to the video.
    print(check)
    inferencer = inferencers[check[0]]

    print("[INFO]: Running inference!")
    result_generator = inferencer(photo,
                                  vis_out_dir=".",
                                  return_vis=True,
                                  thickness=2,
                                  rebase_keypoint_height=True)
    # Drain the generator so the visualization video is fully written to disk.
    results = list(result_generator)
    print(type(results))

    # The inferencer writes the visualization as an .mp4 in the working dir.
    # glob order is unspecified and stale files from earlier runs may remain,
    # so return the most recently modified output instead of output_files[0].
    output_files = glob.glob(os.path.join(".", "*.mp4"))
    print(output_files)
    print(os.listdir())
    if not output_files:
        raise FileNotFoundError("No .mp4 visualization was produced.")
    return max(output_files, key=os.path.getmtime)
# # specify detection model by alias
# # the available aliases include 'human', 'hand', 'face', 'animal',
# # as well as any additional aliases defined in mmdet
# inferencer = MMPoseInferencer(
# # suppose the pose estimator is trained on custom dataset
# pose2d='custom_human_pose_estimator.py',
# pose2d_weights='custom_human_pose_estimator.pth',
# det_model='human'
# )
def run():
    """Build and launch the gradio UI: a file-upload tab and a webcam tab."""
    # https://github.com/open-mmlab/mmpose/blob/main/docs/en/user_guides/inference.md
    available_methods = ["Estimate human 2d poses", "Estimate human 2d hand poses", "Estimate human 3d poses"]

    def method_selector():
        # Each Interface needs its own component instance, so build one per tab.
        return gr.CheckboxGroup(choices = available_methods, label="Methods", type="value", info="Select the model(s) you want")

    file_tab = gr.Interface(
        poses,
        inputs = [gr.Video(source="upload"), method_selector()],
        outputs = gr.PlayableVideo(),
        allow_flagging=False
    )

    webcam_tab = gr.Interface(
        fn=poses,
        inputs= [gr.Video(source="webcam"), method_selector()],
        outputs = gr.PlayableVideo(), #file_types=['.mp4'] #gr.Video(),
        title = 'Pose estimation',
        description = 'Pose estimation on video',
        allow_flagging=False
    )

    # Bind to all interfaces so the app is reachable inside a container.
    gr.TabbedInterface(
        interface_list=[file_tab, webcam_tab],
        tab_names=["From a File", "From your Webcam"]
    ).launch(server_name="0.0.0.0", server_port=7860)
# Script entry point: start the gradio app when executed directly.
if __name__ == "__main__":
    run()