gr state on human3d
Builds the human3d MMPoseInferencer once at module load instead of inside pose3d(), and wraps the inference result generator in gr.State.

main_noweb.py  +9 -6
@@ -32,6 +32,10 @@ print("[INFO]: Imported modules!")
 human = MMPoseInferencer("simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192") # simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192 dekr_hrnet-w32_8xb10-140e_coco-512x512
 hand = MMPoseInferencer("hand")
 #model3d = gr.State()
+human3d = MMPoseInferencer(device=device,
+                           pose3d="human3d",
+                           scope="mmpose")
+
 
 #"https://github.com/open-mmlab/mmpose/blob/main/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py",
 #"https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth") # pose3d="human3d"
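For reference, a minimal sketch of the pattern this hunk introduces: the 3D inferencer is built once at module import and reused on every request, so the human3d checkpoint is downloaded and initialised only once. This assumes MMPose 1.x (MMPoseInferencer from mmpose.apis); the run_pose3d helper, its defaults, and the output directory are illustrative and not part of main_noweb.py.

# Minimal sketch, assuming MMPose 1.x; run_pose3d and its defaults are
# illustrative and not part of main_noweb.py.
from mmpose.apis import MMPoseInferencer

device = "cuda:0"  # or "cpu"

# Built once at import time, so the human3d weights are downloaded and the
# model is initialised only on the first load of the module.
human3d = MMPoseInferencer(device=device, pose3d="human3d", scope="mmpose")

def run_pose3d(video_path, kpt_threshold=0.3, out_dir="vis_results"):
    # The call returns a generator; it has to be consumed for the
    # visualisation and prediction files to be written to out_dir.
    result_generator = human3d(video_path,
                               vis_out_dir=out_dir,
                               pred_out_dir=out_dir,
                               kpt_thr=kpt_threshold)
    return [result for result in result_generator]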
@@ -90,15 +94,13 @@ def check_extension(video):
     return video
 
 
-def pose3d(video, kpt_threshold):
+def pose3d(video, kpt_threshold, ):
+
     video = check_extension(video)
     print(device)
 
     #human3d = MMPoseInferencer(device=device, pose3d="human3d", scope="mmpose")#"pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m")
 
-    human3d = MMPoseInferencer(device=device,
-                               pose3d="human3d",
-                               scope="mmpose")
     print("HUMAN 3d downloaded!!")
     #gr.State(value=)
 
@@ -109,14 +111,14 @@ def pose3d(video, kpt_threshold):
     os.makedirs(add_dir)
     print(check_fps(video))
     #video = human3d.preprocess(video, batch_size=8)
-    result_generator = human3d(video,
+    result_generator = gr.State(human3d(video,
                                vis_out_dir = add_dir,
                                radius = 8,
                                thickness = 5,
                                rebase_keypoint_height=True,
                                kpt_thr=kpt_threshold,
                                pred_out_dir = add_dir
-                               )
+                               ))
     print("INFERENCE DONW")
     result = [result for result in result_generator] #next(result_generator)
 
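For context on the commit title: gr.State is Gradio's per-session state holder, and it is typically created inside the gr.Blocks context and passed through an event's inputs/outputs so each session keeps its own copy. A minimal sketch of that conventional pattern follows; the component and function names are illustrative and assume the Gradio Blocks API, not code from main_noweb.py.

# Minimal sketch of the usual gr.State pattern; names are illustrative.
import gradio as gr

def remember(value, history):
    # Append the submitted value to this session's history and return both
    # the display output and the updated state.
    history = history + [value]
    return history, history

with gr.Blocks() as demo:
    history_state = gr.State([])   # per-session state, declared inside Blocks
    value_box = gr.Textbox(label="Value")
    history_view = gr.JSON(label="History")
    value_box.submit(remember,
                     inputs=[value_box, history_state],
                     outputs=[history_view, history_state])

demo.launch()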
@@ -195,6 +197,7 @@ def pose2dhand(video, kpt_threshold):
 
 def UI():
     block = gr.Blocks()
+
     with block:
         with gr.Column():
             with gr.Tab("Upload video"):