Commit: JSON
main_noweb.py (+10, -7)
@@ -20,6 +20,7 @@ import json
 # Image manipulation
 import numpy as np
 import cv2
+import ffmpeg
 
 print(torch.__version__)
 # Use GPU if available
@@ -38,6 +39,12 @@ track_model = YOLO('yolov8n.pt') # Load an official Detect model
 
 print("[INFO]: Downloaded models!")
 
+def check_fps(video):
+    cap = cv2.VideoCapture(video)
+    nframes = cap.get(cv2.CAP_PROP_FRAME_COUNT)
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    return nframes, fps
+
 def check_extension(video):
     split_tup = os.path.splitext(video)
 
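Note: check_fps reads the frame count and frame rate straight from OpenCV's capture metadata. The sketch below is a defensive variant, not the committed code: the isOpened() guard, the release() call, and the None return for unreadable files are assumptions added for illustration.

    def check_fps_safe(video):
        # Open the video and bail out early if OpenCV cannot read it
        cap = cv2.VideoCapture(video)
        if not cap.isOpened():
            return None
        nframes = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        fps = cap.get(cv2.CAP_PROP_FPS)
        cap.release()  # free the capture handle
        return nframes, fps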
@@ -135,14 +142,13 @@ def pose3d(video):
 
 def pose2d(video, kpt_threshold):
     video = check_extension(video)
-    print(device)
 
     # Define new unique folder
     add_dir = str(uuid.uuid4())
     vis_out_dir = os.path.join("/".join(video.split("/")[:-1]), add_dir)
 
     os.makedirs(add_dir)
-
+    print(check_fps(video))
     result_generator = human(video,
                              vis_out_dir = add_dir,
                              #return_vis=True,
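Note: the added print(check_fps(video)) logs the raw (nframes, fps) tuple for debugging. A slightly more descriptive form (hypothetical, not in the commit) would unpack the tuple:

    nframes, fps = check_fps(video)
    print(f"[INFO]: {int(nframes)} frames at {fps:.2f} FPS")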
@@ -159,10 +165,7 @@ def pose2d(video, kpt_threshold):
     out_file = glob.glob(os.path.join(add_dir, "*.mp4")) #+ glob.glob(os.path.join(vis_out_dir, "*.webm"))
     kpoints = glob.glob(os.path.join(add_dir, "*.json"))
 
-
-    data = json.load(f)
-
-    return "".join(out_file), data # "".join(kpoints)
+    return "".join(out_file), "".join(kpoints)
 
 
 def pose2dhand(video, kpt_threshold):
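Note: pose2d no longer parses the keypoint JSON itself; it now returns the path of the matching *.json file as a string instead of calling json.load on it. A caller that needs the parsed keypoints can load the file from the returned path, roughly as below; the file name and threshold value are placeholders.

    out_video, kpoint_path = pose2d("example.mp4", 0.3)
    with open(kpoint_path) as f:
        keypoints = json.load(f)  # keypoint data written alongside the rendered video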
@@ -211,7 +214,7 @@ with block:
         video_output2 = gr.PlayableVideo(height=512, label = "Estimate human 3d poses", show_label=True)
         video_output3 = gr.PlayableVideo(height=512, label = "Estimate human hand poses", show_label=True)
         video_output4 = gr.Video(height=512, label = "Detection and tracking", show_label=True, format="mp4")
-        jsonoutput = gr.
+        jsonoutput = gr.JSON()
 
 
     with gr.Tab("General information"):
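Note: gr.JSON() is Gradio's component for displaying a Python dict or list as formatted JSON. This hunk only declares the component; how it is connected to pose2d is not part of the diff. A hypothetical hookup is sketched below (run_button, video_input, threshold_slider, and video_output1 are assumed names, not taken from this file), and if pose2d returns a file path rather than JSON content, that path would likely need to be loaded into a dict before being sent to the component.

    run_button.click(fn=pose2d,
                     inputs=[video_input, threshold_slider],
                     outputs=[video_output1, jsonoutput])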