# Pose inference
from mmpose.apis import MMPoseInferencer
import torch
# Gradio
import gradio as gr
import moviepy.editor as moviepy
# System and files
import os
import glob
import uuid
import json
# Image manipulation
import numpy as np
import cv2
print(torch.__version__)
# Use GPU if available
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
# Log GPU status for debugging (a no-op on machines without the NVIDIA CLI)
os.system("nvidia-smi")
print("[INFO]: Imported modules!")
# Alternative 2D configs: "simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192",
# "dekr_hrnet-w32_8xb10-140e_coco-512x512"
human = MMPoseInferencer("human", device=device)
hand = MMPoseInferencer("hand", device=device)
# The 3D inferencer is initialised per request inside pose3d() below.
# Config references:
# https://github.com/open-mmlab/mmpose/blob/main/configs/body_3d_keypoint/pose_lift/h36m/pose-lift_simplebaseline3d_8xb64-200e_h36m.py
# https://download.openmmlab.com/mmpose/body3d/simple_baseline/simple3Dbaseline_h36m-f0ad73a4_20210419.pth
# https://github.com/open-mmlab/mmpose/tree/main/configs/hand_2d_keypoint/topdown_regression
print("[INFO]: Downloaded models!")
def check_extension(video):
    clip = moviepy.VideoFileClip(video)
    if clip.duration > 10:
        raise gr.Error("Please provide or record a video shorter than 10 seconds...")
    # Extract the file name and extension
    split_tup = os.path.splitext(video)
    file_name = split_tup[0]
    file_extension = split_tup[1]
    # Compare case-insensitively so e.g. ".MP4" is not needlessly re-encoded
    if file_extension.lower() != ".mp4":
        print("Converting to mp4")
        video = file_name + ".mp4"
        clip.write_videofile(video, threads=8)
    return video
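# pose3d: 3D human pose estimation on a video; returns the rendered visualisation
# video and a .json file with per-frame keypoint predictions.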
def pose3d(video, kpt_threshold):
    video = check_extension(video)
    print(device)
    # Initialise the 3D model per request; alternative config:
    # "pose-lift_videopose3d-243frm-supv-cpn-ft_8xb128-200e_h36m"
    human3d = MMPoseInferencer(device=device, pose3d="human3d", scope="mmpose")
    # Define a new unique folder for this request's outputs
    add_dir = str(uuid.uuid4())
    os.makedirs(add_dir)
    result_generator = human3d(video,
                               vis_out_dir=add_dir,
                               radius=8,
                               thickness=5,
                               rebase_keypoint_height=True,
                               kpt_thr=kpt_threshold,
                               pred_out_dir=add_dir)
    # Consume the generator so the visualisation and prediction files are written
    results = [result for result in result_generator]
    # glob returns a list; joining collapses it to a single path (one output file is expected)
    out_file = glob.glob(os.path.join(add_dir, "*.mp4"))
    kpoints = glob.glob(os.path.join(add_dir, "*.json"))
    print(kpoints)
    return "".join(out_file), "".join(kpoints)
def pose2d(video, kpt_threshold):
    video = check_extension(video)
    # Define a new unique folder for this request's outputs
    add_dir = str(uuid.uuid4())
    os.makedirs(add_dir)
    result_generator = human(video,
                             vis_out_dir=add_dir,
                             radius=5,
                             thickness=4,
                             rebase_keypoint_height=True,
                             kpt_thr=kpt_threshold,
                             pred_out_dir=add_dir)
    # Consume the generator so the visualisation and prediction files are written
    results = [result for result in result_generator]
    out_file = glob.glob(os.path.join(add_dir, "*.mp4"))
    kpoints = glob.glob(os.path.join(add_dir, "*.json"))
    print(kpoints)
    print(out_file)
    return "".join(out_file), "".join(kpoints)
def pose2dhand(video, kpt_threshold):
    video = check_extension(video)
    print(device)
    # Define a new unique folder for this request's outputs
    add_dir = str(uuid.uuid4())
    os.makedirs(add_dir)
    result_generator = hand(video,
                            vis_out_dir=add_dir,
                            thickness=4,
                            radius=5,
                            rebase_keypoint_height=True,
                            kpt_thr=kpt_threshold,
                            pred_out_dir=add_dir)
    # Consume the generator so the visualisation and prediction files are written
    results = [result for result in result_generator]
    out_file = glob.glob(os.path.join(add_dir, "*.mp4"))
    kpoints = glob.glob(os.path.join(add_dir, "*.json"))
    return "".join(out_file), "".join(kpoints)
code_example = """
# Import the packages we need
import json
import numpy as np

# First we load the keypoint data; file_path is the path to the downloaded .json file
file_path = "keypoints.json"
with open(file_path, 'r') as json_file:
    data = json.load(json_file)

# Then we define a function for calculating the angle at a joint
def calculate_angle(a, b, c):
    a = np.array(a)  # First point
    b = np.array(b)  # Middle point
    c = np.array(c)  # End point
    radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
    angle = np.abs(radians * 180.0 / np.pi)
    if angle > 180.0:
        angle = 360 - angle
    return angle

# COCO keypoint indices
shoulder_index = 6  # right shoulder
elbow_index = 8     # right elbow
wrist_index = 10    # right wrist

# We select the first detected person in the first frame (index zero) as an example.
# To calculate the angle at the right elbow we take the two adjacent joints, which
# according to the indices above are 6 (right shoulder) and 10 (right wrist).
shoulder_point = data[0]['instances'][0]['keypoints'][shoulder_index]
elbow_point = data[0]['instances'][0]['keypoints'][elbow_index]
wrist_point = data[0]['instances'][0]['keypoints'][wrist_index]
angle = calculate_angle(shoulder_point, elbow_point, wrist_point)
print("Angle is: ", angle)
"""
venv_example = """
# Create a virtual environment
python -m venv ".bstad_env"
# Activate the environment
.bstad_env\Scripts\Activate
# Install numpy and json
pip install numpy
pip install json
# Run the code from the commandline
python \Path\To\Script.py
"""
block = gr.Blocks()
with block:
    with gr.Column():
        with gr.Tab("Capture video with webcam"):
            with gr.Column():
                with gr.Row():
                    with gr.Column():
                        with gr.Row():
                            video_input_web = gr.Video(source="webcam", include_audio=False, height=256, width=192)
                    # Slider for the keypoint confidence threshold
                    with gr.Column():
                        gr.Markdown("Drag the keypoint threshold to filter out lower probability keypoints:")
                        file_kpthr_web = gr.Slider(0, 1, value=0.3, label='Keypoint threshold')
                with gr.Row():
                    submit_pose_file_web = gr.Button("Make 2d pose estimation")
                    submit_pose3d_file_web = gr.Button("Make 3d pose estimation")
                    submit_hand_file_web = gr.Button("Make 2d hand estimation")
                with gr.Row():
                    video_output1_web = gr.PlayableVideo(label="Estimate human 2d poses", show_label=True, height=256)
                    video_output2_web = gr.PlayableVideo(label="Estimate human 3d poses", show_label=True, height=256)
                    video_output3_web = gr.PlayableVideo(label="Estimate human hand poses", show_label=True, height=256)
                gr.Markdown("Download the .json file that contains the keypoint positions for each frame in the video.")
                jsonoutput_web = gr.File(file_types=[".json"])
gr.Markdown("""There are multiple ways to interact with these keypoints.
\n The example below shows how you can calulate the angle on the elbow for example.
\n If you choose to run the code, start by installing the packages json and numpy. You can do that by running the following commands in Poweshell or another commandline/terminal.""")
gr.Code(value=venv_example,
language="python",
interactive=False,
show_label=False,
)
gr.Markdown("""\n Then copy the next code segment into your own preferred interpreter and experiment with the keypoint file.
""")
gr.Code(
value=code_example,
language="python",
interactive=False,
show_label=False,
)
gr.Markdown("""The complete overview of the keypoint indices can be seen in the tab 'General information'. """)
with gr.Tab("Upload video"):
with gr.Column():
with gr.Row():
with gr.Column():
with gr.Row():
video_input = gr.Video(source="upload", type="filepath", include_audio=False, height=256, width=192)
# Insert slider with kpt_thr
with gr.Column():
gr.Markdown("Drag the keypoint threshold to filter out lower probability keypoints:")
file_kpthr = gr.Slider(0, 1, value=0.3, label='Keypoint threshold')
with gr.Row():
submit_pose_file = gr.Button("Make 2d pose estimation")
submit_pose3d_file = gr.Button("Make 3d pose estimation")
submit_hand_file = gr.Button("Make 2d hand estimation")
with gr.Row():
video_output1 = gr.PlayableVideo(label = "Estimate human 2d poses", show_label=True, height=256)
video_output2 = gr.PlayableVideo(label = "Estimate human 3d poses", show_label=True, height=256)
video_output3 = gr.PlayableVideo(label = "Estimate human hand poses", show_label=True, height=256)
gr.Markdown("Download the .json file that contains the keypoint positions for each frame in the video.")
jsonoutput = gr.File(file_types=[".json"])
gr.Markdown("""There are multiple ways to interact with these keypoints.
\n The example below shows how you can calulate the angle on the elbow for example.
\n If you choose to run the code, start by installing the packages json and numpy. You can do that by running the following commands in Poweshell or another commandline/terminal.""")
gr.Code(value=venv_example,
language="python",
interactive=False,
show_label=False,
)
gr.Markdown("""\n Then copy the next code segment into your own preferred interpreter and experiment with the keypoint file.
""")
gr.Code(
value=code_example,
language="python",
interactive=False,
show_label=False,
)
gr.Markdown("""The complete overview of the keypoint indices can be seen in the tab 'General information'. """)
with gr.Tab("General information"):
gr.Markdown("""
\n # Information about the models
\n ## Pose models:
\n All the pose estimation models come from the library [MMpose](https://github.com/open-mmlab/mmpose). It is a library for human pose estimation that provides pre-trained models for 2D and 3D pose estimation.
\n The 2D pose model is used for estimating the 2D coordinates of human body joints from an image or a video frame. The model uses a convolutional neural network (CNN) to predict the joint locations and their confidence scores.
\n The 2D hand model is a specialized version of the 2D pose model that is designed for hand pose estimation. It uses a similar CNN architecture to the 2D pose model but is trained specifically for detecting the joints in the hand.
\n The 3D pose model is used for estimating the 3D coordinates of human body joints from an image or a video frame. The model uses a combination of 2D pose estimation and depth estimation to infer the 3D joint locations.
\n The keypoints in the 2D pose model has the following order:
\n ```
0: Nose
1: Left Eye
2: Right Eye
3: Left Ear
4: Right Ear
5: Left Shoulder
6: Right Shoulder
7: Left Elbow
8: Right Elbow
9: Left Wrist
10: Right Wrist
11: Left Hip
12: Right Hip
13: Left Knee
14: Right Knee
15: Left Ankle
16: Right Ankle ```
\n Below, you can see a visualization of the poses of the 2d, 3d and hand keypoint locations: """)
            with gr.Row():
                gr.Image("./cocoposes.png", width="160", height="220")
                gr.Image("./cocohand.png", width="160", height="200")
# From file
click1 = submit_pose_file.click(fn=pose2d,
inputs= [video_input, file_kpthr],
outputs = [video_output1, jsonoutput],
queue=True)
click2 = submit_pose3d_file.click(fn=pose3d,
inputs= [video_input, file_kpthr],
outputs = [video_output2, jsonoutput],
#batch=True,
#max_batch_size=16,
queue=True) # Sometimes it worked with queue false? But still slow
click3 = submit_hand_file.click(fn=pose2dhand,
inputs= [video_input, file_kpthr],
outputs = [video_output3, jsonoutput],
queue=True)
# From web
submit_pose_file_web.click(fn=pose2d,
inputs= [video_input_web, file_kpthr_web],
outputs = [video_output1_web, jsonoutput_web],
queue=True)
submit_pose3d_file_web.click(fn=pose3d,
inputs= [video_input_web, file_kpthr_web],
outputs = [video_output2_web, jsonoutput_web],
#batch=True,
#max_batch_size=16,
queue=True) # Sometimes it worked with queue false? But still slow
submit_hand_file_web.click(fn=pose2dhand,
inputs= [video_input_web, file_kpthr_web],
outputs = [video_output3_web, jsonoutput_web],
queue=True)
if __name__ == "__main__":
block.queue(max_size=20,
#concurrency_count=40, # When you increase the concurrency_count parameter in queue(), max_threads() in launch() is automatically increased as well.
#max_size=25, # Maximum number of requests that the queue processes
api_open = False # When creating a Gradio demo, you may want to restrict all traffic to happen through the user interface as opposed to the programmatic API that is automatically created for your Gradio demo.
).launch(
# max_threads=31,
server_name="0.0.0.0",
server_port=7860,
auth=("novouser", "bstad2023")
)
# The total concurrency = number of processors * 10.
# 4vCPU 15 GB ram 40GV VRAM = 40?