main.py
CHANGED
@@ -26,6 +26,7 @@ human3d = MMPoseInferencer(pose3d="human3d")
 # Defining inferencer models to lookup in function
 inferencers = {"Estimate human 2d poses":human, "Estimate human 2d hand poses":hand, "Estimate human 3d poses":human3d}
 
+#track_model = YOLO('yolov8n.pt') # Load an official Detect model
 
 print("[INFO]: Downloaded models!")
 
@@ -37,9 +38,42 @@ def tracking(video, model, boxes=True):
     print("[INFO] Starting tracking!")
     # https://docs.ultralytics.com/modes/predict/
     annotated_frame = model(video, device="cuda", boxes=boxes)
-
+
     return annotated_frame
 
+
+
+#def show_tracking(video_content):
+
+    video = cv2.VideoCapture(video_content)
+
+    # Track
+    video_track = tracking(video_content, track_model.track)
+
+    # Prepare to save video
+    output_file_track = os.path.join("track.mp4")
+
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v") # Codec for MP4 video
+    fps = video.get(cv2.CAP_PROP_FPS)
+    height, width, _ = video_track[0][0].orig_img.shape
+    size = (width,height)
+
+    out_track = cv2.VideoWriter(output_file_track, fourcc, fps, size)
+
+    # Go through frames and write them
+    for frame_track in video_track:
+        result_track = frame_track[0].plot() # plot a BGR numpy array of predictions
+
+        #print(type(result_pose)) numpy ndarray
+        out_track.write(result_track)
+
+    out_track.release()
+
+    video.release()
+    cv2.destroyAllWindows() # Closing window
+
+    return output_file_track
+
 def poses(photo, check):
     # Selecting the specific inferencer
     out_files=[]
@@ -73,7 +107,7 @@ def run():
     webcam = gr.Interface(
         fn=poses,
         inputs= [gr.Video(source="webcam", height=412), check_web],
-        outputs = [gr.PlayableVideo()
+        outputs = [gr.PlayableVideo(),gr.PlayableVideo(),gr.PlayableVideo()], #file_types=['.mp4'] #gr.Video(),
         title = 'Pose estimation',
         description = 'Pose estimation on video',
         allow_flagging=False
@@ -82,7 +116,7 @@ def run():
     file = gr.Interface(
        poses,
        inputs = [gr.Video(source="upload", height=412), check_file],
-       outputs = [gr.PlayableVideo()
+       outputs = [gr.PlayableVideo(),gr.PlayableVideo(),gr.PlayableVideo()],
        allow_flagging=False
    )
 
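The commented-out show_tracking block added above runs the YOLO tracker over a video and writes the plotted frames to an MP4 with OpenCV. As a reference for that technique, here is a minimal, self-contained sketch assuming the ultralytics and opencv-python packages; the function name, output path, and use of yolov8n.pt are illustrative, not the repository's exact code.

# Sketch: run YOLO tracking over a video and save the annotated frames as MP4.
# Assumes `pip install ultralytics opencv-python`; "yolov8n.pt" matches the model
# mentioned in the diff, everything else here is illustrative.
import cv2
from ultralytics import YOLO

def write_tracked_video(video_path: str, output_path: str = "track.mp4") -> str:
    model = YOLO("yolov8n.pt")                      # detect model, auto-downloaded
    results = model.track(video_path, stream=True)  # generator of per-frame Results

    # Read fps and frame size from the source video to configure the writer
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap.release()

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")        # MP4 codec
    writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    for result in results:
        writer.write(result.plot())                 # plot() returns a BGR numpy array
    writer.release()
    return output_path

Using stream=True keeps memory bounded on long videos, since results are yielded frame by frame instead of being collected into a list the way the diff's video_track variable is.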
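The Gradio changes in the last two hunks replace the single output component with three gr.PlayableVideo components in both the webcam and upload interfaces, so the poses function is expected to return three playable video paths, one per selectable task. Below is a minimal sketch of that interface shape, written against the Gradio 3.x style API the diff already uses (gr.Video(source=...), allow_flagging); run_models, the checkbox input, and the returned file names are placeholders, not the app's real handler.

# Sketch: a Gradio Interface whose function returns three video file paths,
# one per PlayableVideo output. run_models and its return values are placeholders.
import gradio as gr

def run_models(video, checked_tasks):
    # ... run the selected inferencers on `video` and write one MP4 per task ...
    return "pose2d.mp4", "hand2d.mp4", "pose3d.mp4"

demo = gr.Interface(
    fn=run_models,
    inputs=[gr.Video(source="upload", height=412),
            gr.CheckboxGroup(choices=["Estimate human 2d poses",
                                      "Estimate human 2d hand poses",
                                      "Estimate human 3d poses"])],
    outputs=[gr.PlayableVideo(), gr.PlayableVideo(), gr.PlayableVideo()],
    title="Pose estimation",
    description="Pose estimation on video",
    allow_flagging="never",  # the diff passes False; "never" is the documented string form
)

if __name__ == "__main__":
    demo.launch()

Note that with a fixed-length outputs list, the handler must always return three values; if a task is unchecked, returning None for that slot leaves the corresponding player empty.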