blocks
Files changed:
- Dockerfile  +1 -1
- main_blocks.py  +205 -0
Dockerfile
CHANGED
@@ -42,4 +42,4 @@ WORKDIR $HOME/app
 # Copy the current directory contents into the container at $HOME/app, setting the owner to the user
 COPY --chown=user . $HOME/app

-CMD ["python", "
+CMD ["python", "main_blocks.py"]
main_blocks.py
ADDED
@@ -0,0 +1,205 @@
# Pose inferencing
import mmpose
from mmpose.apis import MMPoseInferencer

# Ultralytics
from ultralytics import YOLO
import torch

# Gradio
import gradio as gr

# System and files
import os
import glob
import uuid

# Image manipulation
import numpy as np
import cv2

print("[INFO]: Imported modules!")

# Build the MMPose inferencers and the Ultralytics tracking model
human = MMPoseInferencer("human")
hand = MMPoseInferencer("hand")
human3d = MMPoseInferencer(pose3d="human3d")
track_model = YOLO("yolov8n.pt")  # Load an official Ultralytics detection model

# Defining inferencer models to look up in the handler functions
inferencers = {"Estimate human 2d poses": human,
               "Estimate human 2d hand poses": hand,
               "Estimate human 3d poses": human3d,
               "Detect and track": track_model}

print("[INFO]: Downloaded models!")
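The keys of the `inferencers` dict match the button labels defined further down, so a single handler could dispatch on the selected task instead of wiring one function per button. A minimal sketch of that pattern, assuming the dict above; the helper name `run_inference` is hypothetical and not part of this file:

def run_inference(task, video):
    # Hypothetical dispatcher: look the backend up by task name
    model = inferencers[task]
    if task == "Detect and track":
        return model.track(video)   # Ultralytics YOLO tracking
    return list(model(video))       # MMPose inferencers return generators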
def tracking(video, model, boxes=True):
    print("[INFO] Loading model...")
    # Load an official or custom model

    # Perform tracking with the model
    print("[INFO] Starting tracking!")
    # https://docs.ultralytics.com/modes/predict/
    annotated_frame = model(video, boxes=boxes)

    return annotated_frame

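Calling the model this way materialises every annotated frame in memory before `show_tracking` writes them out. For long clips, Ultralytics also supports streaming results one frame at a time; a sketch under that assumption (same `track_model` as above, the input path is hypothetical):

for result in track_model.track("input.mp4", stream=True):
    frame = result.plot()  # BGR numpy array with predictions drawn
    # ...write `frame` out immediately instead of keeping the whole list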
# vis_out_dir and model get defaults so Gradio can call this with just the video path
def show_tracking(video_content, vis_out_dir=None, model=track_model):
    video = cv2.VideoCapture(video_content)

    # Track
    video_track = tracking(video_content, model.track)

    # Prepare to save video
    out_file = "track.mp4"
    print("[INFO]: TRACK", out_file)

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # Codec for MP4 video
    fps = video.get(cv2.CAP_PROP_FPS)
    height, width, _ = video_track[0][0].orig_img.shape
    size = (width, height)

    out_track = cv2.VideoWriter(out_file, fourcc, fps, size)

    # Go through frames and write them
    for frame_track in video_track:
        result_track = frame_track[0].plot()  # plot a BGR numpy array of predictions
        out_track.write(result_track)
    print("[INFO] Done with frames")

    out_track.release()
    video.release()
    cv2.destroyAllWindows()  # Closing window

    return out_file

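Note that `orig_img.shape` is `(height, width, channels)` while `cv2.VideoWriter` expects `(width, height)`, which is why the tuple is swapped above. The size could equally be read straight from the capture; a small sketch using the same `video` object:

width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (width, height)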
def pose3d(video):
    # Write visualisations to a fresh, uniquely named directory next to the input video
    add_dir = str(uuid.uuid4())
    vis_out_dir = "/".join(["/".join(video.split("/")[:-1]), add_dir])
    print("[INFO]: CURRENT OUT DIR: ", vis_out_dir)

    result_generator = human3d(video,
                               vis_out_dir=vis_out_dir,
                               thickness=2,
                               rebase_keypoint_height=True,
                               device="cuda")

    # Exhaust the generator so every frame is processed and saved
    result = [result for result in result_generator]

    out_file = glob.glob(os.path.join(vis_out_dir, "*"))
    print("[INFO]: CURRENT OUT FILE NAME: ", out_file)

    return out_file


def pose2d(video):
    add_dir = str(uuid.uuid4())
    vis_out_dir = "/".join(["/".join(video.split("/")[:-1]), add_dir])
    print("[INFO]: CURRENT OUT DIR: ", vis_out_dir)

    result_generator = human(video,
                             vis_out_dir=vis_out_dir,
                             thickness=2,
                             rebase_keypoint_height=True,
                             device="cuda")

    result = [result for result in result_generator]

    out_file = glob.glob(os.path.join(vis_out_dir, "*"))
    print("[INFO]: CURRENT OUT FILE NAME: ", out_file)

    return out_file


def pose2dhand(video):
    add_dir = str(uuid.uuid4())
    vis_out_dir = "/".join(["/".join(video.split("/")[:-1]), add_dir])
    print("[INFO]: CURRENT OUT DIR: ", vis_out_dir)

    result_generator = hand(video,
                            vis_out_dir=vis_out_dir,
                            thickness=2,
                            rebase_keypoint_height=True,
                            device="cuda")

    result = [result for result in result_generator]

    out_file = glob.glob(os.path.join(vis_out_dir, "*"))
    print("[INFO]: CURRENT OUT FILE NAME: ", out_file)

    return out_file

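(In the original diff, `pose2dhand` overwrote `vis_out_dir` with a second bare `uuid.uuid4()` right after printing it, so the logged directory and the one actually used diverged; the redundant reassignment is dropped above.) The three pose functions differ only in which inferencer they call, so they could share one helper. A sketch of that refactor, assuming the inferencers defined above; the name `run_pose` is hypothetical:

def run_pose(inferencer, video):
    # Unique output dir next to the input video, as in the functions above
    vis_out_dir = "/".join(["/".join(video.split("/")[:-1]), str(uuid.uuid4())])
    list(inferencer(video, vis_out_dir=vis_out_dir, thickness=2,
                    rebase_keypoint_height=True, device="cuda"))
    return glob.glob(os.path.join(vis_out_dir, "*"))

Usage would then be, e.g., `run_pose(human, video)` in place of `pose2d(video)`.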
with gr.Blocks() as demo:
    with gr.Column():
        with gr.Tab("Upload video"):
            with gr.Row():
                with gr.Column():
                    video_input = gr.Video(source="upload", type="filepath", height=512)

                    submit_pose_file = gr.Button("Make 2d pose estimation")
                    submit_pose3d_file = gr.Button("Make 3d pose estimation")
                    submit_hand_file = gr.Button("Make 2d hand estimation")
                    submit_detect_file = gr.Button("Detect and track objects")

                video_output = gr.Video(height=512)

        with gr.Tab("Record video with webcam"):
            with gr.Row():
                with gr.Column():
                    webcam_input = gr.Video(source="webcam", height=512)

                    submit_pose_web = gr.Button("Make 2d pose estimation")
                    submit_pose3d_web = gr.Button("Make 3d pose estimation")
                    submit_hand_web = gr.Button("Make 2d hand estimation")
                    submit_detect_web = gr.Button("Detect and track objects")

                webcam_output = gr.Video(height=512)

    # From file
    submit_pose_file.click(fn=pose2d, inputs=video_input, outputs=video_output)
    submit_pose3d_file.click(fn=pose3d, inputs=video_input, outputs=video_output)
    submit_hand_file.click(fn=pose2dhand, inputs=video_input, outputs=video_output)
    submit_detect_file.click(fn=show_tracking, inputs=video_input, outputs=video_output)

    # Webcam: wire the webcam components (the diff originally reused the
    # upload tab's video_input/video_output here, leaving webcam_output unused)
    submit_pose_web.click(fn=pose2d, inputs=webcam_input, outputs=webcam_output)
    submit_pose3d_web.click(fn=pose3d, inputs=webcam_input, outputs=webcam_output)
    submit_hand_web.click(fn=pose2dhand, inputs=webcam_input, outputs=webcam_output)
    submit_detect_web.click(fn=show_tracking, inputs=webcam_input, outputs=webcam_output)

demo.launch()
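Since the Dockerfile runs this script as the container's CMD, the Gradio server must be reachable from outside the container. A sketch of the launch call with an explicit host and port; 7860 is Gradio's default, and the exact values are an assumption about the deployment rather than something in this diff:

demo.launch(server_name="0.0.0.0", server_port=7860)  # listen on all interfaces inside the container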