Colin Leong committed · Commit 867f539
1 Parent(s): 247b8a5
CDL: whoops
app.py CHANGED
@@ -12,19 +12,30 @@ import mediapipe as mp
 import torch

 mp_holistic = mp.solutions.holistic
+FACEMESH_CONTOURS_POINTS = [
+    str(p)
+    for p in sorted(
+        set([p for p_tup in list(mp_holistic.FACEMESH_CONTOURS) for p in p_tup])
+    )
+]
+

 def pose_normalization_info(pose_header):
     if pose_header.components[0].name == "POSE_LANDMARKS":
+        return pose_header.normalization_info(
+            p1=("POSE_LANDMARKS", "RIGHT_SHOULDER"),
+            p2=("POSE_LANDMARKS", "LEFT_SHOULDER"),
+        )

     if pose_header.components[0].name == "BODY_135":
+        return pose_header.normalization_info(
+            p1=("BODY_135", "RShoulder"), p2=("BODY_135", "LShoulder")
+        )

     if pose_header.components[0].name == "pose_keypoints_2d":
+        return pose_header.normalization_info(
+            p1=("pose_keypoints_2d", "RShoulder"), p2=("pose_keypoints_2d", "LShoulder")
+        )


 def pose_hide_legs(pose):
@@ -44,12 +55,19 @@ def pose_hide_legs(pose):


 def preprocess_pose(pose):
+    pose = pose.get_components(
+        [
+            "POSE_LANDMARKS",
+            "FACE_LANDMARKS",
+            "LEFT_HAND_LANDMARKS",
+            "RIGHT_HAND_LANDMARKS",
+        ],
+        {"FACE_LANDMARKS": FACEMESH_CONTOURS_POINTS},
+    )

     pose = pose.normalize(pose_normalization_info(pose.header))
     pose = pose_hide_legs(pose)
+
     # from sign_vq.data.normalize import pre_process_mediapipe, normalize_mean_std
     # from pose_anonymization.appearance import remove_appearance

@@ -66,7 +84,7 @@ def preprocess_pose(pose):


 # @st.cache_data(hash_funcs={UploadedFile: lambda p: str(p.name)})
+def load_pose(uploaded_file: UploadedFile) -> Pose:

     # with input_path.open("rb") as f_in:
     if uploaded_file.name.endswith(".zst"):
@@ -74,8 +92,9 @@ def load_pose(uploaded_file:UploadedFile)->Pose:
     else:
         return Pose.read(uploaded_file.read())

+
 @st.cache_data(hash_funcs={Pose: lambda p: np.array(p.body.data)})
+def get_pose_frames(pose: Pose, transparency: bool = False):
     v = PoseVisualizer(pose)
     frames = [frame_data for frame_data in v.draw()]

@@ -86,33 +105,37 @@ def get_pose_frames(pose:Pose, transparency: bool = False):
     images = [Image.fromarray(v.cv2.cvtColor(frame, cv_code)) for frame in frames]
     return frames, images

+
+def get_pose_gif(pose: Pose, step: int = 1, fps: int = None):
     if fps is not None:
         pose.body.fps = fps
     v = PoseVisualizer(pose)
     frames = [frame_data for frame_data in v.draw()]
     frames = frames[::step]
+    return v.save_gif(None, frames=frames)
+

 st.write("# Pose-format explorer")
+st.write(
+    "`pose-format` is a toolkit/library for 'handling, manipulation, and visualization of poses'. See [The documentation](https://pose-format.readthedocs.io/en/latest/)"
+)
+st.write(
+    "I made this app to help me visualize and understand the format, including different 'components' and 'points', and what they are named."
+)
 uploaded_file = st.file_uploader("Upload a .pose file", type=[".pose", ".pose.zst"])


 if uploaded_file is not None:
     with st.spinner(f"Loading {uploaded_file.name}"):
         pose = load_pose(uploaded_file)
         frames, images = get_pose_frames(pose=pose)
     st.success("done loading!")
     # st.write(f"pose shape: {pose.body.data.shape}")

     header = pose.header
     st.write("### File Info")
     with st.expander(f"Show full Pose-format header from {uploaded_file.name}"):
+
         st.write(header)
     # st.write(pose.body.data.shape)
     # st.write(pose.body.fps)
@@ -124,49 +147,66 @@ if uploaded_file is not None:
     component_names = [component.name for component in components]
     chosen_component_names = component_names

+    component_selection = st.radio(
+        "How to select components?", options=["manual", "signclip"]
+    )
     if component_selection == "manual":
         st.write(f"### Component selection: ")
+        chosen_component_names = st.pills(
+            "Components to visualize",
+            options=component_names,
+            selection_mode="multi",
+            default=component_names,
+        )
+
         # st.write(chosen_component_names)
+
         st.write("### Point selection:")
         point_names = []
+        new_chosen_components = []
         points_dict = {}
         for component in pose.header.components:
+            with st.expander(f"points for {component.name}"):
+
+                if component.name in chosen_component_names:
+
+                    st.write(f"#### {component.name}")
+                    selected_points = st.multiselect(
+                        f"points for component {component.name}:",
+                        options=component.points,
+                        default=component.points,
+                    )
+                    if selected_points == component.points:
+                        st.write(
+                            f"All selected, no need to add a points dict entry for {component.name}"
+                        )
+                    else:
+                        st.write(f"Adding dictionary for {component.name}")
+                        points_dict[component.name] = selected_points
+
         # selected_points = st.multiselect("points to visualize", options=point_names, default=point_names)
         if chosen_component_names:
+
             if not points_dict:
+                points_dict = None
+            # else:
             # st.write(points_dict)
             # st.write(chosen_component_names)

+        pose = pose.get_components(chosen_component_names, points=points_dict)
         # st.write(pose.header)

     elif component_selection == "signclip":
         st.write("Selected landmarks used for SignCLIP. (Face countours only)")
+        pose = pose.get_components(
+            [
+                "POSE_LANDMARKS",
+                "FACE_LANDMARKS",
+                "LEFT_HAND_LANDMARKS",
+                "RIGHT_HAND_LANDMARKS",
+            ],
+            {"FACE_LANDMARKS": FACEMESH_CONTOURS_POINTS},
+        )

         # pose = pose.normalize(pose_normalization_info(pose.header)) Visualization goes blank
         pose = pose_hide_legs(pose)
@@ -179,23 +219,30 @@ if uploaded_file is not None:
         # st.write(pose.header)
     else:
         pass

     st.write(f"### Visualization")
+    width = st.select_slider(
+        "select width of images",
+        list(range(1, pose.header.dimensions.width + 1)),
+        value=pose.header.dimensions.width / 2,
+    )
+    step = st.select_slider(
+        "Step value to select every nth image", list(range(1, len(frames))), value=1
+    )
+    fps = st.slider(
+        "fps for visualization: ",
+        min_value=1.0,
+        max_value=pose.body.fps,
+        value=pose.body.fps,
+    )
     visualize_clicked = st.button(f"Visualize!")

     if visualize_clicked:
+
         st.write(f"Generating gif...")

         # st.write(pose.body.data.shape)
+
         st.image(get_pose_gif(pose=pose, step=step, fps=fps))

         with st.expander("See header"):
@@ -203,13 +250,10 @@ if uploaded_file is not None:
             st.write(pose.header)

         # st.write(pose.body.data.shape)

     # st.write(visualize_pose(pose=pose)) # bunch of ndarrays
     # st.write([Image.fromarray(v.cv2.cvtColor(frame, cv_code)) for frame in frames])

     # for i, image in enumerate(images[::n]):
     #     print(f"i={i}")
     #     st.image(image=image, width=width)
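A minimal standalone sketch of what the new FACEMESH_CONTOURS_POINTS comprehension evaluates to (illustration only, not part of the commit; it assumes mediapipe is installed and that, as in app.py, FACE_LANDMARKS points are named by their numeric index):

import mediapipe as mp

mp_holistic = mp.solutions.holistic

# FACEMESH_CONTOURS is a set of (start, end) landmark-index pairs describing the
# face-contour edges; flatten the pairs, de-duplicate, sort, and stringify them so
# the indices can be passed as point names to Pose.get_components().
FACEMESH_CONTOURS_POINTS = [
    str(p)
    for p in sorted(
        set([p for p_tup in list(mp_holistic.FACEMESH_CONTOURS) for p in p_tup])
    )
]
print(len(FACEMESH_CONTOURS_POINTS), FACEMESH_CONTOURS_POINTS[:5])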
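And a hedged sketch of how the pieces touched by this commit fit together outside Streamlit, using only calls that already appear in app.py (the input filename is hypothetical, the import paths are the standard pose-format ones, and FACEMESH_CONTOURS_POINTS is the list built above):

from pose_format import Pose
from pose_format.pose_visualizer import PoseVisualizer

# Hypothetical input file, for illustration only.
with open("example.pose", "rb") as f:
    pose = Pose.read(f.read())

# Keep the SignCLIP component selection, restricting FACE_LANDMARKS to the
# contour points (mirrors preprocess_pose and the "signclip" branch of the app).
pose = pose.get_components(
    ["POSE_LANDMARKS", "FACE_LANDMARKS", "LEFT_HAND_LANDMARKS", "RIGHT_HAND_LANDMARKS"],
    {"FACE_LANDMARKS": FACEMESH_CONTOURS_POINTS},
)

# Render every 2nd frame to GIF bytes, mirroring get_pose_gif(step=2).
v = PoseVisualizer(pose)
frames = [frame_data for frame_data in v.draw()]
gif_bytes = v.save_gif(None, frames=frames[::2])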