Commit 1f26869
Parent(s): 5504846

salto alto (high jump) exercise

tasks.py CHANGED
@@ -52,7 +52,11 @@ def process_video(file_name: str,vitpose: VitPose,user_id: str,player_id: str):
     logger.info(f"Video sent to {url}")


-def process_salto_alto(file_name: str, vitpose: VitPose, player_data: dict, repetitions: int, exercise_id: str):
+def process_salto_alto(file_name: str,
+                       vitpose: VitPose,
+                       player_data: dict,
+                       repetitions: int,
+                       exercise_id: str) -> dict:
     """
     Process a high jump exercise video using VitPose for pose estimation.

@@ -73,37 +77,80 @@ def process_salto_alto(file_name: str, vitpose: VitPose, player_data: dict, repetitions: int, exercise_id: str) -> dict:
     body_mass_kg = player_data.get('weight', 64)  # Body weight in kg

     # Generate output paths
-    output_video = file_name.replace('.mp4', '_analyzed.mp4')
-    output_json = output_video.replace('.mp4', '.json')
-
+    output_video = file_name.replace('.mp4', '_analyzed.mp4')
     # Process the video and get the jump metrics
-    results_dict = analyze_jump_video(
-        …
-    )
-
-    # …
-    …
+    # print(f"reference_height: {reference_height}")
+    # results_dict = analyze_jump_video(
+    #     model=model,
+    #     input_video=file_name,
+    #     output_video=output_video,
+    #     player_height= float(reference_height) / 100, #cm to m
+    #     body_mass_kg= float(body_mass_kg),
+    #     repetitions=repetitions
+    # )
+
+    results_dict = {'video_analysis': {'output_video': 'user_id_2_player_id_2_exercise_salto_alto_VIDEO-2025-05-19-18-55-47_analyzed.mp4'}, 'repetition_data': [{'repetition': 1, 'distancia_elevada': 0.47999998927116394, 'salto_alto': 2.180000066757202, 'potencia_sayer': 3768.719970703125}, {'repetition': 2, 'distancia_elevada': 0.49000000953674316, 'salto_alto': 2.190000057220459, 'potencia_sayer': 3827.929931640625}, {'repetition': 3, 'distancia_elevada': 0.5099999904632568, 'salto_alto': 2.2100000381469727, 'potencia_sayer': 3915.5}]}
+
+    print(f"results_dict: {results_dict}")
+
+
+    response = send_results_api(results_dict,
+                                player_data["id"],
+                                exercise_id,
+                                file_name)
+
+    # os.remove(file_name)
+    # os.remove(output_video)
+
+
+def send_results_api(results_dict: dict,
+                     player_id: str,
+                     exercise_id: str,
+                     video_path: str):
+    """
+    Updated function to send results to the new webhook endpoint
+    """
+    url = API_URL + "/excercises/webhooks/video-processed-results"
+    logger.info(f"Sending video results to {url}")
+
+    # Open the video file
+    with open(video_path, 'rb') as video_file:
+        # Prepare the files dictionary for file upload
+        files = {
+            'file': (video_path.split('/')[-1], video_file, 'video/mp4')
+        }
+
+        # Prepare the form data
+        data = {
+            'player_id': player_id,
+            'exercise_id': exercise_id,
+            'results': json.dumps(results_dict)  # Convert dict to JSON string
+        }
+
+        # Send the request with both files and data
+        response = requests.post(
+            url,
+            headers={"token": API_KEY},
+            files=files,
+            data=data,
+            stream=True
+        )
+
+        logger.info(f"Response: {response.status_code}")
+        logger.info(f"Response: {response.text}")
+        return response
+


-def analyze_jump_video(model, input_video, output_video, reference_height=1.68, …):
+
+
+
+def analyze_jump_video(model: VitPose,
+                       input_video: str,
+                       output_video: str,
+                       player_height: float,
+                       body_mass_kg: float,
+                       repetitions: int) -> dict | None:
     """
     Analyze a jump video to calculate various jump metrics.

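The new `send_results_api` bundles the annotated video and the JSON-serialized metrics into a single multipart/form-data POST. The receiving side is not part of this diff; the sketch below is a hypothetical FastAPI handler, written only to mirror the request shape the function produces (a `file` upload plus `player_id`, `exercise_id`, and `results` form fields):

# Hypothetical receiver for the webhook above; not part of this commit.
import json

from fastapi import FastAPI, File, Form, UploadFile

app = FastAPI()

@app.post("/excercises/webhooks/video-processed-results")
async def video_processed_results(
    file: UploadFile = File(...),  # the annotated .mp4 sent as 'file'
    player_id: str = Form(...),
    exercise_id: str = Form(...),
    results: str = Form(...),      # JSON string; decode it back to a dict
):
    metrics = json.loads(results)
    reps = metrics.get("repetition_data", [])
    return {"received": file.filename, "repetitions": len(reps)}

Note that `results` travels as a JSON string because multipart form fields are flat; the `json.dumps` on the sending side pairs with the `json.loads` here.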
@@ -156,19 +203,13 @@ def analyze_jump_video(model, input_video, output_video, reference_height=1.68,
     # Process first frame to calibrate
     output = model(frame)  # Detect pose in first frame
    keypoints = output.keypoints_xy.float().cpu().numpy()
-    print(f"keypoints {keypoints}")
     labels = model.pose_estimator_config.label2id
-
+
     nose_keypoint = labels["Nose"]
     L_ankle_keypoint = labels["L_Ankle"]
     R_ankle_keypoint = labels["R_Ankle"]
     L_shoulder_keypoint = labels["L_Shoulder"]
     R_shoulder_keypoint = labels["R_Shoulder"]
-    print(f"nose_keypoint {nose_keypoint}")
-    print(f"L_ankle_keypoint {L_ankle_keypoint}")
-    print(f"R_ankle_keypoint {R_ankle_keypoint}")
-    print(f"L_shoulder_keypoint {L_shoulder_keypoint}")
-    print(f"R_shoulder_keypoint {R_shoulder_keypoint}")

     if (
         keypoints is not None
@@ -178,9 +219,7 @@ def analyze_jump_video(model, input_video, output_video, reference_height=1.68,
             kpts_first = keypoints[0]
             if len(kpts_first[nose_keypoint]) > 0 and len(kpts_first[L_ankle_keypoint]) > 0:  # Nose and ankles
                 initial_person_height_px = min(kpts_first[L_ankle_keypoint][1], kpts_first[R_ankle_keypoint][1]) - kpts_first[nose_keypoint][1]
-
-                PX_PER_METER = float(initial_person_height_px) / float(reference_height)
-                print(f"Computed scale: {PX_PER_METER:.2f} px/m")
+                PX_PER_METER = initial_person_height_px / player_height
             if len(kpts_first[L_shoulder_keypoint]) > 0 and len(kpts_first[R_shoulder_keypoint]) > 0:  # Left (5) and right (6) shoulders
                 initial_left_shoulder_x = int(kpts_first[L_shoulder_keypoint][0])
                 initial_right_shoulder_x = int(kpts_first[R_shoulder_keypoint][0])
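The rewritten calibration divides the athlete's pixel span (nose to the higher ankle) by `player_height` in meters, yielding a pixels-per-meter scale; the `#cm to m` conversion in the commented-out call above suggests the removed version, which divided by `reference_height` directly, mixed centimeters into this scale. A numeric sketch of the calculation (values invented for illustration):

# Numeric sketch of the calibration above; the input values are made up.
initial_person_height_px = 525.0  # nose-to-ankle span detected in the first frame
player_height = 1.75              # athlete height in meters

PX_PER_METER = initial_person_height_px / player_height
print(f"{PX_PER_METER:.1f} px/m")  # 300.0 px/m, so a 60 px rise reads as 0.20 m

Since the span runs nose to ankle rather than head to sole, the scale comes out slightly small and measured jump heights slightly large; that is inherent to the keypoints available, not to this change.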
@@ -188,8 +227,8 @@ def analyze_jump_video(model, input_video, output_video, reference_height=1.68,
     if PX_PER_METER is None or initial_left_shoulder_x is None or initial_right_shoulder_x is None:
         print("Could not calibrate the scale or detect the shoulders in the first frame.")
         cap.release()
-        return
-
+        return None
+
     # Reset video for processing
     cap.release()
     cap = cv2.VideoCapture(input_video)
@@ -210,10 +249,9 @@ def analyze_jump_video(model, input_video, output_video, reference_height=1.68,
     head_y_buffer = []
     velocity_vertical = 0.0
     peak_power_sayer = 0.0  # Initialize Sayer power
-    person_detected = False  # Flag to indicate if person was detected in any frame
     current_power = 0.0
     repetition_count = 0
-
+    jump_peak_power = 0.0  # Peak power for current jump only

     # Process each frame
     while cap.isOpened():
@@ -222,6 +260,8 @@ def analyze_jump_video(model, input_video, output_video, reference_height=1.68,
             break

         annotated_frame = frame.copy()
+        if repetition_count == repetitions:
+            continue

         # Add try-except block around the model inference to catch any model errors
         try:
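The new guard stops pose inference once the requested repetition count is reached. Because `continue` also jumps past the `out.write(annotated_frame)` at the bottom of the loop, the remaining frames are read but never written, so the output video ends at the last repetition. If that truncation is intended, a `break` states it more directly; a hypothetical skeleton of the loop, for comparison:

import cv2

def annotate_until(cap: cv2.VideoCapture, out: cv2.VideoWriter, repetitions: int) -> None:
    """Sketch of the frame loop's exit behavior (hypothetical helper, not from the diff)."""
    repetition_count = 0
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        annotated_frame = frame.copy()
        if repetition_count == repetitions:
            break  # all requested repetitions counted; stop processing
        # ... pose inference, metric updates, and overlay drawing run here ...
        out.write(annotated_frame)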
@@ -233,8 +273,6 @@ def analyze_jump_video(model, input_video, output_video, reference_height=1.68,
                 len(keypoints) > 0 and
                 len(keypoints[0]) > 0 and
                 keypoints.size > 0):  # Check if array is not empty
-
-                person_detected = True
                 kpts = keypoints[0]

                 # Make sure all required keypoints are detected
@@ -257,8 +295,6 @@ def analyze_jump_video(model, input_video, output_video, reference_height=1.68,
                     current_ankle_y = min(a[1] for a in ankles)
                     last_detected_ankles_y = current_ankle_y
                     current_head_y = nose[1]
-                    current_left_shoulder_x = int(left_shoulder[0])
-                    current_right_shoulder_x = int(right_shoulder[0])

                     # Smooth ankle and head positions
                     ankle_y_history.append(current_ankle_y)
@@ -294,16 +330,17 @@ def analyze_jump_video(model, input_video, output_video, reference_height=1.68,
                         takeoff_head_y = smoothed_head_y
                         max_jump_height = 0
                         max_head_height_px = smoothed_head_y
+                        jump_peak_power = 0.0  # Reset for this jump

                     # Detect jump end
                     if jump_started and relative_ankle_change <= JUMP_THRESHOLD_PERCENT:
                         # Add to repetition data
-
+                        high_jump = calculate_high_jump(player_height, max_jump_height)
                         repetition_data.append({
                             "repetition": repetition_count + 1,
-                            "…
-                            "…
-                            "…
+                            "distancia_elevada": round(max_jump_height, 2),
+                            "salto_alto": round(high_jump, 2),
+                            "potencia_sayer": round(jump_peak_power, 2)  # Use jump-specific peak
                         })
                         repetition_count += 1
                         jump_started = False
@@ -317,7 +354,9 @@ def analyze_jump_video(model, input_video, output_video, reference_height=1.68,
                             max_head_height_px = smoothed_head_y
                             if relative_jump:
                                 current_power = calculate_peak_power_sayer(relative_jump, body_mass_kg)
-                                if current_power > peak_power_sayer:
+                                if current_power > jump_peak_power:  # Track peak for THIS jump
+                                    jump_peak_power = current_power
+                                if current_power > peak_power_sayer:  # Keep global peak too
                                     peak_power_sayer = current_power
                         else:
                             # Skip processing for this frame - invalid coordinates
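The two-counter update keeps a per-jump peak (`jump_peak_power`, reset at takeoff and stored with each repetition) alongside the global peak (`peak_power_sayer`, shown in the overlay). The power model is the Sayers equation visible later in this diff, `60.7 * jump_height_cm + 45.3 * body_mass_kg - 2055`. A worked example, with the meters-to-centimeters conversion assumed from the parameter names since only the return line appears in the diff:

def calculate_peak_power_sayer(jump_height_m: float, body_mass_kg: float) -> float:
    """Sayers peak-power estimate in watts; return line as shown in this diff."""
    jump_height_cm = jump_height_m * 100  # assumed conversion, implied by the names
    return (60.7 * jump_height_cm) + (45.3 * body_mass_kg) - 2055

# 60.7 * 48 + 45.3 * 64 - 2055 = 2913.6 + 2899.2 - 2055 = 3757.8 W
print(round(calculate_peak_power_sayer(0.48, 64), 1))  # 3757.8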
@@ -342,13 +381,13 @@ def analyze_jump_video(model, input_video, output_video, reference_height=1.68,

         # Calculate metrics and draw overlay even if keypoints weren't detected
         # This ensures video continues to show previous metrics
-
+        high_jump = calculate_high_jump(player_height, max_jump_height)

         # Draw floating metric boxes
         annotated_frame = draw_metrics_overlay(
             frame=annotated_frame,
             max_jump_height=max_jump_height,
-            salto_alto=…,
+            salto_alto=high_jump,
             velocity_vertical=velocity_vertical,
             peak_power_sayer=peak_power_sayer,
             repetition_count=repetition_count,
@@ -370,31 +409,70 @@ def analyze_jump_video(model, input_video, output_video, reference_height=1.68,
             horizontal_offset_factor=HORIZONTAL_OFFSET_FACTOR
         )

+        # Draw person skeleton keypoints
+        try:
+            if keypoints is not None and len(keypoints) > 0 and len(keypoints[0]) > 0:
+                # Use the exact keypoint indices
+                keypoint_indices = {
+                    'L_Ankle': 15, 'L_Ear': 3, 'L_Elbow': 7, 'L_Eye': 1, 'L_Hip': 11,
+                    'L_Knee': 13, 'L_Shoulder': 5, 'L_Wrist': 9, 'Nose': 0, 'R_Ankle': 16,
+                    'R_Ear': 4, 'R_Elbow': 8, 'R_Eye': 2, 'R_Hip': 12, 'R_Knee': 14,
+                    'R_Shoulder': 6, 'R_Wrist': 10
+                }
+
+                # Define skeleton connections (pairs of keypoints that should be connected)
+                skeleton_connections = [
+                    (keypoint_indices["Nose"], keypoint_indices["L_Eye"]),
+                    (keypoint_indices["Nose"], keypoint_indices["R_Eye"]),
+                    (keypoint_indices["L_Eye"], keypoint_indices["L_Ear"]),
+                    (keypoint_indices["R_Eye"], keypoint_indices["R_Ear"]),
+                    (keypoint_indices["Nose"], keypoint_indices["L_Shoulder"]),
+                    (keypoint_indices["Nose"], keypoint_indices["R_Shoulder"]),
+                    (keypoint_indices["L_Shoulder"], keypoint_indices["R_Shoulder"]),
+                    (keypoint_indices["L_Shoulder"], keypoint_indices["L_Elbow"]),
+                    (keypoint_indices["R_Shoulder"], keypoint_indices["R_Elbow"]),
+                    (keypoint_indices["L_Elbow"], keypoint_indices["L_Wrist"]),
+                    (keypoint_indices["R_Elbow"], keypoint_indices["R_Wrist"]),
+                    (keypoint_indices["L_Shoulder"], keypoint_indices["L_Hip"]),
+                    (keypoint_indices["R_Shoulder"], keypoint_indices["R_Hip"]),
+                    (keypoint_indices["L_Hip"], keypoint_indices["R_Hip"]),
+                    (keypoint_indices["L_Hip"], keypoint_indices["L_Knee"]),
+                    (keypoint_indices["R_Hip"], keypoint_indices["R_Knee"]),
+                    (keypoint_indices["L_Knee"], keypoint_indices["L_Ankle"]),
+                    (keypoint_indices["R_Knee"], keypoint_indices["R_Ankle"])
+                ]
+
+                kpts = keypoints[0]
+                # Draw points
+                for i, point in enumerate(kpts):
+                    if point[0] > 0 and point[1] > 0:  # Only draw if keypoint is valid
+                        cv2.circle(annotated_frame, (int(point[0]), int(point[1])), 5, GREEN, -1)
+
+                # Draw connections
+                for connection in skeleton_connections:
+                    start_idx, end_idx = connection
+                    if (start_idx < len(kpts) and end_idx < len(kpts) and
+                        kpts[start_idx][0] > 0 and kpts[start_idx][1] > 0 and
+                        kpts[end_idx][0] > 0 and kpts[end_idx][1] > 0):
+                        start_point = (int(kpts[start_idx][0]), int(kpts[start_idx][1]))
+                        end_point = (int(kpts[end_idx][0]), int(kpts[end_idx][1]))
+                        cv2.line(annotated_frame, start_point, end_point, YELLOW, 2)
+        except Exception as e:
+            print(f"Error drawing skeleton: {e}")
+
         out.write(annotated_frame)

     # Prepare results dictionary
     results_dict = {
-        "jump_metrics": {
-            "max_relative_jump": float(max(0, max_jump_height)),
-            "max_high_jump": float(max(0, salto_alto)),
-            "peak_power_sayer": float(peak_power_sayer),
-            "repetitions": int(repetition_count),
-            "reference_height": float(reference_height),
-            "body_mass_kg": float(body_mass_kg),
-            "px_per_meter": float(PX_PER_METER) if PX_PER_METER is not None else 0.0
-        },
         "video_analysis": {
-            "input_video": str(input_video),
             "output_video": str(output_video),
-            "fps": float(fps),
-            "resolution": f"{int(width)}x{int(height)}"
         },
         "repetition_data": [
             {
                 "repetition": int(rep["repetition"]),
-                "…
-                "…
-                "…
+                "distancia_elevada": float(rep["distancia_elevada"]),
+                "salto_alto": float(rep["salto_alto"]),
+                "potencia_sayer": float(rep["potencia_sayer"])
             } for rep in repetition_data
         ]
     }
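The skeleton-drawing block hardcodes the COCO-17 keypoint indices even though the same mapping was already resolved from `model.pose_estimator_config.label2id` during calibration. A hypothetical helper that factors the drawing out; `GREEN` and `YELLOW` are assumed to be BGR color constants defined elsewhere in tasks.py, since the diff uses them without defining them here:

import cv2
import numpy as np

GREEN = (0, 255, 0)     # assumed BGR constants, defined elsewhere in tasks.py
YELLOW = (0, 255, 255)

def draw_skeleton(frame: np.ndarray, kpts: np.ndarray,
                  connections: list[tuple[int, int]]) -> np.ndarray:
    """Hypothetical helper equivalent to the inline block above."""
    # Draw keypoints; undetected ones come back as (0, 0) and are skipped
    for x, y in kpts[:, :2]:
        if x > 0 and y > 0:
            cv2.circle(frame, (int(x), int(y)), 5, GREEN, -1)
    # Draw limb connections between valid keypoint pairs
    for start_idx, end_idx in connections:
        if start_idx < len(kpts) and end_idx < len(kpts):
            (x1, y1), (x2, y2) = kpts[start_idx][:2], kpts[end_idx][:2]
            if min(x1, y1, x2, y2) > 0:
                cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), YELLOW, 2)
    return frame

Passing the model's `label2id` mapping through instead of the hardcoded table would keep drawing consistent if a checkpoint's keypoint order ever differs.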
@@ -420,23 +498,19 @@ def calculate_peak_power_sayer(jump_height_m, body_mass_kg):
     return (60.7 * jump_height_cm) + (45.3 * body_mass_kg) - 2055


-def …
+def calculate_high_jump(player_height: float, max_jump_height: float) -> float:
     """
-    Calculate …
+    Calculate the high jump height based on the player height and the max jump height.

     Args:
-        …
-        …
+        player_height: Player height in meters
+        max_jump_height: Relative jump height in meters

     Returns:
-        …
+        the high jump height in meters
+
     """
-
-    # Apply validation rule
-    if absolute_jump > 1.72:
-        return absolute_jump
-    else:
-        return 0
+    return player_height + max_jump_height


 def draw_metrics_overlay(frame, max_jump_height, salto_alto, velocity_vertical, peak_power_sayer,
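`calculate_high_jump` replaces the removed threshold check (return the absolute jump only above 1.72 m, else 0) with a plain sum of standing height and measured rise. A worked check against the stubbed results earlier in this commit, which are consistent with a 1.70 m player:

def calculate_high_jump(player_height: float, max_jump_height: float) -> float:
    return player_height + max_jump_height

# distancia_elevada 0.48 / 0.49 / 0.51 m  ->  salto_alto 2.18 / 2.19 / 2.21 m
for rise in (0.48, 0.49, 0.51):
    print(round(calculate_high_jump(1.70, rise), 2))  # 2.18, 2.19, 2.21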