Commit 3c6f221 by Sean Carnahan
1 parent: 16f3c4e

Remove YOLOv7, ensure LFS for models, update Dockerfile
Files changed:
- Dockerfile +3 -3
- app.py +100 -100
- bodybuilding_pose_analyzer/README.md +63 -0
- bodybuilding_pose_analyzer/bodybuilding_pose_classifier.h5 +3 -0
- bodybuilding_pose_analyzer/requirements.txt +8 -0
- bodybuilding_pose_analyzer/src/__pycache__/movenet_analyzer.cpython-310.pyc +0 -0
- bodybuilding_pose_analyzer/src/__pycache__/pose_analyzer.cpython-310.pyc +0 -0
- bodybuilding_pose_analyzer/src/demo.py +80 -0
- bodybuilding_pose_analyzer/src/movenet_analyzer.py +321 -0
- bodybuilding_pose_analyzer/src/movenet_demo.py +66 -0
- bodybuilding_pose_analyzer/src/pose_analyzer.py +200 -0
- bodybuilding_pose_analyzer/src/sample_video.mp4 +3 -0
- bodybuilding_pose_classifier_savedmodel.keras +3 -0
- static/uploads/output.mp4 +3 -0
- static/uploads/output_mediapipe.mp4 +3 -0
- static/uploads/output_movenet_lightning.mp4 +3 -0
- static/uploads/output_movenet_thunder.mp4 +3 -0
- static/uploads/policeb.mp4 +3 -0
- templates/index.html +176 -0
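
The "ensure LFS for models" part of this commit is visible below as Git LFS pointer files for the .h5 and .keras classifiers and the .mp4 videos. As a hedged sketch only (the commit's actual .gitattributes is not shown on this page), tracking those extensions would look like:

```bash
# Illustrative; run from the repo root before adding the large files.
git lfs install
git lfs track "*.h5" "*.keras" "*.mp4"
git add .gitattributes
```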
Dockerfile
CHANGED
@@ -15,11 +15,11 @@ RUN pip install --no-cache-dir -r requirements.txt
 # Copy all necessary application files and folders from HFup/ to /app in the container
 # These paths are relative to the Dockerfile's location (i.e., inside HFup/)
 COPY app.py .
-RUN ls -la # Debug: List files in the build context root
+RUN echo "Listing files:" && ls -la # Debug: List files in the build context root
 COPY bodybuilding_pose_analyzer bodybuilding_pose_analyzer
 COPY external external
-COPY yolov7 yolov7
-COPY yolov7-w6-pose.pt .
+# COPY yolov7 yolov7
+# COPY yolov7-w6-pose.pt .
 COPY static static
 
 # Ensure the uploads directory within static exists and is writable
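
For a local sanity check of the updated Dockerfile, a minimal build-and-run sketch follows; the port is an assumption (7860 is the usual Hugging Face Spaces port, and the exposed port is not visible in this hunk):

```bash
# Build from the directory containing the Dockerfile (HFup/ per the comments above).
docker build -t pose-analyzer .
# Run and map the assumed application port.
docker run --rm -p 7860:7860 pose-analyzer
```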
app.py
CHANGED
@@ -17,12 +17,12 @@ from bodybuilding_pose_analyzer.src.movenet_analyzer import MoveNetAnalyzer
 from bodybuilding_pose_analyzer.src.pose_analyzer import PoseAnalyzer
 
 # Add YOLOv7 to path
-sys.path.append('yolov7')
+# sys.path.append('yolov7')
 
-from yolov7.models.experimental import attempt_load
-from yolov7.utils.general import check_img_size, non_max_suppression_kpt, scale_coords
-from yolov7.utils.torch_utils import select_device
-from yolov7.utils.plots import plot_skeleton_kpts
+# from yolov7.models.experimental import attempt_load
+# from yolov7.utils.general import check_img_size, non_max_suppression_kpt, scale_coords
+# from yolov7.utils.torch_utils import select_device
+# from yolov7.utils.plots import plot_skeleton_kpts
 
 def wrap_text(text: str, font_face: int, font_scale: float, thickness: int, max_width: int) -> list[str]:
     """Wrap text to fit within max_width."""
@@ -68,23 +68,23 @@ app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB max file size
 os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
 
 # Initialize YOLOv7 model
-device = select_device('')
-yolo_model = None # Initialize as None
-stride = None
-imgsz = None
-
-try:
-    yolo_model = attempt_load('yolov7-w6-pose.pt', map_location=device)
-    stride = int(yolo_model.stride.max())
-    imgsz = check_img_size(640, s=stride)
-    print("YOLOv7 Model loaded successfully")
-except Exception as e:
-    print(f"Error loading YOLOv7 model: {e}")
-    traceback.print_exc()
+# device = select_device('')
+# yolo_model = None # Initialize as None
+# stride = None
+# imgsz = None
+
+# try:
+#     yolo_model = attempt_load('yolov7-w6-pose.pt', map_location=device)
+#     stride = int(yolo_model.stride.max())
+#     imgsz = check_img_size(640, s=stride)
+#     print("YOLOv7 Model loaded successfully")
+# except Exception as e:
+#     print(f"Error loading YOLOv7 model: {e}")
+#     traceback.print_exc()
 # Not raising here to allow app to run if only MoveNet is used. Error will be caught if YOLOv7 is selected.
 
 # YOLOv7 pose model expects 17 keypoints
-kpt_shape = (17, 3)
+# kpt_shape = (17, 3)
 
 # Load CNN model for bodybuilding pose classification
 cnn_model_path = 'external/BodybuildingPoseClassifier/bodybuilding_pose_classifier.h5'
@@ -115,87 +115,87 @@ def after_request(response):
     response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
     return response
 
-def process_video_yolov7(video_path): # Renamed from process_video
+# def process_video_yolov7(video_path): # Renamed from process_video
+# global yolo_model, imgsz, stride # Ensure global model is used
+# if yolo_model is None:
+# raise RuntimeError("YOLOv7 model failed to load. Cannot process video.")
+# try:
+# if not os.path.exists(video_path):
+# raise FileNotFoundError(f"Video file not found: {video_path}")
+#
+# cap = cv2.VideoCapture(video_path)
+# if not cap.isOpened():
+# raise ValueError(f"Failed to open video file: {video_path}")
+#
+# fps = int(cap.get(cv2.CAP_PROP_FPS))
+# width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+# height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+#
+# print(f"Processing video: {width}x{height} @ {fps}fps")
+#
+# # Create output video writer
+# output_path = os.path.join(app.config['UPLOAD_FOLDER'], 'output.mp4')
+# fourcc = cv2.VideoWriter_fourcc(*'avc1')
+# out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+#
+# frame_count = 0
+# while cap.isOpened():
+# ret, frame = cap.read()
+# if not ret:
+# break
+#
+# frame_count += 1
+# print(f"Processing frame {frame_count}")
+#
+# # Prepare image
+# img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+# img = cv2.resize(img, (imgsz, imgsz))
+# img = img.transpose((2, 0, 1)) # HWC to CHW
+# img = np.ascontiguousarray(img)
+# img = torch.from_numpy(img).to(device)
+# img = img.float() / 255.0
+# if img.ndimension() == 3:
+# img = img.unsqueeze(0)
+#
+# # Inference
+# with torch.no_grad():
+# pred = yolo_model(img)[0] # Use yolo_model
+# pred = non_max_suppression_kpt(pred, conf_thres=0.25, iou_thres=0.45, nc=yolo_model.yaml['nc'], kpt_label=True)
+#
+# # Draw results
+# output_frame = frame.copy()
+# poses_detected = False
+# for det in pred:
+# if len(det):
+# poses_detected = True
+# det[:, :4] = scale_coords(img.shape[2:], det[:, :4], frame.shape).round()
+# for row in det:
+# xyxy = row[:4]
+# conf = row[4]
+# cls = row[5]
+# kpts = row[6:]
+# kpts = torch.tensor(kpts).view(kpt_shape)
+# output_frame = plot_skeleton_kpts(output_frame, kpts, steps=3, orig_shape=output_frame.shape[:2])
+#
+# if not poses_detected:
+# print(f"No poses detected in frame {frame_count}")
+#
+# out.write(output_frame)
+#
+# cap.release()
+# out.release()
+#
+# if frame_count == 0:
+# raise ValueError("No frames were processed from the video")
+#
+# print(f"Video processing completed. Processed {frame_count} frames")
+# # Return URL for the client, using the 'serve_video' endpoint
+# output_filename = 'output.mp4'
+# return url_for('serve_video', filename=output_filename, _external=False)
+# except Exception as e:
+# print('Error in process_video:', e)
+# traceback.print_exc()
+# raise
 
 def process_video_movenet(video_path, model_variant='lightning', pose_type='front_double_biceps'):
     try:
bodybuilding_pose_analyzer/README.md
ADDED
@@ -0,0 +1,63 @@
# Bodybuilding Pose Analyzer

A real-time pose analysis tool for bodybuilders that helps analyze and provide feedback on common bodybuilding poses.

## Features

- Real-time pose detection using MediaPipe
- Analysis of common bodybuilding poses:
  - Front Double Biceps
  - Side Chest
  - Back Double Biceps
- Angle measurements for key body parts
- Real-time feedback and corrections
- FPS display

## Requirements

- Python 3.8+
- Webcam
- Required Python packages (listed in requirements.txt)

## Installation

1. Clone the repository:
```bash
git clone <repository-url>
cd bodybuilding_pose_analyzer
```

2. Create a virtual environment (recommended):
```bash
python -m venv venv
source venv/bin/activate  # On Windows: venv\Scripts\activate
```

3. Install required packages:
```bash
pip install -r requirements.txt
```

## Usage

1. Run the demo script:
```bash
python src/demo.py
```

2. Position yourself in front of the webcam
3. The system will automatically detect your pose and provide feedback
4. Press 'q' to quit the application

## Supported Poses

Currently, the system supports the following poses:
- Front Double Biceps
- Side Chest
- Back Double Biceps

More poses will be added in future updates.

## Contributing

Feel free to submit issues and enhancement requests!
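
The README's usage section covers src/demo.py (the MediaPipe path). This commit also adds src/movenet_demo.py, whose argument parser (shown further down) takes the same optional --video flag plus a --model variant; a usage sketch:

```bash
# Webcam with the default lightning variant
python src/movenet_demo.py
# A video file with the slower, more accurate thunder variant (path is illustrative)
python src/movenet_demo.py --model thunder --video path/to/clip.mp4
```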
bodybuilding_pose_analyzer/bodybuilding_pose_classifier.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:56cdfbdadbef2675622e699fcf444d5bcb0aab6c695bb32165ae60e984278346
size 228483160
bodybuilding_pose_analyzer/requirements.txt
ADDED
@@ -0,0 +1,8 @@
opencv-python>=4.8.0
mediapipe>=0.10.0
numpy>=1.24.0
torch>=2.0.0
torchvision>=0.15.0
scikit-learn>=1.3.0
matplotlib>=3.7.0
tqdm>=4.65.0
bodybuilding_pose_analyzer/src/__pycache__/movenet_analyzer.cpython-310.pyc
ADDED
Binary file (6.91 kB)
bodybuilding_pose_analyzer/src/__pycache__/pose_analyzer.cpython-310.pyc
ADDED
Binary file (5.46 kB)
bodybuilding_pose_analyzer/src/demo.py
ADDED
@@ -0,0 +1,80 @@
import cv2
import time
import argparse
from pose_analyzer import PoseAnalyzer

def process_video(video_source, analyzer):
    # Initialize video capture
    cap = cv2.VideoCapture(video_source)

    # Set window properties
    cv2.namedWindow('Bodybuilding Pose Analyzer', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Bodybuilding Pose Analyzer', 1280, 720)

    # FPS calculation variables
    prev_time = 0
    curr_time = 0

    while cap.isOpened():
        # Read frame
        ret, frame = cap.read()
        if not ret:
            break

        # Calculate FPS
        curr_time = time.time()
        fps = 1 / (curr_time - prev_time) if prev_time > 0 else 0
        prev_time = curr_time

        # Process frame
        frame_with_pose, analysis = analyzer.process_frame(frame)

        # Add FPS and analysis text to frame
        cv2.putText(frame_with_pose, f'FPS: {fps:.1f}', (10, 30),
                    cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1, lineType=cv2.LINE_AA)

        # Display feedback
        if 'error' not in analysis:
            y_offset = 70
            cv2.putText(frame_with_pose, f'Pose: {analysis["pose_type"]}', (10, y_offset),
                        cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1, lineType=cv2.LINE_AA)

            for angle_name, angle_value in analysis['angles'].items():
                y_offset += 40
                cv2.putText(frame_with_pose, f'{angle_name}: {angle_value:.1f}°', (10, y_offset),
                            cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1, lineType=cv2.LINE_AA)

            for correction in analysis['corrections']:
                y_offset += 40
                cv2.putText(frame_with_pose, correction, (10, y_offset),
                            cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 0, 255), 1, lineType=cv2.LINE_AA)
        else:
            cv2.putText(frame_with_pose, analysis['error'], (10, 70),
                        cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 0, 255), 1, lineType=cv2.LINE_AA)

        # Display the frame
        cv2.imshow('Bodybuilding Pose Analyzer', frame_with_pose)

        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release resources
    cap.release()
    cv2.destroyAllWindows()

def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='Bodybuilding Pose Analyzer Demo')
    parser.add_argument('--video', type=str, help='Path to video file (optional)')
    args = parser.parse_args()

    # Initialize the pose analyzer
    analyzer = PoseAnalyzer()

    # Process video (either webcam or file)
    video_source = args.video if args.video else 0
    process_video(video_source, analyzer)

if __name__ == '__main__':
    main()
bodybuilding_pose_analyzer/src/movenet_analyzer.py
ADDED
@@ -0,0 +1,321 @@
import cv2
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from typing import List, Dict, Tuple

class MoveNetAnalyzer:
    KEYPOINT_DICT = {
        'nose': 0,
        'left_eye': 1,
        'right_eye': 2,
        'left_ear': 3,
        'right_ear': 4,
        'left_shoulder': 5,
        'right_shoulder': 6,
        'left_elbow': 7,
        'right_elbow': 8,
        'left_wrist': 9,
        'right_wrist': 10,
        'left_hip': 11,
        'right_hip': 12,
        'left_knee': 13,
        'right_knee': 14,
        'left_ankle': 15,
        'right_ankle': 16
    }

    def __init__(self, model_name="lightning"):
        # Initialize MoveNet model
        if model_name == "lightning":
            self.model = hub.load("https://tfhub.dev/google/movenet/singlepose/lightning/4")
            self.input_size = 192
        else:  # thunder
            self.model = hub.load("https://tfhub.dev/google/movenet/singlepose/thunder/4")
            self.input_size = 256

        self.movenet = self.model.signatures['serving_default']

        # Define key angles for bodybuilding poses
        self.key_angles = {
            'front_double_biceps': {
                'shoulder_angle': (90, 120),  # Expected angle range
                'elbow_angle': (80, 100),
                'wrist_angle': (0, 20)
            },
            'side_chest': {
                'shoulder_angle': (45, 75),
                'elbow_angle': (90, 110),
                'wrist_angle': (0, 20)
            },
            'back_double_biceps': {
                'shoulder_angle': (90, 120),
                'elbow_angle': (80, 100),
                'wrist_angle': (0, 20)
            }
        }

    def detect_pose(self, frame: np.ndarray, last_valid_landmarks=None) -> Tuple[np.ndarray, List[Dict]]:
        """
        Detect pose in the given frame and return the frame with pose landmarks drawn
        and the list of detected landmarks.
        If detection fails, reuse last valid landmarks if provided.
        """
        # Resize and pad the image to keep aspect ratio
        img = frame.copy()
        img = tf.image.resize_with_pad(tf.expand_dims(img, axis=0), self.input_size, self.input_size)
        img = tf.cast(img, dtype=tf.int32)

        # Detection
        results = self.movenet(img)
        keypoints = results['output_0'].numpy()  # Shape [1, 1, 17, 3]

        # Draw the pose landmarks on the frame
        if keypoints[0, 0, 0, 2] > 0.1:  # Lowered confidence threshold for the nose
            # Convert keypoints to image coordinates
            y, x, c = frame.shape
            shaped = np.squeeze(keypoints)  # Shape [17, 3]

            # Draw keypoints
            for kp in shaped:
                ky, kx, kp_conf = kp
                if kp_conf > 0.1:
                    # Convert to image coordinates
                    x_coord = int(kx * x)
                    y_coord = int(ky * y)
                    cv2.circle(frame, (x_coord, y_coord), 6, (0, 255, 0), -1)

            # Convert landmarks to a list of dictionaries
            landmarks = []
            for kp in shaped:
                landmarks.append({
                    'x': float(kp[1]),
                    'y': float(kp[0]),
                    'visibility': float(kp[2])
                })

            return frame, landmarks

        # If detection fails, reuse last valid landmarks if provided
        if last_valid_landmarks is not None:
            return frame, last_valid_landmarks
        return frame, []

    def calculate_angle(self, landmarks: List[Dict], joint1: int, joint2: int, joint3: int) -> float:
        """
        Calculate the angle between three joints.
        """
        if len(landmarks) < max(joint1, joint2, joint3):
            return None

        # Get the coordinates of the three joints
        p1 = np.array([landmarks[joint1]['x'], landmarks[joint1]['y']])
        p2 = np.array([landmarks[joint2]['x'], landmarks[joint2]['y']])
        p3 = np.array([landmarks[joint3]['x'], landmarks[joint3]['y']])

        # Calculate the angle
        v1 = p1 - p2
        v2 = p3 - p2

        angle = np.degrees(np.arccos(
            np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
        ))

        return angle

    def analyze_pose(self, landmarks: List[Dict], pose_type: str) -> Dict:
        """
        Analyze the pose and provide feedback based on the pose_type.
        Handles pose_types not in self.key_angles by providing a note.
        """
        feedback = {
            'pose_type': pose_type,
            'angles': {},
            'corrections': [],
            'notes': []  # Initialize notes field
        }

        if not landmarks:
            # If no landmarks, it's a more fundamental issue than just pose_type.
            # The process_frame method already handles this by passing {'error': 'No pose detected'}
            # from self.analyze_pose if landmarks is empty.
            # However, to be safe, if this method is called directly with no landmarks:
            feedback['error'] = 'No landmarks provided for analysis'
            return feedback

        if pose_type not in self.key_angles:
            feedback['notes'].append(f"No specific angle checks defined for pose: {pose_type}")
            # Still return the feedback structure, but angles and corrections will be empty.
            # The 'error' field will not be set here, allowing app.py to distinguish this case.
            return feedback

        pose_rules = self.key_angles[pose_type]

        if pose_type == 'front_double_biceps':
            # Example: Left Shoulder - Elbow - Wrist for elbow angle
            # Example: Left Hip - Shoulder - Elbow for shoulder angle (arm abduction)
            # Note: These are examples, actual biomechanical definitions can be complex.
            # We'll stick to the previous definition for front_double_biceps shoulder angle for now.
            # Shoulder angle: right_hip - right_shoulder - right_elbow (can also use left)
            # Elbow angle: right_shoulder - right_elbow - right_wrist (can also use left)
            # Wrist angle (simplistic): right_elbow - right_wrist - a point slightly above wrist (not easily done without more points)

            # Using right side for front_double_biceps as an example, consistent with a typical bodybuilding pose display
            # Shoulder Angle (approximating arm abduction/flexion relative to torso)
            # Using Right Hip, Right Shoulder, Right Elbow
            rs = self.KEYPOINT_DICT['right_shoulder']
            re = self.KEYPOINT_DICT['right_elbow']
            rh = self.KEYPOINT_DICT['right_hip']
            rw = self.KEYPOINT_DICT['right_wrist']

            shoulder_angle = self.calculate_angle(landmarks, rh, rs, re)
            if shoulder_angle is not None:
                feedback['angles']['R Shoulder'] = shoulder_angle
                if not (pose_rules['shoulder_angle'][0] <= shoulder_angle <= pose_rules['shoulder_angle'][1]):
                    # Debug print before forming correction string
                    print(f"[MOVENET_DEBUG_CORRECTION] pose_type: {pose_type}, rule_key: 'shoulder_angle', rules_for_angle: {pose_rules.get('shoulder_angle')}")
                    feedback['corrections'].append(
                        f"Adjust R Shoulder to {pose_rules['shoulder_angle'][0]}-{pose_rules['shoulder_angle'][1]} deg"
                    )

            elbow_angle = self.calculate_angle(landmarks, rs, re, rw)
            if elbow_angle is not None:
                feedback['angles']['R Elbow'] = elbow_angle
                if not (pose_rules['elbow_angle'][0] <= elbow_angle <= pose_rules['elbow_angle'][1]):
                    feedback['corrections'].append(
                        f"Adjust R Elbow to {pose_rules['elbow_angle'][0]}-{pose_rules['elbow_angle'][1]} deg"
                    )
            # Wrist angle is hard to define meaningfully with current keypoints for this pose, skipping for now.

        elif pose_type == 'side_chest':
            # Assuming side chest often displays left side to judges
            ls = self.KEYPOINT_DICT['left_shoulder']
            le = self.KEYPOINT_DICT['left_elbow']
            lw = self.KEYPOINT_DICT['left_wrist']
            lh = self.KEYPOINT_DICT['left_hip']  # For shoulder angle relative to torso

            # Shoulder angle (e.g. arm flexion/extension in sagittal plane for the front arm)
            # For side chest, the front arm's shoulder angle relative to the torso (hip-shoulder-elbow)
            shoulder_angle = self.calculate_angle(landmarks, lh, ls, le)
            if shoulder_angle is not None:
                feedback['angles']['L Shoulder'] = shoulder_angle
                if not (pose_rules['shoulder_angle'][0] <= shoulder_angle <= pose_rules['shoulder_angle'][1]):
                    feedback['corrections'].append(
                        f"Adjust L Shoulder to {pose_rules['shoulder_angle'][0]}-{pose_rules['shoulder_angle'][1]} deg"
                    )

            elbow_angle = self.calculate_angle(landmarks, ls, le, lw)
            if elbow_angle is not None:
                feedback['angles']['L Elbow'] = elbow_angle
                if not (pose_rules['elbow_angle'][0] <= elbow_angle <= pose_rules['elbow_angle'][1]):
                    feedback['corrections'].append(
                        f"Adjust L Elbow to {pose_rules['elbow_angle'][0]}-{pose_rules['elbow_angle'][1]} deg"
                    )
            # Wrist angle for side chest is also nuanced, skipping detailed check for now.

        elif pose_type == 'back_double_biceps':
            # Similar to front, but from back. We can calculate for both arms or pick one.
            # Let's do right side for consistency with front_double_biceps example.
            rs = self.KEYPOINT_DICT['right_shoulder']
            re = self.KEYPOINT_DICT['right_elbow']
            rh = self.KEYPOINT_DICT['right_hip']
            rw = self.KEYPOINT_DICT['right_wrist']

            shoulder_angle = self.calculate_angle(landmarks, rh, rs, re)
            if shoulder_angle is not None:
                feedback['angles']['R Shoulder'] = shoulder_angle
                if not (pose_rules['shoulder_angle'][0] <= shoulder_angle <= pose_rules['shoulder_angle'][1]):
                    feedback['corrections'].append(
                        f"Adjust R Shoulder to {pose_rules['shoulder_angle'][0]}-{pose_rules['shoulder_angle'][1]} deg"
                    )

            elbow_angle = self.calculate_angle(landmarks, rs, re, rw)
            if elbow_angle is not None:
                feedback['angles']['R Elbow'] = elbow_angle
                if not (pose_rules['elbow_angle'][0] <= elbow_angle <= pose_rules['elbow_angle'][1]):
                    feedback['corrections'].append(
                        f"Adjust R Elbow to {pose_rules['elbow_angle'][0]}-{pose_rules['elbow_angle'][1]} deg"
                    )

        # Clear notes if pose_type was valid and processed, unless specific notes were added by pose logic
        if not feedback['notes']:  # Only clear if no specific notes were added during pose rule processing
            feedback.pop('notes', None)

        return feedback

    def process_frame(self, frame: np.ndarray, pose_type: str = 'front_double_biceps', last_valid_landmarks=None) -> Tuple[np.ndarray, Dict, List[Dict]]:
        """
        Process a single frame, detect pose, and analyze it. Returns frame, analysis, and used landmarks.
        """
        # Detect pose
        frame_with_pose, landmarks = self.detect_pose(frame, last_valid_landmarks=last_valid_landmarks)

        # Analyze pose if landmarks are detected
        analysis = self.analyze_pose(landmarks, pose_type) if landmarks else {'error': 'No pose detected'}

        return frame_with_pose, analysis, landmarks

    def classify_pose(self, landmarks: List[Dict]) -> str:
        """
        Classify the pose based on keypoint positions and angles.
        Returns one of: 'front_double_biceps', 'side_chest', 'back_double_biceps'.
        """
        if not landmarks or len(landmarks) < 17:
            return 'front_double_biceps'  # Default/fallback

        # Calculate angles for both arms
        # Right side
        rs = self.KEYPOINT_DICT['right_shoulder']
        re = self.KEYPOINT_DICT['right_elbow']
        rh = self.KEYPOINT_DICT['right_hip']
        rw = self.KEYPOINT_DICT['right_wrist']
        # Left side
        ls = self.KEYPOINT_DICT['left_shoulder']
        le = self.KEYPOINT_DICT['left_elbow']
        lh = self.KEYPOINT_DICT['left_hip']
        lw = self.KEYPOINT_DICT['left_wrist']

        # Shoulder angles
        r_shoulder_angle = self.calculate_angle(landmarks, rh, rs, re)
        l_shoulder_angle = self.calculate_angle(landmarks, lh, ls, le)
        # Elbow angles
        r_elbow_angle = self.calculate_angle(landmarks, rs, re, rw)
        l_elbow_angle = self.calculate_angle(landmarks, ls, le, lw)

        # Heuristic rules:
        # - Front double biceps: both arms raised, elbows bent, both shoulders abducted
        # - Side chest: one arm across chest (elbow in front of body), other arm flexed
        # - Back double biceps: both arms raised, elbows bent, but person is facing away (shoulders/hips x order reversed)

        # Use x-coordinates to estimate facing direction
        # If right shoulder x < left shoulder x, assume facing front; else, facing back
        facing_front = landmarks[rs]['x'] < landmarks[ls]['x']

        # Count how many arms are "up" (shoulder angle in expected range)
        arms_up = 0
        if r_shoulder_angle and 80 < r_shoulder_angle < 150:
            arms_up += 1
        if l_shoulder_angle and 80 < l_shoulder_angle < 150:
            arms_up += 1
        elbows_bent = 0
        if r_elbow_angle and 60 < r_elbow_angle < 130:
            elbows_bent += 1
        if l_elbow_angle and 60 < l_elbow_angle < 130:
            elbows_bent += 1

        # Side chest: one arm's elbow is much closer to the body's midline (x of elbow near x of nose)
        nose_x = landmarks[self.KEYPOINT_DICT['nose']]['x']
        le_x = landmarks[le]['x']
        re_x = landmarks[re]['x']
        side_chest_like = (abs(le_x - nose_x) < 0.08 or abs(re_x - nose_x) < 0.08)

        if arms_up == 2 and elbows_bent == 2:
            if facing_front:
                return 'front_double_biceps'
            else:
                return 'back_double_biceps'
        elif side_chest_like:
            return 'side_chest'
        else:
            # Default/fallback
            return 'front_double_biceps'
bodybuilding_pose_analyzer/src/movenet_demo.py
ADDED
@@ -0,0 +1,66 @@
import cv2
import argparse
from movenet_analyzer import MoveNetAnalyzer

def main():
    parser = argparse.ArgumentParser(description='MoveNet Pose Analysis Demo')
    parser.add_argument('--video', type=str, help='Path to video file (optional)')
    parser.add_argument('--model', type=str, default='lightning', choices=['lightning', 'thunder'],
                        help='MoveNet model variant (lightning or thunder)')
    args = parser.parse_args()

    # Initialize the MoveNet analyzer
    analyzer = MoveNetAnalyzer(model_name=args.model)

    # Initialize video capture
    if args.video:
        cap = cv2.VideoCapture(args.video)
    else:
        cap = cv2.VideoCapture(0)  # Use webcam if no video file provided

    if not cap.isOpened():
        print("Error: Could not open video source")
        return

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Process frame
        frame_with_pose, analysis = analyzer.process_frame(frame)

        # Display analysis results
        if 'error' not in analysis:
            # Display pose type
            cv2.putText(frame_with_pose, f"Pose: {analysis['pose_type']}",
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            # Display angles
            y_offset = 60
            for joint, angle in analysis['angles'].items():
                cv2.putText(frame_with_pose, f"{joint}: {angle:.1f}°",
                            (10, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                y_offset += 30

            # Display corrections
            for correction in analysis['corrections']:
                cv2.putText(frame_with_pose, correction,
                            (10, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                y_offset += 30
        else:
            cv2.putText(frame_with_pose, analysis['error'],
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

        # Display the frame
        cv2.imshow('MoveNet Pose Analysis', frame_with_pose)

        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
bodybuilding_pose_analyzer/src/pose_analyzer.py
ADDED
@@ -0,0 +1,200 @@
import cv2
import mediapipe as mp
import numpy as np
from typing import List, Dict, Tuple

class PoseAnalyzer:
    # Add MediaPipe skeleton connections as a class variable
    MP_CONNECTIONS = [
        (11, 13), (13, 15),  # Left arm
        (12, 14), (14, 16),  # Right arm
        (11, 12),  # Shoulders
        (11, 23), (12, 24),  # Torso sides
        (23, 24),  # Hips
        (23, 25), (25, 27),  # Left leg
        (24, 26), (26, 28),  # Right leg
        (27, 31), (28, 32),  # Ankles to feet
        (15, 17), (16, 18),  # Wrists to hands
        (15, 19), (16, 20),  # Wrists to pinky
        (15, 21), (16, 22),  # Wrists to index
        (15, 17), (17, 19), (19, 21),  # Left hand
        (16, 18), (18, 20), (20, 22)  # Right hand
    ]

    def __init__(self):
        # Initialize MediaPipe Pose
        self.mp_pose = mp.solutions.pose
        self.pose = self.mp_pose.Pose(
            static_image_mode=False,
            model_complexity=2,  # Using the most accurate model
            min_detection_confidence=0.1,
            min_tracking_confidence=0.1
        )
        self.mp_drawing = mp.solutions.drawing_utils

        # Define key angles for bodybuilding poses
        self.key_angles = {
            'front_double_biceps': {
                'shoulder_angle': (90, 120),  # Expected angle range
                'elbow_angle': (80, 100),
                'wrist_angle': (0, 20)
            },
            'side_chest': {
                'shoulder_angle': (45, 75),
                'elbow_angle': (90, 110),
                'wrist_angle': (0, 20)
            },
            'back_double_biceps': {
                'shoulder_angle': (90, 120),
                'elbow_angle': (80, 100),
                'wrist_angle': (0, 20)
            }
        }

    def detect_pose(self, frame: np.ndarray, last_valid_landmarks=None) -> Tuple[np.ndarray, List[Dict]]:
        """
        Detect pose in the given frame and return the frame with pose landmarks drawn
        and the list of detected landmarks. If detection fails, reuse last valid landmarks if provided.
        """
        # Convert the BGR image to RGB
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Process the frame and detect pose
        results = self.pose.process(rgb_frame)

        # Draw the pose landmarks on the frame
        if results.pose_landmarks:
            # Draw all 33 keypoints as bright red, smaller circles, and show index
            for idx, landmark in enumerate(results.pose_landmarks.landmark):
                x = int(landmark.x * frame.shape[1])
                y = int(landmark.y * frame.shape[0])
                if landmark.visibility > 0.1:  # Lowered threshold from 0.3 to 0.1
                    cv2.circle(frame, (x, y), 3, (0, 0, 255), -1)
                    cv2.putText(frame, str(idx), (x+8, y-8), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
            # Draw skeleton lines
            # Convert landmarks to pixel coordinates for easier access
            landmark_points = []
            for landmark in results.pose_landmarks.landmark:
                landmark_points.append((int(landmark.x * frame.shape[1]), int(landmark.y * frame.shape[0]), landmark.visibility))
            for pt1, pt2 in self.MP_CONNECTIONS:
                if pt1 < len(landmark_points) and pt2 < len(landmark_points):
                    x1, y1, v1 = landmark_points[pt1]
                    x2, y2, v2 = landmark_points[pt2]
                    if v1 > 0.1 and v2 > 0.1:
                        cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 255), 2)
            # Convert landmarks to a list of dictionaries
            landmarks = []
            for idx, landmark in enumerate(results.pose_landmarks.landmark):
                landmarks.append({
                    'x': landmark.x,
                    'y': landmark.y,
                    'z': landmark.z,
                    'visibility': landmark.visibility
                })
            return frame, landmarks
        # If detection fails, reuse last valid landmarks if provided
        if last_valid_landmarks is not None:
            return frame, last_valid_landmarks
        return frame, []

    def calculate_angle(self, landmarks: List[Dict], joint1: int, joint2: int, joint3: int) -> float:
        """
        Calculate the angle between three joints.
        """
        if len(landmarks) < max(joint1, joint2, joint3):
            return None

        # Get the coordinates of the three joints
        p1 = np.array([landmarks[joint1]['x'], landmarks[joint1]['y']])
        p2 = np.array([landmarks[joint2]['x'], landmarks[joint2]['y']])
        p3 = np.array([landmarks[joint3]['x'], landmarks[joint3]['y']])

        # Calculate the angle
        v1 = p1 - p2
        v2 = p3 - p2

        angle = np.degrees(np.arccos(
            np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
        ))

        return angle

    def analyze_pose(self, landmarks: List[Dict], pose_type: str) -> Dict:
        """
        Analyze the pose and provide feedback based on the pose type.
        Enhanced: Calculates angles for both left and right arms (shoulder, elbow, wrist) for all pose types.
        """
        if not landmarks or pose_type not in self.key_angles:
            return {'error': 'Invalid pose type or no landmarks detected'}

        feedback = {
            'pose_type': pose_type,
            'angles': {},
            'corrections': []
        }
        # Indices for MediaPipe 33 keypoints
        LEFT_SHOULDER = 11
        RIGHT_SHOULDER = 12
        LEFT_ELBOW = 13
        RIGHT_ELBOW = 14
        LEFT_WRIST = 15
        RIGHT_WRIST = 16
        LEFT_HIP = 23
        RIGHT_HIP = 24
        LEFT_KNEE = 25
        RIGHT_KNEE = 26
        LEFT_ANKLE = 27
        RIGHT_ANKLE = 28
        # Calculate angles for both arms
        # Shoulder angles (hip-shoulder-elbow)
        l_shoulder_angle = self.calculate_angle(landmarks, LEFT_HIP, LEFT_SHOULDER, LEFT_ELBOW)
        r_shoulder_angle = self.calculate_angle(landmarks, RIGHT_HIP, RIGHT_SHOULDER, RIGHT_ELBOW)
        # Elbow angles (shoulder-elbow-wrist)
        l_elbow_angle = self.calculate_angle(landmarks, LEFT_SHOULDER, LEFT_ELBOW, LEFT_WRIST)
        r_elbow_angle = self.calculate_angle(landmarks, RIGHT_SHOULDER, RIGHT_ELBOW, RIGHT_WRIST)
        # Wrist angles (elbow-wrist-hand index, if available)
        # MediaPipe does not have hand index, so we can use a pseudo point (e.g., extend wrist direction)
        # For now, skip wrist angle or set to None
        # Leg angles (optional)
        l_knee_angle = self.calculate_angle(landmarks, LEFT_HIP, LEFT_KNEE, LEFT_ANKLE)
        r_knee_angle = self.calculate_angle(landmarks, RIGHT_HIP, RIGHT_KNEE, RIGHT_ANKLE)
        # Add angles to feedback
        if l_shoulder_angle:
            feedback['angles']['L Shoulder'] = l_shoulder_angle
            if not self.key_angles[pose_type]['shoulder_angle'][0] <= l_shoulder_angle <= self.key_angles[pose_type]['shoulder_angle'][1]:
                feedback['corrections'].append(
                    f"Adjust L Shoulder to {self.key_angles[pose_type]['shoulder_angle'][0]}-{self.key_angles[pose_type]['shoulder_angle'][1]} deg"
                )
        if r_shoulder_angle:
            feedback['angles']['R Shoulder'] = r_shoulder_angle
            if not self.key_angles[pose_type]['shoulder_angle'][0] <= r_shoulder_angle <= self.key_angles[pose_type]['shoulder_angle'][1]:
                feedback['corrections'].append(
                    f"Adjust R Shoulder to {self.key_angles[pose_type]['shoulder_angle'][0]}-{self.key_angles[pose_type]['shoulder_angle'][1]} deg"
                )
        if l_elbow_angle:
            feedback['angles']['L Elbow'] = l_elbow_angle
            if not self.key_angles[pose_type]['elbow_angle'][0] <= l_elbow_angle <= self.key_angles[pose_type]['elbow_angle'][1]:
                feedback['corrections'].append(
                    f"Adjust L Elbow to {self.key_angles[pose_type]['elbow_angle'][0]}-{self.key_angles[pose_type]['elbow_angle'][1]} deg"
                )
        if r_elbow_angle:
            feedback['angles']['R Elbow'] = r_elbow_angle
            if not self.key_angles[pose_type]['elbow_angle'][0] <= r_elbow_angle <= self.key_angles[pose_type]['elbow_angle'][1]:
                feedback['corrections'].append(
                    f"Adjust R Elbow to {self.key_angles[pose_type]['elbow_angle'][0]}-{self.key_angles[pose_type]['elbow_angle'][1]} deg"
                )
        # Optionally add knee angles
        if l_knee_angle:
            feedback['angles']['L Knee'] = l_knee_angle
        if r_knee_angle:
            feedback['angles']['R Knee'] = r_knee_angle
        return feedback

    def process_frame(self, frame: np.ndarray, pose_type: str = 'front_double_biceps', last_valid_landmarks=None) -> Tuple[np.ndarray, Dict, List[Dict]]:
        """
        Process a single frame, detect pose, and analyze it. Returns frame, analysis, and used landmarks.
        """
        # Detect pose
        frame_with_pose, landmarks = self.detect_pose(frame, last_valid_landmarks=last_valid_landmarks)
        # Analyze pose if landmarks are detected
        analysis = self.analyze_pose(landmarks, pose_type) if landmarks else {'error': 'No pose detected'}
        return frame_with_pose, analysis, landmarks
bodybuilding_pose_analyzer/src/sample_video.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:16b5cfe3c836a5fba2c46ce4bcf9d241b9a9292647822fbbf767f3db9f1aa0e9
size 1684449
bodybuilding_pose_classifier_savedmodel.keras
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:29114d2581644c4d8bde26f8fffa9a6dd6d295175f9e0b81c67da23bf435d194
size 76185162
static/uploads/output.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a555701da6bea5183cac40ea6f1b45d6fe182db4efc0cfca10ebab60fcdce498
size 261
static/uploads/output_mediapipe.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dff942eb4a92e2af3f05573368ffa81cde14add1b0aeb28d7acc76b154aa56f0
size 926873
static/uploads/output_movenet_lightning.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:29e0173c04e5eb95a1f951e756a1f48fb56f5fee53afd0f2f812d1716de61bc4
size 557403
static/uploads/output_movenet_thunder.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a1aebd72c36462e0725557a154b595d70128b1723a01a33a2f9aa2854084c6a1
size 1757104
static/uploads/policeb.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bedaa005f970439d8b6fe99e937027a3dc7c7f7d9ccec319af22344ed06df790
size 7552156
templates/index.html
ADDED
@@ -0,0 +1,176 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Bodybuilding Pose Analyzer</title>
    <link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/tailwind.min.css" rel="stylesheet">
</head>
<body class="bg-gray-100 min-h-screen">
    <div class="container mx-auto px-4 py-8">
        <h1 class="text-4xl font-bold text-center mb-8">Bodybuilding Pose Analyzer</h1>

        <div class="max-w-2xl mx-auto bg-white rounded-lg shadow-lg p-6">
            <div class="mb-6">
                <h2 class="text-2xl font-semibold mb-4">Upload Video</h2>
                <form id="uploadForm" class="space-y-4">
                    <div class="border-2 border-dashed border-gray-300 rounded-lg p-6 text-center">
                        <input type="file" id="videoInput" accept="video/*" class="hidden">
                        <label for="videoInput" class="cursor-pointer">
                            <div class="text-gray-600">
                                <svg class="mx-auto h-12 w-12" stroke="currentColor" fill="none" viewBox="0 0 48 48">
                                    <path d="M28 8H12a4 4 0 00-4 4v20m32-12v8m0 0v8a4 4 0 01-4 4H12a4 4 0 01-4-4v-4m32-4l-3.172-3.172a4 4 0 00-5.656 0L28 28M8 32l9.172-9.172a4 4 0 015.656 0L28 28m0 0l4 4m4-24h8m-4-4v8m-12 4h.02" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" />
                                </svg>
                                <p class="mt-1">Click to upload a video</p>
                                <p id="fileName" class="text-sm text-gray-500 mt-1"></p>
                            </div>
                        </label>
                    </div>

                    <div>
                        <label class="block text-sm font-medium text-gray-700">Choose Model:</label>
                        <div class="mt-1 flex rounded-md shadow-sm">
                            <div class="relative flex items-stretch flex-grow focus-within:z-10">
                                <label class="inline-flex items-center">
                                    <input type="radio" class="form-radio" name="model_choice" value="movenet" checked>
                                    <span class="ml-2">Gladiator BB</span>
                                </label>
                                <label class="inline-flex items-center ml-6">
                                    <input type="radio" class="form-radio" name="model_choice" value="Gladiator SupaDot">
                                    <span class="ml-2">Gladiator SupaDot</span>
                                </label>
                            </div>
                        </div>
                    </div>
                    <div id="gladiatorBBOptions" class="space-y-4">
                        <div>
                            <label class="block text-sm font-medium text-gray-700">Gladiator BB Variant:</label>
                            <div class="mt-1 flex rounded-md shadow-sm">
                                <div class="relative flex items-stretch flex-grow focus-within:z-10">
                                    <label class="inline-flex items-center">
                                        <input type="radio" class="form-radio" name="movenet_variant" value="lightning" checked>
                                        <span class="ml-2">Lightning (Faster, Less Accurate)</span>
                                    </label>
                                    <label class="inline-flex items-center ml-6">
                                        <input type="radio" class="form-radio" name="movenet_variant" value="thunder">
                                        <span class="ml-2">Thunder (Slower, More Accurate)</span>
                                    </label>
                                </div>
                            </div>
                        </div>
                    </div>

                    <button type="submit" class="w-full bg-blue-500 text-white py-2 px-4 rounded-lg hover:bg-blue-600 transition duration-200">
                        Process Video
                    </button>
                </form>
            </div>

            <div id="result" class="hidden">
                <h2 class="text-2xl font-semibold mb-4">Results</h2>
                <div class="aspect-w-16 aspect-h-9">
                    <video id="outputVideo" controls class="w-full rounded-lg"></video>
                </div>
            </div>

            <div id="loading" class="hidden">
                <div class="flex items-center justify-center">
                    <div class="animate-spin rounded-full h-12 w-12 border-b-2 border-blue-500"></div>
                </div>
                <p class="text-center mt-4">Processing video...</p>
            </div>
        </div>
    </div>

    <script>
        document.getElementById('videoInput').addEventListener('change', function() {
            const fileName = this.files[0] ? this.files[0].name : 'No file selected';
            document.getElementById('fileName').textContent = fileName;
        });

        document.querySelectorAll('input[name="model_choice"]').forEach(radio => {
            radio.addEventListener('change', function() {
                const gladiatorBBOptions = document.getElementById('gladiatorBBOptions');
                if (this.value === 'movenet') {
                    gladiatorBBOptions.classList.remove('hidden');
                } else {
                    gladiatorBBOptions.classList.add('hidden');
                }
            });
        });

        // Trigger change event on page load for the initially checked model_choice
        document.querySelector('input[name="model_choice"]:checked').dispatchEvent(new Event('change'));

        document.getElementById('uploadForm').addEventListener('submit', async (e) => {
            e.preventDefault();

            const fileInput = document.getElementById('videoInput');
            const file = fileInput.files[0];

            if (!file) {
                alert('Please select a video file');
                return;
            }

            const formData = new FormData();
            formData.append('video', file);
            const modelChoice = document.querySelector('input[name="model_choice"]:checked').value;
            formData.append('model_choice', modelChoice);
            if (modelChoice === 'movenet') {
                const movenetVariant = document.querySelector('input[name="movenet_variant"]:checked').value;
                formData.append('movenet_variant', movenetVariant);
            }

            // Show loading
            document.getElementById('loading').classList.remove('hidden');
            document.getElementById('result').classList.add('hidden');

            try {
                const response = await fetch('/upload', {
                    method: 'POST',
                    body: formData
                });

                console.log('[CLIENT] Full response object from /upload:', response);
                console.log('[CLIENT] Response status from /upload:', response.status);
                console.log('[CLIENT] Response status text from /upload:', response.statusText);

                const data = await response.json();
                console.log('[CLIENT] Parsed JSON data from /upload:', data);

                if (response.ok) {
                    console.log('[CLIENT] Response from /upload is OK (status 200-299)');
                    const videoElement = document.getElementById('outputVideo');
                    const resultDiv = document.getElementById('result');

                    console.log('[CLIENT] Setting video src to:', data.output_path);
                    videoElement.src = data.output_path;

                    resultDiv.classList.remove('hidden');
                    videoElement.load();
                    console.log('[CLIENT] Called video.load()');

                    videoElement.onloadeddata = () => {
                        console.log('[CLIENT] Video data loaded successfully.');
                    };
                    videoElement.onerror = (e) => {
                        console.error('[CLIENT] Video player error:', e);
                        console.error('[CLIENT] Video src that failed:', videoElement.src);
                        alert('Error loading video. Check browser console for details.');
                    };

                } else {
                    console.error('[CLIENT] Response from /upload not OK. Status:', response.status);
                    alert(data.error || `Server error: ${response.status} ${response.statusText}. Check browser console.`);
                }
            } catch (error) {
                console.error('[CLIENT] Fetch Error during /upload:', error);
                alert('A critical error occurred while uploading/processing the video. Check browser console.');
            } finally {
                document.getElementById('loading').classList.add('hidden');
            }
        });
    </script>
</body>
</html>
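
The upload form above posts multipart fields named video, model_choice and movenet_variant to /upload and reads output_path from the JSON reply. A hedged curl sketch of the same request; the host and port are assumptions for a local run, and the sample file name is purely illustrative:

```bash
curl -X POST http://localhost:7860/upload \
  -F "video=@sample.mp4" \
  -F "model_choice=movenet" \
  -F "movenet_variant=lightning"
# Expected reply on success: JSON containing output_path, which index.html assigns to the <video> element.
```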