# Pose-Think: AI-Powered Movement Analysis Suite - Gradio 4.8.0 Compatible
import cv2
import mediapipe as mp
import gradio as gr
import numpy as np
# MediaPipe initialization
mp_pose = mp.solutions.pose
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
def analyze_posture(image, analysis_type="basic", age=None, height=None, weight=None):
"""Main analysis function compatible with Gradio 4.8.0"""
if image is None:
        return None, "❌ No image"
    # Gradio supplies RGB frames (which is what MediaPipe expects), so no
    # colour conversion is needed; keep a separate copy for drawing overlays
    rgb_image = image
    output_image = image.copy()
feedback = []
# Profile info for enhanced mode
if analysis_type == "enhanced" and (age or height or weight):
profile_info = []
if age:
profile_info.append(f"Age: {age}")
if height and weight:
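            # BMI = weight (kg) / height (m)^2; height arrives in cm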
bmi = weight / ((height/100) ** 2)
profile_info.append(f"BMI: {bmi:.1f}")
if bmi > 25:
feedback.append("β οΈ BMI high - extra load on posture")
if profile_info:
feedback.append(f"π€ Profile: {' | '.join(profile_info)}")
feedback.append("")
if analysis_type == "hand":
# Hand analysis
        with mp_hands.Hands(
            static_image_mode=True,  # each button click analyzes a single frame
            max_num_hands=2,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5
        ) as hands:
results = hands.process(rgb_image)
if results.multi_hand_landmarks:
hand_count = len(results.multi_hand_landmarks)
feedback.append(f"β
{hand_count} hands detected")
for idx, hand_landmarks in enumerate(results.multi_hand_landmarks):
mp_drawing.draw_landmarks(output_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
# Simple finger counting
landmarks = hand_landmarks.landmark
fingers_up = 0
tip_ids = [4, 8, 12, 16, 20]
pip_ids = [3, 6, 10, 14, 18]
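                    # Landmark indices follow MediaPipe's hand model: tips are
                    # 4 (thumb), 8, 12, 16, 20. A finger counts as "up" when its
                    # tip sits above the joint below it in image coordinates
                    # (y grows downward); this is crude for the thumb, which
                    # extends sideways rather than upward.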
for i in range(5):
if landmarks[tip_ids[i]].y < landmarks[pip_ids[i]].y:
fingers_up += 1
feedback.append(f"ποΈ Hand {idx+1}: {fingers_up} fingers up")
else:
feedback.append("β No hands detected")
feedback.append("ποΈ Show your hands to the camera")
else:
# Posture analysis
        with mp_pose.Pose(
            static_image_mode=True,  # one-shot analysis per click
            model_complexity=1,
            enable_segmentation=False,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5
        ) as pose:
results = pose.process(rgb_image)
if results.pose_landmarks:
mp_drawing.draw_landmarks(output_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
landmarks = results.pose_landmarks.landmark
visible_parts = []
# Check visible parts
if landmarks[mp_pose.PoseLandmark.NOSE.value].visibility > 0.5:
visible_parts.append("Head")
# Shoulders
left_shoulder = landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value]
right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value]
if left_shoulder.visibility > 0.5 and right_shoulder.visibility > 0.5:
visible_parts.append("Shoulders")
# Shoulder level check
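                    # Landmark coordinates are normalized to [0, 1], so 0.05
                    # corresponds to roughly 5% of the frame height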
shoulder_diff = abs(left_shoulder.y - right_shoulder.y)
if shoulder_diff > 0.05:
                        if left_shoulder.y < right_shoulder.y:
                            feedback.append("⚠️ Left shoulder higher")
                        else:
                            feedback.append("⚠️ Right shoulder higher")
                    else:
                        feedback.append("✅ Shoulders level")
# Elbows and angles
left_elbow = landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value]
right_elbow = landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value]
if left_elbow.visibility > 0.5 and right_elbow.visibility > 0.5:
visible_parts.append("Elbows")
# Calculate elbow angles
try:
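                        # Returns the angle at vertex b, in degrees, between rays
                        # b->a and b->c; the atan2 difference is folded into [0, 180]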
def calculate_angle(a, b, c):
a = np.array(a)
b = np.array(b)
c = np.array(c)
radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
angle = np.abs(radians * 180.0 / np.pi)
if angle > 180.0:
angle = 360 - angle
return angle
# Left elbow angle
left_shoulder_pos = [left_shoulder.x, left_shoulder.y]
left_elbow_pos = [left_elbow.x, left_elbow.y]
left_wrist_pos = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,
landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]
left_angle = calculate_angle(left_shoulder_pos, left_elbow_pos, left_wrist_pos)
feedback.append(f"π Left elbow: {left_angle:.1f}Β°")
# Right elbow angle
right_shoulder_pos = [right_shoulder.x, right_shoulder.y]
right_elbow_pos = [right_elbow.x, right_elbow.y]
right_wrist_pos = [landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x,
landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y]
right_angle = calculate_angle(right_shoulder_pos, right_elbow_pos, right_wrist_pos)
feedback.append(f"π Right elbow: {right_angle:.1f}Β°")
                    except Exception:
                        feedback.append("⚠️ Could not calculate elbow angles")
# Hips
left_hip = landmarks[mp_pose.PoseLandmark.LEFT_HIP.value]
right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value]
if left_hip.visibility > 0.5 and right_hip.visibility > 0.5:
visible_parts.append("Hips")
hip_diff = abs(left_hip.y - right_hip.y)
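                    # Tighter tolerance than the shoulder check (0.03 vs 0.05)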
if hip_diff > 0.03:
                        if left_hip.y < right_hip.y:
                            feedback.append("⚠️ Left hip higher")
                        else:
                            feedback.append("⚠️ Right hip higher")
                    else:
                        feedback.append("✅ Hips level")
# Neck position
nose = landmarks[mp_pose.PoseLandmark.NOSE.value]
if nose.visibility > 0.5:
shoulder_center_x = (left_shoulder.x + right_shoulder.x) / 2
head_offset = abs(nose.x - shoulder_center_x)
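                    # Offset is in normalized units; beyond ~8% of the frame width counts as a tilt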
if head_offset > 0.08:
                    if nose.x < shoulder_center_x:
                        feedback.append("📍 Neck tilted left")
                    else:
                        feedback.append("📍 Neck tilted right")
                else:
                    feedback.append("📍 Neck centered")
# Age-specific recommendations for enhanced mode
if analysis_type == "enhanced" and age:
feedback.append("")
feedback.append("π― Age-Specific Recommendations:")
if age < 25:
feedback.append("π‘ Young age: Form good posture habits now")
elif age < 45:
feedback.append("π‘ Middle age: Regular exercise important")
else:
feedback.append("π‘ Mature age: Focus on bone health")
# List visible parts
if visible_parts:
                    feedback.insert(0, f"✅ Visible: {', '.join(visible_parts)}")
                    feedback.insert(1, "")
else:
feedback.append("β Body not detected")
feedback.append("π Stand so full body is visible to camera")
return output_image, "\n".join(feedback)
# Simple Gradio Interface compatible with 4.8.0
with gr.Blocks(title="🎯 Pose-Think: AI Movement Analysis") as demo:
gr.Markdown("""
    # 🎯 Pose-Think: AI-Powered Movement Analysis Suite
## Real-time posture and movement analysis with multiple modes
**Choose your analysis type and get instant feedback on what the camera sees!**
""")
with gr.Row():
with gr.Column():
# Analysis type selection
analysis_type = gr.Radio(
choices=[
("π― Basic Posture", "basic"),
("π― Enhanced Posture", "enhanced"),
("π€ Hand Tracking", "hand")
],
value="basic",
label="Analysis Type"
)
# Profile info (for enhanced mode)
gr.Markdown("### π€ Optional Profile (for Enhanced mode)")
age_input = gr.Number(label="Age", minimum=10, maximum=100, value=None)
height_input = gr.Number(label="Height (cm)", minimum=100, maximum=250, value=None)
weight_input = gr.Number(label="Weight (kg)", minimum=30, maximum=200, value=None)
# Camera input
            input_image = gr.Image(sources=["webcam"], label="📹 Camera")
# Analysis button
            analyze_btn = gr.Button("🔍 Analyze", variant="primary", size="lg")
with gr.Column():
# Outputs
            output_image = gr.Image(label="🎯 Analysis Result")
feedback_text = gr.Textbox(
label="π Detailed Feedback",
lines=15,
interactive=False
)
# Analysis function
analyze_btn.click(
fn=analyze_posture,
inputs=[input_image, analysis_type, age_input, height_input, weight_input],
outputs=[output_image, feedback_text]
)
# Usage instructions
gr.Markdown("""
    ## 📖 How to Use
    ### 🎯 **Analysis Types:**
- **Basic Posture**: Body parts, joint angles, alignment
- **Enhanced Posture**: Basic + age/BMI insights + personalized recommendations
- **Hand Tracking**: Hand detection and finger counting
    ### 📋 **Instructions:**
1. **Choose analysis type** from the radio buttons
2. **Allow camera access** when prompted by your browser
3. **Position yourself** 2-3 meters from camera (full body visible for posture)
4. **For Enhanced mode**: Optionally enter age/height/weight for personalized insights
5. **Click Analyze** to get instant detailed feedback
    ### 🎯 **What you'll see:**
    - ✅ **Green checkmarks**: Good alignment/posture
    - ⚠️ **Warning signs**: Issues detected that need attention
    - 📐 **Measurements**: Joint angles in degrees
    - 📍 **Position info**: Head, neck, shoulder positions
    - 👤 **Profile insights**: Age-specific recommendations (Enhanced mode)
    ### 💡 **Tips for best results:**
- **Good lighting**: Ensure even, bright lighting
- **Plain background**: Use contrasting, simple background
- **Stable position**: Minimize movement during analysis
- **Full visibility**: Keep target body parts clearly visible
""")
if __name__ == "__main__":
demo.launch()