# Pose-Think: AI-Powered Movement Analysis Suite - Gradio 4.8.0 Compatible
import cv2
import mediapipe as mp
import gradio as gr
import numpy as np
# MediaPipe initialization
mp_pose = mp.solutions.pose
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
def analyze_posture(image, analysis_type="basic", age=None, height=None, weight=None):
"""Main analysis function compatible with Gradio 4.8.0"""
if image is None:
        return None, "❌ No image / Görüntü yok"
    # Gradio supplies RGB frames and MediaPipe expects RGB, so no color
    # conversion is needed; annotate a copy so the input stays untouched
    rgb_image = image
    output_image = image.copy()
feedback = []
# Profile info for enhanced mode
if analysis_type == "enhanced" and (age or height or weight):
profile_info = []
if age:
profile_info.append(f"Age: {age}")
if height and weight:
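            # BMI = weight (kg) / height (m)^2; height is entered in cm, hence the /100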
bmi = weight / ((height/100) ** 2)
profile_info.append(f"BMI: {bmi:.1f}")
            if bmi > 25:
                feedback.append("⚠️ BMI above 25 - extra load on posture")
if profile_info:
feedback.append(f"πŸ‘€ Profile: {' | '.join(profile_info)}")
feedback.append("")
if analysis_type == "hand":
# Hand analysis
        # Static mode: each Gradio capture is an independent frame, so run
        # full detection instead of video tracking
        with mp_hands.Hands(
            static_image_mode=True,
            max_num_hands=2,
            min_detection_confidence=0.5
        ) as hands:
results = hands.process(rgb_image)
if results.multi_hand_landmarks:
hand_count = len(results.multi_hand_landmarks)
feedback.append(f"βœ… {hand_count} hands detected")
for idx, hand_landmarks in enumerate(results.multi_hand_landmarks):
mp_drawing.draw_landmarks(output_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
                    # Simple finger counting: a finger counts as "up" when its
                    # tip sits above its PIP joint (smaller y in image space);
                    # this heuristic is crude for the thumb, which flexes sideways
                    landmarks = hand_landmarks.landmark
                    fingers_up = 0
                    tip_ids = [4, 8, 12, 16, 20]  # thumb, index, middle, ring, pinky tips
                    pip_ids = [3, 6, 10, 14, 18]  # joints just below each tip
                    for i in range(5):
                        if landmarks[tip_ids[i]].y < landmarks[pip_ids[i]].y:
                            fingers_up += 1
                    feedback.append(f"🖐️ Hand {idx+1}: {fingers_up} fingers up")
else:
feedback.append("❌ No hands detected")
feedback.append("πŸ–οΈ Show your hands to the camera")
else:
# Posture analysis
        # Static mode: analyze each captured frame independently
        with mp_pose.Pose(
            static_image_mode=True,
            model_complexity=1,
            enable_segmentation=False,
            min_detection_confidence=0.5
        ) as pose:
results = pose.process(rgb_image)
if results.pose_landmarks:
mp_drawing.draw_landmarks(output_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
landmarks = results.pose_landmarks.landmark
visible_parts = []
# Check visible parts
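                # MediaPipe reports per-landmark visibility in [0, 1]; 0.5 is the in-frame cutoff used here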
if landmarks[mp_pose.PoseLandmark.NOSE.value].visibility > 0.5:
visible_parts.append("Head")
# Shoulders
left_shoulder = landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value]
right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value]
if left_shoulder.visibility > 0.5 and right_shoulder.visibility > 0.5:
visible_parts.append("Shoulders")
# Shoulder level check
shoulder_diff = abs(left_shoulder.y - right_shoulder.y)
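                    # landmark coords are normalized to [0, 1], so 0.05 ≈ 5% of frame height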
if shoulder_diff > 0.05:
if left_shoulder.y < right_shoulder.y:
feedback.append("⚠️ Left shoulder higher")
else:
feedback.append("⚠️ Right shoulder higher")
else:
feedback.append("βœ… Shoulders level")
# Elbows and angles
left_elbow = landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value]
right_elbow = landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value]
if left_elbow.visibility > 0.5 and right_elbow.visibility > 0.5:
visible_parts.append("Elbows")
# Calculate elbow angles
try:
def calculate_angle(a, b, c):
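                            # Angle at vertex b (degrees) between rays b->a and b->c,
                            # computed from 2D normalized landmark coordinates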
a = np.array(a)
b = np.array(b)
c = np.array(c)
radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
angle = np.abs(radians * 180.0 / np.pi)
if angle > 180.0:
angle = 360 - angle
return angle
# Left elbow angle
left_shoulder_pos = [left_shoulder.x, left_shoulder.y]
left_elbow_pos = [left_elbow.x, left_elbow.y]
left_wrist_pos = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,
landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]
left_angle = calculate_angle(left_shoulder_pos, left_elbow_pos, left_wrist_pos)
feedback.append(f"πŸ“ Left elbow: {left_angle:.1f}Β°")
# Right elbow angle
right_shoulder_pos = [right_shoulder.x, right_shoulder.y]
right_elbow_pos = [right_elbow.x, right_elbow.y]
right_wrist_pos = [landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x,
landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y]
right_angle = calculate_angle(right_shoulder_pos, right_elbow_pos, right_wrist_pos)
feedback.append(f"πŸ“ Right elbow: {right_angle:.1f}Β°")
                    except Exception:
                        feedback.append("⚠️ Cannot calculate elbow angles")
# Hips
left_hip = landmarks[mp_pose.PoseLandmark.LEFT_HIP.value]
right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value]
if left_hip.visibility > 0.5 and right_hip.visibility > 0.5:
visible_parts.append("Hips")
hip_diff = abs(left_hip.y - right_hip.y)
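                    # hips use a tighter threshold (3% of frame height) than the shoulders (5%)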
if hip_diff > 0.03:
if left_hip.y < right_hip.y:
feedback.append("⚠️ Left hip higher")
else:
feedback.append("⚠️ Right hip higher")
else:
feedback.append("βœ… Hips level")
# Neck position
nose = landmarks[mp_pose.PoseLandmark.NOSE.value]
if nose.visibility > 0.5:
shoulder_center_x = (left_shoulder.x + right_shoulder.x) / 2
head_offset = abs(nose.x - shoulder_center_x)
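                    # horizontal offset of the nose from the shoulder midline, in normalized units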
                    if head_offset > 0.08:
                        if nose.x < shoulder_center_x:
                            feedback.append("🔍 Neck tilted left")
                        else:
                            feedback.append("🔍 Neck tilted right")
                    else:
                        feedback.append("🔍 Neck centered")
# Age-specific recommendations for enhanced mode
if analysis_type == "enhanced" and age:
feedback.append("")
feedback.append("🎯 Age-Specific Recommendations:")
                    if age < 25:
                        feedback.append("💡 Young age: Form good posture habits now")
                    elif age < 45:
                        feedback.append("💡 Middle age: Regular exercise important")
                    else:
                        feedback.append("💡 Mature age: Focus on bone health")
# List visible parts
if visible_parts:
                    feedback.insert(0, f"✅ Visible: {', '.join(visible_parts)}")
feedback.insert(1, "")
else:
feedback.append("❌ Body not detected")
feedback.append("πŸ“ Stand so full body is visible to camera")
return output_image, "\n".join(feedback)
# Simple Gradio Interface compatible with 4.8.0
with gr.Blocks(title="🎯 Pose-Think: AI Movement Analysis") as demo:
gr.Markdown("""
# 🎯 Pose-Think: AI-Powered Movement Analysis Suite
## Real-time posture and movement analysis with multiple modes
**Choose your analysis type and get instant feedback on what the camera sees!**
""")
with gr.Row():
with gr.Column():
# Analysis type selection
analysis_type = gr.Radio(
choices=[
("🎯 Basic Posture", "basic"),
("🎯 Enhanced Posture", "enhanced"),
("🀚 Hand Tracking", "hand")
],
value="basic",
label="Analysis Type"
)
# Profile info (for enhanced mode)
gr.Markdown("### πŸ‘€ Optional Profile (for Enhanced mode)")
age_input = gr.Number(label="Age", minimum=10, maximum=100, value=None)
height_input = gr.Number(label="Height (cm)", minimum=100, maximum=250, value=None)
weight_input = gr.Number(label="Weight (kg)", minimum=30, maximum=200, value=None)
# Camera input
            input_image = gr.Image(sources=["webcam"], label="📹 Camera")
# Analysis button
            analyze_btn = gr.Button("🔍 Analyze", variant="primary", size="lg")
with gr.Column():
# Outputs
output_image = gr.Image(label="🎯 Analysis Result")
feedback_text = gr.Textbox(
label="πŸ“Š Detailed Feedback",
lines=15,
interactive=False
)
# Analysis function
analyze_btn.click(
fn=analyze_posture,
inputs=[input_image, analysis_type, age_input, height_input, weight_input],
outputs=[output_image, feedback_text]
)
# Usage instructions
gr.Markdown("""
## πŸ“‹ How to Use
### 🎯 **Analysis Types:**
- **Basic Posture**: Body parts, joint angles, alignment
- **Enhanced Posture**: Basic + age/BMI insights + personalized recommendations
- **Hand Tracking**: Hand detection and finger counting
### πŸ“ **Instructions:**
1. **Choose analysis type** from the radio buttons
2. **Allow camera access** when prompted by your browser
3. **Position yourself** 2-3 meters from camera (full body visible for posture)
4. **For Enhanced mode**: Optionally enter age/height/weight for personalized insights
5. **Click Analyze** to get instant detailed feedback
### 🎯 **What you'll see:**
- βœ… **Green checkmarks**: Good alignment/posture
- ⚠️ **Warning signs**: Issues detected that need attention
- πŸ“ **Measurements**: Joint angles in degrees
- πŸ” **Position info**: Head, neck, shoulder positions
- πŸ‘€ **Profile insights**: Age-specific recommendations (Enhanced mode)
### πŸ’‘ **Tips for best results:**
- **Good lighting**: Ensure even, bright lighting
- **Plain background**: Use contrasting, simple background
- **Stable position**: Minimize movement during analysis
- **Full visibility**: Keep target body parts clearly visible
""")
if __name__ == "__main__":
demo.launch()