# pose-think / app.py
# Uploaded to Hugging Face Spaces (commit 894cca2, "Upload 3 files").
# Pose-Think: AI-Powered Movement Analysis Suite
# Hugging Face Spaces Compatible Version
import cv2
import mediapipe as mp
import gradio as gr
import numpy as np
# Initialize MediaPipe solution modules: pose estimation, hand tracking,
# and the drawing helpers used to overlay landmarks on output frames.
mp_pose = mp.solutions.pose
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
class PostureAnalyzer:
    """Unified posture analyzer built on MediaPipe Pose and Hands.

    Supports three analysis types:
      * "basic"    - body landmarks, shoulder/hip level, neck tilt, elbow angles
      * "enhanced" - "basic" plus optional age/BMI profile insights
      * "hand"     - hand detection with simple finger counting
    """

    def __init__(self):
        # Video-stream mode (static_image_mode=False) lets MediaPipe track
        # landmarks between frames instead of re-running detection every frame.
        self.pose = mp_pose.Pose(
            static_image_mode=False,
            model_complexity=1,
            enable_segmentation=False,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5
        )
        self.hands = mp_hands.Hands(
            static_image_mode=False,
            max_num_hands=2,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5
        )

    def calculate_angle(self, a, b, c):
        """Return the angle in degrees (0-180) at vertex ``b`` of triangle a-b-c.

        Each point is an (x, y) sequence. Returns 0 on malformed input;
        callers treat 0 as "angle unavailable".
        """
        try:
            a = np.array(a)
            b = np.array(b)
            c = np.array(c)
            # Signed angle between rays b->c and b->a, folded into [0, 180].
            radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
            angle = np.abs(radians * 180.0 / np.pi)
            if angle > 180.0:
                angle = 360 - angle
            return angle
        except Exception:  # narrowed from a bare except; keep the 0 fallback
            return 0

    def analyze_posture(self, image, analysis_type="basic", age=None, height=None, weight=None):
        """Analyze one BGR frame; return (annotated_image, feedback_text).

        Parameters:
            image: BGR frame (OpenCV/Gradio convention) or None.
            analysis_type: "basic", "enhanced", or "hand".
            age, height, weight: optional profile values used only in
                "enhanced" mode (height in cm, weight in kg).
        """
        if image is None:
            return None, "❌ Görüntü yok / No image"
        # MediaPipe expects RGB input; OpenCV delivers BGR.
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if analysis_type == "hand":
            return self._analyze_hands(rgb_image)
        return self._analyze_body_posture(rgb_image, analysis_type, age, height, weight)

    def _analyze_hands(self, rgb_image):
        """Detect hands, count raised fingers; return (annotated_bgr, feedback)."""
        results = self.hands.process(rgb_image)
        output_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        feedback = []
        if results.multi_hand_landmarks:
            hand_count = len(results.multi_hand_landmarks)
            feedback.append(f"✅ {hand_count} el tespit edildi / {hand_count} hand(s) detected")
            for idx, hand_landmarks in enumerate(results.multi_hand_landmarks):
                mp_drawing.draw_landmarks(
                    output_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
                landmarks = hand_landmarks.landmark
                # Simple finger counting: a finger counts as "up" when its tip
                # is above its lower joint (smaller normalized y).
                # NOTE(review): this y-comparison is unreliable for the thumb,
                # which extends sideways; an x-axis comparison would be more
                # accurate — kept as-is for behavior parity.
                fingers_up = 0
                tip_ids = [4, 8, 12, 16, 20]   # thumb/index/middle/ring/pinky tips
                pip_ids = [3, 6, 10, 14, 18]   # corresponding lower joints
                for i in range(5):
                    if landmarks[tip_ids[i]].y < landmarks[pip_ids[i]].y:
                        fingers_up += 1
                feedback.append(f"🖐️ El {idx+1}: {fingers_up} parmak yukarıda / Hand {idx+1}: {fingers_up} fingers up")
        else:
            feedback.append("❌ El tespit edilemedi / No hands detected")
            feedback.append("🖐️ Ellerinizi kameraya gösterin / Show your hands to the camera")
        return output_image, "\n".join(feedback)

    def _analyze_body_posture(self, rgb_image, analysis_type, age, height, weight):
        """Analyze body posture; return (annotated_bgr, feedback_text).

        Checks shoulder/hip level, neck centering, and elbow angles; in
        "enhanced" mode adds BMI and age-specific notes.
        """
        results = self.pose.process(rgb_image)
        output_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        feedback = []

        # Optional profile header (enhanced mode only).
        if analysis_type == "enhanced" and (age or height or weight):
            profile_info = []
            if age:
                profile_info.append(f"Yaş/Age: {age}")
            if height and weight:
                # BMI = kg / m^2; height arrives in centimeters.
                bmi = weight / ((height / 100) ** 2)
                profile_info.append(f"BMI: {bmi:.1f}")
                if bmi > 25:
                    feedback.append("⚠️ BMI yüksek - postür üzerinde ekstra yük / High BMI - extra load on posture")
            if profile_info:
                feedback.append(f"👤 Profil / Profile: {' | '.join(profile_info)}")
                feedback.append("")

        if results.pose_landmarks:
            mp_drawing.draw_landmarks(
                output_image,
                results.pose_landmarks,
                mp_pose.POSE_CONNECTIONS
            )
            landmarks = results.pose_landmarks.landmark
            visible_parts = []

            # Head visibility.
            if landmarks[mp_pose.PoseLandmark.NOSE.value].visibility > 0.5:
                visible_parts.append("Baş/Head")

            # Shoulders: level check (normalized y grows downward, so
            # smaller y means visually higher).
            left_shoulder = landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value]
            right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value]
            if left_shoulder.visibility > 0.5 and right_shoulder.visibility > 0.5:
                visible_parts.append("Omuzlar/Shoulders")
                shoulder_diff = abs(left_shoulder.y - right_shoulder.y)
                if shoulder_diff > 0.05:
                    if left_shoulder.y < right_shoulder.y:
                        feedback.append("⚠️ Sol omuz yüksek / Left shoulder high")
                    else:
                        feedback.append("⚠️ Sağ omuz yüksek / Right shoulder high")
                else:
                    feedback.append("✅ Omuzlar seviyeli / Shoulders level")

            # Elbows: shoulder-elbow-wrist angles.
            left_elbow = landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value]
            right_elbow = landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value]
            if left_elbow.visibility > 0.5 and right_elbow.visibility > 0.5:
                visible_parts.append("Dirsekler/Elbows")
                try:
                    left_shoulder_pos = [left_shoulder.x, left_shoulder.y]
                    left_elbow_pos = [left_elbow.x, left_elbow.y]
                    left_wrist_pos = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,
                                      landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]
                    left_elbow_angle = self.calculate_angle(left_shoulder_pos, left_elbow_pos, left_wrist_pos)
                    if left_elbow_angle > 0:
                        feedback.append(f"📐 Sol dirsek açısı / Left elbow: {left_elbow_angle:.1f}°")

                    right_shoulder_pos = [right_shoulder.x, right_shoulder.y]
                    right_elbow_pos = [right_elbow.x, right_elbow.y]
                    right_wrist_pos = [landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x,
                                       landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y]
                    right_elbow_angle = self.calculate_angle(right_shoulder_pos, right_elbow_pos, right_wrist_pos)
                    if right_elbow_angle > 0:
                        feedback.append(f"📐 Sağ dirsek açısı / Right elbow: {right_elbow_angle:.1f}°")
                except Exception:  # narrowed from a bare except
                    feedback.append("⚠️ Dirsek açısı hesaplanamadı / Cannot calculate elbow angles")

            # Hips: level check, tighter threshold than shoulders.
            left_hip = landmarks[mp_pose.PoseLandmark.LEFT_HIP.value]
            right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value]
            if left_hip.visibility > 0.5 and right_hip.visibility > 0.5:
                visible_parts.append("Kalçalar/Hips")
                hip_diff = abs(left_hip.y - right_hip.y)
                if hip_diff > 0.03:
                    if left_hip.y < right_hip.y:
                        feedback.append("⚠️ Sol kalça yüksek / Left hip high")
                    else:
                        feedback.append("⚠️ Sağ kalça yüksek / Right hip high")
                else:
                    feedback.append("✅ Kalçalar seviyeli / Hips level")

            # Neck: nose x-offset from the shoulder midpoint.
            nose = landmarks[mp_pose.PoseLandmark.NOSE.value]
            if nose.visibility > 0.5 and left_shoulder.visibility > 0.5 and right_shoulder.visibility > 0.5:
                shoulder_center_x = (left_shoulder.x + right_shoulder.x) / 2
                head_offset = abs(nose.x - shoulder_center_x)
                if head_offset > 0.08:
                    if nose.x < shoulder_center_x:
                        feedback.append("🔍 Boyun sola eğik / Neck tilted left")
                    else:
                        feedback.append("🔍 Boyun sağa eğik / Neck tilted right")
                else:
                    feedback.append("🔍 Boyun merkezi / Neck centered")

            # Age-specific recommendations (enhanced mode only).
            if analysis_type == "enhanced" and age:
                feedback.append("")
                feedback.append("🎯 Yaşınıza Özel Öneriler / Age-Specific Recommendations:")
                if age < 25:
                    feedback.append("💡 Genç yaş: Postür alışkanlıkları şimdi oluşturun / Young age: Form posture habits now")
                elif age < 45:
                    feedback.append("💡 Orta yaş: Düzenli egzersiz önemli / Middle age: Regular exercise important")
                else:
                    feedback.append("💡 Olgun yaş: Kemik sağlığına dikkat / Mature age: Focus on bone health")

            # Prepend the visible-parts summary.
            if visible_parts:
                feedback.insert(0, f"✅ Görünen / Visible: {', '.join(visible_parts)}")
                feedback.insert(1, "")
        else:
            feedback.append("❌ Vücut tespit edilemedi / Body not detected")
            feedback.append("📍 Kameraya tam vücut görünecek şekilde durun / Stand so full body is visible")

        return output_image, "\n".join(feedback)
# Module-level singleton analyzer shared by every Gradio request/stream frame.
analyzer = PostureAnalyzer()
def analyze_movement(image, analysis_type, age, height, weight):
    """Forward one frame to the shared PostureAnalyzer and return its result."""
    return analyzer.analyze_posture(
        image,
        analysis_type=analysis_type,
        age=age,
        height=height,
        weight=weight,
    )
# Simple Gradio interface builder.
def create_interface():
    """Build and return the Gradio Blocks UI for the movement analyzer."""

    def process_with_settings(image, analysis_type, age, height, weight):
        """Coerce optional profile fields to int (or None), then analyze."""
        # Gradio Number components yield None when empty; normalize to int/None.
        age = int(age) if age and str(age).strip() else None
        height = int(height) if height and str(height).strip() else None
        weight = int(weight) if weight and str(weight).strip() else None
        return analyze_movement(image, analysis_type, age, height, weight)

    with gr.Blocks(title="Pose-Think: AI Movement Analysis") as demo:
        gr.Markdown("""
# 🎯 Pose-Think: AI-Powered Movement Analysis Suite
## Real-time posture and movement analysis with multiple modes
**Choose your analysis type and get instant feedback on what the camera sees!**
""")
        with gr.Row():
            with gr.Column():
                # Analysis type selector.
                analysis_type = gr.Radio(
                    choices=[
                        ("🎯 Basic Posture", "basic"),
                        ("🎯 Enhanced Posture", "enhanced"),
                        ("🤚 Hand Tracking", "hand")
                    ],
                    value="basic",
                    label="Analysis Type"
                )
                # Optional profile inputs (used by "enhanced" mode).
                gr.Markdown("### 👤 Optional Profile (for Enhanced mode)")
                age_input = gr.Number(label="Age", minimum=10, maximum=100, value=None)
                height_input = gr.Number(label="Height (cm)", minimum=100, maximum=250, value=None)
                weight_input = gr.Number(label="Weight (kg)", minimum=30, maximum=200, value=None)
                # Webcam input, streamed for real-time analysis.
                input_image = gr.Image(sources=["webcam"], streaming=True, label="📹 Camera")
                realtime_mode = gr.Checkbox(label="🔄 Real-time Analysis / Gerçek Zamanlı Analiz", value=True)
                # Manual trigger; hidden while real-time streaming is on.
                analyze_btn = gr.Button("🔍 Analyze", variant="primary", visible=False)
            with gr.Column():
                output_image = gr.Image(label="🎯 Analysis Result")
                feedback_text = gr.Textbox(
                    label="📊 Detailed Feedback",
                    lines=15,
                    interactive=False
                )

        def toggle_realtime(realtime_enabled):
            # Show the manual button only when real-time analysis is off.
            return gr.update(visible=not realtime_enabled)

        realtime_mode.change(
            fn=toggle_realtime,
            inputs=[realtime_mode],
            outputs=[analyze_btn]
        )

        # Real-time analysis: re-run on every streamed frame.
        input_image.stream(
            fn=process_with_settings,
            inputs=[input_image, analysis_type, age_input, height_input, weight_input],
            outputs=[output_image, feedback_text],
            stream_every=0.5  # analyze every 0.5 seconds
        )

        # Manual single-shot analysis.
        analyze_btn.click(
            fn=process_with_settings,
            inputs=[input_image, analysis_type, age_input, height_input, weight_input],
            outputs=[output_image, feedback_text]
        )

        # Usage instructions. (Fixed mojibake: garbled "�" replaced with the
        # intended 🔄 / 📝 emoji in the section headings.)
        gr.Markdown("""
## 📋 How to Use
### 🎯 **Analysis Types:**
- **Basic Posture**: Body parts, joint angles, alignment
- **Enhanced Posture**: Basic + age/BMI insights
- **Hand Tracking**: Hand detection and finger counting
### 🔄 **Analysis Modes:**
- **Real-time**: Continuous analysis (default) - automatic feedback every 0.5 seconds
- **Manual**: Click "Analyze" button for single analysis
### 📝 **Instructions:**
1. Choose analysis type
2. Allow camera access when prompted
3. Position yourself 2-3 meters from camera
4. **Real-time mode**: Get continuous feedback automatically
5. **Manual mode**: Uncheck real-time and click Analyze button
6. For Enhanced: Enter age/height/weight for personalized insights
### 🎯 **Feedback Symbols:**
- ✅ Good alignment | ⚠️ Issues detected | 📐 Joint angles | 🔍 Position info
""")
    return demo
# Build the app at import time so Hugging Face Spaces can find `demo`.
demo = create_interface()

if __name__ == "__main__":
    demo.launch()