# Pose-Think: AI-Powered Movement Analysis Suite
# Hugging Face Spaces Compatible Version
import cv2
import mediapipe as mp
import gradio as gr
import numpy as np
# MediaPipe başlatma / Initialize MediaPipe
mp_pose = mp.solutions.pose
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
class PostureAnalyzer:
    """Birleşik postür analiz sınıfı / Unified posture analyzer class"""

    def __init__(self):
        self.pose = mp_pose.Pose(
            static_image_mode=False,
            model_complexity=1,
            enable_segmentation=False,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5
        )
        self.hands = mp_hands.Hands(
            static_image_mode=False,
            max_num_hands=2,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5
        )
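        # Notes on the settings above (values are kept from the original file):
        # - model_complexity=1 selects MediaPipe's mid-size pose model; 0 is lighter
        #   and faster, 2 is heavier and somewhat more accurate, which matters on a
        #   CPU-only Space.
        # - The 0.5 detection/tracking confidences are MediaPipe's defaults; raising
        #   them reduces jitter at the cost of more dropped detections.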
    def calculate_angle(self, a, b, c):
        """Üç nokta arasındaki açıyı hesapla / Calculate angle between three points"""
        try:
            a = np.array(a)
            b = np.array(b)
            c = np.array(c)
            # Angle at vertex b, from the difference of the directions of rays b->c and b->a
            radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
            angle = np.abs(radians * 180.0 / np.pi)
            if angle > 180.0:
                angle = 360 - angle
            return angle
        except Exception:
            return 0
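    # Worked example for calculate_angle (illustrative values, normalized coordinates):
    # with a = (0.0, 1.0), b = (0.0, 0.0), c = (1.0, 0.0) the two arctan2 terms are
    # 0 and pi/2, so the angle at vertex b comes out as |-90| = 90 degrees, i.e. a
    # right angle between the rays b->a and b->c.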
    def analyze_posture(self, image, analysis_type="basic", age=None, height=None, weight=None):
        """Ana analiz fonksiyonu / Main analysis function"""
        if image is None:
            return None, "❌ Görüntü yok / No image"

        # BGR'den RGB'ye çevir / Convert BGR to RGB
        # (Gradio image inputs are typically already RGB; the swap is kept as in the
        # original and is undone again before the annotated frame is returned.)
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Analiz türüne göre işlem / Process based on analysis type
        if analysis_type == "hand":
            return self._analyze_hands(rgb_image)
        else:
            return self._analyze_body_posture(rgb_image, analysis_type, age, height, weight)
    def _analyze_hands(self, rgb_image):
        """El analizi / Hand analysis"""
        results = self.hands.process(rgb_image)
        output_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        feedback = []

        if results.multi_hand_landmarks:
            hand_count = len(results.multi_hand_landmarks)
            feedback.append(f"✅ {hand_count} el tespit edildi / {hand_count} hand(s) detected")

            for idx, hand_landmarks in enumerate(results.multi_hand_landmarks):
                # El landmark'larını çiz / Draw hand landmarks
                mp_drawing.draw_landmarks(
                    output_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)

                # Parmak durumları / Finger states
                landmarks = hand_landmarks.landmark

                # Başparmak / Thumb
                thumb_tip = landmarks[mp_hands.HandLandmark.THUMB_TIP]
                thumb_ip = landmarks[mp_hands.HandLandmark.THUMB_IP]

                # Diğer parmaklar / Other fingers
                fingers_up = 0
                finger_names = ["Başparmak/Thumb", "İşaret/Index", "Orta/Middle", "Yüzük/Ring", "Serçe/Pinky"]

                # Basit parmak sayma / Simple finger counting:
                # a finger counts as "up" when its tip landmark lies above (smaller y than)
                # the joint below it; this is a rough heuristic, especially for the thumb.
                tip_ids = [4, 8, 12, 16, 20]
                pip_ids = [3, 6, 10, 14, 18]
                for i in range(5):
                    if landmarks[tip_ids[i]].y < landmarks[pip_ids[i]].y:
                        fingers_up += 1

                feedback.append(f"🖐️ El {idx+1}: {fingers_up} parmak yukarıda / Hand {idx+1}: {fingers_up} fingers up")
        else:
            feedback.append("❌ El tespit edilemedi / No hands detected")
            feedback.append("🖐️ Ellerinizi kameraya gösterin / Show your hands to the camera")

        return output_image, "\n".join(feedback)
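    # Possible extension (sketch only, not wired into the UI): MediaPipe Hands also
    # reports handedness, so each detected hand could be labelled inside the loop above:
    #
    #   if results.multi_handedness:
    #       label = results.multi_handedness[idx].classification[0].label  # "Left"/"Right"
    #       feedback.append(f"Hand {idx + 1}: {label}")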
    def _analyze_body_posture(self, rgb_image, analysis_type, age, height, weight):
        """Vücut postür analizi / Body posture analysis"""
        results = self.pose.process(rgb_image)
        output_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        feedback = []

        # Profil bilgisi varsa ekle / Add profile info if available
        if analysis_type == "enhanced" and (age or height or weight):
            profile_info = []
            if age:
                profile_info.append(f"Yaş/Age: {age}")
            if height and weight:
                # BMI = weight [kg] / height [m]^2, e.g. 70 kg at 175 cm -> 70 / 1.75**2 ≈ 22.9
                bmi = weight / ((height / 100) ** 2)
                profile_info.append(f"BMI: {bmi:.1f}")
                if bmi > 25:
                    feedback.append("⚠️ BMI yüksek - postür üzerinde ekstra yük / High BMI - extra load on posture")
            if profile_info:
                feedback.append(f"👤 Profil / Profile: {' | '.join(profile_info)}")
                feedback.append("")

        if results.pose_landmarks:
            # Landmark'ları çiz / Draw landmarks
            mp_drawing.draw_landmarks(
                output_image,
                results.pose_landmarks,
                mp_pose.POSE_CONNECTIONS
            )
            landmarks = results.pose_landmarks.landmark

            # Görünür parçaları kontrol et / Check visible parts
            visible_parts = []

            # Baş / Head
            if landmarks[mp_pose.PoseLandmark.NOSE.value].visibility > 0.5:
                visible_parts.append("Baş/Head")

            # Omuzlar / Shoulders
            left_shoulder = landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value]
            right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value]
            if left_shoulder.visibility > 0.5 and right_shoulder.visibility > 0.5:
                visible_parts.append("Omuzlar/Shoulders")

                # Omuz seviyesi / Shoulder level (offsets are in normalized image coordinates)
                shoulder_diff = abs(left_shoulder.y - right_shoulder.y)
                if shoulder_diff > 0.05:
                    if left_shoulder.y < right_shoulder.y:
                        feedback.append("⚠️ Sol omuz yüksek / Left shoulder high")
                    else:
                        feedback.append("⚠️ Sağ omuz yüksek / Right shoulder high")
                else:
                    feedback.append("✅ Omuzlar seviyeli / Shoulders level")

            # Dirsekler ve açıları / Elbows and angles
            left_elbow = landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value]
            right_elbow = landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value]
            if left_elbow.visibility > 0.5 and right_elbow.visibility > 0.5:
                visible_parts.append("Dirsekler/Elbows")

                # Dirsek açıları hesapla / Calculate elbow angles
                try:
                    # Sol dirsek / Left elbow
                    left_shoulder_pos = [left_shoulder.x, left_shoulder.y]
                    left_elbow_pos = [left_elbow.x, left_elbow.y]
                    left_wrist_pos = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,
                                      landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]
                    left_elbow_angle = self.calculate_angle(left_shoulder_pos, left_elbow_pos, left_wrist_pos)
                    if left_elbow_angle > 0:
                        feedback.append(f"📐 Sol dirsek açısı / Left elbow: {left_elbow_angle:.1f}°")

                    # Sağ dirsek / Right elbow
                    right_shoulder_pos = [right_shoulder.x, right_shoulder.y]
                    right_elbow_pos = [right_elbow.x, right_elbow.y]
                    right_wrist_pos = [landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x,
                                       landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y]
                    right_elbow_angle = self.calculate_angle(right_shoulder_pos, right_elbow_pos, right_wrist_pos)
                    if right_elbow_angle > 0:
                        feedback.append(f"📐 Sağ dirsek açısı / Right elbow: {right_elbow_angle:.1f}°")
                except Exception:
                    feedback.append("⚠️ Dirsek açısı hesaplanamadı / Cannot calculate elbow angles")

            # Kalçalar / Hips
            left_hip = landmarks[mp_pose.PoseLandmark.LEFT_HIP.value]
            right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value]
            if left_hip.visibility > 0.5 and right_hip.visibility > 0.5:
                visible_parts.append("Kalçalar/Hips")

                # Kalça seviyesi / Hip level
                hip_diff = abs(left_hip.y - right_hip.y)
                if hip_diff > 0.03:
                    if left_hip.y < right_hip.y:
                        feedback.append("⚠️ Sol kalça yüksek / Left hip high")
                    else:
                        feedback.append("⚠️ Sağ kalça yüksek / Right hip high")
                else:
                    feedback.append("✅ Kalçalar seviyeli / Hips level")

            # Boyun pozisyonu / Neck position
            nose = landmarks[mp_pose.PoseLandmark.NOSE.value]
            if nose.visibility > 0.5 and left_shoulder.visibility > 0.5 and right_shoulder.visibility > 0.5:
                shoulder_center_x = (left_shoulder.x + right_shoulder.x) / 2
                head_offset = abs(nose.x - shoulder_center_x)
                if head_offset > 0.08:
                    if nose.x < shoulder_center_x:
                        feedback.append("🔍 Boyun sola eğik / Neck tilted left")
                    else:
                        feedback.append("🔍 Boyun sağa eğik / Neck tilted right")
                else:
                    feedback.append("🔍 Boyun merkezi / Neck centered")

            # Yaşa özel öneriler (enhanced modda) / Age-specific recommendations (in enhanced mode)
            if analysis_type == "enhanced" and age:
                feedback.append("")
                feedback.append("🎯 Yaşınıza Özel Öneriler / Age-Specific Recommendations:")
                if age < 25:
                    feedback.append("💡 Genç yaş: Postür alışkanlıkları şimdi oluşturun / Young age: Form posture habits now")
                elif age < 45:
                    feedback.append("💡 Orta yaş: Düzenli egzersiz önemli / Middle age: Regular exercise important")
                else:
                    feedback.append("💡 Olgun yaş: Kemik sağlığına dikkat / Mature age: Focus on bone health")

            # Görünür parçaları listele / List visible parts
            if visible_parts:
                feedback.insert(0, f"✅ Görünen / Visible: {', '.join(visible_parts)}")
                feedback.insert(1, "")
        else:
            feedback.append("❌ Vücut tespit edilemedi / Body not detected")
            feedback.append("📍 Kameraya tam vücut görünecek şekilde durun / Stand so full body is visible")

        return output_image, "\n".join(feedback)
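    # Note on thresholds: pose landmark x/y values are normalized to [0, 1] of the
    # frame, so the 0.05 / 0.03 / 0.08 offsets used above are fractions of the image
    # size, not pixels. A pixel-space equivalent would look roughly like this
    # (illustrative, h = frame height in pixels):
    #
    #   shoulder_diff_px = abs(left_shoulder.y - right_shoulder.y) * h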
# Global analyzer
analyzer = PostureAnalyzer()
def analyze_movement(image, analysis_type, age, height, weight):
    """Hareket analizi ana fonksiyonu / Main movement analysis function"""
    return analyzer.analyze_posture(image, analysis_type, age, height, weight)
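# Minimal offline usage sketch (not executed here; the image path is hypothetical):
#
#   frame = cv2.imread("sample_frame.jpg")  # image loaded from disk as a BGR numpy array
#   annotated, report = analyze_movement(frame, "basic", None, None, None)
#   cv2.imwrite("annotated.jpg", annotated)
#   print(report)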
# Basit Gradio arayüzü / Simple Gradio interface
def create_interface():
    def process_with_settings(image, analysis_type, age, height, weight):
        """Ayarlarla birlikte işle / Process with settings"""
        # Boş değerleri None'a çevir / Convert empty values to None
        age = int(age) if age and str(age).strip() else None
        height = int(height) if height and str(height).strip() else None
        weight = int(weight) if weight and str(weight).strip() else None

        return analyze_movement(image, analysis_type, age, height, weight)
    # Ana interface / Main interface
    with gr.Blocks(title="Pose-Think: AI Movement Analysis") as demo:
        gr.Markdown("""
# 🎯 Pose-Think: AI-Powered Movement Analysis Suite
## Real-time posture and movement analysis with multiple modes

**Choose your analysis type and get instant feedback on what the camera sees!**
""")

        with gr.Row():
            with gr.Column():
                # Analiz türü / Analysis type
                analysis_type = gr.Radio(
                    choices=[
                        ("🎯 Basic Posture", "basic"),
                        ("🎯 Enhanced Posture", "enhanced"),
                        ("🤚 Hand Tracking", "hand")
                    ],
                    value="basic",
                    label="Analysis Type"
                )

                # Profil bilgileri / Profile info
                gr.Markdown("### 👤 Optional Profile (for Enhanced mode)")
                age_input = gr.Number(label="Age", minimum=10, maximum=100, value=None)
                height_input = gr.Number(label="Height (cm)", minimum=100, maximum=250, value=None)
                weight_input = gr.Number(label="Weight (kg)", minimum=30, maximum=200, value=None)

                # Kamera / Camera
                input_image = gr.Image(sources=["webcam"], streaming=True, label="📹 Camera")

                # Analiz modu seçimi / Analysis mode selection
                realtime_mode = gr.Checkbox(label="🔄 Real-time Analysis / Gerçek Zamanlı Analiz", value=True)

                # Buton (sadece real-time kapalıysa) / Button (only when real-time is off)
                analyze_btn = gr.Button("🔍 Analyze", variant="primary", visible=False)

            with gr.Column():
                # Çıktılar / Outputs
                output_image = gr.Image(label="🎯 Analysis Result")
                feedback_text = gr.Textbox(
                    label="📊 Detailed Feedback",
                    lines=15,
                    interactive=False
                )

        # Real-time modu toggle / Real-time mode toggle
        def toggle_realtime(realtime_enabled):
            return gr.update(visible=not realtime_enabled)

        realtime_mode.change(
            fn=toggle_realtime,
            inputs=[realtime_mode],
            outputs=[analyze_btn]
        )

        # Gerçek zamanlı analiz / Real-time analysis
        input_image.stream(
            fn=process_with_settings,
            inputs=[input_image, analysis_type, age_input, height_input, weight_input],
            outputs=[output_image, feedback_text],
            stream_every=0.5  # Her 0.5 saniyede bir analiz / Analyze every 0.5 seconds
        )

        # Manuel analiz butonu / Manual analysis button
        analyze_btn.click(
            fn=process_with_settings,
            inputs=[input_image, analysis_type, age_input, height_input, weight_input],
            outputs=[output_image, feedback_text]
        )
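        # Note: stream_every trades responsiveness for CPU load; on a shared CPU Space
        # a slower cadence can feel steadier, e.g. (value is illustrative):
        #
        #   input_image.stream(fn=process_with_settings, inputs=[...], outputs=[...],
        #                      stream_every=1.0)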
        # Kullanım talimatları / Usage instructions
        gr.Markdown("""
## 📋 How to Use

### 🎯 **Analysis Types:**
- **Basic Posture**: Body parts, joint angles, alignment
- **Enhanced Posture**: Basic + age/BMI insights
- **Hand Tracking**: Hand detection and finger counting

### 🔄 **Analysis Modes:**
- **Real-time**: Continuous analysis (default) - automatic feedback every 0.5 seconds
- **Manual**: Click "Analyze" button for single analysis

### 📝 **Instructions:**
1. Choose analysis type
2. Allow camera access when prompted
3. Position yourself 2-3 meters from camera
4. **Real-time mode**: Get continuous feedback automatically
5. **Manual mode**: Uncheck real-time and click Analyze button
6. For Enhanced: Enter age/height/weight for personalized insights

### 🎯 **Feedback Symbols:**
- ✅ Good alignment | ⚠️ Issues detected | 📐 Joint angles | 🔍 Position info
""")

    return demo
demo = create_interface()
if __name__ == "__main__":
    demo.launch()
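    # When running outside Hugging Face Spaces you may want to bind explicitly,
    # for example (sketch; host and port values are illustrative):
    #
    #   demo.launch(server_name="0.0.0.0", server_port=7860)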