sems committed on
Commit 951802d · verified · 1 Parent(s): 894cca2

Upload 3 files

Files changed (1)
  1. app.py +243 -329
app.py CHANGED
@@ -1,367 +1,281 @@
- # Pose-Think: AI-Powered Movement Analysis Suite
- # Hugging Face Spaces Compatible Version
  import cv2
  import mediapipe as mp
  import gradio as gr
  import numpy as np

- # MediaPipe başlatma / Initialize MediaPipe
  mp_pose = mp.solutions.pose
  mp_hands = mp.solutions.hands
  mp_drawing = mp.solutions.drawing_utils

- class PostureAnalyzer:
-     """Birleşik postür analiz sınıfı / Unified posture analyzer class"""

-     def __init__(self):
-         self.pose = mp_pose.Pose(
-             static_image_mode=False,
-             model_complexity=1,
-             enable_segmentation=False,
-             min_detection_confidence=0.5,
-             min_tracking_confidence=0.5
-         )

-         self.hands = mp_hands.Hands(
              static_image_mode=False,
              max_num_hands=2,
              min_detection_confidence=0.5,
              min_tracking_confidence=0.5
-         )
-
-     def calculate_angle(self, a, b, c):
-         """Üç nokta arasındaki açıyı hesapla / Calculate angle between three points"""
-         try:
-             a = np.array(a)
-             b = np.array(b)
-             c = np.array(c)

-             radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
-             angle = np.abs(radians * 180.0 / np.pi)
-
-             if angle > 180.0:
-                 angle = 360 - angle

-             return angle
-         except:
-             return 0
-
-     def analyze_posture(self, image, analysis_type="basic", age=None, height=None, weight=None):
-         """Ana analiz fonksiyonu / Main analysis function"""
-         if image is None:
-             return None, "❌ Görüntü yok / No image"
-
-         # BGR'den RGB'ye çevir / Convert BGR to RGB
-         rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
-         # Analiz türüne göre işlem / Process based on analysis type
-         if analysis_type == "hand":
-             return self._analyze_hands(rgb_image)
-         else:
-             return self._analyze_body_posture(rgb_image, analysis_type, age, height, weight)

-     def _analyze_hands(self, rgb_image):
-         """El analizi / Hand analysis"""
-         results = self.hands.process(rgb_image)
-         output_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
-
-         feedback = []
-
-         if results.multi_hand_landmarks:
-             hand_count = len(results.multi_hand_landmarks)
-             feedback.append(f"✅ {hand_count} el tespit edildi / {hand_count} hand(s) detected")

-             for idx, hand_landmarks in enumerate(results.multi_hand_landmarks):
-                 # El landmark'larını çiz / Draw hand landmarks
-                 mp_drawing.draw_landmarks(
-                     output_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)

-                 # Parmak durumları / Finger states
-                 landmarks = hand_landmarks.landmark

-                 # Başparmak / Thumb
-                 thumb_tip = landmarks[mp_hands.HandLandmark.THUMB_TIP]
-                 thumb_ip = landmarks[mp_hands.HandLandmark.THUMB_IP]

-                 # Diğer parmaklar / Other fingers
-                 fingers_up = 0
-                 finger_names = ["Başparmak/Thumb", "İşaret/Index", "Orta/Middle", "Yüzük/Ring", "Serçe/Pinky"]

-                 # Basit parmak sayma / Simple finger counting
-                 tip_ids = [4, 8, 12, 16, 20]
-                 pip_ids = [3, 6, 10, 14, 18]

-                 for i in range(5):
-                     if landmarks[tip_ids[i]].y < landmarks[pip_ids[i]].y:
-                         fingers_up += 1

-                 feedback.append(f"🖐️ El {idx+1}: {fingers_up} parmak yukarıda / Hand {idx+1}: {fingers_up} fingers up")
-         else:
-             feedback.append("❌ El tespit edilemedi / No hands detected")
-             feedback.append("🖐️ Ellerinizi kameraya gösterin / Show your hands to the camera")
-
-         return output_image, "\n".join(feedback)
-
-     def _analyze_body_posture(self, rgb_image, analysis_type, age, height, weight):
-         """Vücut postür analizi / Body posture analysis"""
-         results = self.pose.process(rgb_image)
-         output_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
-
-         feedback = []
-
-         # Profil bilgisi varsa ekle / Add profile info if available
-         if analysis_type == "enhanced" and (age or height or weight):
-             profile_info = []
-             if age:
-                 profile_info.append(f"Yaş/Age: {age}")
-             if height and weight:
-                 bmi = weight / ((height/100) ** 2)
-                 profile_info.append(f"BMI: {bmi:.1f}")
-                 if bmi > 25:
-                     feedback.append("⚠️ BMI yüksek - postür üzerinde ekstra yük / High BMI - extra load on posture")
-
-             if profile_info:
-                 feedback.append(f"👤 Profil / Profile: {' | '.join(profile_info)}")
-                 feedback.append("")
-
-         if results.pose_landmarks:
-             # Landmark'ları çiz / Draw landmarks
-             mp_drawing.draw_landmarks(
-                 output_image,
-                 results.pose_landmarks,
-                 mp_pose.POSE_CONNECTIONS
-             )
-
-             landmarks = results.pose_landmarks.landmark
-
-             # Görünür parçaları kontrol et / Check visible parts
-             visible_parts = []
-
-             # Baş / Head
-             if landmarks[mp_pose.PoseLandmark.NOSE.value].visibility > 0.5:
-                 visible_parts.append("Baş/Head")
-
-             # Omuzlar / Shoulders
-             left_shoulder = landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value]
-             right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value]
-
-             if left_shoulder.visibility > 0.5 and right_shoulder.visibility > 0.5:
-                 visible_parts.append("Omuzlar/Shoulders")

-                 # Omuz seviyesi / Shoulder level
-                 shoulder_diff = abs(left_shoulder.y - right_shoulder.y)
-                 if shoulder_diff > 0.05:
-                     if left_shoulder.y < right_shoulder.y:
-                         feedback.append("⚠️ Sol omuz yüksek / Left shoulder high")
-                     else:
-                         feedback.append("⚠️ Sağ omuz yüksek / Right shoulder high")
-                 else:
-                     feedback.append("✅ Omuzlar seviyeli / Shoulders level")
-
-             # Dirsekler ve açıları / Elbows and angles
-             left_elbow = landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value]
-             right_elbow = landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value]
-
-             if left_elbow.visibility > 0.5 and right_elbow.visibility > 0.5:
-                 visible_parts.append("Dirsekler/Elbows")

-                 # Dirsek açıları hesapla / Calculate elbow angles
-                 try:
-                     # Sol dirsek / Left elbow
-                     left_shoulder_pos = [left_shoulder.x, left_shoulder.y]
-                     left_elbow_pos = [left_elbow.x, left_elbow.y]
-                     left_wrist_pos = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,
-                                       landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]
-
-                     left_elbow_angle = self.calculate_angle(left_shoulder_pos, left_elbow_pos, left_wrist_pos)
-                     if left_elbow_angle > 0:
-                         feedback.append(f"📐 Sol dirsek açısı / Left elbow: {left_elbow_angle:.1f}°")
-
-                     # Sağ dirsek / Right elbow
-                     right_shoulder_pos = [right_shoulder.x, right_shoulder.y]
-                     right_elbow_pos = [right_elbow.x, right_elbow.y]
-                     right_wrist_pos = [landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x,
-                                        landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y]

-                     right_elbow_angle = self.calculate_angle(right_shoulder_pos, right_elbow_pos, right_wrist_pos)
-                     if right_elbow_angle > 0:
-                         feedback.append(f"📐 Sağ dirsek açısı / Right elbow: {right_elbow_angle:.1f}°")
-
-                 except:
-                     feedback.append("⚠️ Dirsek açısı hesaplanamadı / Cannot calculate elbow angles")
-
-             # Kalçalar / Hips
-             left_hip = landmarks[mp_pose.PoseLandmark.LEFT_HIP.value]
-             right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value]
-
-             if left_hip.visibility > 0.5 and right_hip.visibility > 0.5:
-                 visible_parts.append("Kalçalar/Hips")

-                 # Kalça seviyesi / Hip level
-                 hip_diff = abs(left_hip.y - right_hip.y)
-                 if hip_diff > 0.03:
-                     if left_hip.y < right_hip.y:
-                         feedback.append("⚠️ Sol kalça yüksek / Left hip high")
                      else:
-                         feedback.append("⚠️ Sağ kalça yüksek / Right hip high")
-                 else:
-                     feedback.append("✅ Kalçalar seviyeli / Hips level")
-
-             # Boyun pozisyonu / Neck position
-             nose = landmarks[mp_pose.PoseLandmark.NOSE.value]
-             if nose.visibility > 0.5 and left_shoulder.visibility > 0.5 and right_shoulder.visibility > 0.5:
-                 shoulder_center_x = (left_shoulder.x + right_shoulder.x) / 2
-                 head_offset = abs(nose.x - shoulder_center_x)

-                 if head_offset > 0.08:
-                     if nose.x < shoulder_center_x:
-                         feedback.append("🔍 Boyun sola eğik / Neck tilted left")
                      else:
-                         feedback.append("🔍 Boyun sağa eğik / Neck tilted right")
-                 else:
-                     feedback.append("🔍 Boyun merkezi / Neck centered")

-             # Yaşa özel öneriler (enhanced modda) / Age-specific recommendations (in enhanced mode)
-             if analysis_type == "enhanced" and age:
-                 feedback.append("")
-                 feedback.append("🎯 Yaşınıza Özel Öneriler / Age-Specific Recommendations:")
-                 if age < 25:
-                     feedback.append("💡 Genç yaş: Postür alışkanlıkları şimdi oluşturun / Young age: Form posture habits now")
-                 elif age < 45:
-                     feedback.append("💡 Orta yaş: Düzenli egzersiz önemli / Middle age: Regular exercise important")
-                 else:
-                     feedback.append("💡 Olgun yaş: Kemik sağlığına dikkat / Mature age: Focus on bone health")

-             # Görünür parçaları listele / List visible parts
-             if visible_parts:
-                 feedback.insert(0, f"✅ Görünen / Visible: {', '.join(visible_parts)}")
-                 feedback.insert(1, "")
-         else:
-             feedback.append("❌ Vücut tespit edilemedi / Body not detected")
-             feedback.append("📍 Kameraya tam vücut görünecek şekilde durun / Stand so full body is visible")

-         return output_image, "\n".join(feedback)
-
- # Global analyzer
- analyzer = PostureAnalyzer()
-
- def analyze_movement(image, analysis_type, age, height, weight):
-     """Hareket analizi ana fonksiyonu / Main movement analysis function"""
-     return analyzer.analyze_posture(image, analysis_type, age, height, weight)
-
- # Basit Gradio arayüzü / Simple Gradio interface
- def create_interface():
-
-     def process_with_settings(image, analysis_type, age, height, weight):
-         """Ayarlarla birlikte işle / Process with settings"""
-         # Boş değerleri None'a çevir / Convert empty values to None
-         age = int(age) if age and str(age).strip() else None
-         height = int(height) if height and str(height).strip() else None
-         weight = int(weight) if weight and str(weight).strip() else None
-
-         return analyze_movement(image, analysis_type, age, height, weight)
-
-     # Ana interface / Main interface
-     with gr.Blocks(title="Pose-Think: AI Movement Analysis") as demo:
-
-         gr.Markdown("""
-         # 🎯 Pose-Think: AI-Powered Movement Analysis Suite
-         ## Real-time posture and movement analysis with multiple modes
-
-         **Choose your analysis type and get instant feedback on what the camera sees!**
-         """)
-
-         with gr.Row():
-             with gr.Column():
-                 # Analiz türü / Analysis type
-                 analysis_type = gr.Radio(
-                     choices=[
-                         ("🎯 Basic Posture", "basic"),
-                         ("🎯 Enhanced Posture", "enhanced"),
-                         ("🤚 Hand Tracking", "hand")
-                     ],
-                     value="basic",
-                     label="Analysis Type"
-                 )
-
-                 # Profil bilgileri / Profile info
-                 gr.Markdown("### 👤 Optional Profile (for Enhanced mode)")
-                 age_input = gr.Number(label="Age", minimum=10, maximum=100, value=None)
-                 height_input = gr.Number(label="Height (cm)", minimum=100, maximum=250, value=None)
-                 weight_input = gr.Number(label="Weight (kg)", minimum=30, maximum=200, value=None)
-
-                 # Kamera / Camera
-                 input_image = gr.Image(sources=["webcam"], streaming=True, label="📹 Camera")
-
-                 # Analiz modu seçimi / Analysis mode selection
-                 realtime_mode = gr.Checkbox(label="🔄 Real-time Analysis / Gerçek Zamanlı Analiz", value=True)
-
-                 # Buton (sadece real-time kapalıysa) / Button (only when real-time is off)
-                 analyze_btn = gr.Button("🔍 Analyze", variant="primary", visible=False)
-
-             with gr.Column():
-                 # Çıktılar / Outputs
-                 output_image = gr.Image(label="🎯 Analysis Result")
-                 feedback_text = gr.Textbox(
-                     label="📊 Detailed Feedback",
-                     lines=15,
-                     interactive=False
-                 )
-
-         # Real-time modu toggle / Real-time mode toggle
-         def toggle_realtime(realtime_enabled):
-             return gr.update(visible=not realtime_enabled)
-
-         realtime_mode.change(
-             fn=toggle_realtime,
-             inputs=[realtime_mode],
-             outputs=[analyze_btn]
-         )
-
-         # Gerçek zamanlı analiz / Real-time analysis
-         input_image.stream(
-             fn=process_with_settings,
-             inputs=[input_image, analysis_type, age_input, height_input, weight_input],
-             outputs=[output_image, feedback_text],
-             stream_every=0.5  # Her 0.5 saniyede bir analiz / Analyze every 0.5 seconds
-         )
-
-         # Manuel analiz butonu / Manual analysis button
-         analyze_btn.click(
-             fn=process_with_settings,
-             inputs=[input_image, analysis_type, age_input, height_input, weight_input],
-             outputs=[output_image, feedback_text]
-         )
-
-
-         # Kullanım talimatları / Usage instructions
-         gr.Markdown("""
-         ## 📋 How to Use
-
-         ### 🎯 **Analysis Types:**
-         - **Basic Posture**: Body parts, joint angles, alignment
-         - **Enhanced Posture**: Basic + age/BMI insights
-         - **Hand Tracking**: Hand detection and finger counting
-
-         ### **Analysis Modes:**
-         - **Real-time**: Continuous analysis (default) - automatic feedback every 0.5 seconds
-         - **Manual**: Click "Analyze" button for single analysis
-
-         ### 📝 **Instructions:**
-         1. Choose analysis type
-         2. Allow camera access when prompted
-         3. Position yourself 2-3 meters from camera
-         4. **Real-time mode**: Get continuous feedback automatically
-         5. **Manual mode**: Uncheck real-time and click Analyze button
-         6. For Enhanced: Enter age/height/weight for personalized insights
-
-         ### 🎯 **Feedback Symbols:**
-         - ✅ Good alignment | ⚠️ Issues detected | 📐 Joint angles | 🔍 Position info
-         """)
-
-     return demo
-
- demo = create_interface()

  if __name__ == "__main__":
      demo.launch()
+ # Pose-Think: AI-Powered Movement Analysis Suite - Gradio 4.8.0 Compatible
  import cv2
  import mediapipe as mp
  import gradio as gr
  import numpy as np

+ # MediaPipe initialization
  mp_pose = mp.solutions.pose
  mp_hands = mp.solutions.hands
  mp_drawing = mp.solutions.drawing_utils

+ def analyze_posture(image, analysis_type="basic", age=None, height=None, weight=None):
+     """Main analysis function compatible with Gradio 4.8.0"""
+     if image is None:
+         return None, "❌ No image / Görüntü yok"

+     # Convert BGR to RGB
+     rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+     output_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
+
+     feedback = []
+
+     # Profile info for enhanced mode
+     if analysis_type == "enhanced" and (age or height or weight):
+         profile_info = []
+         if age:
+             profile_info.append(f"Age: {age}")
+         if height and weight:
+             bmi = weight / ((height/100) ** 2)
+             profile_info.append(f"BMI: {bmi:.1f}")
+             if bmi > 25:
+                 feedback.append("⚠️ BMI high - extra load on posture")

+         if profile_info:
+             feedback.append(f"👤 Profile: {' | '.join(profile_info)}")
+             feedback.append("")
+
+     if analysis_type == "hand":
+         # Hand analysis
+         with mp_hands.Hands(
              static_image_mode=False,
              max_num_hands=2,
              min_detection_confidence=0.5,
              min_tracking_confidence=0.5
+         ) as hands:
+             results = hands.process(rgb_image)

+             if results.multi_hand_landmarks:
+                 hand_count = len(results.multi_hand_landmarks)
+                 feedback.append(f"✅ {hand_count} hands detected")

+                 for idx, hand_landmarks in enumerate(results.multi_hand_landmarks):
+                     mp_drawing.draw_landmarks(output_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
+
+                     # Simple finger counting
+                     landmarks = hand_landmarks.landmark
+                     fingers_up = 0
+                     tip_ids = [4, 8, 12, 16, 20]
+                     pip_ids = [3, 6, 10, 14, 18]
+
+                     for i in range(5):
+                         if landmarks[tip_ids[i]].y < landmarks[pip_ids[i]].y:
+                             fingers_up += 1
+
+                     feedback.append(f"🖐️ Hand {idx+1}: {fingers_up} fingers up")
+             else:
+                 feedback.append("❌ No hands detected")
+                 feedback.append("🖐️ Show your hands to the camera")

+     else:
+         # Posture analysis
+         with mp_pose.Pose(
+             static_image_mode=False,
+             model_complexity=1,
+             enable_segmentation=False,
+             min_detection_confidence=0.5,
+             min_tracking_confidence=0.5
+         ) as pose:
+             results = pose.process(rgb_image)

+             if results.pose_landmarks:
+                 mp_drawing.draw_landmarks(output_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)

+                 landmarks = results.pose_landmarks.landmark
+                 visible_parts = []

+                 # Check visible parts
+                 if landmarks[mp_pose.PoseLandmark.NOSE.value].visibility > 0.5:
+                     visible_parts.append("Head")

+                 # Shoulders
+                 left_shoulder = landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value]
+                 right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value]

+                 if left_shoulder.visibility > 0.5 and right_shoulder.visibility > 0.5:
+                     visible_parts.append("Shoulders")
+
+                     # Shoulder level check
+                     shoulder_diff = abs(left_shoulder.y - right_shoulder.y)
+                     if shoulder_diff > 0.05:
+                         if left_shoulder.y < right_shoulder.y:
+                             feedback.append("⚠️ Left shoulder higher")
+                         else:
+                             feedback.append("⚠️ Right shoulder higher")
+                     else:
+                         feedback.append("✅ Shoulders level")

+                 # Elbows and angles
+                 left_elbow = landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value]
+                 right_elbow = landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value]

+                 if left_elbow.visibility > 0.5 and right_elbow.visibility > 0.5:
+                     visible_parts.append("Elbows")
+
+                     # Calculate elbow angles
+                     try:
+                         def calculate_angle(a, b, c):
+                             a = np.array(a)
+                             b = np.array(b)
+                             c = np.array(c)
+                             radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
+                             angle = np.abs(radians * 180.0 / np.pi)
+                             if angle > 180.0:
+                                 angle = 360 - angle
+                             return angle
+
+                         # Left elbow angle
+                         left_shoulder_pos = [left_shoulder.x, left_shoulder.y]
+                         left_elbow_pos = [left_elbow.x, left_elbow.y]
+                         left_wrist_pos = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,
+                                           landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]
+
+                         left_angle = calculate_angle(left_shoulder_pos, left_elbow_pos, left_wrist_pos)
+                         feedback.append(f"📐 Left elbow: {left_angle:.1f}°")
+
+                         # Right elbow angle
+                         right_shoulder_pos = [right_shoulder.x, right_shoulder.y]
+                         right_elbow_pos = [right_elbow.x, right_elbow.y]
+                         right_wrist_pos = [landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x,
+                                            landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y]
+
+                         right_angle = calculate_angle(right_shoulder_pos, right_elbow_pos, right_wrist_pos)
+                         feedback.append(f"📐 Right elbow: {right_angle:.1f}°")
+
+                     except:
+                         feedback.append("⚠️ Cannot calculate elbow angles")

+                 # Hips
+                 left_hip = landmarks[mp_pose.PoseLandmark.LEFT_HIP.value]
+                 right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value]

+                 if left_hip.visibility > 0.5 and right_hip.visibility > 0.5:
+                     visible_parts.append("Hips")

+                     hip_diff = abs(left_hip.y - right_hip.y)
+                     if hip_diff > 0.03:
+                         if left_hip.y < right_hip.y:
+                             feedback.append("⚠️ Left hip higher")
+                         else:
+                             feedback.append("⚠️ Right hip higher")
+                     else:
+                         feedback.append("✅ Hips level")

+                 # Neck position
+                 nose = landmarks[mp_pose.PoseLandmark.NOSE.value]
+                 if nose.visibility > 0.5:
+                     shoulder_center_x = (left_shoulder.x + right_shoulder.x) / 2
+                     head_offset = abs(nose.x - shoulder_center_x)
+
+                     if head_offset > 0.08:
+                         if nose.x < shoulder_center_x:
+                             feedback.append("🔍 Neck tilted left")
+                         else:
+                             feedback.append("🔍 Neck tilted right")
                      else:
+                         feedback.append("🔍 Neck centered")

+                 # Age-specific recommendations for enhanced mode
+                 if analysis_type == "enhanced" and age:
+                     feedback.append("")
+                     feedback.append("🎯 Age-Specific Recommendations:")
+                     if age < 25:
+                         feedback.append("💡 Young age: Form good posture habits now")
+                     elif age < 45:
+                         feedback.append("💡 Middle age: Regular exercise important")
                      else:
+                         feedback.append("💡 Mature age: Focus on bone health")
+
+                 # List visible parts
+                 if visible_parts:
+                     feedback.insert(0, f"✅ Visible: {', '.join(visible_parts)}")
+                     feedback.insert(1, "")
+             else:
+                 feedback.append("❌ Body not detected")
+                 feedback.append("📍 Stand so full body is visible to camera")
+
+     return output_image, "\n".join(feedback)
+
+ # Simple Gradio Interface compatible with 4.8.0
+ with gr.Blocks(title="🎯 Pose-Think: AI Movement Analysis") as demo:
+
+     gr.Markdown("""
+     # 🎯 Pose-Think: AI-Powered Movement Analysis Suite
+     ## Real-time posture and movement analysis with multiple modes
+
+     **Choose your analysis type and get instant feedback on what the camera sees!**
+     """)
+
+     with gr.Row():
+         with gr.Column():
+             # Analysis type selection
+             analysis_type = gr.Radio(
+                 choices=[
+                     ("🎯 Basic Posture", "basic"),
+                     ("🎯 Enhanced Posture", "enhanced"),
+                     ("🤚 Hand Tracking", "hand")
+                 ],
+                 value="basic",
+                 label="Analysis Type"
+             )
+
+             # Profile info (for enhanced mode)
+             gr.Markdown("### 👤 Optional Profile (for Enhanced mode)")
+             age_input = gr.Number(label="Age", minimum=10, maximum=100, value=None)
+             height_input = gr.Number(label="Height (cm)", minimum=100, maximum=250, value=None)
+             weight_input = gr.Number(label="Weight (kg)", minimum=30, maximum=200, value=None)

+             # Camera input
+             input_image = gr.Image(sources=["webcam"], label="📹 Camera")

+             # Analysis button
+             analyze_btn = gr.Button("🔍 Analyze", variant="primary", size="lg")

+         with gr.Column():
+             # Outputs
+             output_image = gr.Image(label="🎯 Analysis Result")
+             feedback_text = gr.Textbox(
+                 label="📊 Detailed Feedback",
+                 lines=15,
+                 interactive=False
+             )
+
+     # Analysis function
+     analyze_btn.click(
+         fn=analyze_posture,
+         inputs=[input_image, analysis_type, age_input, height_input, weight_input],
+         outputs=[output_image, feedback_text]
+     )
+
+     # Usage instructions
+     gr.Markdown("""
+     ## 📋 How to Use
+
+     ### 🎯 **Analysis Types:**
+     - **Basic Posture**: Body parts, joint angles, alignment
+     - **Enhanced Posture**: Basic + age/BMI insights + personalized recommendations
+     - **Hand Tracking**: Hand detection and finger counting
+
+     ### 📝 **Instructions:**
+     1. **Choose analysis type** from the radio buttons
+     2. **Allow camera access** when prompted by your browser
+     3. **Position yourself** 2-3 meters from camera (full body visible for posture)
+     4. **For Enhanced mode**: Optionally enter age/height/weight for personalized insights
+     5. **Click Analyze** to get instant detailed feedback
+
+     ### 🎯 **What you'll see:**
+     - **Green checkmarks**: Good alignment/posture
+     - ⚠️ **Warning signs**: Issues detected that need attention
+     - 📐 **Measurements**: Joint angles in degrees
+     - 🔍 **Position info**: Head, neck, shoulder positions
+     - 👤 **Profile insights**: Age-specific recommendations (Enhanced mode)
+
+     ### 💡 **Tips for best results:**
+     - **Good lighting**: Ensure even, bright lighting
+     - **Plain background**: Use contrasting, simple background
+     - **Stable position**: Minimize movement during analysis
+     - **Full visibility**: Keep target body parts clearly visible
+     """)

  if __name__ == "__main__":
      demo.launch()
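
The committed version exposes analyze_posture() as a plain top-level function, so it can be exercised on a single frame without opening the Gradio UI. The sketch below is illustrative only and not part of the commit; it assumes it is run next to the committed app.py and that a local test image exists, with "sample.jpg" and "annotated.jpg" as placeholder file names.

# Standalone smoke test for analyze_posture() (sketch; not part of the commit).
# Assumes it runs alongside app.py; "sample.jpg" / "annotated.jpg" are placeholder paths.
import cv2
from app import analyze_posture  # importing app.py builds the gr.Blocks UI but does not launch it

frame = cv2.imread("sample.jpg")  # BGR image as loaded by OpenCV; app.py converts it before processing
annotated, report = analyze_posture(frame, analysis_type="basic")

print(report)  # same text the "Detailed Feedback" box would show
if annotated is not None:
    cv2.imwrite("annotated.jpg", annotated)  # frame with pose landmarks drawn on it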