Update app.py
app.py
CHANGED
[The removed ("-") lines of this diff were truncated when the page was captured and are omitted; each hunk below shows its header, the unchanged context lines, and the added ("+") lines.]
@@ -199,118 +199,200 @@ HRQOL_QUESTIONNAIRE = {
 # ====== ENHANCED BIOLOGICAL AGE PREDICTION FUNCTIONS ======

 def predict_biological_age_enhanced(img: Image.Image, video_path: str, breed: str, hrqol_scores: dict, age: int = None):
+    """Enhanced biological age prediction with exact single value output"""
     try:
         # 1. Base prediction using breed-specific aging curves
         breed_lifespan = BREED_LIFESPAN.get(breed.lower(), 12.0)

+        # 2. Enhanced visual health indicators
+        health_indicators = analyze_health_indicators_detailed(img)

         # 3. HRQOL-based age adjustment
+        hrqol_adjustment = calculate_hrqol_age_factor_refined(hrqol_scores)

         # 4. Video gait analysis (if available)
         gait_adjustment = 0
         if video_path:
+            gait_features = analyze_video_for_age_indicators_enhanced(video_path)
             gait_adjustment = gait_features.get('age_factor', 0)

+        # 5. Multi-factor prediction with uncertainty consideration
         if age and age > 0:
             base_age = age
+
+            # Calculate health factor from all inputs
+            visual_weight = 0.3
+            hrqol_weight = 0.4
+            gait_weight = 0.3 if video_path else 0.0
+
+            # Normalize weights when no video
+            if not video_path:
+                visual_weight = 0.5
+                hrqol_weight = 0.5
+
+            total_health_factor = (
+                health_indicators['age_factor'] * visual_weight +
+                hrqol_adjustment * hrqol_weight +
+                gait_adjustment * gait_weight
+            )
+
+            # Apply confidence-based adjustment
+            confidence_multiplier = health_indicators.get('confidence', 0.5)
+            adjusted_factor = total_health_factor * confidence_multiplier
+
+            # Calculate base biological age
+            biological_age_base = base_age * (1 + adjusted_factor * 0.5)
+
+            # Calculate uncertainty for averaging
+            uncertainty = calculate_prediction_uncertainty_for_averaging(img, video_path, hrqol_scores, age, health_indicators)
+
+            # Convert uncertainty to exact value using confidence-weighted calculation
+            confidence_score = health_indicators.get('confidence', 0.5)
+
+            if confidence_score > 0.7:
+                # High confidence: use base prediction
+                biological_age_exact = biological_age_base
+            elif confidence_score > 0.5:
+                # Medium confidence: slight adjustment toward chronological age
+                biological_age_exact = (biological_age_base * 0.7) + (age * 0.3)
+            else:
+                # Low confidence: stronger adjustment toward chronological age
+                biological_age_exact = (biological_age_base * 0.5) + (age * 0.5)
+
+            # Statistical approach using the ± range
+            min_age = max(0.1, biological_age_base - uncertainty)
+            max_age = biological_age_base + uncertainty
+
+            # Calculate exact value using confidence weighting
+            if confidence_score > 0.6:
+                # High confidence: closer to base prediction
+                biological_age_exact = biological_age_base + (uncertainty * 0.1)
+            else:
+                # Lower confidence: use middle of range
+                biological_age_exact = (min_age + max_age) / 2
+
         else:
+            # When no chronological age provided
+            visual_age_estimate = estimate_age_from_visual_cues_enhanced(img, breed)
+            health_factor = (hrqol_adjustment + gait_adjustment) * 0.3
+            biological_age_exact = visual_age_estimate * (1 + health_factor)

+        # 6. Apply breed-specific constraints
+        min_age = max(0.1, age * 0.8) if age else 0.3
+        max_age = min(breed_lifespan * 1.2, age * 1.3) if age else breed_lifespan * 1.1

+        biological_age_exact = max(min_age, min(max_age, biological_age_exact))

+        # 7. Calculate confidence metrics for display
+        prediction_confidence = calculate_prediction_confidence(health_indicators, hrqol_scores, video_path, age)

         return {
+            'biological_age': round(biological_age_exact, 1),  # Single exact value
+            'uncertainty': 0.0,  # No uncertainty shown
+            'high_uncertainty': False,
+            'vision_quality': compute_vision_quality_enhanced(img),
             'breed_lifespan': breed_lifespan,
             'confidence_factors': {
                 'visual_health': health_indicators,
                 'hrqol_factor': hrqol_adjustment,
+                'gait_factor': gait_adjustment,
+                'prediction_confidence': prediction_confidence
             }
         }

     except Exception as e:
         logger.error(f"Error in enhanced age prediction: {e}")
+        # Fallback
         fallback_age = age if age else breed_lifespan * 0.4
         return {
+            'biological_age': round(fallback_age, 1),
+            'uncertainty': 0.0,
+            'high_uncertainty': False,
             'vision_quality': 0.5,
             'breed_lifespan': breed_lifespan
         }

+def analyze_health_indicators_detailed(img: Image.Image):
+    """Enhanced visual health analysis with more detailed aging indicators"""
     try:
+        # More specific aging prompts for better accuracy
         aging_prompts = [
+            "very young puppy with baby features and soft coat",
+            "young adult dog with prime muscle tone and bright eyes",
+            "mature adult dog with slight coat changes",
+            "middle-aged dog with some gray hairs and slower movement",
+            "senior dog with obvious gray muzzle and aged appearance",
+            "elderly dog with significant aging signs and mobility issues"
         ]

         inputs = clip_processor(text=aging_prompts, images=img, return_tensors="pt", padding=True).to(device)
         with torch.no_grad():
             logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()

+        # More nuanced age weights for better precision
+        age_weights = [-0.5, -0.2, 0.0, 0.15, 0.3, 0.5]
         age_factor = np.dot(logits, age_weights)

+        # Additional physical feature analysis
+        physical_prompts = [
+            "dog with clear bright eyes and healthy coat",
+            "dog with slightly cloudy eyes or dull coat",
+            "dog with obvious age-related physical changes"
+        ]
+
+        inputs2 = clip_processor(text=physical_prompts, images=img, return_tensors="pt", padding=True).to(device)
+        with torch.no_grad():
+            physical_logits = clip_model(**inputs2).logits_per_image.softmax(-1)[0].cpu().numpy()
+
+        physical_weights = [-0.2, 0.1, 0.3]
+        physical_factor = np.dot(physical_logits, physical_weights)
+
+        # Combine factors for more accurate assessment
+        combined_factor = (age_factor * 0.7) + (physical_factor * 0.3)
+
         return {
+            'age_factor': float(combined_factor),
             'confidence': float(np.max(logits)),
+            'distribution': logits.tolist(),
+            'physical_assessment': float(physical_factor)
         }

     except Exception as e:
+        logger.error(f"Error in detailed health indicator analysis: {e}")
+        return {'age_factor': 0.0, 'confidence': 0.5, 'distribution': [0.16]*6, 'physical_assessment': 0.0}

+def calculate_hrqol_age_factor_refined(hrqol_scores: dict):
+    """Refined HRQOL aging factor calculation with more precision"""
     try:
+        # Weight different domains based on their correlation with aging
+        domain_weights = {
+            'vitality': 0.3,  # Strong correlation with age
+            'comfort': 0.25,  # Moderate correlation
+            'emotional_wellbeing': 0.2,  # Moderate correlation
+            'alertness': 0.25  # Strong correlation with cognitive aging
+        }

+        weighted_score = sum(
+            hrqol_scores.get(domain, 50) * weight
+            for domain, weight in domain_weights.items()
+        )
+
+        # More refined age factor calculation
+        if weighted_score >= 80:
+            age_factor = (80 - weighted_score) / 100.0 * 0.15
+        elif weighted_score >= 60:
+            age_factor = (60 - weighted_score) / 100.0 * 0.05
+        else:
+            age_factor = (60 - weighted_score) / 100.0 * 0.25

+        return max(-0.2, min(0.3, age_factor))

     except Exception as e:
+        logger.error(f"Error in refined HRQOL age factor calculation: {e}")
         return 0.0

+def analyze_video_for_age_indicators_enhanced(video_path: str):
+    """Enhanced video analysis with more detailed movement assessment"""
     try:
         cap = cv2.VideoCapture(video_path)
         total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
@@ -320,9 +402,10 @@ def analyze_video_for_age_indicators(video_path: str):
             return {'age_factor': 0.0}

         movement_scores = []
+        energy_scores = []

+        # Sample more frames for better accuracy
+        frame_indices = np.linspace(0, total_frames-1, min(15, total_frames), dtype=int)

         for idx in frame_indices:
             cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
@@ -332,51 +415,74 @@ def analyze_video_for_age_indicators(video_path: str):

             img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

+            # Enhanced movement quality analysis
             movement_prompts = [
+                "puppy or young dog with bouncy energetic movement",
+                "adult dog with smooth coordinated movement",
+                "older dog with careful deliberate movement",
+                "senior dog with stiff or labored movement"
             ]

             inputs = clip_processor(text=movement_prompts, images=img, return_tensors="pt", padding=True).to(device)
             with torch.no_grad():
                 logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()

+            # More nuanced movement scoring
+            movement_weights = [-0.3, -0.1, 0.1, 0.3]
             movement_score = np.dot(logits, movement_weights)
             movement_scores.append(movement_score)
+
+            # Energy level analysis
+            energy_prompts = [
+                "high energy playful dog",
+                "moderate energy calm dog",
+                "low energy tired dog"
+            ]
+
+            inputs2 = clip_processor(text=energy_prompts, images=img, return_tensors="pt", padding=True).to(device)
+            with torch.no_grad():
+                energy_logits = clip_model(**inputs2).logits_per_image.softmax(-1)[0].cpu().numpy()
+
+            energy_weights = [-0.2, 0.0, 0.2]
+            energy_score = np.dot(energy_logits, energy_weights)
+            energy_scores.append(energy_score)

         cap.release()

+        if movement_scores and energy_scores:
+            # Combine movement and energy factors
+            avg_movement = np.mean(movement_scores)
+            avg_energy = np.mean(energy_scores)
+            combined_factor = (avg_movement * 0.6) + (avg_energy * 0.4)
+
             return {
+                'age_factor': float(combined_factor),
+                'movement_score': float(avg_movement),
+                'energy_score': float(avg_energy),
                 'sample_count': len(movement_scores)
             }
         else:
             return {'age_factor': 0.0}

     except Exception as e:
+        logger.error(f"Error in enhanced video age analysis: {e}")
         return {'age_factor': 0.0}

+def estimate_age_from_visual_cues_enhanced(img: Image.Image, breed: str):
+    """Enhanced age estimation with more precise visual cues"""
     try:
         breed_lifespan = BREED_LIFESPAN.get(breed.lower(), 12.0)

+        # More detailed age-specific descriptions
         age_ranges = [
+            (0.25, f"very young {breed} puppy with baby features"),
+            (0.75, f"young {breed} puppy with developing features"),
+            (1.5, f"adolescent {breed} with youthful appearance"),
+            (3.0, f"young adult {breed} in prime condition"),
+            (6.0, f"mature adult {breed} with full development"),
+            (9.0, f"middle-aged {breed} with some aging signs"),
+            (breed_lifespan * 0.9, f"senior {breed} with clear aging"),
+            (breed_lifespan, f"elderly {breed} with advanced aging")
         ]

         age_prompts = [desc for _, desc in age_ranges]
@@ -385,64 +491,102 @@ def estimate_age_from_visual_cues(img: Image.Image, breed: str):
         with torch.no_grad():
             logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()

+        # Calculate weighted average age with higher precision
         ages = [age for age, _ in age_ranges]
         estimated_age = np.dot(logits, ages)

+        # Apply confidence-based adjustment
+        confidence = np.max(logits)
+        if confidence < 0.3:  # Low confidence, be more conservative
+            estimated_age = breed_lifespan * 0.4
+
+        return max(0.2, min(breed_lifespan * 1.1, estimated_age))

     except Exception as e:
+        logger.error(f"Error in enhanced visual age estimation: {e}")
+        return BREED_LIFESPAN.get(breed.lower(), 12.0) * 0.4

+def calculate_prediction_uncertainty_for_averaging(img: Image.Image, video_path: str, hrqol_scores: dict, age: int = None, health_indicators: dict = {}):
+    """Calculate uncertainty specifically for averaging calculation"""
     try:
+        base_uncertainty = 0.5

+        # Adjust based on available data
+        if age and age > 0:
+            base_uncertainty *= 0.4  # Major reduction with chronological age

+        # Visual confidence impact
+        visual_confidence = health_indicators.get('confidence', 0.5)
+        base_uncertainty *= (1.0 - visual_confidence * 0.5)

+        # HRQOL completeness
         completed_domains = sum(1 for score in hrqol_scores.values() if score > 0)
+        if completed_domains == 4:
+            base_uncertainty *= 0.7

+        # Video data impact
         if video_path:
+            base_uncertainty *= 0.6
+
+        return max(0.1, min(1.5, base_uncertainty))
+
+    except Exception as e:
+        return 0.9  # Default uncertainty for averaging
+
+def calculate_prediction_confidence(health_indicators: dict, hrqol_scores: dict, video_path: str, age: int = None):
+    """Calculate overall prediction confidence score"""
+    try:
+        confidence_factors = []
+
+        # Visual analysis confidence
+        visual_conf = health_indicators.get('confidence', 0.5)
+        confidence_factors.append(visual_conf * 0.3)
+
+        # Chronological age availability
+        age_conf = 0.9 if age else 0.3
+        confidence_factors.append(age_conf * 0.3)
+
+        # HRQOL completeness
+        completed_domains = sum(1 for score in hrqol_scores.values() if score > 0)
+        hrqol_conf = completed_domains / 4.0
+        confidence_factors.append(hrqol_conf * 0.2)

+        # Video availability
+        video_conf = 0.8 if video_path else 0.4
+        confidence_factors.append(video_conf * 0.2)
+
+        overall_confidence = sum(confidence_factors)
+        return min(1.0, overall_confidence)

     except Exception as e:
+        return 0.5

+def compute_vision_quality_enhanced(img: Image.Image):
+    """Enhanced vision quality assessment"""
     try:
         # Convert to grayscale for quality assessment
         gray = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)

+        # Enhanced sharpness calculation
         sharpness = cv2.Laplacian(gray, cv2.CV_64F).var()

+        # Enhanced exposure calculation
         mean_intensity = np.mean(gray)
         exposure_quality = 1.0 - abs(mean_intensity - 127.5) / 127.5

+        # Contrast assessment
+        contrast = np.std(gray) / 128.0
+        contrast_quality = min(1.0, contrast)
+
+        # Combined quality score with multiple factors
+        quality = (sharpness / 1200.0 * 0.4 + exposure_quality * 0.3 + contrast_quality * 0.3)
+        quality = min(1.0, quality)
+
+        return max(0.1, quality)

     except Exception as e:
+        logger.error(f"Error in enhanced quality computation: {e}")
+        return 0.5

 # ====== EXISTING FUNCTIONS (UNCHANGED) ======
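For reference, a minimal sketch of how the updated entry point could be exercised outside the Gradio callback, assuming app.py's globals (BREED_LIFESPAN, clip_model, clip_processor, device, logger) are already initialized; the file name, breed string, and HRQOL values below are hypothetical placeholders, and the in-app call (new lines 850-852) likewise passes None for video_path:

    from PIL import Image

    # Hypothetical inputs for illustration only; the real app passes the uploaded
    # media and the HRQOL responses collected by the Gradio UI.
    img = Image.open("sample_dog.jpg")
    hrqol = {"vitality": 75, "comfort": 80, "emotional_wellbeing": 70, "alertness": 85}

    result = predict_biological_age_enhanced(
        img, video_path=None, breed="labrador retriever", hrqol_scores=hrqol, age=6
    )
    print(result["biological_age"])                               # single rounded value in years
    print(result["confidence_factors"]["prediction_confidence"])  # overall confidence in [0, 1]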
@@ -649,7 +793,7 @@ def show_loading():
     """

 def comprehensive_healthspan_analysis(input_type, image_input, video_input, breed, age, *hrqol_responses):
+    """Enhanced comprehensive analysis with precise biological age prediction"""

     # Show loading first
     yield show_loading()
@@ -702,7 +846,7 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
     try:
         detected_breed, breed_conf, health_aspects = classify_breed_and_health(selected_media, breed)

+        # ENHANCED biological age prediction with exact value
         enhanced_age_info = predict_biological_age_enhanced(
             selected_media, None, detected_breed, hrqol_scores, age
         )
@@ -779,7 +923,7 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
     <div style="font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; max-width: 1000px; margin: 0 auto;">
         <div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 30px; border-radius: 15px; margin: 20px 0; text-align: center; box-shadow: 0 4px 6px rgba(0,0,0,0.1);">
             <h2 style="margin: 0; font-size: 2em; text-shadow: 1px 1px 2px rgba(0,0,0,0.3);">{input_type_icon} Enhanced Multi-Modal Health Assessment</h2>
+            <div style="font-size: 1.1em; margin: 10px 0; opacity: 0.9;">Analysis Type: {input_type} | Precise Biological Age Prediction</div>
             <div style="font-size: 3em; font-weight: bold; margin: 15px 0; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);">{final_healthspan_score:.1f}/100</div>
             <div style="font-size: 1.2em; background: rgba(255,255,255,0.2); padding: 8px 16px; border-radius: 20px; display: inline-block;">{get_healthspan_grade(final_healthspan_score)}</div>
         </div>
@@ -804,15 +948,8 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree

         report_html += "</div>"

+        # Enhanced Visual Analysis section with exact biological age
         if breed_info:
             pace_info = ""
             if age and age > 0:
                 pace = breed_info["bio_age"] / age
@@ -830,24 +967,25 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
             visual_health = confidence_factors.get('visual_health', {})
             hrqol_factor = confidence_factors.get('hrqol_factor', 0)
             gait_factor = confidence_factors.get('gait_factor', 0)
+            prediction_confidence = confidence_factors.get('prediction_confidence', 0.5)

             factors_info = f"""<div style="background: #f8f9fa; border-radius: 8px; padding: 10px; margin: 10px 0;">
+                <p style="margin: 5px 0; font-size: 0.9em; color: #555;"><strong>Precision Analysis Factors:</strong></p>
                 <p style="margin: 2px 0; font-size: 0.8em; color: #666;">• Visual Health Factor: {visual_health.get('age_factor', 0):.2f}</p>
                 <p style="margin: 2px 0; font-size: 0.8em; color: #666;">• HRQOL Adjustment: {hrqol_factor:.2f}</p>
                 <p style="margin: 2px 0; font-size: 0.8em; color: #666;">• Gait Factor: {gait_factor:.2f}</p>
+                <p style="margin: 2px 0; font-size: 0.8em; color: #666;">• Prediction Confidence: {prediction_confidence:.1%}</p>
             </div>"""

             report_html += f"""
             <div style="border: 2px solid #333333; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
+                <h3 style="color: #000000; margin: 0 0 15px 0; font-weight: 700; border-bottom: 2px solid #333333; padding-bottom: 8px;">{input_type_icon} Precise Visual Analysis</h3>
                 <p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Detected Breed:</strong> <span style="color: #000000; font-weight: 700;">{breed_info['breed']}</span> <span style="background: #333333; color: #ffffff; padding: 2px 6px; border-radius: 8px; font-size: 0.9em;">({breed_info['confidence']:.1%} confidence)</span></p>
+                <p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Precise Biological Age:</strong> <span style="color: #000000; font-weight: 700;">{breed_info['bio_age']} years</span></p>
                 <p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Chronological Age:</strong> <span style="color: #000000; font-weight: 700;">{age or 'Not provided'} years</span></p>
                 {vision_quality_info}
                 {pace_info}
                 {factors_info}
             </div>
             """
@@ -860,7 +998,7 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
                 <p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Mobility Assessment:</strong> <span style="color: #000000; font-weight: 700;">{video_features['mobility_assessment']}</span></p>
                 <p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Comfort Assessment:</strong> <span style="color: #000000; font-weight: 700;">{video_features['comfort_assessment']}</span></p>
                 <p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Vitality Assessment:</strong> <span style="color: #000000; font-weight: 700;">{video_features['vitality_assessment']}</span></p>
+                <p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Enhanced Analysis:</strong> <span style="color: #000000; font-weight: 700;">{video_features['frames_analyzed']} frames with precise movement analysis</span></p>
             </div>
             """
@@ -900,9 +1038,6 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
         if hrqol_scores["alertness"] < 70:
             recommendations.append("🧠 **Cognitive Support**: Introduce cognitive enhancement activities")

         if breed_info and age:
             pace = breed_info["bio_age"] / age
             if pace > 1.3:
@@ -921,11 +1056,11 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
         <div style="background: #F5F5F5; border: 1px solid #E0E0E0; padding: 20px; border-radius: 8px; margin: 20px 0;">
             <p style="margin: 0; font-size: 0.9em; color: #424242; line-height: 1.5;">
                 <strong style="color: #D32F2F;">⚠️ Important Disclaimer:</strong>
+                This analysis uses enhanced AI models with precise biological age prediction and streamlined HRQOL assessment.
                 Results are for educational purposes only. Always consult with a qualified veterinarian for professional medical advice and diagnosis.
             </p>
             <p style="margin: 10px 0 0 0; font-size: 0.8em; color: #666;">
+                <strong>Enhanced Features:</strong> Precise age calculation, comprehensive 4-question HRQOL assessment, confidence-weighted predictions
             </p>
         </div>
     </div>
@@ -1042,7 +1177,7 @@ with gr.Blocks(
             🐕 Enhanced AI Dog Health & Aging Analyzer
         </h1>
         <p style="margin: 15px 0 0 0; font-size: 1.2em; opacity: 0.9;">
+            Precise Biological Age Prediction • Streamlined HRQOL Assessment • Multi-Factor Analysis
         </p>
     </div>
     """)
@@ -1160,7 +1295,7 @@ with gr.Blocks(
             """)

             analyze_button = gr.Button(
+                "🔬 Run Precise AI Analysis",
                 variant="primary",
                 size="lg",
                 elem_classes=["analyze-button"]