# app.py
import os
import gradio as gr
from PIL import Image
import torch
import numpy as np
import cv2
from transformers import (
    CLIPProcessor, CLIPModel,
    AutoProcessor
)
import time
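# Likely runtime dependencies for a requirements.txt, inferred from the imports above
# (package names are the usual PyPI ones; versions are deliberately left unpinned):
#   gradio, torch, transformers, numpy, pillow, opencv-python-headless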
# ─────────────────────────────
# CONFIG: set your HF token here or via env var HF_TOKEN
HF_TOKEN = os.getenv("HF_TOKEN")
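# Typical ways to provide the token: add HF_TOKEN as a secret in the Space settings
# (exposed to the app as an environment variable), or export it locally before launching,
# e.g. `export HF_TOKEN=hf_xxxxxxxx` (placeholder value). The public checkpoints used
# below also load without a token.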
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# 1. CLIP for breed, age, basic health
clip_model = CLIPModel.from_pretrained(
    "openai/clip-vit-base-patch16",
    token=HF_TOKEN
).to(device)
clip_processor = CLIPProcessor.from_pretrained(
    "openai/clip-vit-base-patch16",
    token=HF_TOKEN
)
# 2. Alternative medical analysis model (public, no gating issues);
#    falls back to the base CLIP model if it cannot be loaded with transformers
try:
    medical_processor = AutoProcessor.from_pretrained(
        "microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224",
        token=HF_TOKEN
    )
    medical_model = CLIPModel.from_pretrained(
        "microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224",
        token=HF_TOKEN
    ).to(device)
    MEDICAL_MODEL_AVAILABLE = True
except Exception:
    medical_processor = clip_processor
    medical_model = clip_model
    MEDICAL_MODEL_AVAILABLE = False
# 3. Stanford Dogs breeds & lifespans (expanded list)
STANFORD_BREEDS = [
    "afghan hound", "african hunting dog", "airedale", "american staffordshire terrier",
    "appenzeller", "australian terrier", "basenji", "basset", "beagle",
    "bedlington terrier", "bernese mountain dog", "black-and-tan coonhound",
    "blenheim spaniel", "bloodhound", "bluetick", "border collie", "border terrier",
    "borzoi", "boston bull", "bouvier des flandres", "boxer", "brabancon griffon",
    "briard", "brittany spaniel", "bull mastiff", "cairn", "cardigan",
    "chesapeake bay retriever", "chihuahua", "chow", "clumber", "cocker spaniel",
    "collie", "curly-coated retriever", "dandie dinmont", "dhole", "dingo",
    "doberman", "english foxhound", "english setter", "english springer",
    "entlebucher", "eskimo dog", "flat-coated retriever", "french bulldog",
    "german shepherd", "german short-haired pointer", "giant schnauzer",
    "golden retriever", "gordon setter", "great dane", "great pyrenees",
    "greater swiss mountain dog", "groenendael", "ibizan hound", "irish setter",
    "irish terrier", "irish water spaniel", "irish wolfhound", "italian greyhound",
    "japanese spaniel", "keeshond", "kelpie", "kerry blue terrier", "komondor",
    "kuvasz", "labrador retriever", "lakeland terrier", "leonberg", "lhasa",
    "malamute", "malinois", "maltese dog", "mexican hairless", "miniature pinscher",
    "miniature poodle", "miniature schnauzer", "newfoundland", "norfolk terrier",
    "norwegian elkhound", "norwich terrier", "old english sheepdog", "otterhound",
    "papillon", "pekinese", "pembroke", "pomeranian", "pug", "redbone",
    "rhodesian ridgeback", "rottweiler", "saint bernard", "saluki", "samoyed",
    "schipperke", "scotch terrier", "scottish deerhound", "sealyham terrier",
    "shetland sheepdog", "shih tzu", "siberian husky", "silky terrier",
    "soft-coated wheaten terrier", "staffordshire bullterrier", "standard poodle",
    "standard schnauzer", "sussex spaniel", "tibetan mastiff", "tibetan terrier",
    "toy poodle", "toy terrier", "vizsla", "walker hound", "weimaraner",
    "welsh springer spaniel", "west highland white terrier", "whippet",
    "wire-haired fox terrier", "yorkshire terrier"
]
BREED_LIFESPAN = {
    "afghan hound": 11.1, "african hunting dog": 10.5, "airedale": 11.5,
    "american staffordshire terrier": 12.5, "appenzeller": 13.0, "australian terrier": 13.5,
    "basenji": 12.1, "basset": 12.5, "beagle": 12.5, "bedlington terrier": 13.7,
    "bernese mountain dog": 10.1, "black-and-tan coonhound": 10.8, "blenheim spaniel": 13.3,
    "bloodhound": 9.3, "bluetick": 11.0, "border collie": 13.1, "border terrier": 14.2,
    "borzoi": 12.0, "boston bull": 11.8, "bouvier des flandres": 11.3, "boxer": 11.3,
    "brabancon griffon": 13.0, "briard": 12.6, "brittany spaniel": 13.5,
    "bull mastiff": 10.2, "cairn": 14.0, "cardigan": 13.2, "chesapeake bay retriever": 11.6,
    "chihuahua": 11.8, "chow": 12.1, "clumber": 12.3, "cocker spaniel": 13.3,
    "collie": 13.3, "curly-coated retriever": 12.2, "dandie dinmont": 12.8,
    "dhole": 10.0, "dingo": 10.0, "doberman": 11.2, "english foxhound": 13.0,
    "english setter": 13.1, "english springer": 13.5, "entlebucher": 13.0,
    "eskimo dog": 11.3, "flat-coated retriever": 11.7, "french bulldog": 9.8,
    "german shepherd": 11.3, "german short-haired pointer": 13.4, "giant schnauzer": 12.1,
    "golden retriever": 13.2, "gordon setter": 12.4, "great dane": 10.6,
    "great pyrenees": 10.9, "greater swiss mountain dog": 10.9, "groenendael": 12.0,
    "ibizan hound": 13.3, "irish setter": 12.9, "irish terrier": 13.5,
    "irish water spaniel": 10.8, "irish wolfhound": 9.9, "italian greyhound": 14.0,
    "japanese spaniel": 13.3, "keeshond": 12.3, "kelpie": 12.0, "kerry blue terrier": 12.4,
    "komondor": 10.5, "kuvasz": 10.5, "labrador retriever": 13.1, "lakeland terrier": 14.2,
    "leonberg": 10.0, "lhasa": 14.0, "malamute": 11.3, "malinois": 12.0,
    "maltese dog": 13.1, "mexican hairless": 13.0, "miniature pinscher": 13.7,
    "miniature poodle": 14.0, "miniature schnauzer": 13.3, "newfoundland": 11.0,
    "norfolk terrier": 13.5, "norwegian elkhound": 13.0, "norwich terrier": 14.0,
    "old english sheepdog": 12.1, "otterhound": 12.0, "papillon": 14.5,
    "pekinese": 13.3, "pembroke": 13.2, "pomeranian": 12.2, "pug": 11.6,
    "redbone": 12.0, "rhodesian ridgeback": 12.0, "rottweiler": 10.6,
    "saint bernard": 9.3, "saluki": 13.3, "samoyed": 13.1, "schipperke": 14.2,
    "scotch terrier": 12.7, "scottish deerhound": 10.5, "sealyham terrier": 13.1,
    "shetland sheepdog": 13.4, "shih tzu": 12.8, "siberian husky": 11.9,
    "silky terrier": 13.3, "soft-coated wheaten terrier": 13.7, "staffordshire bullterrier": 12.0,
    "standard poodle": 14.0, "standard schnauzer": 13.0, "sussex spaniel": 13.5,
    "tibetan mastiff": 13.3, "tibetan terrier": 13.8, "toy poodle": 14.0,
    "toy terrier": 13.0, "vizsla": 13.5, "walker hound": 12.0, "weimaraner": 12.8,
    "welsh springer spaniel": 14.0, "west highland white terrier": 13.4, "whippet": 13.4,
    "wire-haired fox terrier": 13.5, "yorkshire terrier": 13.3
}
# 4. VetMetrica HRQOL Framework with dropdown options
HRQOL_QUESTIONNAIRE = {
    "vitality": {
        "title": "🐕 Vitality & Energy Assessment",
        "description": "Evaluate your dog's energy levels and enthusiasm for activities",
        "questions": [
            {
                "id": "vitality_energy",
                "text": "How would you rate your dog's energy level over the past week?",
                "options": [
                    "Excellent - Very energetic, eager for activities",
                    "Very Good - Generally energetic with occasional rest",
                    "Good - Moderate energy, participates willingly",
                    "Fair - Lower energy, needs encouragement",
                    "Poor - Very low energy, reluctant to participate"
                ]
            },
            {
                "id": "vitality_play",
                "text": "How often does your dog seek out play or interaction?",
                "options": [
                    "Always seeks play/interaction",
                    "Often seeks play/interaction",
                    "Sometimes seeks play/interaction",
                    "Rarely seeks play/interaction",
                    "Never seeks play/interaction"
                ]
            },
            {
                "id": "vitality_response",
                "text": "How quickly does your dog respond to exciting stimuli (treats, walks, visitors)?",
                "options": [
                    "Immediate enthusiastic response",
                    "Quick positive response",
                    "Moderate response time",
                    "Slow or delayed response",
                    "No response or negative reaction"
                ]
            }
        ],
        "weight": 0.25
    },
    "comfort": {
        "title": "😌 Comfort & Pain Management",
        "description": "Assess signs of discomfort, pain, or mobility issues",
        "questions": [
            {
                "id": "comfort_activities",
                "text": "How comfortable does your dog appear during normal activities?",
                "options": [
                    "Completely comfortable during all activities",
                    "Mostly comfortable with minor adjustments",
                    "Some discomfort during certain activities",
                    "Frequently uncomfortable, avoids some activities",
                    "Severe discomfort, avoids most activities"
                ]
            },
            {
                "id": "comfort_pain_frequency",
                "text": "How often do you notice signs of pain or discomfort?",
                "options": [
                    "Never shows pain signs",
                    "Rarely shows pain signs (< 1 day/week)",
                    "Sometimes shows pain signs (2-3 days/week)",
                    "Often shows pain signs (4-5 days/week)",
                    "Always shows pain signs (daily)"
                ]
            },
            {
                "id": "comfort_impact",
                "text": "How does your dog's comfort level affect daily activities?",
                "options": [
                    "No impact on daily activities",
                    "Minimal impact on daily activities",
                    "Moderate impact, some activities modified",
                    "Significant impact, many activities avoided",
                    "Severe impact, most activities impossible"
                ]
            }
        ],
        "weight": 0.25
    },
    "emotional_wellbeing": {
        "title": "😊 Emotional Wellbeing",
        "description": "Evaluate mood, anxiety levels, and social engagement",
        "questions": [
            {
                "id": "emotion_mood",
                "text": "How would you describe your dog's overall mood?",
                "options": [
                    "Very positive - happy, content, enthusiastic",
                    "Mostly positive - generally cheerful",
                    "Neutral - neither particularly happy nor sad",
                    "Mostly negative - seems subdued or withdrawn",
                    "Very negative - appears depressed or distressed"
                ]
            },
            {
                "id": "emotion_anxiety",
                "text": "How often does your dog show signs of anxiety or stress?",
                "options": [
                    "Never shows anxiety/stress",
                    "Rarely shows anxiety/stress",
                    "Sometimes shows anxiety/stress",
                    "Often shows anxiety/stress",
                    "Constantly shows anxiety/stress"
                ]
            },
            {
                "id": "emotion_engagement",
                "text": "How engaged is your dog with family activities?",
                "options": [
                    "Highly engaged, initiates family interactions",
                    "Well engaged, participates enthusiastically",
                    "Moderately engaged, participates when invited",
                    "Minimally engaged, needs encouragement",
                    "Not engaged, avoids family activities"
                ]
            }
        ],
        "weight": 0.25
    },
    "alertness": {
        "title": "🧠 Alertness & Cognition",
        "description": "Assess cognitive function, awareness, and responsiveness",
        "questions": [
            {
                "id": "alert_awareness",
                "text": "How alert and aware does your dog seem?",
                "options": [
                    "Highly alert, notices everything immediately",
                    "Alert, notices most things quickly",
                    "Moderately alert, notices things with some delay",
                    "Slightly alert, slow to notice surroundings",
                    "Not alert, seems confused or disoriented"
                ]
            },
            {
                "id": "alert_commands",
                "text": "How well does your dog respond to commands or their name?",
                "options": [
                    "Responds immediately to name/commands",
                    "Usually responds quickly to name/commands",
                    "Sometimes responds, may need repetition",
                    "Often doesn't respond, needs multiple attempts",
                    "Rarely or never responds to name/commands"
                ]
            },
            {
                "id": "alert_focus",
                "text": "How focused is your dog during training or play?",
                "options": [
                    "Highly focused, maintains attention easily",
                    "Good focus, occasional distraction",
                    "Moderate focus, some difficulty concentrating",
                    "Poor focus, easily distracted",
                    "No focus, cannot maintain attention"
                ]
            }
        ],
        "weight": 0.25
    }
}
def predict_biological_age(img: Image.Image, breed: str) -> int:
    """Estimate biological age (in years) by zero-shot matching against age-prompt texts."""
    avg = BREED_LIFESPAN.get(breed.lower(), 12)
    prompts = [f"a {age}-year-old {breed}" for age in range(1, int(avg * 2) + 1)]
    inputs = clip_processor(text=prompts, images=img, return_tensors="pt", padding=True).to(device)
    with torch.no_grad():
        probs = clip_model(**inputs).logits_per_image.softmax(1)[0].cpu().numpy()
    return int(np.argmax(probs) + 1)
def analyze_medical_image(img: Image.Image):
    """Zero-shot health screening of a single frame; returns (condition label, confidence)."""
    health_conditions = [
        "healthy normal dog",
        "dog with visible health issues",
        "dog showing signs of illness",
        "dog with poor body condition",
        "dog with excellent health"
    ]
    if MEDICAL_MODEL_AVAILABLE:
        inputs = medical_processor(text=health_conditions, images=img, return_tensors="pt", padding=True).to(device)
        with torch.no_grad():
            logits = medical_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
    else:
        inputs = clip_processor(text=health_conditions, images=img, return_tensors="pt", padding=True).to(device)
        with torch.no_grad():
            logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
    idx = int(np.argmax(logits))
    label = health_conditions[idx]
    conf = float(logits[idx])
    return label, conf
def classify_breed_and_health(img: Image.Image, override=None):
    """Zero-shot breed classification plus four coarse visual health checks."""
    inp = clip_processor(images=img, return_tensors="pt").to(device)
    with torch.no_grad():
        feats = clip_model.get_image_features(**inp)
    # Normalize features and apply CLIP's learned temperature so the softmax
    # matches what the model's own logits_per_image would produce
    feats = feats / feats.norm(dim=-1, keepdim=True)
    logit_scale = clip_model.logit_scale.exp().item()
    text_prompts = [f"a photo of a {b}" for b in STANFORD_BREEDS]
    ti = clip_processor(text=text_prompts, return_tensors="pt", padding=True).to(device)
    with torch.no_grad():
        tf = clip_model.get_text_features(**ti)
    tf = tf / tf.norm(dim=-1, keepdim=True)
    sims = (logit_scale * feats @ tf.T).softmax(-1)[0].cpu().numpy()
    idx = int(np.argmax(sims))
    breed = override or STANFORD_BREEDS[idx]
    breed_conf = float(sims[idx])
    aspects = {
        "Coat Quality": ("shiny healthy coat", "dull patchy fur"),
        "Eye Clarity": ("bright clear eyes", "cloudy milky eyes"),
        "Body Condition": ("ideal muscle tone", "visible ribs or bones"),
        "Dental Health": ("clean white teeth", "yellow stained teeth")
    }
    health = {}
    for name, (p, n) in aspects.items():
        ti = clip_processor(text=[p, n], return_tensors="pt", padding=True).to(device)
        with torch.no_grad():
            tf2 = clip_model.get_text_features(**ti)
        tf2 = tf2 / tf2.norm(dim=-1, keepdim=True)
        sim2 = (logit_scale * feats @ tf2.T).softmax(-1)[0].cpu().numpy()
        choice = p if sim2[0] > sim2[1] else n
        health[name] = {"assessment": choice, "confidence": float(max(sim2))}
    return breed, breed_conf, health
def analyze_video_gait(video_path):
    """Sample up to 15 evenly spaced frames and score mobility, comfort, and vitality."""
    if not video_path:
        return None
    try:
        cap = cv2.VideoCapture(video_path)
        fps = cap.get(cv2.CAP_PROP_FPS) or 24
        total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if total == 0:
            cap.release()
            return None
        indices = np.linspace(0, total - 1, min(15, total), dtype=int)
        health_scores = []
        movement_scores = []
        vitality_scores = []
        for i in indices:
            cap.set(cv2.CAP_PROP_POS_FRAMES, i)
            ret, frame = cap.read()
            if not ret:
                continue
            img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            # Health assessment
            _, health_conf = analyze_medical_image(img)
            health_scores.append(health_conf)
            # Movement assessment
            movement_prompts = ["dog moving normally", "dog limping or showing pain", "dog moving stiffly"]
            inputs = clip_processor(text=movement_prompts, images=img, return_tensors="pt", padding=True).to(device)
            with torch.no_grad():
                movement_logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
            movement_scores.append(float(movement_logits[0]))
            # Vitality assessment
            vitality_prompts = ["energetic active dog", "lethargic tired dog", "alert playful dog"]
            inputs = clip_processor(text=vitality_prompts, images=img, return_tensors="pt", padding=True).to(device)
            with torch.no_grad():
                vitality_logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
            vitality_scores.append(float(vitality_logits[0] + vitality_logits[2]))
        cap.release()
        if not health_scores:
            return None
        return {
            "duration_sec": round(total / fps, 1),
            "mobility_score": float(np.mean(movement_scores)) * 100,
            "comfort_score": float(np.mean(health_scores)) * 100,
            "vitality_score": float(np.mean(vitality_scores)) * 100,
            "frames_analyzed": len(health_scores),
            "mobility_assessment": "Normal gait pattern" if np.mean(movement_scores) > 0.6 else "Mobility concerns detected",
            "comfort_assessment": "No obvious discomfort" if np.mean(health_scores) > 0.7 else "Possible discomfort signs",
            "vitality_assessment": "Good energy level" if np.mean(vitality_scores) > 0.6 else "Low energy observed"
        }
    except Exception:
        return None
def score_from_response(response, score_mapping):
    """Extract a numeric score from a text response; defaults to 50 if nothing matches."""
    if not response:
        return 50
    for key, value in score_mapping.items():
        if key.lower() in response.lower():
            return value
    return 50
def calculate_hrqol_scores(hrqol_responses):
    """Convert VetMetrica-style responses to 0-100 domain scores"""
    score_mapping = {
        "excellent": 100, "very good": 80, "good": 60, "fair": 40, "poor": 20,
        "always": 100, "often": 80, "sometimes": 60, "rarely": 40, "never": 20,
        "immediate": 100, "quick": 80, "moderate": 60, "slow": 40, "no response": 20,
        "completely": 100, "mostly": 80, "some": 60, "frequently": 40, "severe": 20,
        "very positive": 100, "mostly positive": 80, "neutral": 60, "mostly negative": 40, "very negative": 20,
        "highly": 100, "well": 80, "moderately": 60, "minimally": 40, "not": 20
    }
    domain_scores = {}
    # Vitality Domain
    vitality_scores = [
        score_from_response(hrqol_responses.get("vitality_energy", ""), score_mapping),
        score_from_response(hrqol_responses.get("vitality_play", ""), score_mapping),
        score_from_response(hrqol_responses.get("vitality_response", ""), score_mapping)
    ]
    domain_scores["vitality"] = np.mean(vitality_scores)
    # Comfort Domain (invert pain frequency)
    comfort_scores = [
        score_from_response(hrqol_responses.get("comfort_activities", ""), score_mapping),
        100 - score_from_response(hrqol_responses.get("comfort_pain_frequency", ""), score_mapping),
        score_from_response(hrqol_responses.get("comfort_impact", ""), score_mapping)
    ]
    domain_scores["comfort"] = max(0, np.mean(comfort_scores))
    # Emotional Wellbeing Domain (invert anxiety)
    emotion_scores = [
        score_from_response(hrqol_responses.get("emotion_mood", ""), score_mapping),
        100 - score_from_response(hrqol_responses.get("emotion_anxiety", ""), score_mapping),
        score_from_response(hrqol_responses.get("emotion_engagement", ""), score_mapping)
    ]
    domain_scores["emotional_wellbeing"] = max(0, np.mean(emotion_scores))
    # Alertness Domain
    alertness_scores = [
        score_from_response(hrqol_responses.get("alert_awareness", ""), score_mapping),
        score_from_response(hrqol_responses.get("alert_commands", ""), score_mapping),
        score_from_response(hrqol_responses.get("alert_focus", ""), score_mapping)
    ]
    domain_scores["alertness"] = np.mean(alertness_scores)
    return domain_scores
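# Illustrative example only (hypothetical responses, not wired into the app):
#   calculate_hrqol_scores({
#       "vitality_energy": "Good - Moderate energy, participates willingly",
#       "comfort_pain_frequency": "Never shows pain signs",
#   })
# yields vitality = (60 + 50 + 50) / 3 ≈ 53.3 (unanswered items default to 50) and
# comfort = (50 + (100 - 20) + 50) / 3 = 60.0, since the "never ... pain" answer is inverted.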
def get_score_color(score):
    """Return background and text color based on score for better visibility"""
    if score >= 80:
        return {"bg": "#4CAF50", "text": "#FFFFFF"}  # Green background, white text
    elif score >= 60:
        return {"bg": "#FFC107", "text": "#000000"}  # Yellow background, black text
    elif score >= 40:
        return {"bg": "#FF9800", "text": "#FFFFFF"}  # Orange background, white text
    else:
        return {"bg": "#F44336", "text": "#FFFFFF"}  # Red background, white text
def get_healthspan_grade(score):
    """Map a 0-100 healthspan score to a letter grade."""
    if score >= 85:
        return "Excellent (A+)"
    elif score >= 75:
        return "Very Good (A)"
    elif score >= 65:
        return "Good (B)"
    elif score >= 55:
        return "Fair (C)"
    elif score >= 45:
        return "Poor (D)"
    else:
        return "Critical (F)"
def show_loading():
    """Display loading animation"""
    return """
    <div style="text-align: center; padding: 40px;">
        <div style="display: inline-block; width: 40px; height: 40px; border: 4px solid #f3f3f3; border-top: 4px solid #667eea; border-radius: 50%; animation: spin 1s linear infinite;"></div>
        <style>
            @keyframes spin {
                0% { transform: rotate(0deg); }
                100% { transform: rotate(360deg); }
            }
        </style>
        <h3 style="color: #667eea; margin-top: 20px;">🔬 Analyzing Your Dog's Health...</h3>
        <p style="color: #666;">Please wait while we process the image/video and questionnaire data.</p>
        <div style="background: #f0f0f0; border-radius: 20px; padding: 10px; margin: 20px auto; width: 300px;">
            <div style="background: linear-gradient(90deg, #667eea, #764ba2); height: 6px; border-radius: 10px; width: 0%; animation: progress 3s ease-in-out infinite;"></div>
        </div>
        <style>
            @keyframes progress {
                0% { width: 0%; }
                50% { width: 80%; }
                100% { width: 100%; }
            }
        </style>
    </div>
    """
def comprehensive_healthspan_analysis(input_type, image_input, video_input, breed, age, *hrqol_responses):
    """Combine image/video analysis with the HRQOL assessment, based on the selected input type."""
    # Show the loading animation first
    yield show_loading()
    # Brief pause so the loading animation is visible before the heavy work starts
    time.sleep(2)
    # Determine which input to use based on the dropdown selection
    if input_type == "Image Analysis":
        selected_media = image_input
        media_type = "image"
    elif input_type == "Video Analysis":
        selected_media = video_input
        media_type = "video"
    else:
        yield "❌ <strong>Error</strong>: Please select an input type."
        return
    if selected_media is None:
        yield f"❌ <strong>Error</strong>: Please provide a {media_type} for analysis."
        return
    # Check that the questionnaire has been completed
    if not hrqol_responses or all(not r for r in hrqol_responses):
        yield "❌ <strong>Error</strong>: Please complete the HRQOL questionnaire before analysis."
        return
    # Build the HRQOL responses dictionary
    response_keys = []
    for domain_key, domain_data in HRQOL_QUESTIONNAIRE.items():
        for question in domain_data["questions"]:
            response_keys.append(question["id"])
    hrqol_dict = {key: hrqol_responses[i] if i < len(hrqol_responses) else ""
                  for i, key in enumerate(response_keys)}
    # Calculate HRQOL scores
    hrqol_scores = calculate_hrqol_scores(hrqol_dict)
    # Initialize analysis variables
    video_features = {}
    breed_info = None
    bio_age = None
    health_aspects = {}
    # Perform analysis based on media type
    if media_type == "image":
        try:
            detected_breed, breed_conf, health_aspects = classify_breed_and_health(selected_media, breed)
            bio_age = predict_biological_age(selected_media, detected_breed)
            breed_info = {
                "breed": detected_breed,
                "confidence": breed_conf,
                "bio_age": bio_age
            }
        except Exception:
            pass
    elif media_type == "video":
        # For video, analyze movement and also extract a frame for breed analysis
        video_features = analyze_video_gait(selected_media) or {}
        # Try to extract a frame from the video for breed analysis
        try:
            cap = cv2.VideoCapture(selected_media)
            ret, frame = cap.read()
            if ret:
                img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                detected_breed, breed_conf, health_aspects = classify_breed_and_health(img, breed)
                bio_age = predict_biological_age(img, detected_breed)
                breed_info = {
                    "breed": detected_breed,
                    "confidence": breed_conf,
                    "bio_age": bio_age
                }
            cap.release()
        except Exception:
            pass
    # Calculate the composite healthspan score
    video_weight = 0.4 if video_features else 0.0
    hrqol_weight = 0.6 if video_features else 1.0
    if video_features:
        # Sub-weights (0.15 / 0.10 / 0.15) are normalized by their sum so the video
        # composite spans 0-100 before the 40/60 video/HRQOL blend is applied
        video_score = (
            video_features.get("mobility_score", 70) * 0.15 +
            video_features.get("comfort_score", 70) * 0.10 +
            video_features.get("vitality_score", 70) * 0.15
        ) / 0.40
    else:
        video_score = 0
    hrqol_composite = (
        hrqol_scores["vitality"] * 0.25 +
        hrqol_scores["comfort"] * 0.25 +
        hrqol_scores["emotional_wellbeing"] * 0.25 +
        hrqol_scores["alertness"] * 0.25
    )
    final_healthspan_score = (video_score * video_weight) + (hrqol_composite * hrqol_weight)
    final_healthspan_score = min(100, max(0, final_healthspan_score))
    # Generate the comprehensive report with improved colors
    input_type_icon = "📸" if media_type == "image" else "🎥"
    report_html = f"""
    <div style="font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; max-width: 1000px; margin: 0 auto;">
        <div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 30px; border-radius: 15px; margin: 20px 0; text-align: center; box-shadow: 0 4px 6px rgba(0,0,0,0.1);">
            <h2 style="margin: 0; font-size: 2em; text-shadow: 1px 1px 2px rgba(0,0,0,0.3);">{input_type_icon} Comprehensive Healthspan Assessment</h2>
            <div style="font-size: 1.1em; margin: 10px 0; opacity: 0.9;">Analysis Type: {input_type}</div>
            <div style="font-size: 3em; font-weight: bold; margin: 15px 0; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);">{final_healthspan_score:.1f}/100</div>
            <div style="font-size: 1.2em; background: rgba(255,255,255,0.2); padding: 8px 16px; border-radius: 20px; display: inline-block;">{get_healthspan_grade(final_healthspan_score)}</div>
        </div>
        <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(240px, 1fr)); gap: 20px; margin: 30px 0;">
    """
    # Add domain score cards with improved contrast
    for domain, label in [("vitality", "🐕 Vitality"), ("comfort", "😌 Comfort"), ("emotional_wellbeing", "😊 Emotional"), ("alertness", "🧠 Alertness")]:
        colors = get_score_color(hrqol_scores[domain])
        report_html += f"""
        <div style="border: 2px solid #e0e0e0; padding: 20px; border-radius: 12px; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
            <h4 style="margin: 0 0 15px 0; color: #333333; font-weight: 600;">{label.split()[1]}</h4>
            <div style="background: #e9ecef; height: 12px; border-radius: 6px; margin: 10px 0; border: 1px solid #dee2e6;">
                <div style="background: {colors['bg']}; height: 100%; width: {hrqol_scores[domain]}%; border-radius: 6px; transition: width 0.3s ease; position: relative; display: flex; align-items: center; justify-content: center;">
                    <span style="color: {colors['text']}; font-size: 10px; font-weight: bold; text-shadow: 1px 1px 1px rgba(0,0,0,0.3);">{hrqol_scores[domain]:.0f}</span>
                </div>
            </div>
            <div style="font-size: 1.1em; font-weight: bold; color: #333333;">{hrqol_scores[domain]:.1f}/100</div>
        </div>
        """
    report_html += "</div>"
    # Visual Analysis section with better contrast
    if breed_info:
        pace_info = ""
        if age and age > 0:
            pace = breed_info["bio_age"] / age
            pace_status = "Accelerated" if pace > 1.2 else "Normal" if pace > 0.8 else "Slow"
            # Color bands match the status bands: red = accelerated, green = normal, orange = slow
            pace_color = "#FF5722" if pace > 1.2 else "#4CAF50" if pace > 0.8 else "#FF9800"
            pace_info = f"""<p style="margin: 8px 0;"><strong style="color: #333;">Aging Pace:</strong>
                <span style="background: {pace_color}; color: white; padding: 4px 8px; border-radius: 12px; font-weight: bold; text-shadow: 1px 1px 1px rgba(0,0,0,0.3);">
                {pace:.2f}× ({pace_status})</span></p>"""
        report_html += f"""
        <div style="border: 2px solid #2196F3; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
            <h3 style="color: #1976D2; margin: 0 0 15px 0; font-weight: 600; border-bottom: 2px solid #E3F2FD; padding-bottom: 8px;">{input_type_icon} Visual Analysis</h3>
            <p style="margin: 8px 0; color: #1976D2;"><strong>Detected Breed:</strong> <span style="color: #1976D2; font-weight: 600;">{breed_info['breed']}</span> <span style="background: #E3F2FD; color: #1976D2; padding: 2px 6px; border-radius: 8px; font-size: 0.9em;">({breed_info['confidence']:.1%} confidence)</span></p>
            <p style="margin: 8px 0; color: #1976D2;"><strong>Estimated Biological Age:</strong> <span style="color: #1976D2; font-weight: 600;">{breed_info['bio_age']} years</span></p>
            <p style="margin: 8px 0; color: #1976D2;"><strong>Chronological Age:</strong> <span style="color: #1976D2; font-weight: 600;">{age or 'Not provided'} years</span></p>
            {pace_info}
        </div>
        """
    # Add video-specific analysis if available
    if video_features:
        report_html += f"""
        <div style="border: 2px solid #FF5722; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
            <h3 style="color: #D84315; margin: 0 0 15px 0; font-weight: 600; border-bottom: 2px solid #FFEBE7; padding-bottom: 8px;">🎥 Video Gait Analysis</h3>
            <p style="margin: 8px 0; color: #333;"><strong>Duration:</strong> <span style="color: #D84315; font-weight: 600;">{video_features['duration_sec']} seconds</span></p>
            <p style="margin: 8px 0; color: #333;"><strong>Mobility Assessment:</strong> <span style="color: #D84315; font-weight: 600;">{video_features['mobility_assessment']}</span></p>
            <p style="margin: 8px 0; color: #333;"><strong>Comfort Assessment:</strong> <span style="color: #D84315; font-weight: 600;">{video_features['comfort_assessment']}</span></p>
            <p style="margin: 8px 0; color: #333;"><strong>Vitality Assessment:</strong> <span style="color: #D84315; font-weight: 600;">{video_features['vitality_assessment']}</span></p>
            <p style="margin: 8px 0; color: #333;"><strong>Frames Analyzed:</strong> <span style="color: #D84315; font-weight: 600;">{video_features['frames_analyzed']}</span></p>
        </div>
        """
    # Physical Health Assessment with improved visibility
    if health_aspects and media_type == "image":
        report_html += f"""
        <div style="border: 2px solid #4CAF50; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
            <h3 style="color: #2E7D32; margin: 0 0 15px 0; font-weight: 600; border-bottom: 2px solid #E8F5E8; padding-bottom: 8px;">📸 Physical Health Assessment</h3>
        """
        for aspect, data in health_aspects.items():
            is_healthy = any(word in data["assessment"].lower() for word in ["healthy", "bright", "clean", "ideal"])
            status_icon = "✅" if is_healthy else "⚠️"
            status_color = "#2E7D32" if is_healthy else "#F57C00"
            bg_color = "#E8F5E8" if is_healthy else "#FFF3E0"
            report_html += f"""
            <div style="margin: 10px 0; padding: 12px; background: {bg_color}; border-radius: 8px; border-left: 4px solid {status_color};">
                <p style="margin: 0; color: #333;">
                    <span style="font-size: 1.2em;">{status_icon}</span>
                    <strong style="color: {status_color};">{aspect}:</strong>
                    <span style="color: #333; font-weight: 500;">{data['assessment']}</span>
                    <span style="background: #E0E0E0; color: #424242; padding: 2px 6px; border-radius: 8px; font-size: 0.85em; margin-left: 8px;">
                    ({data['confidence']:.1%} confidence)</span>
                </p>
            </div>
            """
        report_html += "</div>"
    # Add recommendations
    recommendations = []
    if hrqol_scores["vitality"] < 60:
        recommendations.append("🐕 <strong>Vitality Enhancement</strong>: Consider shorter, more frequent exercise sessions and mental stimulation")
    if hrqol_scores["comfort"] < 70:
        recommendations.append("😌 <strong>Comfort Support</strong>: Evaluate joint supplements and orthopedic bedding")
    if hrqol_scores["emotional_wellbeing"] < 65:
        recommendations.append("😊 <strong>Emotional Care</strong>: Increase routine predictability and reduce stressors")
    if hrqol_scores["alertness"] < 70:
        recommendations.append("🧠 <strong>Cognitive Support</strong>: Implement brain-training games and mental challenges")
    if recommendations:
        report_html += f"""
        <div style="border: 2px solid #FF9800; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
            <h3 style="color: #F57C00; margin: 0 0 15px 0; font-weight: 600; border-bottom: 2px solid #FFF3E0; padding-bottom: 8px;">🎯 Personalized Recommendations</h3>
            {''.join([f'<div style="margin: 10px 0; padding: 12px; background: #FFF8E1; border-radius: 8px; border-left: 4px solid #FF9800;"><p style="margin: 0; color: #333; font-weight: 500;">{rec}</p></div>' for rec in recommendations])}
        </div>
        """
    # Disclaimer with improved visibility
    report_html += """
    <div style="background: #F5F5F5; border: 1px solid #E0E0E0; padding: 20px; border-radius: 8px; margin: 20px 0;">
        <p style="margin: 0; font-size: 0.9em; color: #424242; line-height: 1.5;">
            <strong style="color: #D32F2F;">⚠️ Important Disclaimer:</strong>
            This analysis uses validated HRQOL assessment tools but is for educational purposes only.
            Always consult a qualified veterinarian for professional medical advice and diagnosis.
        </p>
    </div>
    </div>
    """
    yield report_html
def update_media_input(input_type):
    """Update the visibility of media inputs based on dropdown selection"""
    if input_type == "Image Analysis":
        return gr.update(visible=True), gr.update(visible=False)
    else:  # Video Analysis
        return gr.update(visible=False), gr.update(visible=True)
# Custom CSS for enhanced styling
custom_css = """
/* Enhanced gradient background */
.gradio-container {
    background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
    min-height: 100vh;
}
/* Card styling */
.input-card, .questionnaire-card {
    background: white;
    border-radius: 15px;
    padding: 25px;
    box-shadow: 0 8px 25px rgba(0,0,0,0.1);
    margin: 10px;
    border: 1px solid #e0e6ed;
}
/* Header styling */
.main-header {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    text-align: center;
    padding: 30px;
    border-radius: 15px;
    margin-bottom: 30px;
    box-shadow: 0 8px 25px rgba(102, 126, 234, 0.3);
}
/* Button styling */
.analyze-button {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    border: none;
    color: white;
    padding: 15px 30px;
    font-size: 16px;
    font-weight: 600;
    border-radius: 25px;
    cursor: pointer;
    transition: all 0.3s ease;
    box-shadow: 0 4px 15px rgba(102, 126, 234, 0.3);
}
.analyze-button:hover {
    transform: translateY(-2px);
    box-shadow: 0 8px 25px rgba(102, 126, 234, 0.4);
}
/* Accordion styling */
.accordion-header {
    background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%);
    border: 1px solid #dee2e6;
    border-radius: 8px;
    padding: 15px;
    margin: 10px 0;
    cursor: pointer;
    transition: all 0.3s ease;
}
.accordion-header:hover {
    background: linear-gradient(135deg, #e9ecef 0%, #dee2e6 100%);
    transform: translateY(-1px);
}
/* Dropdown styling */
.gr-dropdown {
    border-radius: 8px;
    border: 2px solid #e0e6ed;
    transition: border-color 0.3s ease;
}
.gr-dropdown:focus {
    border-color: #667eea;
    box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1);
}
/* Progress animation */
@keyframes pulse {
    0% { opacity: 1; }
    50% { opacity: 0.5; }
    100% { opacity: 1; }
}
.loading-pulse {
    animation: pulse 2s infinite;
}
"""
# Gradio interface
with gr.Blocks(
    title="🐶 VetMetrica HRQOL Dog Health Analyzer",
    theme=gr.themes.Soft(),
    css=custom_css
) as demo:
    # Main header
    gr.HTML("""
    <div class="main-header">
        <h1 style="margin: 0; font-size: 2.5em; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);">
            🐕 VetMetrica© HRQOL Dog Health & Age Analyzer
        </h1>
        <p style="margin: 15px 0 0 0; font-size: 1.2em; opacity: 0.9;">
            AI-powered comprehensive analysis using validated Health-Related Quality of Life metrics
        </p>
    </div>
    """)
    with gr.Row():
        # Left column: media input
        with gr.Column(scale=1):
            gr.HTML("""
            <div class="input-card">
                <h2 style="color: #667eea; margin: 0 0 20px 0; text-align: center;">
                    📸 Media Input Selection
                </h2>
            </div>
            """)
            # Dropdown to choose between image and video analysis
            input_type_dropdown = gr.Dropdown(
                choices=["Image Analysis", "Video Analysis"],
                label="🔍 Select Analysis Type",
                value="Image Analysis",
                interactive=True,
                elem_classes=["gr-dropdown"]
            )
            # Media input components
            image_input = gr.Image(
                type="pil",
                label="📷 Upload Dog Photo or Use Webcam",
                visible=True,
                sources=["upload", "webcam"],
                height=300
            )
            video_input = gr.Video(
                label="🎥 Upload Video (10-30 seconds) or Record with Webcam",
                visible=False,
                sources=["upload", "webcam"],
                height=300
            )
            # Update visibility based on the dropdown selection
            input_type_dropdown.change(
                fn=update_media_input,
                inputs=[input_type_dropdown],
                outputs=[image_input, video_input]
            )
            # Optional information section
            gr.HTML("""
            <div style="margin: 20px 0;">
                <h3 style="color: #667eea; text-align: center; margin-bottom: 15px;">
                    ⚙️ Optional Information
                </h3>
            </div>
            """)
            breed_input = gr.Dropdown(
                STANFORD_BREEDS,
                label="🐕 Dog Breed (Auto-detected if not specified)",
                value=None,
                allow_custom_value=True,
                elem_classes=["gr-dropdown"]
            )
            age_input = gr.Number(
                label="📅 Chronological Age (years)",
                precision=1,
                value=None,
                minimum=0,
                maximum=25
            )
        # Right column: HRQOL questionnaire
        with gr.Column(scale=1):
            gr.HTML("""
            <div class="questionnaire-card">
                <h2 style="color: #667eea; margin: 0 0 10px 0; text-align: center;">
                    📋 VetMetrica© HRQOL Assessment
                </h2>
                <p style="text-align: center; color: #666; font-style: italic; margin-bottom: 20px;">
                    Complete all sections for accurate healthspan analysis
                </p>
            </div>
            """)
            hrqol_inputs = []
            for domain_key, domain_data in HRQOL_QUESTIONNAIRE.items():
                # Styled header above each accordion
                gr.HTML(f"""
                <div class="accordion-header">
                    <h3 style="margin: 0; color: #333;">
                        {domain_data['title']}
                    </h3>
                    <p style="margin: 5px 0 0 0; color: #666; font-size: 0.9em;">
                        {domain_data['description']}
                    </p>
                </div>
                """)
                with gr.Accordion(domain_data["title"], open=True):
                    for question in domain_data["questions"]:
                        # One dropdown per question
                        dropdown = gr.Dropdown(
                            choices=question["options"],
                            label=question["text"],
                            value=None,
                            interactive=True,
                            elem_classes=["gr-dropdown"]
                        )
                        hrqol_inputs.append(dropdown)
    # Analysis button
    gr.HTML("""
    <div style="text-align: center; margin: 30px 0;">
    """)
    analyze_button = gr.Button(
        "🔬 Analyze Comprehensive Healthspan",
        variant="primary",
        size="lg",
        elem_classes=["analyze-button"]
    )
    gr.HTML("</div>")
    # Results section
    output_report = gr.HTML()
    # Connect the analysis function (streams a loading screen, then the final report)
    analyze_button.click(
        fn=comprehensive_healthspan_analysis,
        inputs=[input_type_dropdown, image_input, video_input, breed_input, age_input] + hrqol_inputs,
        outputs=output_report
    )
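# Note on streaming (an assumption about the installed Gradio version): on older Gradio 3.x
# releases, generator callbacks like comprehensive_healthspan_analysis only stream their
# intermediate "loading" yield when the queue is enabled, e.g. `demo.queue()` before
# launching. Recent Gradio versions enable the queue by default, so launch() alone works.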
if __name__ == "__main__":
    demo.launch()