# app.py
import os
import gradio as gr
from PIL import Image
import torch
import numpy as np
import cv2
from transformers import (
CLIPProcessor, CLIPModel,
AutoProcessor
)
import time
import logging
# Set up logging for continuous feedback
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ----------------------------------------------------------------
# CONFIG: set your HF token here or via env var HF_TOKEN
HF_TOKEN = os.getenv("HF_TOKEN")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# 1. CLIP for breed, age, basic health
clip_model = CLIPModel.from_pretrained(
"openai/clip-vit-base-patch16",
token=HF_TOKEN
).to(device)
clip_processor = CLIPProcessor.from_pretrained(
"openai/clip-vit-base-patch16",
token=HF_TOKEN
)
# 2. Alternative medical analysis model
try:
medical_processor = AutoProcessor.from_pretrained(
"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224",
token=HF_TOKEN
)
medical_model = CLIPModel.from_pretrained(
"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224",
token=HF_TOKEN
).to(device)
MEDICAL_MODEL_AVAILABLE = True
except Exception as e:
    logger.warning(f"Medical model unavailable, falling back to base CLIP: {e}")
    medical_processor = clip_processor
    medical_model = clip_model
    MEDICAL_MODEL_AVAILABLE = False
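# Note: the BiomedCLIP checkpoint above is published in open_clip format
# rather than as a transformers CLIPModel, so the load above will typically
# raise and the app runs on the base CLIP fallback, with
# MEDICAL_MODEL_AVAILABLE recording which branch was taken.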
# Stanford Dogs & lifespans
STANFORD_BREEDS = [
"afghan hound", "african hunting dog", "airedale", "american staffordshire terrier",
"appenzeller", "australian terrier", "basenji", "basset", "beagle",
"bedlington terrier", "bernese mountain dog", "black-and-tan coonhound",
"blenheim spaniel", "bloodhound", "bluetick", "border collie", "border terrier",
"borzoi", "boston bull", "bouvier des flandres", "boxer", "brabancon griffon",
"briard", "brittany spaniel", "bull mastiff", "cairn", "cardigan",
"chesapeake bay retriever", "chihuahua", "chow", "clumber", "cocker spaniel",
"collie", "curly-coated retriever", "dandie dinmont", "dhole", "dingo",
"doberman", "english foxhound", "english setter", "english springer",
"entlebucher", "eskimo dog", "flat-coated retriever", "french bulldog",
"german shepherd", "german short-haired pointer", "giant schnauzer",
"golden retriever", "gordon setter", "great dane", "great pyrenees",
"greater swiss mountain dog", "groenendael", "ibizan hound", "irish setter",
"irish terrier", "irish water spaniel", "irish wolfhound", "italian greyhound",
"japanese spaniel", "keeshond", "kelpie", "kerry blue terrier", "komondor",
"kuvasz", "labrador retriever", "lakeland terrier", "leonberg", "lhasa",
"malamute", "malinois", "maltese dog", "mexican hairless", "miniature pinscher",
"miniature poodle", "miniature schnauzer", "newfoundland", "norfolk terrier",
"norwegian elkhound", "norwich terrier", "old english sheepdog", "otterhound",
"papillon", "pekinese", "pembroke", "pomeranian", "pug", "redbone",
"rhodesian ridgeback", "rottweiler", "saint bernard", "saluki", "samoyed",
"schipperke", "scotch terrier", "scottish deerhound", "sealyham terrier",
"shetland sheepdog", "shih tzu", "siberian husky", "silky terrier",
"soft-coated wheaten terrier", "staffordshire bullterrier", "standard poodle",
"standard schnauzer", "sussex spaniel", "tibetan mastiff", "tibetan terrier",
"toy poodle", "toy terrier", "vizsla", "walker hound", "weimaraner",
"welsh springer spaniel", "west highland white terrier", "whippet",
"wire-haired fox terrier", "yorkshire terrier"
]
BREED_LIFESPAN = {
"afghan hound": 11.1, "african hunting dog": 10.5, "airedale": 11.5,
"american staffordshire terrier": 12.5, "appenzeller": 13.0, "australian terrier": 13.5,
"basenji": 12.1, "basset": 12.5, "beagle": 12.5, "bedlington terrier": 13.7,
"bernese mountain dog": 10.1, "black-and-tan coonhound": 10.8, "blenheim spaniel": 13.3,
"bloodhound": 9.3, "bluetick": 11.0, "border collie": 13.1, "border terrier": 14.2,
"borzoi": 12.0, "boston bull": 11.8, "bouvier des flandres": 11.3, "boxer": 11.3,
"brabancon griffon": 13.0, "briard": 12.6, "brittany spaniel": 13.5,
"bull mastiff": 10.2, "cairn": 14.0, "cardigan": 13.2, "chesapeake bay retriever": 11.6,
"chihuahua": 11.8, "chow": 12.1, "clumber": 12.3, "cocker spaniel": 13.3,
"collie": 13.3, "curly-coated retriever": 12.2, "dandie dinmont": 12.8,
"dhole": 10.0, "dingo": 10.0, "doberman": 11.2, "english foxhound": 13.0,
"english setter": 13.1, "english springer": 13.5, "entlebucher": 13.0,
"eskimo dog": 11.3, "flat-coated retriever": 11.7, "french bulldog": 9.8,
"german shepherd": 11.3, "german short-haired pointer": 13.4, "giant schnauzer": 12.1,
"golden retriever": 13.2, "gordon setter": 12.4, "great dane": 10.6,
"great pyrenees": 10.9, "greater swiss mountain dog": 10.9, "groenendael": 12.0,
"ibizan hound": 13.3, "irish setter": 12.9, "irish terrier": 13.5,
"irish water spaniel": 10.8, "irish wolfhound": 9.9, "italian greyhound": 14.0,
"japanese spaniel": 13.3, "keeshond": 12.3, "kelpie": 12.0, "kerry blue terrier": 12.4,
"komondor": 10.5, "kuvasz": 10.5, "labrador retriever": 13.1, "lakeland terrier": 14.2,
"leonberg": 10.0, "lhasa": 14.0, "malamute": 11.3, "malinois": 12.0,
"maltese dog": 13.1, "mexican hairless": 13.0, "miniature pinscher": 13.7,
"miniature poodle": 14.0, "miniature schnauzer": 13.3, "newfoundland": 11.0,
"norfolk terrier": 13.5, "norwegian elkhound": 13.0, "norwich terrier": 14.0,
"old english sheepdog": 12.1, "otterhound": 12.0, "papillon": 14.5,
"pekinese": 13.3, "pembroke": 13.2, "pomeranian": 12.2, "pug": 11.6,
"redbone": 12.0, "rhodesian ridgeback": 12.0, "rottweiler": 10.6,
"saint bernard": 9.3, "saluki": 13.3, "samoyed": 13.1, "schipperke": 14.2,
"scotch terrier": 12.7, "scottish deerhound": 10.5, "sealyham terrier": 13.1,
"shetland sheepdog": 13.4, "shih tzu": 12.8, "siberian husky": 11.9,
"silky terrier": 13.3, "soft-coated wheaten terrier": 13.7, "staffordshire bullterrier": 12.0,
"standard poodle": 14.0, "standard schnauzer": 13.0, "sussex spaniel": 13.5,
"tibetan mastiff": 13.3, "tibetan terrier": 13.8, "toy poodle": 14.0,
"toy terrier": 13.0, "vizsla": 13.5, "walker hound": 12.0, "weimaraner": 12.8,
"welsh springer spaniel": 14.0, "west highland white terrier": 13.4, "whippet": 13.4,
"wire-haired fox terrier": 13.5, "yorkshire terrier": 13.3
}
# SHORTENED HRQOL Questionnaire
HRQOL_QUESTIONNAIRE = {
    "vitality": {
        "title": "🔋 Vitality & Energy Assessment",
"description": "Evaluate your dog's overall energy and responsiveness",
"questions": [
{
"id": "vitality_comprehensive",
"text": "How would you rate your dog's overall vitality considering energy levels, playfulness, and responsiveness to exciting activities?",
"options": [
"Excellent - Very energetic, always seeks play, immediate enthusiastic responses",
"Very Good - Generally energetic, often seeks play, quick positive responses",
"Good - Moderate energy, sometimes seeks play, moderate response time",
"Fair - Lower energy, rarely seeks play, slow or delayed responses",
"Poor - Very low energy, never seeks play, no response or negative reactions"
]
}
],
"weight": 0.25
    },
    "comfort": {
        "title": "😌 Comfort & Pain Management",
"description": "Assess overall comfort and mobility",
"questions": [
{
"id": "comfort_comprehensive",
"text": "How would you assess your dog's overall comfort considering activity comfort, pain frequency, and impact on daily life?",
"options": [
"Excellent - Completely comfortable in all activities, never shows pain, no impact on daily life",
"Very Good - Mostly comfortable with minor adjustments, rarely shows pain, minimal impact",
"Good - Some discomfort in certain activities, occasional pain signs, moderate activity modifications",
"Fair - Frequently uncomfortable, often shows pain, significant activity limitations",
"Poor - Severe discomfort in most activities, daily pain signs, major activity restrictions"
]
}
],
"weight": 0.25
    },
    "emotional_wellbeing": {
        "title": "😊 Emotional Wellbeing",
"description": "Evaluate mood, stress levels, and social engagement",
"questions": [
{
"id": "emotional_comprehensive",
"text": "How would you describe your dog's overall emotional state considering mood, stress/anxiety levels, and family engagement?",
"options": [
"Excellent - Very positive mood, never shows stress, highly engaged with family activities",
"Very Good - Mostly positive mood, rarely shows stress, well engaged with family",
"Good - Generally neutral mood, sometimes shows stress, moderately engaged when invited",
"Fair - Often subdued mood, frequently shows stress, minimally engaged with encouragement",
"Poor - Negative/depressed mood, constantly stressed, avoids family activities"
]
}
],
"weight": 0.25
    },
    "alertness": {
        "title": "🧠 Alertness & Cognition",
"description": "Assess cognitive function and awareness",
"questions": [
{
"id": "alertness_comprehensive",
"text": "How would you rate your dog's overall cognitive function considering awareness, command response, and focus during activities?",
"options": [
"Excellent - Highly alert and aware, responds immediately to commands, maintains excellent focus",
"Very Good - Alert and notices things quickly, usually responds quickly, good focus with occasional distraction",
"Good - Moderately alert with some delay, sometimes needs repetition, moderate focus with difficulty concentrating",
"Fair - Slightly alert and slow to notice, often needs multiple attempts, poor focus and easily distracted",
"Poor - Not alert or confused, rarely responds to commands, cannot maintain attention or focus"
]
}
],
"weight": 0.25
}
}
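# The four domain weights above sum to 1.0, so the questionnaire-only
# composite computed later is an equal-weight average of the domain scores.
# Each option string leads with one of the keywords ("Excellent",
# "Very Good", "Good", "Fair", "Poor") that score_from_response() matches
# against.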
# ====== ENHANCED BIOLOGICAL AGE PREDICTION FUNCTIONS ======
def predict_biological_age_enhanced(img: Image.Image, video_path: str, breed: str, hrqol_scores: dict, age: int = None):
"""Enhanced biological age prediction with accurate multi-factor analysis"""
try:
# 1. Base prediction using breed-specific aging curves
breed_lifespan = BREED_LIFESPAN.get(breed.lower(), 12.0)
# 2. Enhanced visual health indicators with detailed analysis
health_indicators = analyze_health_indicators_detailed(img)
# 3. HRQOL-based age adjustment with refined weighting
hrqol_adjustment = calculate_hrqol_age_factor_refined(hrqol_scores)
# 4. Video gait analysis (if available)
gait_adjustment = 0
if video_path:
gait_features = analyze_video_for_age_indicators_enhanced(video_path)
gait_adjustment = gait_features.get('age_factor', 0)
# 5. Multi-factor biological age calculation
if age and age > 0:
# Start with chronological age as base
base_age = float(age)
# Apply visual health assessment (stronger influence)
visual_factor = health_indicators.get('age_factor', 0.0) * 0.8
# Apply HRQOL health adjustment (moderate influence)
hrqol_factor = hrqol_adjustment * 0.6
# Apply gait/movement adjustment (if available)
gait_factor = gait_adjustment * 0.4 if video_path else 0.0
# Calculate combined health impact
total_health_impact = visual_factor + hrqol_factor + gait_factor
# Apply health impact to biological age calculation
# Positive factors = accelerated aging, Negative factors = slower aging
biological_age = base_age * (1.0 + total_health_impact)
# Add breed-specific aging rate adjustments
breed_aging_rate = calculate_breed_aging_rate(breed, age, breed_lifespan)
biological_age = biological_age * breed_aging_rate
else:
# When no chronological age provided, estimate from visual cues
visual_age_estimate = estimate_age_from_visual_cues_enhanced(img, breed)
# Apply health adjustments to visual estimate
health_adjustment = (hrqol_adjustment + gait_adjustment) * 0.5
biological_age = visual_age_estimate * (1.0 + health_adjustment)
# 6. Apply realistic constraints
min_age = max(0.3, age * 0.7) if age else 0.3
max_age = min(breed_lifespan * 1.4, age * 1.6) if age else breed_lifespan * 1.2
biological_age = max(min_age, min(max_age, biological_age))
# 7. Calculate confidence and uncertainty
prediction_confidence = calculate_prediction_confidence_enhanced(health_indicators, hrqol_scores, video_path, age)
uncertainty = max(0.1, (1.0 - prediction_confidence) * 2.0)
return {
'biological_age': round(biological_age, 1),
'uncertainty': round(uncertainty, 1),
'high_uncertainty': uncertainty > 1.5,
'vision_quality': compute_vision_quality_enhanced(img),
'breed_lifespan': breed_lifespan,
'confidence_factors': {
'visual_health': health_indicators,
'hrqol_factor': hrqol_adjustment,
'gait_factor': gait_adjustment,
'total_health_impact': total_health_impact if age else 0.0,
'prediction_confidence': prediction_confidence
}
}
    except Exception as e:
        logger.error(f"Error in enhanced age prediction: {e}")
        # Recompute the lifespan here: the try block may have failed before
        # breed_lifespan was assigned.
        breed_lifespan = BREED_LIFESPAN.get(str(breed).lower(), 12.0)
        fallback_age = age * 1.1 if age else breed_lifespan * 0.4
return {
'biological_age': round(fallback_age, 1),
'uncertainty': 2.0,
'high_uncertainty': True,
'vision_quality': 0.5,
'breed_lifespan': breed_lifespan
}
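# Worked example (illustrative inputs, not from a real run): an 8-year-old
# labrador retriever (lifespan 13.1) with visual age_factor +0.10, an HRQOL
# factor of -0.15 and no video gives
#   total_health_impact = 0.10 * 0.8 + (-0.15) * 0.6 = -0.01
#   biological_age      = 8 * 0.99 * 1.15 (senior-range breed rate) ~= 9.1
# which sits inside the [5.6, 12.8] constraint window for those inputs.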
def analyze_health_indicators_detailed(img: Image.Image):
"""Enhanced visual health analysis with detailed aging assessment"""
try:
# More comprehensive aging assessment prompts
aging_prompts = [
"very young healthy puppy with baby features and perfect health",
"young adult dog with excellent health and prime condition",
"healthy mature adult dog with minor aging signs",
"middle-aged dog with moderate aging and some health decline",
"senior dog with visible aging and health deterioration",
"elderly dog with significant aging and multiple health issues"
]
inputs = clip_processor(text=aging_prompts, images=img, return_tensors="pt", padding=True).to(device)
with torch.no_grad():
logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
# More nuanced age factor weights (higher range for better distinction)
age_weights = [-0.6, -0.3, -0.1, 0.2, 0.4, 0.7]
age_factor = np.dot(logits, age_weights)
# Additional physical condition analysis
condition_prompts = [
"dog with excellent physical condition and youthful appearance",
"dog with good physical condition and minimal aging",
"dog with fair physical condition and moderate aging",
"dog with poor physical condition and advanced aging"
]
inputs2 = clip_processor(text=condition_prompts, images=img, return_tensors="pt", padding=True).to(device)
with torch.no_grad():
condition_logits = clip_model(**inputs2).logits_per_image.softmax(-1)[0].cpu().numpy()
condition_weights = [-0.4, -0.1, 0.2, 0.5]
condition_factor = np.dot(condition_logits, condition_weights)
# Combine factors with weighted importance
combined_factor = (age_factor * 0.7) + (condition_factor * 0.3)
return {
'age_factor': float(combined_factor),
'confidence': float(np.max(logits)),
'distribution': logits.tolist(),
'condition_factor': float(condition_factor)
}
except Exception as e:
logger.error(f"Error in detailed health indicator analysis: {e}")
return {'age_factor': 0.0, 'confidence': 0.5, 'distribution': [0.16]*6, 'condition_factor': 0.0}
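# Both prompt sets are softmax-normalised before the dot products, so
# age_factor lies in [-0.6, 0.7] and condition_factor in [-0.4, 0.5]; the
# 0.7/0.3 blend is therefore bounded to roughly [-0.54, 0.64].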
def calculate_hrqol_age_factor_refined(hrqol_scores: dict):
"""Refined HRQOL aging factor with stronger impact"""
try:
# Calculate weighted average with domain-specific importance
domain_weights = {
'vitality': 0.35, # Highest correlation with aging
'comfort': 0.30, # Strong correlation with aging
'alertness': 0.25, # Cognitive aging indicator
'emotional_wellbeing': 0.10 # Secondary factor
}
weighted_score = sum(
hrqol_scores.get(domain, 50) * weight
for domain, weight in domain_weights.items()
)
# More pronounced age factor calculation for better distinction
if weighted_score >= 85: # Excellent health
age_factor = -0.25 # Significantly slower aging
elif weighted_score >= 70: # Very good health
age_factor = -0.15 # Slower aging
elif weighted_score >= 55: # Good health
age_factor = -0.05 # Slightly slower aging
elif weighted_score >= 40: # Fair health
age_factor = 0.1 # Slightly accelerated aging
elif weighted_score >= 25: # Poor health
age_factor = 0.3 # Accelerated aging
else: # Very poor health
age_factor = 0.5 # Significantly accelerated aging
return age_factor
except Exception as e:
logger.error(f"Error in refined HRQOL age factor calculation: {e}")
return 0.0
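# Example: vitality=80, comfort=70, alertness=60, emotional_wellbeing=50
# gives a weighted score of 80*0.35 + 70*0.30 + 60*0.25 + 50*0.10 = 69,
# which lands just under the 70 threshold and maps to an age factor of -0.05.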
def analyze_video_for_age_indicators_enhanced(video_path: str):
"""Enhanced video analysis with detailed movement assessment"""
try:
cap = cv2.VideoCapture(video_path)
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
if total_frames == 0:
cap.release()
return {'age_factor': 0.0}
movement_scores = []
energy_scores = []
# Sample more frames for better accuracy
frame_indices = np.linspace(0, total_frames-1, min(20, total_frames), dtype=int)
for idx in frame_indices:
cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
ret, frame = cap.read()
if not ret:
continue
img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
# Enhanced movement assessment
movement_prompts = [
"young dog with bouncy energetic playful movement",
"adult dog with smooth confident coordinated movement",
"older dog with careful measured slower movement",
"senior dog with stiff labored difficult movement"
]
inputs = clip_processor(text=movement_prompts, images=img, return_tensors="pt", padding=True).to(device)
with torch.no_grad():
logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
# Stronger movement scoring for better age distinction
movement_weights = [-0.4, -0.1, 0.2, 0.5]
movement_score = np.dot(logits, movement_weights)
movement_scores.append(movement_score)
# Energy level assessment
energy_prompts = [
"very high energy enthusiastic dog",
"good energy alert dog",
"moderate energy calm dog",
"low energy lethargic dog"
]
inputs2 = clip_processor(text=energy_prompts, images=img, return_tensors="pt", padding=True).to(device)
with torch.no_grad():
energy_logits = clip_model(**inputs2).logits_per_image.softmax(-1)[0].cpu().numpy()
energy_weights = [-0.3, -0.1, 0.1, 0.4]
energy_score = np.dot(energy_logits, energy_weights)
energy_scores.append(energy_score)
cap.release()
if movement_scores and energy_scores:
# Combine movement and energy with weighted importance
avg_movement = np.mean(movement_scores)
avg_energy = np.mean(energy_scores)
combined_factor = (avg_movement * 0.7) + (avg_energy * 0.3)
return {
'age_factor': float(combined_factor),
'movement_score': float(avg_movement),
'energy_score': float(avg_energy),
'sample_count': len(movement_scores)
}
else:
return {'age_factor': 0.0}
except Exception as e:
logger.error(f"Error in enhanced video age analysis: {e}")
return {'age_factor': 0.0}
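# Performance note: every sampled frame runs two full CLIP forward passes
# (movement and energy prompts), so a 20-frame sample costs up to 40 model
# calls; shrink the frame budget in the np.linspace call above if latency
# matters.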
def calculate_breed_aging_rate(breed: str, current_age: int, breed_lifespan: float):
"""Calculate breed-specific aging rate adjustment"""
try:
# Calculate relative age within breed lifespan
relative_age = current_age / breed_lifespan
# Aging rate adjustments based on breed characteristics
if relative_age < 0.2: # Very young (0-20% of lifespan)
aging_rate = 0.95 # Slightly slower development
elif relative_age < 0.4: # Young adult (20-40% of lifespan)
aging_rate = 1.0 # Normal aging
elif relative_age < 0.6: # Mature adult (40-60% of lifespan)
aging_rate = 1.05 # Slightly accelerated
elif relative_age < 0.8: # Senior (60-80% of lifespan)
aging_rate = 1.15 # Accelerated aging
else: # Elderly (80%+ of lifespan)
aging_rate = 1.25 # Significantly accelerated
# Breed-specific adjustments
large_breeds = ["great dane", "saint bernard", "mastiff", "irish wolfhound"]
small_breeds = ["chihuahua", "toy poodle", "papillon", "maltese dog"]
if any(large_breed in breed.lower() for large_breed in large_breeds):
aging_rate *= 1.1 # Large breeds age faster
elif any(small_breed in breed.lower() for small_breed in small_breeds):
aging_rate *= 0.95 # Small breeds age slower
return aging_rate
except Exception as e:
logger.error(f"Error in breed aging rate calculation: {e}")
return 1.0
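# Note: the substring checks above mean "mastiff" also matches
# "bull mastiff" and "tibetan mastiff", so those breeds receive the
# large-breed 1.1x multiplier as well.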
def estimate_age_from_visual_cues_enhanced(img: Image.Image, breed: str):
"""Enhanced age estimation with more detailed visual analysis"""
try:
breed_lifespan = BREED_LIFESPAN.get(breed.lower(), 12.0)
# More detailed age-specific descriptions
age_ranges = [
(0.3, f"very young {breed} puppy with baby features and soft coat"),
(1.0, f"young {breed} puppy with developing adult features"),
(2.5, f"adolescent {breed} with youthful energy and developing body"),
(4.0, f"young adult {breed} in peak physical condition"),
(7.0, f"mature adult {breed} with full development and strength"),
(10.0, f"middle-aged {breed} with some aging signs and experience"),
(breed_lifespan * 0.85, f"senior {breed} with clear aging and wisdom"),
(breed_lifespan, f"elderly {breed} with advanced aging and slower movement")
]
age_prompts = [desc for _, desc in age_ranges]
inputs = clip_processor(text=age_prompts, images=img, return_tensors="pt", padding=True).to(device)
with torch.no_grad():
logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
# Calculate weighted average age with higher precision
ages = [age for age, _ in age_ranges]
estimated_age = np.dot(logits, ages)
# Apply confidence-based adjustment
confidence = np.max(logits)
if confidence < 0.4: # Low confidence
# Default to middle age estimate
estimated_age = breed_lifespan * 0.5
return max(0.2, min(breed_lifespan * 1.2, estimated_age))
except Exception as e:
logger.error(f"Error in enhanced visual age estimation: {e}")
return BREED_LIFESPAN.get(breed.lower(), 12.0) * 0.5
def calculate_prediction_confidence_enhanced(health_indicators: dict, hrqol_scores: dict, video_path: str, age: int = None):
"""Calculate enhanced prediction confidence"""
try:
confidence_factors = []
# Visual analysis confidence (higher weight)
visual_conf = health_indicators.get('confidence', 0.5)
confidence_factors.append(visual_conf * 0.4)
# Chronological age availability (high importance)
age_conf = 0.95 if age else 0.2
confidence_factors.append(age_conf * 0.3)
# HRQOL completeness and consistency
completed_domains = sum(1 for score in hrqol_scores.values() if score > 0)
hrqol_conf = completed_domains / 4.0
confidence_factors.append(hrqol_conf * 0.2)
# Video availability
video_conf = 0.9 if video_path else 0.5
confidence_factors.append(video_conf * 0.1)
overall_confidence = sum(confidence_factors)
return min(1.0, overall_confidence)
    except Exception as e:
        logger.error(f"Error in confidence calculation: {e}")
        return 0.5
def compute_vision_quality_enhanced(img: Image.Image):
"""Enhanced vision quality computation"""
try:
gray = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)
# Sharpness calculation
sharpness = cv2.Laplacian(gray, cv2.CV_64F).var()
# Exposure quality
mean_intensity = np.mean(gray)
exposure_quality = 1.0 - abs(mean_intensity - 127.5) / 127.5
# Contrast quality
contrast = np.std(gray) / 128.0
contrast_quality = min(1.0, contrast)
# Combined quality score
quality = (sharpness / 1500.0 * 0.5 + exposure_quality * 0.3 + contrast_quality * 0.2)
quality = min(1.0, quality)
return max(0.1, quality)
except Exception as e:
logger.error(f"Error in enhanced quality computation: {e}")
return 0.5
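# The sharpness term is the common variance-of-Laplacian focus measure;
# dividing by 1500 is a heuristic normalisation, so very sharp images can
# saturate that term before the final clamp to [0.1, 1.0].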
# ====== EXISTING SUPPORT FUNCTIONS ======
def analyze_medical_image(img: Image.Image):
health_conditions = [
"healthy normal dog",
"dog with visible health issues",
"dog showing signs of illness",
"dog with poor body condition",
"dog with excellent health"
]
if MEDICAL_MODEL_AVAILABLE:
inputs = medical_processor(text=health_conditions, images=img, return_tensors="pt", padding=True).to(device)
with torch.no_grad():
logits = medical_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
else:
inputs = clip_processor(text=health_conditions, images=img, return_tensors="pt", padding=True).to(device)
with torch.no_grad():
logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
idx = int(np.argmax(logits))
label = health_conditions[idx]
conf = float(logits[idx])
return label, conf
def classify_breed_and_health(img: Image.Image, override=None):
inp = clip_processor(images=img, return_tensors="pt").to(device)
with torch.no_grad():
feats = clip_model.get_image_features(**inp)
text_prompts = [f"a photo of a {b}" for b in STANFORD_BREEDS]
ti = clip_processor(text=text_prompts, return_tensors="pt", padding=True).to(device)
with torch.no_grad():
tf = clip_model.get_text_features(**ti)
sims = (feats @ tf.T).softmax(-1)[0].cpu().numpy()
idx = int(np.argmax(sims))
breed = override or STANFORD_BREEDS[idx]
breed_conf = float(sims[idx])
aspects = {
"Coat Quality": ("shiny healthy coat","dull patchy fur"),
"Eye Clarity": ("bright clear eyes","cloudy milky eyes"),
"Body Condition": ("ideal muscle tone","visible ribs or bones"),
"Dental Health": ("clean white teeth","yellow stained teeth")
}
health = {}
for name,(p,n) in aspects.items():
ti = clip_processor(text=[p,n], return_tensors="pt", padding=True).to(device)
with torch.no_grad():
tf2 = clip_model.get_text_features(**ti)
sim2 = (feats @ tf2.T).softmax(-1)[0].cpu().numpy()
choice = p if sim2[0]>sim2[1] else n
health[name] = {"assessment":choice,"confidence":float(max(sim2))}
return breed, breed_conf, health
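# Implementation note: the similarity here is a raw (un-normalised) feature
# dot product pushed through a softmax, not the cosine/temperature scoring
# CLIP was trained with; it is adequate for ranking breeds, but the
# resulting "confidence" values are relative, not calibrated probabilities.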
def analyze_video_gait(video_path):
if not video_path:
return None
try:
cap = cv2.VideoCapture(video_path)
fps = cap.get(cv2.CAP_PROP_FPS) or 24
total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
if total == 0:
cap.release()
return None
indices = np.linspace(0, total-1, min(15, total), dtype=int)
health_scores = []
movement_scores = []
vitality_scores = []
for i in indices:
cap.set(cv2.CAP_PROP_POS_FRAMES, i)
ret, frame = cap.read()
if not ret:
continue
img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
# Health assessment
_, health_conf = analyze_medical_image(img)
health_scores.append(health_conf)
# Movement assessment
movement_prompts = ["dog moving normally", "dog limping or showing pain", "dog moving stiffly"]
inputs = clip_processor(text=movement_prompts, images=img, return_tensors="pt", padding=True).to(device)
with torch.no_grad():
movement_logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
movement_scores.append(float(movement_logits[0]))
# Vitality assessment
vitality_prompts = ["energetic active dog", "lethargic tired dog", "alert playful dog"]
inputs = clip_processor(text=vitality_prompts, images=img, return_tensors="pt", padding=True).to(device)
with torch.no_grad():
vitality_logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
vitality_scores.append(float(vitality_logits[0] + vitality_logits[2]))
cap.release()
if not health_scores:
return None
return {
"duration_sec": round(total/fps, 1),
"mobility_score": float(np.mean(movement_scores)) * 100,
"comfort_score": float(np.mean(health_scores)) * 100,
"vitality_score": float(np.mean(vitality_scores)) * 100,
"frames_analyzed": len(health_scores),
"mobility_assessment": "Normal gait pattern" if np.mean(movement_scores) > 0.6 else "Mobility concerns detected",
"comfort_assessment": "No obvious discomfort" if np.mean(health_scores) > 0.7 else "Possible discomfort signs",
"vitality_assessment": "Good energy level" if np.mean(vitality_scores) > 0.6 else "Low energy observed"
}
    except Exception as e:
        logger.error(f"Error in video gait analysis: {e}")
        return None
def score_from_response(response, score_mapping):
"""Extract numeric score from text response"""
if not response:
return 50
for key, value in score_mapping.items():
if key.lower() in response.lower():
return value
return 50
def calculate_hrqol_scores(hrqol_responses):
"""Convert comprehensive HRQOL responses to 0-100 domain scores"""
score_mapping = {
"excellent": 100, "very good": 80, "good": 60, "fair": 40, "poor": 20
}
domain_scores = {}
# Each domain now has one comprehensive question
domain_scores["vitality"] = score_from_response(
hrqol_responses.get("vitality_comprehensive", ""), score_mapping
)
domain_scores["comfort"] = score_from_response(
hrqol_responses.get("comfort_comprehensive", ""), score_mapping
)
domain_scores["emotional_wellbeing"] = score_from_response(
hrqol_responses.get("emotional_comprehensive", ""), score_mapping
)
domain_scores["alertness"] = score_from_response(
hrqol_responses.get("alertness_comprehensive", ""), score_mapping
)
return domain_scores
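# Example: {"vitality_comprehensive": "Good - Moderate energy, ..."} yields
# a vitality score of 60, because "good" is the first score_mapping keyword
# found in the response text.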
def get_score_color(score):
"""Return background and text color based on score for better visibility"""
if score >= 80:
return {"bg": "#4CAF50", "text": "#FFFFFF"} # Green background, white text
elif score >= 60:
return {"bg": "#FFC107", "text": "#000000"} # Yellow background, black text
elif score >= 40:
return {"bg": "#FF9800", "text": "#FFFFFF"} # Orange background, white text
else:
return {"bg": "#F44336", "text": "#FFFFFF"} # Red background, white text
def get_healthspan_grade(score):
if score >= 85:
return "Excellent (A+)"
elif score >= 75:
return "Very Good (A)"
elif score >= 65:
return "Good (B)"
elif score >= 55:
return "Fair (C)"
elif score >= 45:
return "Poor (D)"
else:
return "Critical (F)"
def show_loading():
"""Display loading animation"""
return """
<div style="text-align: center; padding: 40px;">
<div style="display: inline-block; width: 40px; height: 40px; border: 4px solid #f3f3f3; border-top: 4px solid #667eea; border-radius: 50%; animation: spin 1s linear infinite;"></div>
<style>
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
</style>
    <h3 style="color: #667eea; margin-top: 20px;">🔬 Analyzing Your Dog's Health...</h3>
<p style="color: #666;">Please wait while we process the image/video and questionnaire data using enhanced AI models.</p>
<div style="background: #f0f0f0; border-radius: 20px; padding: 10px; margin: 20px auto; width: 300px;">
<div style="background: linear-gradient(90deg, #667eea, #764ba2); height: 6px; border-radius: 10px; width: 0%; animation: progress 3s ease-in-out infinite;"></div>
</div>
<style>
@keyframes progress {
0% { width: 0%; }
50% { width: 80%; }
100% { width: 100%; }
}
</style>
</div>
"""
def comprehensive_healthspan_analysis(input_type, image_input, video_input, breed, age, *hrqol_responses):
"""Enhanced comprehensive analysis with improved biological age prediction"""
# Show loading first
yield show_loading()
# Simulate processing time for enhanced computations
time.sleep(3)
# Determine which input to use based on dropdown selection
if input_type == "Image Analysis":
selected_media = image_input
media_type = "image"
video_path = None
elif input_type == "Video Analysis":
selected_media = video_input
media_type = "video"
video_path = video_input
    else:
        yield "❌ <strong>Error</strong>: Please select an input type."
        return
    if selected_media is None:
        yield f"❌ <strong>Error</strong>: Please provide a {media_type} for analysis."
        return
    # Check if questionnaire is completed
    if not hrqol_responses or all(not r for r in hrqol_responses):
        yield "❌ <strong>Error</strong>: Please complete the HRQOL questionnaire before analysis."
        return
# Build HRQOL responses dictionary - Updated for shortened questionnaire
response_keys = []
for domain_key, domain_data in HRQOL_QUESTIONNAIRE.items():
for question in domain_data["questions"]:
response_keys.append(question["id"])
hrqol_dict = {key: hrqol_responses[i] if i < len(hrqol_responses) else ""
for i, key in enumerate(response_keys)}
# Calculate HRQOL scores
hrqol_scores = calculate_hrqol_scores(hrqol_dict)
# Initialize analysis variables
video_features = {}
breed_info = None
enhanced_age_info = None
health_aspects = {}
# Perform analysis based on media type
if media_type == "image":
try:
detected_breed, breed_conf, health_aspects = classify_breed_and_health(selected_media, breed)
# ENHANCED biological age prediction with improved accuracy
enhanced_age_info = predict_biological_age_enhanced(
selected_media, None, detected_breed, hrqol_scores, age
)
breed_info = {
"breed": detected_breed,
"confidence": breed_conf,
"bio_age": enhanced_age_info['biological_age'],
"uncertainty": enhanced_age_info['uncertainty'],
"high_uncertainty": enhanced_age_info['high_uncertainty'],
"vision_quality": enhanced_age_info['vision_quality'],
"confidence_factors": enhanced_age_info['confidence_factors']
}
except Exception as e:
logger.error(f"Image analysis error: {e}")
elif media_type == "video":
# For video, analyze both movement and extract frame for breed analysis
video_features = analyze_video_gait(selected_media) or {}
# Try to extract a frame from video for breed analysis
try:
cap = cv2.VideoCapture(selected_media)
ret, frame = cap.read()
if ret:
img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
detected_breed, breed_conf, health_aspects = classify_breed_and_health(img, breed)
# ENHANCED biological age prediction with video
enhanced_age_info = predict_biological_age_enhanced(
img, selected_media, detected_breed, hrqol_scores, age
)
breed_info = {
"breed": detected_breed,
"confidence": breed_conf,
"bio_age": enhanced_age_info['biological_age'],
"uncertainty": enhanced_age_info['uncertainty'],
"high_uncertainty": enhanced_age_info['high_uncertainty'],
"vision_quality": enhanced_age_info['vision_quality'],
"confidence_factors": enhanced_age_info['confidence_factors']
}
cap.release()
except Exception as e:
logger.error(f"Video analysis error: {e}")
# Calculate Composite Healthspan Score (enhanced)
video_weight = 0.3 if video_features else 0.0
hrqol_weight = 0.7 if video_features else 1.0
if video_features:
video_score = (
video_features.get("mobility_score", 70) * 0.4 +
video_features.get("comfort_score", 70) * 0.3 +
video_features.get("vitality_score", 70) * 0.3
)
else:
video_score = 0
hrqol_composite = (
hrqol_scores["vitality"] * 0.25 +
hrqol_scores["comfort"] * 0.25 +
hrqol_scores["emotional_wellbeing"] * 0.25 +
hrqol_scores["alertness"] * 0.25
)
final_healthspan_score = (video_score * video_weight) + (hrqol_composite * hrqol_weight)
final_healthspan_score = min(100, max(0, final_healthspan_score))
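    # Example weighting: with video features present, a video_score of 75 and
    # an hrqol_composite of 65 blend to 75*0.3 + 65*0.7 = 68.0; without video
    # the questionnaire composite is used unchanged.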
# Generate comprehensive report with enhanced features
    input_type_icon = "📸" if media_type == "image" else "🎥"
report_html = f"""
<div style="font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; max-width: 1000px; margin: 0 auto;">
<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 30px; border-radius: 15px; margin: 20px 0; text-align: center; box-shadow: 0 4px 6px rgba(0,0,0,0.1);">
<h2 style="margin: 0; font-size: 2em; text-shadow: 1px 1px 2px rgba(0,0,0,0.3);">{input_type_icon} Enhanced Multi-Modal Health Assessment</h2>
<div style="font-size: 1.1em; margin: 10px 0; opacity: 0.9;">Analysis Type: {input_type} | Advanced Biological Age Prediction</div>
<div style="font-size: 3em; font-weight: bold; margin: 15px 0; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);">{final_healthspan_score:.1f}/100</div>
<div style="font-size: 1.2em; background: rgba(255,255,255,0.2); padding: 8px 16px; border-radius: 20px; display: inline-block;">{get_healthspan_grade(final_healthspan_score)}</div>
</div>
<div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(240px, 1fr)); gap: 20px; margin: 30px 0;">
"""
# Add domain score cards with improved contrast
    for domain, score in [("vitality", "🔋 Vitality"), ("comfort", "😌 Comfort"), ("emotional_wellbeing", "😊 Emotional"), ("alertness", "🧠 Alertness")]:
colors = get_score_color(hrqol_scores[domain])
report_html += f"""
<div style="border: 2px solid #e0e0e0; padding: 20px; border-radius: 12px; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
<h4 style="margin: 0 0 15px 0; color: #333333; font-weight: 600;">{score.split()[1]}</h4>
<div style="background: #e9ecef; height: 12px; border-radius: 6px; margin: 10px 0; border: 1px solid #dee2e6;">
<div style="background: {colors['bg']}; height: 100%; width: {hrqol_scores[domain]}%; border-radius: 6px; transition: width 0.3s ease; position: relative; display: flex; align-items: center; justify-content: center;">
<span style="color: {colors['text']}; font-size: 10px; font-weight: bold; text-shadow: 1px 1px 1px rgba(0,0,0,0.3);">{hrqol_scores[domain]:.0f}</span>
</div>
</div>
<div style="font-size: 1.1em; font-weight: bold; color: #333333;">{hrqol_scores[domain]:.1f}/100</div>
</div>
"""
report_html += "</div>"
# Enhanced Visual Analysis section with improved accuracy
if breed_info:
uncertainty_info = ""
if breed_info.get('high_uncertainty', False):
uncertainty_info = f"""<div style="background: #fff3cd; border: 1px solid #ffeaa7; padding: 10px; border-radius: 8px; margin: 10px 0;">
<p style="margin: 0; color: #856404;"><strong>⚠ High Uncertainty:</strong>
Age prediction uncertainty is Β±{breed_info.get('uncertainty', 0):.1f} years. Consider veterinary consultation.</p>
</div>"""
pace_info = ""
if age and age > 0:
pace = breed_info["bio_age"] / age
pace_status = "Accelerated" if pace > 1.2 else "Normal" if pace > 0.8 else "Slow"
pace_color = "#FF5722" if pace > 1.2 else "#4CAF50" if pace < 0.8 else "#FF9800"
pace_info = f"""<p style="margin: 8px 0;"><strong style="color: #000000;">Aging Pace:</strong>
<span style="background: {pace_color}; color: white; padding: 4px 8px; border-radius: 12px; font-weight: bold; text-shadow: 1px 1px 1px rgba(0,0,0,0.3);">
{pace:.2f}Γ— ({pace_status})</span></p>"""
vision_quality_info = f"""<p style="margin: 8px 0;"><strong style="color: #000000;">Image Quality:</strong>
<span style="color: #000000; font-weight: 700;">{breed_info.get('vision_quality', 0.5)*100:.0f}%</span></p>"""
# Confidence factors breakdown
confidence_factors = breed_info.get('confidence_factors', {})
visual_health = confidence_factors.get('visual_health', {})
hrqol_factor = confidence_factors.get('hrqol_factor', 0)
gait_factor = confidence_factors.get('gait_factor', 0)
total_health_impact = confidence_factors.get('total_health_impact', 0)
prediction_confidence = confidence_factors.get('prediction_confidence', 0.5)
factors_info = f"""<div style="background: #f8f9fa; border-radius: 8px; padding: 10px; margin: 10px 0;">
<p style="margin: 5px 0; font-size: 0.9em; color: #555;"><strong>Advanced Analysis Factors:</strong></p>
        <p style="margin: 2px 0; font-size: 0.8em; color: #666;">• Visual Health Factor: {visual_health.get('age_factor', 0):.3f}</p>
        <p style="margin: 2px 0; font-size: 0.8em; color: #666;">• HRQOL Adjustment: {hrqol_factor:.3f}</p>
        <p style="margin: 2px 0; font-size: 0.8em; color: #666;">• Gait Factor: {gait_factor:.3f}</p>
        <p style="margin: 2px 0; font-size: 0.8em; color: #666;">• Total Health Impact: {total_health_impact:.3f}</p>
        <p style="margin: 2px 0; font-size: 0.8em; color: #666;">• Prediction Confidence: {prediction_confidence:.1%}</p>
</div>"""
report_html += f"""
<div style="border: 2px solid #333333; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
<h3 style="color: #000000; margin: 0 0 15px 0; font-weight: 700; border-bottom: 2px solid #333333; padding-bottom: 8px;">{input_type_icon} Advanced Visual Analysis</h3>
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Detected Breed:</strong> <span style="color: #000000; font-weight: 700;">{breed_info['breed']}</span> <span style="background: #333333; color: #ffffff; padding: 2px 6px; border-radius: 8px; font-size: 0.9em;">({breed_info['confidence']:.1%} confidence)</span></p>
        <p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Enhanced Biological Age:</strong> <span style="color: #000000; font-weight: 700;">{breed_info['bio_age']} ± {breed_info.get('uncertainty', 0):.1f} years</span></p>
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Chronological Age:</strong> <span style="color: #000000; font-weight: 700;">{age or 'Not provided'} years</span></p>
{vision_quality_info}
{pace_info}
{factors_info}
{uncertainty_info}
</div>
"""
# Enhanced video analysis
if video_features:
report_html += f"""
<div style="border: 2px solid #333333; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
        <h3 style="color: #000000; margin: 0 0 15px 0; font-weight: 700; border-bottom: 2px solid #333333; padding-bottom: 8px;">🎥 Advanced Gait & Movement Analysis</h3>
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Duration:</strong> <span style="color: #000000; font-weight: 700;">{video_features['duration_sec']} seconds</span></p>
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Mobility Assessment:</strong> <span style="color: #000000; font-weight: 700;">{video_features['mobility_assessment']}</span></p>
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Comfort Assessment:</strong> <span style="color: #000000; font-weight: 700;">{video_features['comfort_assessment']}</span></p>
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Vitality Assessment:</strong> <span style="color: #000000; font-weight: 700;">{video_features['vitality_assessment']}</span></p>
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Enhanced Analysis:</strong> <span style="color: #000000; font-weight: 700;">{video_features['frames_analyzed']} frames with age-specific movement analysis</span></p>
</div>
"""
# Physical Health Assessment with improved visibility
if health_aspects and media_type == "image":
report_html += f"""
<div style="border: 2px solid #4CAF50; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
        <h3 style="color: #2E7D32; margin: 0 0 15px 0; font-weight: 600; border-bottom: 2px solid #E8F5E8; padding-bottom: 8px;">📸 Physical Health Assessment</h3>
"""
for aspect, data in health_aspects.items():
is_healthy = any(word in data["assessment"].lower() for word in ["healthy", "bright", "clean", "ideal"])
            status_icon = "✅" if is_healthy else "⚠"
status_color = "#2E7D32" if is_healthy else "#F57C00"
bg_color = "#E8F5E8" if is_healthy else "#FFF3E0"
report_html += f"""
<div style="margin: 10px 0; padding: 12px; background: {bg_color}; border-radius: 8px; border-left: 4px solid {status_color};">
<p style="margin: 0; color: #333;">
<span style="font-size: 1.2em;">{status_icon}</span>
<strong style="color: {status_color};">{aspect}:</strong>
<span style="color: #333; font-weight: 500;">{data['assessment']}</span>
<span style="background: #E0E0E0; color: #424242; padding: 2px 6px; border-radius: 8px; font-size: 0.85em; margin-left: 8px;">
({data['confidence']:.1%} confidence)</span>
</p>
</div>
"""
report_html += "</div>"
# Enhanced recommendations based on advanced analysis
recommendations = []
    if hrqol_scores["vitality"] < 60:
        recommendations.append("🔋 <strong>Vitality Enhancement</strong>: Implement a graduated exercise program with monitoring")
    if hrqol_scores["comfort"] < 70:
        recommendations.append("😌 <strong>Comfort Support</strong>: Consider pain management and mobility aids")
    if hrqol_scores["emotional_wellbeing"] < 65:
        recommendations.append("😊 <strong>Emotional Care</strong>: Increase environmental enrichment and social interaction")
    if hrqol_scores["alertness"] < 70:
        recommendations.append("🧠 <strong>Cognitive Support</strong>: Introduce cognitive enhancement activities")
    if breed_info and breed_info.get('high_uncertainty', False):
        recommendations.append("🏥 <strong>Veterinary Consultation</strong>: High prediction uncertainty suggests professional evaluation is needed")
    if breed_info and age:
        pace = breed_info["bio_age"] / age
        if pace > 1.3:
            recommendations.append("⚡ <strong>Accelerated Aging</strong>: Consider comprehensive health screening and lifestyle modifications")
if recommendations:
report_html += f"""
<div style="border: 2px solid #FF9800; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
<h3 style="color: #F57C00; margin: 0 0 15px 0; font-weight: 600; border-bottom: 2px solid #FFF3E0; padding-bottom: 8px;">🎯 Enhanced AI Recommendations</h3>
{''.join([f'<div style="margin: 10px 0; padding: 12px; background: #FFF8E1; border-radius: 8px; border-left: 4px solid #FF9800;"><p style="margin: 0; color: #333; font-weight: 500;">{rec}</p></div>' for rec in recommendations])}
</div>
"""
# Enhanced disclaimer with model information
report_html += """
<div style="background: #F5F5F5; border: 1px solid #E0E0E0; padding: 20px; border-radius: 8px; margin: 20px 0;">
<p style="margin: 0; font-size: 0.9em; color: #424242; line-height: 1.5;">
<strong style="color: #D32F2F;">⚠ Important Disclaimer:</strong>
This analysis uses advanced AI models with multi-factor biological age prediction based on visual health indicators,
HRQOL correlations, and movement analysis. Results are for educational purposes only.
Always consult with a qualified veterinarian for professional medical advice and diagnosis.
</p>
<p style="margin: 10px 0 0 0; font-size: 0.8em; color: #666;">
<strong>Advanced Features:</strong> Multi-factor age prediction, breed-specific aging rates, enhanced uncertainty quantification, comprehensive health analysis
</p>
</div>
</div>
"""
yield report_html
def update_media_input(input_type):
"""Update the visibility of media inputs based on dropdown selection"""
if input_type == "Image Analysis":
return gr.update(visible=True), gr.update(visible=False)
else: # Video Analysis
return gr.update(visible=False), gr.update(visible=True)
custom_css = """
/* Enhanced gradient background - Orangish fade theme */
.gradio-container {
background: linear-gradient(135deg, #ff8a50 0%, #ff6b35 25%, #ff4500 50%, #ff8c00 75%, #ffa500 100%);
min-height: 100vh;
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}
/* Card styling - Light orange fade background */
.input-card {
background: linear-gradient(135deg, #fff4e6 0%, #ffe4cc 100%);
border-radius: 12px;
padding: 28px;
box-shadow: 0 4px 20px rgba(255, 140, 0, 0.15);
margin: 12px;
border: 1px solid #ffb366;
color: #1a202c;
}
/* Questionnaire grid container - Orange fade design */
.questionnaire-grid {
background: linear-gradient(135deg, #fff1e6 0%, #ffe6cc 50%, #ffdbcc 100%);
border-radius: 12px;
padding: 32px;
box-shadow: 0 4px 20px rgba(255, 140, 0, 0.18);
margin: 12px;
border: 1px solid #ffb366;
color: #1a202c;
}
/* Header styling - Bold orange fade gradient */
.main-header {
background: linear-gradient(135deg, #ff6347 0%, #ff7f50 25%, #ff8c00 50%, #ffa500 75%, #ffb347 100%);
color: #ffffff;
text-align: center;
padding: 40px;
border-radius: 16px;
margin-bottom: 32px;
box-shadow: 0 8px 32px rgba(255, 140, 0, 0.3);
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}
/* Button styling - Orange fade with depth */
.analyze-button {
background: linear-gradient(135deg, #ff6b35 0%, #ff8c00 50%, #ffa500 100%);
border: none;
color: #ffffff;
padding: 16px 32px;
font-size: 16px;
font-weight: 600;
border-radius: 12px;
cursor: pointer;
transition: all 0.3s ease;
box-shadow: 0 4px 16px rgba(255, 107, 53, 0.3);
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}
.analyze-button:hover {
transform: translateY(-2px);
box-shadow: 0 8px 28px rgba(255, 107, 53, 0.4);
background: linear-gradient(135deg, #ff5722 0%, #ff7043 50%, #ff8a65 100%);
}
/* Grid styling for questionnaire */
.question-grid {
display: grid;
grid-template-columns: 2fr 1fr;
gap: 24px;
align-items: center;
padding: 20px 0;
border-bottom: 1px solid #ffcc99;
margin-bottom: 16px;
}
.question-grid:last-child {
border-bottom: none;
margin-bottom: 0;
}
/* Orange questionnaire text styling - UPDATED TO MATCH THEME */
.question-text {
font-size: 16px !important;
color: #e65100 !important;
line-height: 1.6 !important;
margin: 0 !important;
padding-right: 20px !important;
font-weight: 500 !important;
letter-spacing: 0.025em !important;
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
}
.question-text strong {
color: #bf360c !important;
font-weight: 600 !important;
font-size: 16px !important;
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
}
/* Dropdown styling - Orange fade theme */
.gr-dropdown {
border-radius: 8px;
border: 2px solid #ffb366;
background: linear-gradient(135deg, #fff9f5 0%, #fff4e6 100%) !important;
transition: all 0.3s ease;
font-size: 14px !important;
font-weight: 500 !important;
color: #2d3748 !important;
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
}
.gr-dropdown:focus {
border-color: #ff8c00;
box-shadow: 0 0 0 3px rgba(255, 140, 0, 0.15);
outline: none;
}
.gr-dropdown:hover {
border-color: #ff9f43;
background: linear-gradient(135deg, #fff7f0 0%, #ffede0 100%) !important;
}
/* Compact spacing */
.question-section {
margin-bottom: 24px;
}
.question-section:last-child {
margin-bottom: 0;
}
/* Professional headers with orange fade - UPDATED */
.questionnaire-grid h2 {
font-size: 28px !important;
font-weight: 700 !important;
color: #d84315 !important;
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
}
.questionnaire-grid p {
font-size: 16px !important;
color: #ff6f00 !important;
font-weight: 400 !important;
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
}
/* Additional professional styling with orange fade */
.gr-textbox, .gr-number {
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
color: #2d3748 !important;
background: linear-gradient(135deg, #fff9f5 0%, #fff4e6 100%) !important;
border: 2px solid #ffb366 !important;
border-radius: 8px !important;
transition: all 0.3s ease !important;
}
.gr-textbox:focus, .gr-number:focus {
border-color: #ff8c00 !important;
box-shadow: 0 0 0 3px rgba(255, 140, 0, 0.15) !important;
outline: none !important;
}
.gr-textbox:hover, .gr-number:hover {
border-color: #ff9f43 !important;
background: linear-gradient(135deg, #fff7f0 0%, #ffede0 100%) !important;
}
/* Labels styling - UPDATED TO ORANGE */
label {
color: #e65100 !important;
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
font-weight: 500 !important;
font-size: 14px !important;
margin-bottom: 8px !important;
}
/* Media input styling with orange fade */
.gr-image, .gr-video {
border-radius: 12px !important;
border: 2px solid #ffb366 !important;
background: linear-gradient(135deg, #fff9f5 0%, #fff4e6 100%) !important;
}
/* Additional modern touches with orange fade */
.gr-group {
background: transparent !important;
border: none !important;
}
.gr-panel {
background: linear-gradient(135deg, #fff9f5 0%, #fff4e6 100%) !important;
border: 1px solid #ffb366 !important;
border-radius: 12px !important;
}
"""
# Updated Gradio Interface with Orange Questionnaire Font
with gr.Blocks(
    title="🐶 Enhanced AI Dog Health Analyzer",
theme=gr.themes.Soft(
primary_hue="orange",
secondary_hue="amber",
neutral_hue="slate",
font=["Inter", "system-ui", "sans-serif"]
),
css=custom_css
) as demo:
# Main Header with Orange Fade
gr.HTML("""
<div class="main-header">
<h1 style="margin: 0; font-size: 2.8em; color: #ffffff; font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; font-weight: 700; text-shadow: 2px 2px 4px rgba(0,0,0,0.2);">
🐾 PAWSYears - Every Dog has 20 Years Potential
</h1>
<p style="margin: 20px 0 0 0; font-size: 1.3em; color: #ffffff; font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; opacity: 0.95; text-shadow: 1px 1px 2px rgba(0,0,0,0.2);">
Your Companion's Next-Gen Health Intelligence Platform
</p>
</div>
""")
with gr.Row():
# Left Column - Enhanced Media Input
with gr.Column(scale=1):
gr.HTML("""
<div class="input-card">
<h2 style="color: #d84315; margin: 0 0 24px 0; text-align: center; font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; font-weight: 600;">
        📸 Media Input Selection
</h2>
</div>
""")
# Enhanced dropdown with better styling
input_type_dropdown = gr.Dropdown(
choices=["Image Analysis", "Video Analysis"],
        label="🔍 Select Analysis Type",
value="Image Analysis",
interactive=True,
elem_classes=["gr-dropdown"]
)
# Media input components with enhanced labels
image_input = gr.Image(
type="pil",
        label="📷 Upload Dog Photo or Use Webcam",
visible=True,
sources=["upload", "webcam"],
height=320
)
video_input = gr.Video(
        label="🎥 Upload Video (10-30 seconds) or Record with Webcam",
visible=False,
sources=["upload", "webcam"],
height=320
)
# Update visibility based on dropdown selection
input_type_dropdown.change(
fn=update_media_input,
inputs=[input_type_dropdown],
outputs=[image_input, video_input]
)
breed_input = gr.Dropdown(
STANFORD_BREEDS,
        label="🐕 Dog Breed (Auto-detected if not specified)",
value=None,
allow_custom_value=True,
elem_classes=["gr-dropdown"]
)
age_input = gr.Number(
        label="📅 Chronological Age (years)",
precision=1,
value=None,
minimum=0,
maximum=25
)
# Right Column - Orange Font HRQOL Questionnaire
with gr.Column(scale=1):
gr.HTML("""
<div class="questionnaire-grid">
<h2 style="color: #d84315; margin: 0 0 16px 0; text-align: center; font-size: 28px; font-weight: 700; font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;">
        📋 Health Assessment
</h2>
<p style="text-align: center; color: #ff6f00; font-style: italic; margin-bottom: 28px; font-size: 16px; font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;">
Complete all questions for comprehensive healthspan analysis
</p>
""")
hrqol_inputs = []
# Create compact grid layout with orange text
with gr.Group(elem_classes=["question-section"]):
for domain_key, domain_data in HRQOL_QUESTIONNAIRE.items():
for i, question in enumerate(domain_data["questions"]):
with gr.Row():
with gr.Column(scale=2):
gr.HTML(f"""
<div class="question-text">
<strong>Q{len(hrqol_inputs)+1}:</strong> {question["text"]}
</div>
""")
with gr.Column(scale=1):
dropdown = gr.Dropdown(
choices=question["options"],
label="",
value=None,
interactive=True,
show_label=False,
elem_classes=["gr-dropdown"]
)
hrqol_inputs.append(dropdown)
gr.HTML("</div>") # Close questionnaire-grid
# Enhanced Analysis Button
gr.HTML("""
<div style="text-align: center; margin: 40px 0;">
""")
    analyze_button = gr.Button(
        "🔬 Run Advanced AI Health Analysis",
variant="primary",
size="lg",
elem_classes=["analyze-button"]
)
gr.HTML("</div>")
# Enhanced Results Section
output_report = gr.HTML()
# Connect analysis function with loading
analyze_button.click(
fn=comprehensive_healthspan_analysis,
inputs=[input_type_dropdown, image_input, video_input, breed_input, age_input] + hrqol_inputs,
outputs=output_report
)
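    # comprehensive_healthspan_analysis is a generator, so Gradio streams each
    # yielded value into output_report in turn: first the loading animation,
    # then the finished report.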
# Launch the interface
if __name__ == "__main__":
demo.launch()