Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -4,6 +4,8 @@ import os
|
|
4 |
import gradio as gr
|
5 |
from PIL import Image
|
6 |
import torch
|
|
|
|
|
7 |
import numpy as np
|
8 |
import cv2
|
9 |
from transformers import (
|
@@ -11,6 +13,13 @@ from transformers import (
|
|
11 |
AutoProcessor
|
12 |
)
|
13 |
import time
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
|
15 |
# βββββββββββββββββββββββββββββ
|
16 |
# CONFIG: set your HF token here or via env var HF_TOKEN
|
@@ -28,7 +37,7 @@ clip_processor = CLIPProcessor.from_pretrained(
|
|
28 |
token=HF_TOKEN
|
29 |
)
|
30 |
|
31 |
-
# 2. Alternative medical analysis model
|
32 |
try:
|
33 |
medical_processor = AutoProcessor.from_pretrained(
|
34 |
"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224",
|
@@ -44,7 +53,72 @@ except:
|
|
44 |
medical_model = clip_model
|
45 |
MEDICAL_MODEL_AVAILABLE = False
|
46 |
|
47 |
-
# 3.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
48 |
STANFORD_BREEDS = [
|
49 |
"afghan hound", "african hunting dog", "airedale", "american staffordshire terrier",
|
50 |
"appenzeller", "australian terrier", "basenji", "basset", "beagle",
|
@@ -115,7 +189,7 @@ BREED_LIFESPAN = {
|
|
115 |
"wire-haired fox terrier": 13.5, "yorkshire terrier": 13.3
|
116 |
}
|
117 |
|
118 |
-
#
|
119 |
HRQOL_QUESTIONNAIRE = {
|
120 |
"vitality": {
|
121 |
"title": "π Vitality & Energy Assessment",
|
@@ -279,13 +353,234 @@ HRQOL_QUESTIONNAIRE = {
|
|
279 |
}
|
280 |
}
|
281 |
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
289 |
|
290 |
def analyze_medical_image(img: Image.Image):
|
291 |
health_conditions = [
|
@@ -495,7 +790,7 @@ def show_loading():
|
|
495 |
}
|
496 |
</style>
|
497 |
<h3 style="color: #667eea; margin-top: 20px;">π¬ Analyzing Your Dog's Health...</h3>
|
498 |
-
<p style="color: #666;">Please wait while we process the image/video and questionnaire data.</p>
|
499 |
<div style="background: #f0f0f0; border-radius: 20px; padding: 10px; margin: 20px auto; width: 300px;">
|
500 |
<div style="background: linear-gradient(90deg, #667eea, #764ba2); height: 6px; border-radius: 10px; width: 0%; animation: progress 3s ease-in-out infinite;"></div>
|
501 |
</div>
|
@@ -510,21 +805,23 @@ def show_loading():
|
|
510 |
"""
|
511 |
|
512 |
def comprehensive_healthspan_analysis(input_type, image_input, video_input, breed, age, *hrqol_responses):
|
513 |
-
"""
|
514 |
|
515 |
# Show loading first
|
516 |
yield show_loading()
|
517 |
|
518 |
-
# Simulate processing time
|
519 |
-
time.sleep(
|
520 |
|
521 |
# Determine which input to use based on dropdown selection
|
522 |
if input_type == "Image Analysis":
|
523 |
selected_media = image_input
|
524 |
media_type = "image"
|
|
|
525 |
elif input_type == "Video Analysis":
|
526 |
selected_media = video_input
|
527 |
media_type = "video"
|
|
|
528 |
else:
|
529 |
yield "β **Error**: Please select an input type."
|
530 |
return
|
@@ -553,24 +850,32 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
|
|
553 |
# Initialize analysis variables
|
554 |
video_features = {}
|
555 |
breed_info = None
|
556 |
-
|
557 |
health_aspects = {}
|
558 |
|
559 |
# Perform analysis based on media type
|
560 |
if media_type == "image":
|
561 |
try:
|
562 |
detected_breed, breed_conf, health_aspects = classify_breed_and_health(selected_media, breed)
|
563 |
-
|
|
|
|
|
|
|
|
|
|
|
564 |
breed_info = {
|
565 |
"breed": detected_breed,
|
566 |
"confidence": breed_conf,
|
567 |
-
"bio_age":
|
|
|
|
|
|
|
568 |
}
|
569 |
except Exception as e:
|
570 |
-
|
571 |
|
572 |
elif media_type == "video":
|
573 |
-
# For video,
|
574 |
video_features = analyze_video_gait(selected_media) or {}
|
575 |
|
576 |
# Try to extract a frame from video for breed analysis
|
@@ -580,25 +885,33 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
|
|
580 |
if ret:
|
581 |
img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
|
582 |
detected_breed, breed_conf, health_aspects = classify_breed_and_health(img, breed)
|
583 |
-
|
|
|
|
|
|
|
|
|
|
|
584 |
breed_info = {
|
585 |
"breed": detected_breed,
|
586 |
"confidence": breed_conf,
|
587 |
-
"bio_age":
|
|
|
|
|
|
|
588 |
}
|
589 |
cap.release()
|
590 |
except Exception as e:
|
591 |
-
|
592 |
|
593 |
-
# Calculate Composite Healthspan Score
|
594 |
-
video_weight = 0.
|
595 |
-
hrqol_weight = 0.
|
596 |
|
597 |
if video_features:
|
598 |
video_score = (
|
599 |
-
video_features.get("mobility_score", 70) * 0.
|
600 |
-
video_features.get("comfort_score", 70) * 0.
|
601 |
-
video_features.get("vitality_score", 70) * 0.
|
602 |
)
|
603 |
else:
|
604 |
video_score = 0
|
@@ -613,14 +926,14 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
|
|
613 |
final_healthspan_score = (video_score * video_weight) + (hrqol_composite * hrqol_weight)
|
614 |
final_healthspan_score = min(100, max(0, final_healthspan_score))
|
615 |
|
616 |
-
# Generate comprehensive report with
|
617 |
input_type_icon = "πΈ" if media_type == "image" else "π₯"
|
618 |
|
619 |
report_html = f"""
|
620 |
<div style="font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; max-width: 1000px; margin: 0 auto;">
|
621 |
<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 30px; border-radius: 15px; margin: 20px 0; text-align: center; box-shadow: 0 4px 6px rgba(0,0,0,0.1);">
|
622 |
-
<h2 style="margin: 0; font-size: 2em; text-shadow: 1px 1px 2px rgba(0,0,0,0.3);">{input_type_icon}
|
623 |
-
<div style="font-size: 1.1em; margin: 10px 0; opacity: 0.9;">Analysis Type: {input_type}</div>
|
624 |
<div style="font-size: 3em; font-weight: bold; margin: 15px 0; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);">{final_healthspan_score:.1f}/100</div>
|
625 |
<div style="font-size: 1.2em; background: rgba(255,255,255,0.2); padding: 8px 16px; border-radius: 20px; display: inline-block;">{get_healthspan_grade(final_healthspan_score)}</div>
|
626 |
</div>
|
@@ -645,8 +958,15 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
|
|
645 |
|
646 |
report_html += "</div>"
|
647 |
|
648 |
-
# Visual Analysis section with
|
649 |
if breed_info:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
650 |
pace_info = ""
|
651 |
if age and age > 0:
|
652 |
pace = breed_info["bio_age"] / age
|
@@ -656,29 +976,34 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
|
|
656 |
<span style="background: {pace_color}; color: white; padding: 4px 8px; border-radius: 12px; font-weight: bold; text-shadow: 1px 1px 1px rgba(0,0,0,0.3);">
|
657 |
{pace:.2f}Γ ({pace_status})</span></p>"""
|
658 |
|
|
|
|
|
|
|
659 |
report_html += f"""
|
660 |
<div style="border: 2px solid #333333; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
|
661 |
-
<h3 style="color: #000000; margin: 0 0 15px 0; font-weight: 700; border-bottom: 2px solid #333333; padding-bottom: 8px;">{input_type_icon} Visual Analysis</h3>
|
662 |
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Detected Breed:</strong> <span style="color: #000000; font-weight: 700;">{breed_info['breed']}</span> <span style="background: #333333; color: #ffffff; padding: 2px 6px; border-radius: 8px; font-size: 0.9em;">({breed_info['confidence']:.1%} confidence)</span></p>
|
663 |
-
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">
|
664 |
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Chronological Age:</strong> <span style="color: #000000; font-weight: 700;">{age or 'Not provided'} years</span></p>
|
|
|
665 |
{pace_info}
|
|
|
666 |
</div>
|
667 |
"""
|
668 |
-
|
669 |
|
670 |
-
#
|
671 |
if video_features:
|
672 |
report_html += f"""
|
673 |
-
|
674 |
-
|
675 |
-
|
676 |
-
|
677 |
-
|
678 |
-
|
679 |
-
|
680 |
-
|
681 |
-
|
|
|
682 |
# Physical Health Assessment with improved visibility
|
683 |
if health_aspects and media_type == "image":
|
684 |
report_html += f"""
|
@@ -704,32 +1029,38 @@ def comprehensive_healthspan_analysis(input_type, image_input, video_input, bree
|
|
704 |
"""
|
705 |
report_html += "</div>"
|
706 |
|
707 |
-
#
|
708 |
recommendations = []
|
709 |
if hrqol_scores["vitality"] < 60:
|
710 |
-
recommendations.append("π **Vitality Enhancement**:
|
711 |
if hrqol_scores["comfort"] < 70:
|
712 |
-
recommendations.append("π **Comfort Support**:
|
713 |
if hrqol_scores["emotional_wellbeing"] < 65:
|
714 |
-
recommendations.append("π **Emotional Care**: Increase
|
715 |
if hrqol_scores["alertness"] < 70:
|
716 |
-
recommendations.append("π§ **Cognitive Support**:
|
|
|
|
|
|
|
717 |
|
718 |
if recommendations:
|
719 |
report_html += f"""
|
720 |
<div style="border: 2px solid #FF9800; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
|
721 |
-
<h3 style="color: #F57C00; margin: 0 0 15px 0; font-weight: 600; border-bottom: 2px solid #FFF3E0; padding-bottom: 8px;">π―
|
722 |
{''.join([f'<div style="margin: 10px 0; padding: 12px; background: #FFF8E1; border-radius: 8px; border-left: 4px solid #FF9800;"><p style="margin: 0; color: #333; font-weight: 500;">{rec}</p></div>' for rec in recommendations])}
|
723 |
</div>
|
724 |
"""
|
725 |
|
726 |
-
#
|
727 |
report_html += """
|
728 |
<div style="background: #F5F5F5; border: 1px solid #E0E0E0; padding: 20px; border-radius: 8px; margin: 20px 0;">
|
729 |
<p style="margin: 0; font-size: 0.9em; color: #424242; line-height: 1.5;">
|
730 |
<strong style="color: #D32F2F;">β οΈ Important Disclaimer:</strong>
|
731 |
-
This analysis uses
|
732 |
-
Always consult with a qualified veterinarian for professional medical advice and diagnosis.
|
|
|
|
|
|
|
733 |
</p>
|
734 |
</div>
|
735 |
</div>
|
@@ -834,7 +1165,7 @@ custom_css = """
|
|
834 |
|
835 |
# Gradio Interface with Enhanced UI
|
836 |
with gr.Blocks(
|
837 |
-
title="πΆ
|
838 |
theme=gr.themes.Soft(),
|
839 |
css=custom_css
|
840 |
) as demo:
|
@@ -843,10 +1174,10 @@ with gr.Blocks(
|
|
843 |
gr.HTML("""
|
844 |
<div class="main-header">
|
845 |
<h1 style="margin: 0; font-size: 2.5em; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);">
|
846 |
-
π
|
847 |
</h1>
|
848 |
<p style="margin: 15px 0 0 0; font-size: 1.2em; opacity: 0.9;">
|
849 |
-
|
850 |
</p>
|
851 |
</div>
|
852 |
""")
|
@@ -964,7 +1295,7 @@ with gr.Blocks(
|
|
964 |
""")
|
965 |
|
966 |
analyze_button = gr.Button(
|
967 |
-
"π¬
|
968 |
variant="primary",
|
969 |
size="lg",
|
970 |
elem_classes=["analyze-button"]
|
|
|
4 |
import gradio as gr
|
5 |
from PIL import Image
|
6 |
import torch
|
7 |
+
import torch.nn as nn
|
8 |
+
import torch.nn.functional as F
|
9 |
import numpy as np
|
10 |
import cv2
|
11 |
from transformers import (
|
|
|
13 |
AutoProcessor
|
14 |
)
|
15 |
import time
|
16 |
+
import mediapipe as mp
|
17 |
+
from sklearn.preprocessing import StandardScaler
|
18 |
+
import logging
|
19 |
+
|
20 |
+
# Setup logging for continuous feedback
|
21 |
+
logging.basicConfig(level=logging.INFO)
|
22 |
+
logger = logging.getLogger(__name__)
|
23 |
|
24 |
# βββββββββββββββββββββββββββββ
|
25 |
# CONFIG: set your HF token here or via env var HF_TOKEN
|
|
|
37 |
token=HF_TOKEN
|
38 |
)
|
39 |
|
40 |
+
# 2. Alternative medical analysis model
|
41 |
try:
|
42 |
medical_processor = AutoProcessor.from_pretrained(
|
43 |
"microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224",
|
|
|
53 |
medical_model = clip_model
|
54 |
MEDICAL_MODEL_AVAILABLE = False
|
55 |
|
56 |
+
# 3. Initialize MediaPipe Pose for keypoint detection
|
57 |
+
mp_pose = mp.solutions.pose
|
58 |
+
pose_model = mp_pose.Pose(
|
59 |
+
static_image_mode=False,
|
60 |
+
model_complexity=2,
|
61 |
+
enable_segmentation=False,
|
62 |
+
min_detection_confidence=0.5
|
63 |
+
)
|
64 |
+
|
65 |
+
# 4. Multi-Modal Fusion Network
|
66 |
+
class FusionRegressor(nn.Module):
|
67 |
+
def __init__(self, vision_dim=512, gait_dim=20, hrqol_dim=4):
|
68 |
+
super().__init__()
|
69 |
+
self.fc = nn.Sequential(
|
70 |
+
nn.Linear(vision_dim + gait_dim + hrqol_dim, 256),
|
71 |
+
nn.ReLU(),
|
72 |
+
nn.Dropout(0.2),
|
73 |
+
nn.Linear(256, 128),
|
74 |
+
nn.ReLU(),
|
75 |
+
nn.Dropout(0.1),
|
76 |
+
nn.Linear(128, 64),
|
77 |
+
nn.ReLU(),
|
78 |
+
nn.Linear(64, 1)
|
79 |
+
)
|
80 |
+
|
81 |
+
def forward(self, vision_features, gait_features, hrqol_features):
|
82 |
+
combined = torch.cat([vision_features, gait_features, hrqol_features], dim=1)
|
83 |
+
return self.fc(combined)
|
84 |
+
|
85 |
+
# 5. Texture CNN for coat/skin analysis
|
86 |
+
class TextureCNN(nn.Module):
|
87 |
+
def __init__(self, output_dim=128):
|
88 |
+
super().__init__()
|
89 |
+
self.features = nn.Sequential(
|
90 |
+
nn.Conv2d(3, 32, 3, padding=1),
|
91 |
+
nn.ReLU(),
|
92 |
+
nn.MaxPool2d(2),
|
93 |
+
nn.Conv2d(32, 64, 3, padding=1),
|
94 |
+
nn.ReLU(),
|
95 |
+
nn.MaxPool2d(2),
|
96 |
+
nn.Conv2d(64, 128, 3, padding=1),
|
97 |
+
nn.ReLU(),
|
98 |
+
nn.AdaptiveAvgPool2d((4, 4))
|
99 |
+
)
|
100 |
+
self.classifier = nn.Sequential(
|
101 |
+
nn.Linear(128 * 4 * 4, 256),
|
102 |
+
nn.ReLU(),
|
103 |
+
nn.Dropout(0.2),
|
104 |
+
nn.Linear(256, output_dim)
|
105 |
+
)
|
106 |
+
|
107 |
+
def forward(self, x):
|
108 |
+
x = self.features(x)
|
109 |
+
x = x.view(x.size(0), -1)
|
110 |
+
return self.classifier(x)
|
111 |
+
|
112 |
+
# Initialize models
|
113 |
+
fusion_models = []
|
114 |
+
texture_cnn = TextureCNN().to(device)
|
115 |
+
|
116 |
+
# Create ensemble of fusion models
|
117 |
+
for i in range(3):
|
118 |
+
model = FusionRegressor().to(device)
|
119 |
+
fusion_models.append(model)
|
120 |
+
|
121 |
+
# Breed lifespans and other data (keeping original)
|
122 |
STANFORD_BREEDS = [
|
123 |
"afghan hound", "african hunting dog", "airedale", "american staffordshire terrier",
|
124 |
"appenzeller", "australian terrier", "basenji", "basset", "beagle",
|
|
|
189 |
"wire-haired fox terrier": 13.5, "yorkshire terrier": 13.3
|
190 |
}
|
191 |
|
192 |
+
# HRQOL Questionnaire (keeping original)
|
193 |
HRQOL_QUESTIONNAIRE = {
|
194 |
"vitality": {
|
195 |
"title": "π Vitality & Energy Assessment",
|
|
|
353 |
}
|
354 |
}
|
355 |
|
356 |
+
# Advanced feature extraction functions
|
357 |
+
def extract_keypoint_features(video_path):
|
358 |
+
"""Extract advanced gait metrics using MediaPipe Pose"""
|
359 |
+
if not video_path:
|
360 |
+
return np.zeros(20) # Return zero vector if no video
|
361 |
+
|
362 |
+
try:
|
363 |
+
cap = cv2.VideoCapture(video_path)
|
364 |
+
fps = cap.get(cv2.CAP_PROP_FPS) or 24
|
365 |
+
total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
366 |
+
|
367 |
+
if total == 0:
|
368 |
+
cap.release()
|
369 |
+
return np.zeros(20)
|
370 |
+
|
371 |
+
keypoint_sequences = []
|
372 |
+
stride_lengths = []
|
373 |
+
joint_angles = []
|
374 |
+
|
375 |
+
indices = np.linspace(0, total-1, min(30, total), dtype=int)
|
376 |
+
|
377 |
+
for i in indices:
|
378 |
+
cap.set(cv2.CAP_PROP_POS_FRAMES, i)
|
379 |
+
ret, frame = cap.read()
|
380 |
+
if not ret:
|
381 |
+
continue
|
382 |
+
|
383 |
+
# Convert BGR to RGB
|
384 |
+
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
385 |
+
results = pose_model.process(rgb_frame)
|
386 |
+
|
387 |
+
if results.pose_landmarks:
|
388 |
+
landmarks = results.pose_landmarks.landmark
|
389 |
+
|
390 |
+
# Extract key joint positions
|
391 |
+
key_points = []
|
392 |
+
for landmark in landmarks:
|
393 |
+
key_points.extend([landmark.x, landmark.y, landmark.z])
|
394 |
+
|
395 |
+
keypoint_sequences.append(key_points)
|
396 |
+
|
397 |
+
# Calculate stride length (distance between front and back paws)
|
398 |
+
if len(landmarks) > 20: # Ensure we have enough landmarks
|
399 |
+
front_paw = landmarks[19] # Left foot index
|
400 |
+
back_paw = landmarks[23] # Right foot index
|
401 |
+
stride_length = np.sqrt(
|
402 |
+
(front_paw.x - back_paw.x)**2 +
|
403 |
+
(front_paw.y - back_paw.y)**2
|
404 |
+
)
|
405 |
+
stride_lengths.append(stride_length)
|
406 |
+
|
407 |
+
cap.release()
|
408 |
+
|
409 |
+
# Compute gait features
|
410 |
+
features = []
|
411 |
+
|
412 |
+
if keypoint_sequences:
|
413 |
+
keypoint_array = np.array(keypoint_sequences)
|
414 |
+
|
415 |
+
# 1. Average joint positions (first 6 features)
|
416 |
+
features.extend(np.mean(keypoint_array, axis=0)[:6].tolist())
|
417 |
+
|
418 |
+
# 2. Joint position variance (mobility indicator) (next 6 features)
|
419 |
+
features.extend(np.var(keypoint_array, axis=0)[:6].tolist())
|
420 |
+
|
421 |
+
# 3. Stride statistics (4 features)
|
422 |
+
if stride_lengths:
|
423 |
+
features.extend([
|
424 |
+
np.mean(stride_lengths),
|
425 |
+
np.std(stride_lengths),
|
426 |
+
np.min(stride_lengths),
|
427 |
+
np.max(stride_lengths)
|
428 |
+
])
|
429 |
+
else:
|
430 |
+
features.extend([0, 0, 0, 0])
|
431 |
+
|
432 |
+
# 4. Movement symmetry (4 features)
|
433 |
+
if len(keypoint_array) > 1:
|
434 |
+
movement = np.diff(keypoint_array, axis=0)
|
435 |
+
features.extend([
|
436 |
+
np.mean(np.abs(movement)),
|
437 |
+
np.std(movement.flatten()),
|
438 |
+
np.mean(movement[:, :3].flatten()), # Upper body movement
|
439 |
+
np.mean(movement[:, 3:6].flatten()) # Lower body movement
|
440 |
+
])
|
441 |
+
else:
|
442 |
+
features.extend([0, 0, 0, 0])
|
443 |
+
else:
|
444 |
+
features = [0] * 20
|
445 |
+
|
446 |
+
return np.array(features[:20]) # Ensure exactly 20 features
|
447 |
+
|
448 |
+
except Exception as e:
|
449 |
+
logger.error(f"Error in keypoint extraction: {e}")
|
450 |
+
return np.zeros(20)
|
451 |
+
|
452 |
+
def extract_texture_features(img: Image.Image):
|
453 |
+
"""Extract texture and appearance features using CNN"""
|
454 |
+
try:
|
455 |
+
# Preprocess image for texture CNN
|
456 |
+
img_tensor = torch.from_numpy(np.array(img)).permute(2, 0, 1).float() / 255.0
|
457 |
+
img_tensor = F.interpolate(img_tensor.unsqueeze(0), size=(224, 224), mode='bilinear')
|
458 |
+
img_tensor = img_tensor.to(device)
|
459 |
+
|
460 |
+
with torch.no_grad():
|
461 |
+
texture_features = texture_cnn(img_tensor)
|
462 |
+
|
463 |
+
return texture_features.cpu().numpy().flatten()
|
464 |
+
except Exception as e:
|
465 |
+
logger.error(f"Error in texture extraction: {e}")
|
466 |
+
return np.zeros(128)
|
467 |
+
|
468 |
+
def compute_vision_quality(img: Image.Image):
|
469 |
+
"""Compute vision quality score based on sharpness and exposure"""
|
470 |
+
try:
|
471 |
+
# Convert to grayscale for quality assessment
|
472 |
+
gray = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)
|
473 |
+
|
474 |
+
# Compute sharpness using Laplacian variance
|
475 |
+
sharpness = cv2.Laplacian(gray, cv2.CV_64F).var()
|
476 |
+
|
477 |
+
# Compute exposure (avoid over/under exposure)
|
478 |
+
mean_intensity = np.mean(gray)
|
479 |
+
exposure_quality = 1.0 - abs(mean_intensity - 127.5) / 127.5
|
480 |
+
|
481 |
+
# Combined quality score (0-1)
|
482 |
+
quality = min(1.0, (sharpness / 1000.0 + exposure_quality) / 2.0)
|
483 |
+
return max(0.1, quality) # Ensure minimum quality
|
484 |
+
|
485 |
+
except Exception as e:
|
486 |
+
logger.error(f"Error in quality computation: {e}")
|
487 |
+
return 0.5 # Default medium quality
|
488 |
+
|
489 |
+
def predict_biological_age_advanced(img: Image.Image, video_path: str, breed: str, hrqol_scores: dict):
|
490 |
+
"""Advanced biological age prediction using multi-modal fusion"""
|
491 |
+
try:
|
492 |
+
# 1. Extract CLIP vision features
|
493 |
+
inputs = clip_processor(images=img, return_tensors="pt").to(device)
|
494 |
+
with torch.no_grad():
|
495 |
+
vision_features = clip_model.get_image_features(**inputs).cpu().numpy().flatten()
|
496 |
+
|
497 |
+
# 2. Extract texture features
|
498 |
+
texture_features = extract_texture_features(img)
|
499 |
+
|
500 |
+
# 3. Combine vision and texture features
|
501 |
+
combined_vision = np.concatenate([vision_features[:256], texture_features[:256]])
|
502 |
+
|
503 |
+
# 4. Extract gait features from video
|
504 |
+
gait_features = extract_keypoint_features(video_path) if video_path else np.zeros(20)
|
505 |
+
|
506 |
+
# 5. Prepare HRQOL features
|
507 |
+
hrqol_array = np.array([
|
508 |
+
hrqol_scores.get("vitality", 50),
|
509 |
+
hrqol_scores.get("comfort", 50),
|
510 |
+
hrqol_scores.get("emotional_wellbeing", 50),
|
511 |
+
hrqol_scores.get("alertness", 50)
|
512 |
+
]) / 100.0 # Normalize to 0-1
|
513 |
+
|
514 |
+
# 6. Compute vision quality for adaptive weighting
|
515 |
+
vision_quality = compute_vision_quality(img)
|
516 |
+
|
517 |
+
# 7. Quality-aware feature weighting
|
518 |
+
vision_weight = vision_quality
|
519 |
+
gait_weight = 1.0 if video_path else 0.0
|
520 |
+
hrqol_weight = 1.0
|
521 |
+
|
522 |
+
# Normalize weights
|
523 |
+
total_weight = vision_weight + gait_weight + hrqol_weight
|
524 |
+
vision_weight /= total_weight
|
525 |
+
gait_weight /= total_weight
|
526 |
+
hrqol_weight /= total_weight
|
527 |
+
|
528 |
+
# 8. Apply weights to features
|
529 |
+
weighted_vision = combined_vision * vision_weight
|
530 |
+
weighted_gait = gait_features * gait_weight
|
531 |
+
weighted_hrqol = hrqol_array * hrqol_weight
|
532 |
+
|
533 |
+
# 9. Ensemble prediction
|
534 |
+
predictions = []
|
535 |
+
uncertainties = []
|
536 |
+
|
537 |
+
for model in fusion_models:
|
538 |
+
model.eval()
|
539 |
+
with torch.no_grad():
|
540 |
+
# Convert to tensors
|
541 |
+
v_tensor = torch.FloatTensor(weighted_vision[:512]).unsqueeze(0).to(device)
|
542 |
+
g_tensor = torch.FloatTensor(weighted_gait).unsqueeze(0).to(device)
|
543 |
+
h_tensor = torch.FloatTensor(weighted_hrqol).unsqueeze(0).to(device)
|
544 |
+
|
545 |
+
# Predict biological age
|
546 |
+
pred = model(v_tensor, g_tensor, h_tensor)
|
547 |
+
predictions.append(pred.cpu().item())
|
548 |
+
|
549 |
+
# 10. Ensemble averaging and uncertainty estimation
|
550 |
+
mean_prediction = np.mean(predictions)
|
551 |
+
std_prediction = np.std(predictions)
|
552 |
+
|
553 |
+
# 11. Breed-specific calibration
|
554 |
+
breed_lifespan = BREED_LIFESPAN.get(breed.lower(), 12.0)
|
555 |
+
max_reasonable_age = breed_lifespan * 1.2 # 120% of breed lifespan
|
556 |
+
|
557 |
+
# Clip prediction to reasonable range
|
558 |
+
final_prediction = max(0.5, min(max_reasonable_age, mean_prediction))
|
559 |
+
|
560 |
+
# 12. Log for continuous learning
|
561 |
+
logger.info(f"Biological age prediction: {final_prediction:.1f} Β± {std_prediction:.1f}")
|
562 |
+
|
563 |
+
return {
|
564 |
+
'biological_age': final_prediction,
|
565 |
+
'uncertainty': std_prediction,
|
566 |
+
'high_uncertainty': std_prediction > 1.0,
|
567 |
+
'vision_quality': vision_quality,
|
568 |
+
'breed_lifespan': breed_lifespan
|
569 |
+
}
|
570 |
+
|
571 |
+
except Exception as e:
|
572 |
+
logger.error(f"Error in advanced age prediction: {e}")
|
573 |
+
# Fallback to simple prediction
|
574 |
+
return {
|
575 |
+
'biological_age': BREED_LIFESPAN.get(breed.lower(), 12.0) * 0.5,
|
576 |
+
'uncertainty': 2.0,
|
577 |
+
'high_uncertainty': True,
|
578 |
+
'vision_quality': 0.5,
|
579 |
+
'breed_lifespan': BREED_LIFESPAN.get(breed.lower(), 12.0)
|
580 |
+
}
|
581 |
+
|
582 |
+
# Keep all other existing functions (analyze_medical_image, classify_breed_and_health, etc.)
|
583 |
+
# ... [Previous functions remain the same] ...
|
584 |
|
585 |
def analyze_medical_image(img: Image.Image):
|
586 |
health_conditions = [
|
|
|
790 |
}
|
791 |
</style>
|
792 |
<h3 style="color: #667eea; margin-top: 20px;">π¬ Analyzing Your Dog's Health...</h3>
|
793 |
+
<p style="color: #666;">Please wait while we process the image/video and questionnaire data using advanced AI models.</p>
|
794 |
<div style="background: #f0f0f0; border-radius: 20px; padding: 10px; margin: 20px auto; width: 300px;">
|
795 |
<div style="background: linear-gradient(90deg, #667eea, #764ba2); height: 6px; border-radius: 10px; width: 0%; animation: progress 3s ease-in-out infinite;"></div>
|
796 |
</div>
|
|
|
805 |
"""
|
806 |
|
807 |
def comprehensive_healthspan_analysis(input_type, image_input, video_input, breed, age, *hrqol_responses):
|
808 |
+
"""Enhanced comprehensive analysis with advanced multi-modal features"""
|
809 |
|
810 |
# Show loading first
|
811 |
yield show_loading()
|
812 |
|
813 |
+
# Simulate processing time for advanced computations
|
814 |
+
time.sleep(3)
|
815 |
|
816 |
# Determine which input to use based on dropdown selection
|
817 |
if input_type == "Image Analysis":
|
818 |
selected_media = image_input
|
819 |
media_type = "image"
|
820 |
+
video_path = None
|
821 |
elif input_type == "Video Analysis":
|
822 |
selected_media = video_input
|
823 |
media_type = "video"
|
824 |
+
video_path = video_input
|
825 |
else:
|
826 |
yield "β **Error**: Please select an input type."
|
827 |
return
|
|
|
850 |
# Initialize analysis variables
|
851 |
video_features = {}
|
852 |
breed_info = None
|
853 |
+
advanced_age_info = None
|
854 |
health_aspects = {}
|
855 |
|
856 |
# Perform analysis based on media type
|
857 |
if media_type == "image":
|
858 |
try:
|
859 |
detected_breed, breed_conf, health_aspects = classify_breed_and_health(selected_media, breed)
|
860 |
+
|
861 |
+
# Advanced biological age prediction
|
862 |
+
advanced_age_info = predict_biological_age_advanced(
|
863 |
+
selected_media, None, detected_breed, hrqol_scores
|
864 |
+
)
|
865 |
+
|
866 |
breed_info = {
|
867 |
"breed": detected_breed,
|
868 |
"confidence": breed_conf,
|
869 |
+
"bio_age": advanced_age_info['biological_age'],
|
870 |
+
"uncertainty": advanced_age_info['uncertainty'],
|
871 |
+
"high_uncertainty": advanced_age_info['high_uncertainty'],
|
872 |
+
"vision_quality": advanced_age_info['vision_quality']
|
873 |
}
|
874 |
except Exception as e:
|
875 |
+
logger.error(f"Image analysis error: {e}")
|
876 |
|
877 |
elif media_type == "video":
|
878 |
+
# For video, analyze both movement and extract frame for breed analysis
|
879 |
video_features = analyze_video_gait(selected_media) or {}
|
880 |
|
881 |
# Try to extract a frame from video for breed analysis
|
|
|
885 |
if ret:
|
886 |
img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
|
887 |
detected_breed, breed_conf, health_aspects = classify_breed_and_health(img, breed)
|
888 |
+
|
889 |
+
# Advanced biological age prediction with video
|
890 |
+
advanced_age_info = predict_biological_age_advanced(
|
891 |
+
img, selected_media, detected_breed, hrqol_scores
|
892 |
+
)
|
893 |
+
|
894 |
breed_info = {
|
895 |
"breed": detected_breed,
|
896 |
"confidence": breed_conf,
|
897 |
+
"bio_age": advanced_age_info['biological_age'],
|
898 |
+
"uncertainty": advanced_age_info['uncertainty'],
|
899 |
+
"high_uncertainty": advanced_age_info['high_uncertainty'],
|
900 |
+
"vision_quality": advanced_age_info['vision_quality']
|
901 |
}
|
902 |
cap.release()
|
903 |
except Exception as e:
|
904 |
+
logger.error(f"Video analysis error: {e}")
|
905 |
|
906 |
+
# Calculate Composite Healthspan Score (enhanced)
|
907 |
+
video_weight = 0.3 if video_features else 0.0
|
908 |
+
hrqol_weight = 0.7 if video_features else 1.0
|
909 |
|
910 |
if video_features:
|
911 |
video_score = (
|
912 |
+
video_features.get("mobility_score", 70) * 0.4 +
|
913 |
+
video_features.get("comfort_score", 70) * 0.3 +
|
914 |
+
video_features.get("vitality_score", 70) * 0.3
|
915 |
)
|
916 |
else:
|
917 |
video_score = 0
|
|
|
926 |
final_healthspan_score = (video_score * video_weight) + (hrqol_composite * hrqol_weight)
|
927 |
final_healthspan_score = min(100, max(0, final_healthspan_score))
|
928 |
|
929 |
+
# Generate comprehensive report with advanced features
|
930 |
input_type_icon = "πΈ" if media_type == "image" else "π₯"
|
931 |
|
932 |
report_html = f"""
|
933 |
<div style="font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; max-width: 1000px; margin: 0 auto;">
|
934 |
<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 30px; border-radius: 15px; margin: 20px 0; text-align: center; box-shadow: 0 4px 6px rgba(0,0,0,0.1);">
|
935 |
+
<h2 style="margin: 0; font-size: 2em; text-shadow: 1px 1px 2px rgba(0,0,0,0.3);">{input_type_icon} Advanced Multi-Modal Health Assessment</h2>
|
936 |
+
<div style="font-size: 1.1em; margin: 10px 0; opacity: 0.9;">Analysis Type: {input_type} | Enhanced AI Models</div>
|
937 |
<div style="font-size: 3em; font-weight: bold; margin: 15px 0; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);">{final_healthspan_score:.1f}/100</div>
|
938 |
<div style="font-size: 1.2em; background: rgba(255,255,255,0.2); padding: 8px 16px; border-radius: 20px; display: inline-block;">{get_healthspan_grade(final_healthspan_score)}</div>
|
939 |
</div>
|
|
|
958 |
|
959 |
report_html += "</div>"
|
960 |
|
961 |
+
# Enhanced Visual Analysis section with uncertainty
|
962 |
if breed_info:
|
963 |
+
uncertainty_info = ""
|
964 |
+
if breed_info.get('high_uncertainty', False):
|
965 |
+
uncertainty_info = f"""<div style="background: #fff3cd; border: 1px solid #ffeaa7; padding: 10px; border-radius: 8px; margin: 10px 0;">
|
966 |
+
<p style="margin: 0; color: #856404;"><strong>β οΈ High Uncertainty:</strong>
|
967 |
+
Age prediction uncertainty is Β±{breed_info.get('uncertainty', 0):.1f} years. Consider veterinary consultation.</p>
|
968 |
+
</div>"""
|
969 |
+
|
970 |
pace_info = ""
|
971 |
if age and age > 0:
|
972 |
pace = breed_info["bio_age"] / age
|
|
|
976 |
<span style="background: {pace_color}; color: white; padding: 4px 8px; border-radius: 12px; font-weight: bold; text-shadow: 1px 1px 1px rgba(0,0,0,0.3);">
|
977 |
{pace:.2f}Γ ({pace_status})</span></p>"""
|
978 |
|
979 |
+
vision_quality_info = f"""<p style="margin: 8px 0;"><strong style="color: #000000;">Image Quality:</strong>
|
980 |
+
<span style="color: #000000; font-weight: 700;">{breed_info.get('vision_quality', 0.5)*100:.0f}%</span></p>"""
|
981 |
+
|
982 |
report_html += f"""
|
983 |
<div style="border: 2px solid #333333; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
|
984 |
+
<h3 style="color: #000000; margin: 0 0 15px 0; font-weight: 700; border-bottom: 2px solid #333333; padding-bottom: 8px;">{input_type_icon} Advanced Visual Analysis</h3>
|
985 |
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Detected Breed:</strong> <span style="color: #000000; font-weight: 700;">{breed_info['breed']}</span> <span style="background: #333333; color: #ffffff; padding: 2px 6px; border-radius: 8px; font-size: 0.9em;">({breed_info['confidence']:.1%} confidence)</span></p>
|
986 |
+
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">AI-Predicted Biological Age:</strong> <span style="color: #000000; font-weight: 700;">{breed_info['bio_age']:.1f} Β± {breed_info.get('uncertainty', 0):.1f} years</span></p>
|
987 |
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Chronological Age:</strong> <span style="color: #000000; font-weight: 700;">{age or 'Not provided'} years</span></p>
|
988 |
+
{vision_quality_info}
|
989 |
{pace_info}
|
990 |
+
{uncertainty_info}
|
991 |
</div>
|
992 |
"""
|
|
|
993 |
|
994 |
+
# Enhanced video analysis with keypoint features
|
995 |
if video_features:
|
996 |
report_html += f"""
|
997 |
+
<div style="border: 2px solid #333333; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
|
998 |
+
<h3 style="color: #000000; margin: 0 0 15px 0; font-weight: 700; border-bottom: 2px solid #333333; padding-bottom: 8px;">π₯ Advanced Gait & Movement Analysis</h3>
|
999 |
+
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Duration:</strong> <span style="color: #000000; font-weight: 700;">{video_features['duration_sec']} seconds</span></p>
|
1000 |
+
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Mobility Assessment:</strong> <span style="color: #000000; font-weight: 700;">{video_features['mobility_assessment']}</span></p>
|
1001 |
+
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Comfort Assessment:</strong> <span style="color: #000000; font-weight: 700;">{video_features['comfort_assessment']}</span></p>
|
1002 |
+
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Vitality Assessment:</strong> <span style="color: #000000; font-weight: 700;">{video_features['vitality_assessment']}</span></p>
|
1003 |
+
<p style="margin: 8px 0; color: #000000; font-weight: 500;"><strong style="color: #000000;">Keypoint Analysis:</strong> <span style="color: #000000; font-weight: 700;">{video_features['frames_analyzed']} frames with pose detection</span></p>
|
1004 |
+
</div>
|
1005 |
+
"""
|
1006 |
+
|
1007 |
# Physical Health Assessment with improved visibility
|
1008 |
if health_aspects and media_type == "image":
|
1009 |
report_html += f"""
|
|
|
1029 |
"""
|
1030 |
report_html += "</div>"
|
1031 |
|
1032 |
+
# Enhanced recommendations based on advanced analysis
|
1033 |
recommendations = []
|
1034 |
if hrqol_scores["vitality"] < 60:
|
1035 |
+
recommendations.append("π **Vitality Enhancement**: Implement graduated exercise program with monitoring")
|
1036 |
if hrqol_scores["comfort"] < 70:
|
1037 |
+
recommendations.append("π **Comfort Support**: Consider pain management and mobility aids")
|
1038 |
if hrqol_scores["emotional_wellbeing"] < 65:
|
1039 |
+
recommendations.append("π **Emotional Care**: Increase environmental enrichment and social interaction")
|
1040 |
if hrqol_scores["alertness"] < 70:
|
1041 |
+
recommendations.append("π§ **Cognitive Support**: Introduce cognitive enhancement activities")
|
1042 |
+
|
1043 |
+
if breed_info and breed_info.get('high_uncertainty', False):
|
1044 |
+
recommendations.append("π₯ **Veterinary Consultation**: High prediction uncertainty suggests professional evaluation needed")
|
1045 |
|
1046 |
if recommendations:
|
1047 |
report_html += f"""
|
1048 |
<div style="border: 2px solid #FF9800; padding: 20px; border-radius: 12px; margin: 20px 0; background: #ffffff; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
|
1049 |
+
<h3 style="color: #F57C00; margin: 0 0 15px 0; font-weight: 600; border-bottom: 2px solid #FFF3E0; padding-bottom: 8px;">π― AI-Generated Recommendations</h3>
|
1050 |
{''.join([f'<div style="margin: 10px 0; padding: 12px; background: #FFF8E1; border-radius: 8px; border-left: 4px solid #FF9800;"><p style="margin: 0; color: #333; font-weight: 500;">{rec}</p></div>' for rec in recommendations])}
|
1051 |
</div>
|
1052 |
"""
|
1053 |
|
1054 |
+
# Enhanced disclaimer with model information
|
1055 |
report_html += """
|
1056 |
<div style="background: #F5F5F5; border: 1px solid #E0E0E0; padding: 20px; border-radius: 8px; margin: 20px 0;">
|
1057 |
<p style="margin: 0; font-size: 0.9em; color: #424242; line-height: 1.5;">
|
1058 |
<strong style="color: #D32F2F;">β οΈ Important Disclaimer:</strong>
|
1059 |
+
This analysis uses advanced AI models including keypoint detection, texture analysis, and ensemble prediction.
|
1060 |
+
Results are for educational purposes only. Always consult with a qualified veterinarian for professional medical advice and diagnosis.
|
1061 |
+
</p>
|
1062 |
+
<p style="margin: 10px 0 0 0; font-size: 0.8em; color: #666;">
|
1063 |
+
<strong>Models Used:</strong> CLIP Vision Transformer, MediaPipe Pose, Multi-Modal Fusion Networks, Ensemble Prediction
|
1064 |
</p>
|
1065 |
</div>
|
1066 |
</div>
|
|
|
1165 |
|
1166 |
# Gradio Interface with Enhanced UI
|
1167 |
with gr.Blocks(
|
1168 |
+
title="πΆ Advanced AI Dog Health Analyzer",
|
1169 |
theme=gr.themes.Soft(),
|
1170 |
css=custom_css
|
1171 |
) as demo:
|
|
|
1174 |
gr.HTML("""
|
1175 |
<div class="main-header">
|
1176 |
<h1 style="margin: 0; font-size: 2.5em; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);">
|
1177 |
+
π Advanced AI Dog Health & Aging Analyzer
|
1178 |
</h1>
|
1179 |
<p style="margin: 15px 0 0 0; font-size: 1.2em; opacity: 0.9;">
|
1180 |
+
Multi-Modal AI Analysis: Keypoint Detection β’ Texture Analysis β’ Ensemble Learning
|
1181 |
</p>
|
1182 |
</div>
|
1183 |
""")
|
|
|
1295 |
""")
|
1296 |
|
1297 |
analyze_button = gr.Button(
|
1298 |
+
"π¬ Run Advanced AI Analysis",
|
1299 |
variant="primary",
|
1300 |
size="lg",
|
1301 |
elem_classes=["analyze-button"]
|