Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ import numpy as np
 import cv2
 from transformers import (
     CLIPProcessor, CLIPModel,
-    AutoProcessor,
+    AutoProcessor, AutoModelForVisionToText
 )

 # ─────────────────────────────

@@ -20,32 +20,100 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 # 1. CLIP for breed, age, basic health
 clip_model = CLIPModel.from_pretrained(
     "openai/clip-vit-base-patch16",
+    token=HF_TOKEN
 ).to(device)
 clip_processor = CLIPProcessor.from_pretrained(
     "openai/clip-vit-base-patch16",
+    token=HF_TOKEN
 )

+# 2. Alternative medical analysis model (public, no gating issues)
+try:
+    # Try to load a publicly available medical vision model
+    medical_processor = AutoProcessor.from_pretrained(
+        "microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224",
+        token=HF_TOKEN
+    )
+    medical_model = CLIPModel.from_pretrained(
+        "microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224",
+        token=HF_TOKEN
+    ).to(device)
+    MEDICAL_MODEL_AVAILABLE = True
+except:
+    # Fallback: use CLIP for medical analysis too
+    medical_processor = clip_processor
+    medical_model = clip_model
+    MEDICAL_MODEL_AVAILABLE = False

+# 3. Stanford Dogs & lifespans (expanded list)
 STANFORD_BREEDS = [
+    "afghan hound", "african hunting dog", "airedale", "american staffordshire terrier",
+    "appenzeller", "australian terrier", "basenji", "basset", "beagle",
+    "bedlington terrier", "bernese mountain dog", "black-and-tan coonhound",
+    "blenheim spaniel", "bloodhound", "bluetick", "border collie", "border terrier",
+    "borzoi", "boston bull", "bouvier des flandres", "boxer", "brabancon griffon",
+    "briard", "brittany spaniel", "bull mastiff", "cairn", "cardigan",
+    "chesapeake bay retriever", "chihuahua", "chow", "clumber", "cocker spaniel",
+    "collie", "curly-coated retriever", "dandie dinmont", "dhole", "dingo",
+    "doberman", "english foxhound", "english setter", "english springer",
+    "entlebucher", "eskimo dog", "flat-coated retriever", "french bulldog",
+    "german shepherd", "german short-haired pointer", "giant schnauzer",
+    "golden retriever", "gordon setter", "great dane", "great pyrenees",
+    "greater swiss mountain dog", "groenendael", "ibizan hound", "irish setter",
+    "irish terrier", "irish water spaniel", "irish wolfhound", "italian greyhound",
+    "japanese spaniel", "keeshond", "kelpie", "kerry blue terrier", "komondor",
+    "kuvasz", "labrador retriever", "lakeland terrier", "leonberg", "lhasa",
+    "malamute", "malinois", "maltese dog", "mexican hairless", "miniature pinscher",
+    "miniature poodle", "miniature schnauzer", "newfoundland", "norfolk terrier",
+    "norwegian elkhound", "norwich terrier", "old english sheepdog", "otterhound",
+    "papillon", "pekinese", "pembroke", "pomeranian", "pug", "redbone",
+    "rhodesian ridgeback", "rottweiler", "saint bernard", "saluki", "samoyed",
+    "schipperke", "scotch terrier", "scottish deerhound", "sealyham terrier",
+    "shetland sheepdog", "shih tzu", "siberian husky", "silky terrier",
+    "soft-coated wheaten terrier", "staffordshire bullterrier", "standard poodle",
+    "standard schnauzer", "sussex spaniel", "tibetan mastiff", "tibetan terrier",
+    "toy poodle", "toy terrier", "vizsla", "walker hound", "weimaraner",
+    "welsh springer spaniel", "west highland white terrier", "whippet",
+    "wire-haired fox terrier", "yorkshire terrier"
 ]
+
 BREED_LIFESPAN = {
+    "afghan hound": 11.1, "african hunting dog": 10.5, "airedale": 11.5,
+    "american staffordshire terrier": 12.5, "appenzeller": 13.0, "australian terrier": 13.5,
+    "basenji": 12.1, "basset": 12.5, "beagle": 12.5, "bedlington terrier": 13.7,
+    "bernese mountain dog": 10.1, "black-and-tan coonhound": 10.8, "blenheim spaniel": 13.3,
+    "bloodhound": 9.3, "bluetick": 11.0, "border collie": 13.1, "border terrier": 14.2,
+    "borzoi": 12.0, "boston bull": 11.8, "bouvier des flandres": 11.3, "boxer": 11.3,
+    "brabancon griffon": 13.0, "briard": 12.6, "brittany spaniel": 13.5,
+    "bull mastiff": 10.2, "cairn": 14.0, "cardigan": 13.2, "chesapeake bay retriever": 11.6,
+    "chihuahua": 11.8, "chow": 12.1, "clumber": 12.3, "cocker spaniel": 13.3,
+    "collie": 13.3, "curly-coated retriever": 12.2, "dandie dinmont": 12.8,
+    "dhole": 10.0, "dingo": 10.0, "doberman": 11.2, "english foxhound": 13.0,
+    "english setter": 13.1, "english springer": 13.5, "entlebucher": 13.0,
+    "eskimo dog": 11.3, "flat-coated retriever": 11.7, "french bulldog": 9.8,
+    "german shepherd": 11.3, "german short-haired pointer": 13.4, "giant schnauzer": 12.1,
+    "golden retriever": 13.2, "gordon setter": 12.4, "great dane": 10.6,
+    "great pyrenees": 10.9, "greater swiss mountain dog": 10.9, "groenendael": 12.0,
+    "ibizan hound": 13.3, "irish setter": 12.9, "irish terrier": 13.5,
+    "irish water spaniel": 10.8, "irish wolfhound": 9.9, "italian greyhound": 14.0,
+    "japanese spaniel": 13.3, "keeshond": 12.3, "kelpie": 12.0, "kerry blue terrier": 12.4,
+    "komondor": 10.5, "kuvasz": 10.5, "labrador retriever": 13.1, "lakeland terrier": 14.2,
+    "leonberg": 10.0, "lhasa": 14.0, "malamute": 11.3, "malinois": 12.0,
+    "maltese dog": 13.1, "mexican hairless": 13.0, "miniature pinscher": 13.7,
+    "miniature poodle": 14.0, "miniature schnauzer": 13.3, "newfoundland": 11.0,
+    "norfolk terrier": 13.5, "norwegian elkhound": 13.0, "norwich terrier": 14.0,
+    "old english sheepdog": 12.1, "otterhound": 12.0, "papillon": 14.5,
+    "pekinese": 13.3, "pembroke": 13.2, "pomeranian": 12.2, "pug": 11.6,
+    "redbone": 12.0, "rhodesian ridgeback": 12.0, "rottweiler": 10.6,
+    "saint bernard": 9.3, "saluki": 13.3, "samoyed": 13.1, "schipperke": 14.2,
+    "scotch terrier": 12.7, "scottish deerhound": 10.5, "sealyham terrier": 13.1,
+    "shetland sheepdog": 13.4, "shih tzu": 12.8, "siberian husky": 11.9,
+    "silky terrier": 13.3, "soft-coated wheaten terrier": 13.7, "staffordshire bullterrier": 12.0,
+    "standard poodle": 14.0, "standard schnauzer": 13.0, "sussex spaniel": 13.5,
+    "tibetan mastiff": 13.3, "tibetan terrier": 13.8, "toy poodle": 14.0,
+    "toy terrier": 13.0, "vizsla": 13.5, "walker hound": 12.0, "weimaraner": 12.8,
+    "welsh springer spaniel": 14.0, "west highland white terrier": 13.4, "whippet": 13.4,
+    "wire-haired fox terrier": 13.5, "yorkshire terrier": 13.3
 }

 # 4. Questionnaire

@@ -73,7 +141,6 @@ QUESTIONNAIRE = [
 ]
 SCALE = ["0","1","2","3","4","5"]

 def predict_biological_age(img: Image.Image, breed: str) -> int:
     avg = BREED_LIFESPAN.get(breed.lower(), 12)
     prompts = [f"a {age}-year-old {breed}" for age in range(1, int(avg*2)+1)]

@@ -83,17 +150,34 @@ def predict_biological_age(img: Image.Image, breed: str) -> int:
     return int(np.argmax(probs)+1)

 def analyze_medical_image(img: Image.Image):
+    # Use medical terminology for health assessment
+    health_conditions = [
+        "healthy normal dog",
+        "dog with visible health issues",
+        "dog showing signs of illness",
+        "dog with poor body condition",
+        "dog with excellent health"
+    ]
+
+    if MEDICAL_MODEL_AVAILABLE:
+        inputs = medical_processor(text=health_conditions, images=img, return_tensors="pt", padding=True).to(device)
+        with torch.no_grad():
+            logits = medical_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
+    else:
+        inputs = clip_processor(text=health_conditions, images=img, return_tensors="pt", padding=True).to(device)
+        with torch.no_grad():
+            logits = clip_model(**inputs).logits_per_image.softmax(-1)[0].cpu().numpy()
+
+    idx = int(np.argmax(logits))
+    label = health_conditions[idx]
+    conf = float(logits[idx])
     return label, conf

 def classify_breed_and_health(img: Image.Image, override=None):
     inp = clip_processor(images=img, return_tensors="pt").to(device)
     with torch.no_grad():
         feats = clip_model.get_image_features(**inp)
+
     text_prompts = [f"a photo of a {b}" for b in STANFORD_BREEDS]
     ti = clip_processor(text=text_prompts, return_tensors="pt", padding=True).to(device)
     with torch.no_grad():

@@ -102,6 +186,7 @@ def classify_breed_and_health(img: Image.Image, override=None):
     idx = int(np.argmax(sims))
     breed = override or STANFORD_BREEDS[idx]
     breed_conf = float(sims[idx])
+
     aspects = {
         "Coat": ("shiny healthy coat","dull patchy fur"),
         "Eyes": ("bright clear eyes","cloudy milky eyes"),

@@ -119,70 +204,117 @@ def analyze_video(video_path):
     return breed, breed_conf, health

 def analyze_video(video_path):
+    if not video_path:
+        return {"error": "No video provided"}
+
+    try:
+        cap = cv2.VideoCapture(video_path)
+        fps = cap.get(cv2.CAP_PROP_FPS) or 24
+        total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+        if total == 0:
+            cap.release()
+            return {"error": "Invalid video file"}
+
+        indices = np.linspace(0,total-1,min(10, total),dtype=int)
+        scores=[]
+        for i in indices:
+            cap.set(cv2.CAP_PROP_POS_FRAMES, i)
+            ret,frame=cap.read()
+            if not ret:
+                continue
+            img=Image.fromarray(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB))
+            _,conf=analyze_medical_image(img)
+            scores.append(conf)
+        cap.release()
+
+        if not scores:
+            return {"error": "Could not extract frames from video"}
+
+        return {
+            "duration_sec": round(total/fps,1),
+            "avg_gait_conf": float(np.mean(scores)),
+            "frames_analyzed": len(scores)
+        }
+    except Exception as e:
+        return {"error": f"Video analysis failed: {str(e)}"}

 def compute_q_score(answers):
+    if not answers or all(a is None for a in answers):
+        return {"error": "No answers provided"}
+
     out={}
     idx=0
     for sec in QUESTIONNAIRE:
         n=len(sec["questions"])
+        try:
+            vals=[int(a) if a is not None else 0 for a in answers[idx:idx+n]]
+            idx+=n
+            out[sec["domain"]]=round(sum(vals)/n,2)
+        except (ValueError, TypeError):
+            out[sec["domain"]] = 0.0
     return out

 with gr.Blocks(title="🐶 Dog Health & Age Analyzer") as demo:
     gr.Markdown("## Upload an Image or Video (10–30 s) or Record Live")

-    with gr.Tab("Image"):
-        img = gr.Image(type="pil")
-        br = gr.Textbox(label="Override Breed")
-        ca = gr.Number(label="Chronological Age")
-        btn = gr.Button("Analyze")
+    with gr.Tab("Image Analysis"):
+        img = gr.Image(type="pil", label="Upload Dog Image")
+        br = gr.Textbox(label="Override Breed (Optional)")
+        ca = gr.Number(label="Chronological Age (years)", precision=1)
+        btn = gr.Button("Analyze Image")
         md = gr.Markdown()
+
         def run_i(i,b,o):
+            if i is None:
+                return "Please upload an image first."
+
+            try:
+                breed,bc,h=classify_breed_and_health(i,b)
+                ml,mc=analyze_medical_image(i)
+                ba=predict_biological_age(i,breed)
+                pace = f"{ba/o:.2f}×" if o and o > 0 else "N/A"
+
+                rpt = f"**Breed:** {breed} ({bc:.1%})\n\n"
+                rpt+=f"**Health Assessment:** {ml} ({mc:.1%})\n\n"
+                rpt+=f"**Bio Age:** {ba} yrs | **Chrono:** {o or 'N/A'} yrs | **Pace:** {pace}\n\n"
+                rpt+="### Health Aspects\n"+ "\n".join(f"- **{k}:** {v['assessment']} ({v['confidence']:.1%})" for k,v in h.items())
+                return rpt
+            except Exception as e:
+                return f"Analysis failed: {str(e)}"
+
         btn.click(run_i, inputs=[img,br,ca], outputs=md)

-    with gr.Tab("Video"):
-        vid=gr.Video()
+    with gr.Tab("Video Analysis"):
+        vid=gr.Video(label="Upload Video (10-30 seconds)")
         b2=gr.Button("Analyze Video")
         out2=gr.JSON()
         b2.click(analyze_video, inputs=vid, outputs=out2)

-    with gr.Tab("Questionnaire"):
+    with gr.Tab("Healthspan Questionnaire"):
         widgets=[]
         for sec in QUESTIONNAIRE:
             gr.Markdown(f"### {sec['domain']}")
             for q in sec["questions"]:
-                w = gr.Radio(SCALE, label=q)
+                w = gr.Radio(SCALE, label=q, value="0")
                 widgets.append(w)
-        b3=gr.Button("Score")
+        b3=gr.Button("Compute Healthspan Score")
         o3=gr.JSON()
         b3.click(compute_q_score, inputs=widgets, outputs=o3)

     with gr.Tab("About"):
+        gr.Markdown("""
+        ## 🐶 Dog Health & Age Analyzer
+
+        **Features:**
+        - **Breed Classification**: Identifies dog breeds using CLIP vision-language model
+        - **Age Estimation**: Predicts biological age based on visual appearance
+        - **Health Assessment**: Analyzes coat, eyes, body condition, and teeth
+        - **Video Analysis**: Evaluates gait and movement patterns
+        - **Healthspan Questionnaire**: Research-based assessment tool
+
+        **Note**: This tool is for educational purposes only and should not replace professional veterinary consultation.
+        """)

+if __name__ == "__main__":
+    demo.launch()
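
Note: the `token=HF_TOKEN` arguments added in this commit reference an `HF_TOKEN` value defined earlier in app.py, outside the hunks shown. As a minimal sketch (not part of this commit), such a token is typically read from a Space secret or environment variable; the variable name below is taken from the diff, while the snippet itself is illustrative:

import os

# Illustrative only, not part of the commit: read a Hugging Face access token
# from a Space secret / environment variable. A value of None means anonymous
# access, which is sufficient for the public openai/clip-vit-base-patch16 checkpoint.
HF_TOKEN = os.getenv("HF_TOKEN")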