Update app.py
app.py CHANGED
@@ -1,6 +1,5 @@
 import torch
 from transformers import ViTImageProcessor, ViTForImageClassification
-from transformers import AutoFeatureExtractor, AutoModelForImageClassification
 from fastai.learner import load_learner
 from fastai.vision.core import PILImage
 from PIL import Image
@@ -9,84 +8,72 @@ import numpy as np
 import gradio as gr
 import io
 import base64
-import os
-import zipfile
 
-# --- Cargar modelo ViT ---
+# --- Cargar modelo ViT preentrenado fine‑tuned HAM10000 ---
+TF_MODEL_NAME = "Anwarkh1/Skin_Cancer-Image_Classification"
+feature_extractor_tf = ViTImageProcessor.from_pretrained(TF_MODEL_NAME)
+model_tf_vit = ViTForImageClassification.from_pretrained(TF_MODEL_NAME)
+model_tf_vit.eval()
+
+# 🔹 Cargar modelo ViT base
 MODEL_NAME = "ahishamm/vit-base-HAM-10000-sharpened-patch-32"
 feature_extractor = ViTImageProcessor.from_pretrained(MODEL_NAME)
 model_vit = ViTForImageClassification.from_pretrained(MODEL_NAME)
 model_vit.eval()
 
-#
+# 🔹 Cargar modelos Fast.ai locales
 model_malignancy = load_learner("ada_learn_malben.pkl")
 model_norm2000 = load_learner("ada_learn_skin_norm2000.pkl")
 
-#
-model_effnet = AutoModelForImageClassification.from_pretrained("syaha/skin_cancer_detection_model")
-extractor_effnet = AutoFeatureExtractor.from_pretrained("syaha/skin_cancer_detection_model")
-model_effnet.eval()
-
+# Clases estándar de HAM10000
 CLASSES = [
     "Queratosis actínica / Bowen", "Carcinoma células basales",
     "Lesión queratósica benigna", "Dermatofibroma",
     "Melanoma maligno", "Nevus melanocítico", "Lesión vascular"
 ]
-
 RISK_LEVELS = {
     0: {'level': 'Moderado', 'color': '#ffaa00', 'weight': 0.6},
-    1: {'level': 'Alto', …
-    2: {'level': 'Bajo', …
-    3: {'level': 'Bajo', …
-    4: {'level': 'Crítico', …
-    5: {'level': 'Bajo', …
-    6: {'level': 'Bajo', …
+    1: {'level': 'Alto', 'color': '#ff4444', 'weight': 0.8},
+    2: {'level': 'Bajo', 'color': '#44ff44', 'weight': 0.1},
+    3: {'level': 'Bajo', 'color': '#44ff44', 'weight': 0.1},
+    4: {'level': 'Crítico', 'color': '#cc0000', 'weight': 1.0},
+    5: {'level': 'Bajo', 'color': '#44ff44', 'weight': 0.1},
+    6: {'level': 'Bajo', 'color': '#44ff44', 'weight': 0.1}
 }
-
-MALIGNANT_INDICES = [0, 1, 4]  # clases de riesgo alto/crítico
+MALIGNANT_INDICES = [0, 1, 4]  # akiec, bcc, melanoma
 
 def analizar_lesion_combined(img):
[… old lines 49–76 (previous body of analizar_lesion_combined) are blank or truncated in the diff view …]
-    outputs_eff = model_effnet(**inputs_eff)
-    probs_eff = outputs_eff.logits.softmax(dim=-1).cpu().numpy()[0]
-    pred_idx_eff = int(np.argmax(probs_eff))
-    confidence_eff = probs_eff[pred_idx_eff]
-    pred_class_eff = model_effnet.config.id2label[str(pred_idx_eff)]
-    except Exception as e:
-        pred_class_eff = "Error"
-        confidence_eff = 0.0
-
-    colors_bars = [RISK_LEVELS[i]['color'] for i in range(7)]
+    img_fastai = PILImage.create(img)
+
+    # ViT base
+    inputs = feature_extractor(img, return_tensors="pt")
+    with torch.no_grad():
+        outputs = model_vit(**inputs)
+    probs_vit = outputs.logits.softmax(dim=-1).cpu().numpy()[0]
+    idx_vit = int(np.argmax(probs_vit))
+    class_vit = CLASSES[idx_vit]
+    conf_vit = probs_vit[idx_vit]
+
+    # Fast.ai modelos
+    _, _, probs_mal = model_malignancy.predict(img_fastai)
+    prob_malign = float(probs_mal[1])
+    pred_fast_type, _, _ = model_norm2000.predict(img_fastai)
+
+    # ViT pre-trained fine-tuned (último modelo recomendado)
+    inputs_tf = feature_extractor_tf(img, return_tensors="pt")
+    with torch.no_grad():
+        outputs_tf = model_tf_vit(**inputs_tf)
+    probs_tf = outputs_tf.logits.softmax(dim=-1).cpu().numpy()[0]
+    idx_tf = int(np.argmax(probs_tf))
+    class_tf_model = CLASSES[idx_tf]
+    conf_tf = probs_tf[idx_tf]
+    mal_tf = "Maligno" if idx_tf in MALIGNANT_INDICES else "Benigno"
+
+    # Gráfico ViT base
+    colors = [RISK_LEVELS[i]['color'] for i in range(7)]
     fig, ax = plt.subplots(figsize=(8, 3))
-    ax.bar(CLASSES, probs_vit*100, color= …
-    ax.set_title("Probabilidad ViT por tipo de lesión")
+    ax.bar(CLASSES, probs_vit*100, color=colors)
+    ax.set_title("Probabilidad ViT base por tipo de lesión")
     ax.set_ylabel("Probabilidad (%)")
     ax.set_xticks(np.arange(len(CLASSES)))
     ax.set_xticklabels(CLASSES, rotation=45, ha='right')
@@ -95,45 +82,39 @@ def analizar_lesion_combined(img):
     buf = io.BytesIO()
     plt.savefig(buf, format="png")
     plt.close(fig)
-
-    html_chart = f'<img src="data:image/png;base64,{img_b64}" style="max-width:100%"/>'
+    html_chart = f'<img src="data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}" style="max-width:100%"/>'
 
     informe = f"""
     <div style="font-family:sans-serif; max-width:800px; margin:auto">
-    <h2>🧪 Diagnóstico por …
-    <table style=" …
[… old lines 105–109 (previous report table rows) are truncated in the diff view …]
-    </table>
-    <br>
-    <b>🧪 Recomendación automática:</b><br>
+    <h2>🧪 Diagnóstico por múltiples modelos de IA</h2>
+    <table style="width:100%; font-size:16px; border-collapse:collapse">
+    <tr><th>Modelo</th><th>Resultado</th><th>Confianza</th></tr>
+    <tr><td>🧠 ViT base</td><td><b>{class_vit}</b></td><td>{conf_vit:.1%}</td></tr>
+    <tr><td>🧬 Fast.ai (tipo)</td><td><b>{pred_fast_type}</b></td><td>N/A</td></tr>
+    <tr><td>⚠️ Fast.ai (malignidad)</td><td><b>{'Maligno' if prob_malign > 0.5 else 'Benigno'}</b></td><td>{prob_malign:.1%}</td></tr>
+    <tr><td>🌟 ViT fine‑tuned (HAM10000)</td><td><b>{mal_tf} ({class_tf_model})</b></td><td>{conf_tf:.1%}</td></tr>
+    </table><br>
+    <b>🩺 Recomendación automática:</b><br>
     """
-
-
-    if prob_malignant > 0.7 or cancer_risk_score > 0.6:
+    risk = sum(probs_vit[i] * RISK_LEVELS[i]['weight'] for i in range(7))
+    if prob_malign > 0.7 or risk > 0.6:
         informe += "🚨 <b>CRÍTICO</b> – Derivación urgente a oncología dermatológica"
-    elif …
+    elif prob_malign > 0.4 or risk > 0.4:
         informe += "⚠️ <b>ALTO RIESGO</b> – Consulta con dermatólogo en 7 días"
-    elif …
-        informe += "📋 <b>RIESGO MODERADO</b> – Evaluación programada …
+    elif risk > 0.2:
+        informe += "📋 <b>RIESGO MODERADO</b> – Evaluación programada en 2-4 semanas"
     else:
         informe += "✅ <b>BAJO RIESGO</b> – Seguimiento de rutina (3-6 meses)"
+    informe += "</div>"
 
-    informe += "</div>"
     return informe, html_chart
 
-# Interfaz Gradio
 demo = gr.Interface(
    fn=analizar_lesion_combined,
-    inputs=gr.Image(type="pil" …
-    outputs=[gr.HTML(label="Informe …
-    title="Detector de Lesiones Cutáneas (ViT + Fast.ai …
-    description="Comparación entre ViT transformer (HAM10000), dos modelos Fast.ai y un modelo EfficientNetB3.",
-    flagging_mode="never"
+    inputs=gr.Image(type="pil"),
+    outputs=[gr.HTML(label="Informe"), gr.HTML(label="Gráfico ViT base")],
+    title="Detector de Lesiones Cutáneas (ViT + Fast.ai)",
 )
-
 if __name__ == "__main__":
     demo.launch()
+
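For quick verification of this change, a minimal local smoke test of the updated analizar_lesion_combined is sketched below. It is not part of the commit and rests on a few assumptions: the two Fast.ai checkpoints (ada_learn_malben.pkl, ada_learn_skin_norm2000.pkl) sit next to app.py, the two Hugging Face ViT checkpoints can be downloaded, and lesion_ejemplo.jpg is a hypothetical sample image you supply.

# Local smoke test (sketch, not part of the commit).
# Assumes app.py above is importable, its model files are available, and
# "lesion_ejemplo.jpg" is a hypothetical sample image you provide.
from PIL import Image

import app  # importing app.py loads all four models; demo.launch() stays behind the __main__ guard

img = Image.open("lesion_ejemplo.jpg").convert("RGB")
informe_html, chart_html = app.analizar_lesion_combined(img)

print(informe_html[:300])  # HTML report: per-model table plus the recommendation line
print(chart_html[:80])     # <img> tag carrying the base64-encoded ViT probability chart

Note on the thresholds: with the weights defined in RISK_LEVELS, the score risk = sum(probs_vit[i] * weight[i]) ranges from 0.1 (all probability mass on a "Bajo" class) to 1.0 (all mass on melanoma), so the 0.2 / 0.4 / 0.6 cut-offs in the recommendation logic operate on that 0.1–1.0 scale rather than on a single class probability.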