LoloSemper committed
Commit e44c49f (verified)
1 Parent(s): da43c22

Update app.py

Files changed (1)
  1. app.py +23 -47
app.py CHANGED
@@ -1,5 +1,6 @@
 import torch
 from transformers import ViTImageProcessor, ViTForImageClassification
+from transformers import AutoFeatureExtractor, AutoModelForImageClassification
 from fastai.learner import load_learner
 from fastai.vision.core import PILImage
 from PIL import Image
@@ -10,51 +11,28 @@ import io
 import base64
 import os
 import zipfile
-import tensorflow as tf
 
-# --- Extract and load the TensorFlow model from a zip ---
-zip_path = "saved_model.zip"
-extract_dir = "saved_model"
-if not os.path.exists(extract_dir):
-    os.makedirs(extract_dir)
-    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
-        zip_ref.extractall(extract_dir)
-
-model_tf = tf.saved_model.load(extract_dir)
-TF_NUM_CLASSES = 7  # assumed to be the same classes as CLASSES
-
-# Helper function for TensorFlow inference
-def predict_tf(img: Image.Image):
-    try:
-        img_resized = img.resize((224,224))
-        img_np = np.array(img_resized) / 255.0
-        if img_np.shape[-1] == 4:
-            img_np = img_np[..., :3]
-        img_tf = tf.convert_to_tensor(img_np, dtype=tf.float32)
-        img_tf = tf.expand_dims(img_tf, axis=0)
-
-        infer = model_tf.signatures["serving_default"]
-        output = infer(img_tf)
-        pred = list(output.values())[0].numpy()[0]
-        probs = tf.nn.softmax(pred[:TF_NUM_CLASSES]).numpy()
-        return probs
-    except Exception as e:
-        print(f"Error en predict_tf: {e}")
-        return np.zeros(TF_NUM_CLASSES)
-
-# --- Load models ---
+# --- Load ViT model ---
 MODEL_NAME = "ahishamm/vit-base-HAM-10000-sharpened-patch-32"
 feature_extractor = ViTImageProcessor.from_pretrained(MODEL_NAME)
 model_vit = ViTForImageClassification.from_pretrained(MODEL_NAME)
 model_vit.eval()
+
+# --- Load Fast.ai models ---
 model_malignancy = load_learner("ada_learn_malben.pkl")
 model_norm2000 = load_learner("ada_learn_skin_norm2000.pkl")
 
+# --- Load EfficientNetB3 model from Hugging Face ---
+model_effnet = AutoModelForImageClassification.from_pretrained("syaha/skin_cancer_detection_model")
+extractor_effnet = AutoFeatureExtractor.from_pretrained("syaha/skin_cancer_detection_model")
+model_effnet.eval()
+
 CLASSES = [
     "Queratosis actínica / Bowen", "Carcinoma células basales",
     "Lesión queratósica benigna", "Dermatofibroma",
     "Melanoma maligno", "Nevus melanocítico", "Lesión vascular"
 ]
+
 RISK_LEVELS = {
     0: {'level': 'Moderado', 'color': '#ffaa00', 'weight': 0.6},
     1: {'level': 'Alto', 'color': '#ff4444', 'weight': 0.8},
@@ -94,16 +72,16 @@ def analizar_lesion_combined(img):
         pred_fast_type = "Error"
 
     try:
-        probs_tf = predict_tf(img)
-        pred_idx_tf = int(np.argmax(probs_tf))
-        confidence_tf = probs_tf[pred_idx_tf]
-        if pred_idx_tf < len(CLASSES):
-            pred_class_tf = "Maligno" if pred_idx_tf in MALIGNANT_INDICES else "Benigno"
-        else:
-            pred_class_tf = f"Desconocido"
-    except:
-        pred_class_tf = "Error"
-        confidence_tf = 0.0
+        inputs_eff = extractor_effnet(images=img, return_tensors="pt")
+        with torch.no_grad():
+            outputs_eff = model_effnet(**inputs_eff)
+        probs_eff = outputs_eff.logits.softmax(dim=-1).cpu().numpy()[0]
+        pred_idx_eff = int(np.argmax(probs_eff))
+        confidence_eff = probs_eff[pred_idx_eff]
+        pred_class_eff = model_effnet.config.id2label[str(pred_idx_eff)]
+    except Exception as e:
+        pred_class_eff = "Error"
+        confidence_eff = 0.0
 
     colors_bars = [RISK_LEVELS[i]['color'] for i in range(7)]
     fig, ax = plt.subplots(figsize=(8, 3))
@@ -128,7 +106,7 @@
     <tr><td>🤖 ViT (transformer)</td><td><b>{pred_class_vit}</b></td><td>{confidence_vit:.1%}</td></tr>
     <tr><td>🧬 Fast.ai (clasificación)</td><td><b>{pred_fast_type}</b></td><td>N/A</td></tr>
     <tr><td>⚠️ Fast.ai (malignidad)</td><td><b>{"Maligno" if prob_malignant > 0.5 else "Benigno"}</b></td><td>{prob_malignant:.1%}</td></tr>
-    <tr><td>🔬 TensorFlow (saved_model)</td><td><b>{pred_class_tf}</b></td><td>{confidence_tf:.1%}</td></tr>
+    <tr><td>🔬 EfficientNetB3 (HAM10000)</td><td><b>{pred_class_eff}</b></td><td>{confidence_eff:.1%}</td></tr>
     </table>
     <br>
    <b>🧪 Recomendación automática:</b><br>
@@ -152,12 +130,10 @@ demo = gr.Interface(
     fn=analizar_lesion_combined,
     inputs=gr.Image(type="pil", label="Sube una imagen de la lesión"),
     outputs=[gr.HTML(label="Informe combinado"), gr.HTML(label="Gráfico ViT")],
-    title="Detector de Lesiones Cutáneas (ViT + Fast.ai + TensorFlow)",
-    description="Comparación entre ViT transformer (HAM10000), dos modelos Fast.ai y un modelo TensorFlow.",
+    title="Detector de Lesiones Cutáneas (ViT + Fast.ai + EfficientNetB3)",
+    description="Comparación entre ViT transformer (HAM10000), dos modelos Fast.ai y un modelo EfficientNetB3.",
     flagging_mode="never"
 )
 
 if __name__ == "__main__":
     demo.launch()
-
-
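
A minimal standalone sketch of the new EfficientNetB3 inference path follows, useful for checking the "syaha/skin_cancer_detection_model" checkpoint outside the Gradio app. It assumes the checkpoint loads through transformers exactly as in this commit, and uses a hypothetical local file "lesion.jpg" as input. Note that transformers normally stores config.id2label with integer keys, so the lookup below accepts either key type rather than indexing only with str(pred_idx) as the committed line does.

import numpy as np
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

MODEL_ID = "syaha/skin_cancer_detection_model"  # same checkpoint as the commit
model = AutoModelForImageClassification.from_pretrained(MODEL_ID)
extractor = AutoFeatureExtractor.from_pretrained(MODEL_ID)
model.eval()

img = Image.open("lesion.jpg").convert("RGB")  # hypothetical test image
inputs = extractor(images=img, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
probs = logits.softmax(dim=-1)[0].numpy()
pred_idx = int(np.argmax(probs))

# id2label is usually keyed by int in transformers; fall back to str keys just in case.
id2label = model.config.id2label
label = id2label.get(pred_idx, id2label.get(str(pred_idx), f"class_{pred_idx}"))
print(f"{label}: {probs[pred_idx]:.1%}")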