import gradio as gr
from PIL import Image
import torch
from transformers import AutoModelForImageClassification, AutoFeatureExtractor

# Load the model and feature extractor from the Hugging Face Hub
model = AutoModelForImageClassification.from_pretrained("AdrianRevi/Practica1Blindness")
extractor = AutoFeatureExtractor.from_pretrained("AdrianRevi/Practica1Blindness")

# Preprocess the image, run inference, and return per-class probabilities
def predict(img: Image.Image):
    inputs = extractor(images=img, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=1)[0]
    labels = model.config.id2label
    return {labels[i]: float(probs[i]) for i in range(len(labels))}

# Gradio interface
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=gr.Label(num_top_classes=3),
    examples=["examples/20068.jpg", "examples/20084.jpg"],
    title="Blindness Detection",
    description="Upload an eye image to detect the degree of blindness.",
)

if __name__ == "__main__":
    demo.launch()
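# Quick sanity check without launching the UI (a sketch; assumes the example
# images referenced above exist next to this script):
#
#     from PIL import Image
#     print(predict(Image.open("examples/20068.jpg")))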