yonadab committed
Commit 5c36b58 · verified · 1 Parent(s): 2124780

Update app.py

Files changed (1):
  1. app.py +6 -5
app.py CHANGED
@@ -2,26 +2,27 @@ import gradio as gr
 from transformers import AutoImageProcessor, AutoModel
 import torch
 from PIL import Image
+import numpy as np
 
 # Load the model only once
 processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
 model = AutoModel.from_pretrained("facebook/dinov2-base")
 model.eval()
 
-def get_embedding(image_file):
-    image = Image.open(image_file).convert("RGB")
+def get_embedding(image_np):
+    image = Image.fromarray(image_np).convert("RGB")
     inputs = processor(images=image, return_tensors="pt")
     with torch.no_grad():
         embeddings = model(**inputs).last_hidden_state[:, 0]  # CLS token
     return embeddings.squeeze().tolist()
 
-# Gradio interface for visual or programmatic (API) use
 iface = gr.Interface(
     fn=get_embedding,
-    inputs=gr.Image(type="filepath"),  # << change here
+    inputs=gr.Image(type="numpy"),  # KEY CHANGE HERE
     outputs="json",
     description="Microservice for extracting image embeddings using DINOv2."
 )
 
-iface.launch()
 iface.queue()
+iface.launch()
+
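As a rough illustration (a sketch, not part of the commit): with inputs=gr.Image(type="numpy"), Gradio hands the uploaded image to get_embedding as a NumPy array, so the updated handler can be exercised locally with any RGB array; and calling iface.queue() before iface.launch() means the queue is configured before launch(), which typically blocks the script. In the sketch below, the path "example.jpg" and the expected vector length of 768 (the hidden size of facebook/dinov2-base) are assumptions for illustration.

import numpy as np
from PIL import Image

# Hypothetical local smoke test for the updated get_embedding().
# Build the same kind of input that gr.Image(type="numpy") produces:
img_np = np.array(Image.open("example.jpg").convert("RGB"))  # "example.jpg" is a placeholder path
# Alternatively, without a file on disk, a random RGB array also works:
# img_np = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)

embedding = get_embedding(img_np)
print(len(embedding))  # expected: 768, the hidden size of facebook/dinov2-base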