yonadab committed
Commit c82a3d1 (verified)
1 Parent(s): f2cd8de

Update app.py

Files changed (1): app.py (+34 -9)
app.py CHANGED
@@ -1,40 +1,65 @@
- from fastapi import FastAPI, File, UploadFile
  from fastapi.middleware.cors import CORSMiddleware
  from transformers import AutoImageProcessor, AutoModel
  from PIL import Image
  import torch
  import io

  app = FastAPI()

- # CORS (for local testing or cross-origin production)
  app.add_middleware(
      CORSMiddleware,
-     allow_origins=["*"],  # Change this in production
      allow_credentials=True,
      allow_methods=["*"],
      allow_headers=["*"],
  )

- # Load the model and processor
  processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
  model = AutoModel.from_pretrained("facebook/dinov2-base")
  model.eval()

  @app.post("/embedding")
- async def get_embedding(file: UploadFile = File(...)):
      try:
-         image_bytes = await file.read()
          image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
-
          inputs = processor(images=image, return_tensors="pt")
          with torch.no_grad():
              outputs = model(**inputs)

-         # Average of the embeddings over all tokens (without CLS)
          embedding = outputs.last_hidden_state.mean(dim=1).squeeze().tolist()

-         return {"embedding": embedding}

      except Exception as e:
          return {"error": str(e)}
+ from fastapi import FastAPI, File, UploadFile, Form, HTTPException
  from fastapi.middleware.cors import CORSMiddleware
  from transformers import AutoImageProcessor, AutoModel
  from PIL import Image
  import torch
+ import uuid
  import io

  app = FastAPI()

+ # Enable CORS if you need it
  app.add_middleware(
      CORSMiddleware,
+     allow_origins=["*"],
      allow_credentials=True,
      allow_methods=["*"],
      allow_headers=["*"],
  )

+ # Load the model
  processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
  model = AutoModel.from_pretrained("facebook/dinov2-base")
  model.eval()

+ # Temporary in-memory storage for images (you could use a database instead)
+ temp_images = {}
+ event_ids = {}
+
+ # Step 1: upload an image together with an event_id
+ @app.post("/upload")
+ async def upload_image(file: UploadFile = File(...), event_id: str = Form(...)):
+     try:
+         content = await file.read()
+         image_id = str(uuid.uuid4())
+         temp_images[image_id] = content
+         event_ids[image_id] = event_id
+         return {"image_id": image_id, "event_id": event_id}
+     except Exception as e:
+         return {"error": str(e)}
+
+ # Step 2: get the embedding for a given image_id
  @app.post("/embedding")
+ async def get_embedding(image_id: str = Form(...)):
+     if image_id not in temp_images:
+         raise HTTPException(status_code=404, detail="image_id not found")
+
+     event_id = event_ids[image_id]
+     image_bytes = temp_images[image_id]
+
      try:
          image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
          inputs = processor(images=image, return_tensors="pt")
          with torch.no_grad():
              outputs = model(**inputs)

+         # Average over all tokens (you could switch to the CLS token instead)
          embedding = outputs.last_hidden_state.mean(dim=1).squeeze().tolist()

+         return {
+             "event_id": event_id,
+             "embedding": embedding
+         }

      except Exception as e:
          return {"error": str(e)}