import os
os.environ["HF_HOME"] = "/tmp/hf_cache"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
import io
import torch
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse, HTMLResponse
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
# Load model and processor
processor = AutoImageProcessor.from_pretrained("prithivMLmods/Realistic-Gender-Classification")
model = AutoModelForImageClassification.from_pretrained("prithivMLmods/Realistic-Gender-Classification")
# FastAPI app
app = FastAPI()
@app.get("/", response_class=HTMLResponse)
async def home():
    return '''
    <html>
        <body>
            <h2>Upload an Image for Gender Detection</h2>
            <form action="/predict" enctype="multipart/form-data" method="post">
                <input name="file" type="file" accept="image/*">
                <input type="submit" value="Upload">
            </form>
        </body>
    </html>
    '''
@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    # Read the uploaded file and convert it to RGB for the image processor
    image = Image.open(io.BytesIO(await file.read())).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    # Run inference without tracking gradients
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.nn.functional.softmax(logits, dim=-1).cpu().numpy()[0]
    labels = model.config.id2label
    # Map each class label to its softmax probability
    result = {labels[i]: float(probs[i]) for i in range(len(labels))}
    return JSONResponse(content=result)
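
# Local entry point (a minimal sketch): running the module directly starts a
# uvicorn server. Port 7860 is an assumption here (the default port Hugging
# Face Spaces expects); adjust the host/port for other environments.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example request against the /predict endpoint (filename and port are illustrative):
#   curl -X POST http://localhost:7860/predict -F "file=@image.jpg"
# The response is a JSON object mapping each label in model.config.id2label
# to its predicted probability.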