from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModel
import torch
import os
app = FastAPI()
# Load Hugging Face Token
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("❌ Hugging Face API token not found! Set HF_TOKEN as an environment variable.")
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("mental/mental-bert-base-uncased", token=HF_TOKEN)
model = AutoModel.from_pretrained("mental/mental-bert-base-uncased", token=HF_TOKEN)
model.eval() # Set model to evaluation mode
# Request body schema
class TextRequest(BaseModel):
    text: str
# Helper function to compute embedding
def compute_embedding(text: str) -> list[float]:
    """Generate a sentence embedding using mean pooling on MentalBERT output."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    embedding = outputs.last_hidden_state.mean(dim=1).squeeze()
    return embedding.tolist()
# POST endpoint to return embedding
@app.post("/embed")
def get_embedding(request: TextRequest):
    text = request.text.strip()
    if not text:
        raise HTTPException(status_code=400, detail="Input text cannot be empty.")
    try:
        embedding = compute_embedding(text)
        return {"embedding": embedding}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error computing embedding: {str(e)}")
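# Example client call (a minimal sketch, not part of the original file; it assumes the
# server is running locally on port 8000, e.g. via `uvicorn app:app`, where the module
# name "app" and the port are assumptions about how this file is saved and served):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/embed",
#       json={"text": "I have been feeling anxious lately."},
#   )
#   resp.raise_for_status()
#   vector = resp.json()["embedding"]  # list of floats (768 for a BERT-base model)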