from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline

app = FastAPI(title="SAM_MedTesting")

# Load the model once at startup so every request reuses the same pipeline
generator = pipeline("text-generation", model="gpt2")


class GenerationRequest(BaseModel):
    prompt: str
    max_new_tokens: int = 50


class GenerationResponse(BaseModel):
    generated_text: str


@app.post("/generate", response_model=GenerationResponse)
def generate(req: GenerationRequest):
    # Pass max_new_tokens (tokens generated beyond the prompt) rather than
    # max_length, which would also count the prompt tokens.
    out = generator(req.prompt, max_new_tokens=req.max_new_tokens, do_sample=True)
    return GenerationResponse(generated_text=out[0]["generated_text"])


@app.get("/health")
def health():
    # Simple liveness probe
    return {"status": "ok"}