# Import libraries
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import pipeline
import uvicorn

# Initialize FastAPI app
app = FastAPI()

# Load the Hugging Face question-answering model
# (the model is a PyTorch checkpoint, so no extra framework flags are needed)
try:
    qa_pipeline = pipeline(
        "question-answering",
        model="distilbert-base-uncased-distilled-squad",
    )
except Exception as e:
    raise RuntimeError(f"Error loading model: {e}") from e

# Define request and response models
class ChatRequest(BaseModel):
    question: str
    context: str

class ChatResponse(BaseModel):
    answer: str

# Define the /chat endpoint
@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
    try:
        result = qa_pipeline(question=request.question, context=request.context)
        return ChatResponse(answer=result["answer"])
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# Run the FastAPI server
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
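
# --- Example client (a minimal sketch, not part of the original script) ---
# Assumes the server above is already running on localhost:8000 and that the
# `requests` package is installed; the question/context strings are made up
# purely for illustration.
#
# import requests
#
# payload = {
#     "question": "What does FastAPI provide?",
#     "context": "FastAPI is a modern Python web framework for building APIs quickly.",
# }
# response = requests.post("http://localhost:8000/chat", json=payload)
# print(response.json())  # expected shape: {"answer": "..."}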