from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from transformers import pipeline
import uvicorn
import tempfile
import os

# Initialize FastAPI
app = FastAPI()

# Enable CORS for all origins (so Render or any other client can access the API)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the pretrained speech emotion recognition pipeline
emotion_pipeline = pipeline(
    "audio-classification",
    model="ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition",
)

# Health check route
@app.get("/")
def read_root():
    return {"message": "HF Space is live!"}

# Predict route
@app.post("/predict")
async def predict_emotion(file: UploadFile = File(...)):
    tmp_path = None
    try:
        # Save the uploaded audio file to a temporary location
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            tmp.write(await file.read())
            tmp_path = tmp.name

        # Run emotion prediction; the pipeline returns labels sorted by score,
        # so the first entry is the top prediction
        result = emotion_pipeline(tmp_path)
        top_emotion = result[0]['label']
        return {"emotion": top_emotion}
    except Exception as e:
        return {"error": str(e)}
    finally:
        # Clean up the temporary file so uploads don't accumulate on disk
        if tmp_path is not None:
            os.remove(tmp_path)
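
# Local entry point. The script above imports uvicorn but never calls it, so this is a
# minimal sketch of how the app could be started directly; port 7860 is assumed here
# because it is the default port Hugging Face Spaces expects, and the host/port should
# be adjusted for other deployments.
#
# Example request once the server is running (hypothetical file name):
#   curl -X POST http://localhost:7860/predict -F "file=@sample.wav"
# Expected response shape: {"emotion": "<label>"} or {"error": "<message>"}
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)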