import os

from fastapi import FastAPI, HTTPException, Query
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI

app = FastAPI()


async def generate_ai_response(prompt: str, model: str):
    """Stream completion tokens from the GitHub Models inference endpoint."""
    token = os.getenv("GITHUB_TOKEN")
    endpoint = "https://models.github.ai/inference"
    client = AsyncOpenAI(base_url=endpoint, api_key=token)

    try:
        stream = await client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": "You are a helpful assistant named Orion and made by Abdullah Ali",
                },
                {"role": "user", "content": prompt},
            ],
            model=model,
            temperature=1.0,
            top_p=1.0,
            stream=True,
        )
        # Forward only non-empty deltas; some chunks (e.g. the final one)
        # carry no content.
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content
    except Exception as err:
        # By the time this generator is being consumed, the 200 status line
        # has already been sent, so raising HTTPException here cannot change
        # the response code. Report the failure in-band instead.
        yield f"Error: {err}"


@app.post("/generate")
async def generate_response(
    prompt: str = Query(...),
    model: str = Query(...),
):
    # Query(...) already makes both parameters required; this guards
    # against empty strings.
    if not prompt or not model:
        raise HTTPException(status_code=400, detail="Prompt and model must be provided")
    # Validate configuration here, before streaming begins, so the client
    # receives a proper 500 instead of a truncated 200 stream.
    if not os.getenv("GITHUB_TOKEN"):
        raise HTTPException(status_code=500, detail="GitHub token not configured")
    return StreamingResponse(
        generate_ai_response(prompt, model),
        media_type="text/event-stream",
    )


def get_app():
    return app
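
# Usage sketch (assumptions, not part of the app above: uvicorn is
# installed, this file is saved as main.py, GITHUB_TOKEN holds a GitHub
# token with Models access, and "openai/gpt-4o" is a model id available
# to that token -- substitute whichever model you actually have):
#
#   export GITHUB_TOKEN=<your-token>
#   uvicorn main:app --reload
#
#   curl -N -X POST \
#     "http://127.0.0.1:8000/generate?prompt=hello&model=openai/gpt-4o"
#
# curl's -N flag disables output buffering so tokens print as they arrive
# from the text/event-stream response.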