# vision/app.py
import os
from fastapi import FastAPI, HTTPException, Query
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from openai import AsyncOpenAI
from typing import Optional
app = FastAPI()

class GenerateRequest(BaseModel):
    prompt: str

async def generate_ai_response(prompt: str, model: str):
    # Configuration for the OpenAI-compatible AI endpoint
    token = os.getenv("GITHUB_TOKEN")
    endpoint = os.getenv("AI_SERVER_URL", "https://models.github.ai/inference")  # Default fallback

    if not token:
        raise HTTPException(status_code=500, detail="GitHub token not configured")

    client = AsyncOpenAI(base_url=endpoint, api_key=token)

    try:
        stream = await client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are a helpful assistant named Orion, created by Abdullah Ali"},
                {"role": "user", "content": prompt}
            ],
            model=model,
            temperature=1.0,
            top_p=1.0,
            stream=True
        )

        # Yield each content delta as it arrives from the model
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content

    except Exception as err:
        # The response has already started streaming, so raising an HTTPException
        # here cannot change the status code; report the error in-band instead.
        yield f"Error: {str(err)}"
@app.post("/generate", summary="Generate AI response", response_description="Streaming AI response")
async def generate_response(
model: str = Query("default-model", description="The AI model to use"),
prompt: Optional[str] = Query(None, description="The input text prompt for the AI"),
request: Optional[GenerateRequest] = None
):
"""
Generate a streaming AI response based on the provided prompt and model.
- **model**: The AI model to use (specified as a query parameter, defaults to default-model)
- **prompt**: The input text prompt for the AI (can be in query parameter or request body)
"""
# Determine prompt source: query parameter or request body
final_prompt = prompt if prompt is not None else (request.prompt if request is not None else None)
if not final_prompt or not final_prompt.strip():
raise HTTPException(status_code=400, detail="Prompt cannot be empty")
if not model or not model.strip():
raise HTTPException(status_code=400, detail="Model cannot be empty")
return StreamingResponse(
generate_ai_response(final_prompt, model),
media_type="text/event-stream"
)

def get_app():
    return app
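
# --- Usage sketch (illustrative, not part of the app) ---
# The commands below are assumptions about how this app might be served and
# called; host, port, and model name are placeholders, not values from this repo.
#
# Set the token the app reads from the environment, then start the server:
#   export GITHUB_TOKEN=<your token>
#   uvicorn app:app --host 0.0.0.0 --port 7860
#
# Stream a response with the prompt in the JSON body:
#   curl -N -X POST "http://localhost:7860/generate?model=openai/gpt-4o-mini" \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Hello, Orion!"}'
#
# Or pass the prompt as a query parameter instead of a body:
#   curl -N -X POST "http://localhost:7860/generate?model=openai/gpt-4o-mini&prompt=Hello"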