# vision/app.py
import os
import logging
from typing import Optional

from fastapi import FastAPI, HTTPException, Query
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI
from pydantic import BaseModel

# Configure logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

app = FastAPI()


class GenerateRequest(BaseModel):
    """JSON body schema for POST /generate."""

    prompt: str


async def generate_ai_response(prompt: str, model: str):
    """Stream completion text for `prompt` from an OpenAI-compatible endpoint."""
    logger.debug(f"Received prompt: {prompt}, model: {model}")

    token = os.getenv("GITHUB_TOKEN")
    endpoint = os.getenv("AI_SERVER_URL", "https://models.github.ai/inference")
    if not token:
        logger.error("GitHub token not configured")
        raise HTTPException(status_code=500, detail="GitHub token not configured")

    logger.debug(f"Using endpoint: {endpoint}")
    client = AsyncOpenAI(base_url=endpoint, api_key=token)

    try:
        stream = await client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are a helpful assistant named Orion, created by Abdullah Ali"},
                {"role": "user", "content": prompt},
            ],
            model=model,
            temperature=1.0,
            top_p=1.0,
            stream=True,
        )
        async for chunk in stream:
            # Skip keep-alive chunks that carry no delta text.
            if chunk.choices and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content
    except Exception as err:
        # Once streaming has started, the response headers have already been
        # sent, so raising HTTPException here cannot produce a clean 500.
        # Surface the error as the final chunk of the stream instead.
        logger.error(f"AI generation failed: {str(err)}")
        yield f"Error: {str(err)}"
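
# A minimal sketch of consuming the generator directly, e.g. from a test or
# one-off script; the model name is illustrative, not one this endpoint is
# known to serve:
#
#     async for piece in generate_ai_response("Hello", "openai/gpt-4o"):
#         print(piece, end="", flush=True)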


@app.post("/generate", summary="Generate AI response", response_description="Streaming AI response")
async def generate_response(
    model: str = Query("default-model", description="The AI model to use"),
    prompt: Optional[str] = Query(None, description="The input text prompt for the AI"),
    request: Optional[GenerateRequest] = None,
):
    logger.debug(f"Request received - model: {model}, prompt: {prompt}, body: {request}")

    # Accept the prompt either as a query parameter or in the JSON body,
    # preferring the query parameter when both are supplied.
    final_prompt = prompt if prompt is not None else (request.prompt if request is not None else None)
    if not final_prompt or not final_prompt.strip():
        logger.error("Prompt cannot be empty")
        raise HTTPException(status_code=400, detail="Prompt cannot be empty")
    if not model or not model.strip():
        logger.error("Model cannot be empty")
        raise HTTPException(status_code=400, detail="Model cannot be empty")

    return StreamingResponse(
        generate_ai_response(final_prompt, model),
        media_type="text/event-stream",
    )
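
# A hypothetical client sketch using httpx (not a dependency of this app) to
# consume the streamed response; the host, port, and model name are
# assumptions for a default local uvicorn run:
#
#     import asyncio
#     import httpx
#
#     async def main():
#         url = "http://127.0.0.1:8000/generate"
#         params = {"model": "openai/gpt-4o", "prompt": "Hello"}
#         async with httpx.AsyncClient(timeout=None) as client:
#             async with client.stream("POST", url, params=params) as resp:
#                 async for text in resp.aiter_text():
#                     print(text, end="", flush=True)
#
#     asyncio.run(main())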


def get_app():
    """Application factory, usable by ASGI servers that support one (e.g. uvicorn's --factory flag)."""
    return app
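

# A minimal sketch for running the app directly; assumes uvicorn (the usual
# ASGI server for FastAPI) is installed. Hosted platforms such as Hugging Face
# Spaces typically launch the app themselves, so this is for local use.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)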