import os
from fastapi import FastAPI, HTTPException, Query
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI

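# FastAPI service that proxies chat requests to the GitHub Models API and
# streams the model's reply back to the caller.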
app = FastAPI()

async def generate_ai_response(prompt: str, model: str):
    """Yield completion tokens streamed from the GitHub Models endpoint."""
    token = os.getenv("GITHUB_TOKEN")
    if not token:
        # Raising HTTPException here would only surface after the streaming
        # response has already started, so report the problem in-band and stop.
        yield "Error: GitHub token not configured"
        return
    
    endpoint = "https://models.github.ai/inference"
    client = AsyncOpenAI(base_url=endpoint, api_key=token)

    try:
        stream = await client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are a helpful assistant named Orion and made by Abdullah Ali"},
                {"role": "user", "content": prompt}
            ],
            model=model,
            temperature=1.0,
            top_p=1.0,
            stream=True
        )

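        # Forward each non-empty content delta to the caller as it arrives.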
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content

    except Exception as err:
        # By the time an error occurs the response may already be streaming,
        # so an HTTPException raised here cannot change the status code;
        # report the failure in-band instead of raising after a yield.
        yield f"Error: {err}"

@app.post("/generate")
async def generate_response(
    prompt: str = Query(...),
    model: str = Query(...)
):
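    """Stream the assistant's reply for the given prompt and model."""
    # Query(...) already makes both parameters required; this guard only
    # rejects explicitly empty values.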
    if not prompt or not model:
        raise HTTPException(status_code=400, detail="Prompt and model must be provided")
    
    # The generator yields raw text chunks rather than SSE "data:" frames,
    # so stream them as plain text.
    return StreamingResponse(
        generate_ai_response(prompt, model),
        media_type="text/plain"
    )

def get_app():
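    """Return the module-level FastAPI application instance."""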
    return app