File size: 3,509 Bytes
4843008
c7cc975
 
 
 
 
 
 
 
 
4843008
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c7cc975
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4843008
 
 
 
 
 
 
 
c7cc975
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
import json
from typing import Optional

from fastapi import APIRouter, Depends, HTTPException, Request, Response
from fastapi.responses import StreamingResponse, JSONResponse

from .auth import verify_app_secret
from .models import ChatRequest, ImageResponse
from .utils import strip_model_prefix
from .gizai import GizAI
from .logger import setup_logger

logger = setup_logger(__name__)

router = APIRouter()

@router.options("/v1/chat/completions")
@router.options("/api/v1/chat/completions")
async def chat_completions_options():
    """Answer CORS preflight requests for the chat-completions endpoints.

    Returns:
        A bodyless 200 response carrying permissive CORS headers.
    """
    # NOTE: `Response` was previously referenced without being imported,
    # which raised NameError at request time; the import is now at file top.
    return Response(
        status_code=200,
        headers={
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "POST, OPTIONS",
            "Access-Control-Allow-Headers": "Content-Type, Authorization",
        },
    )

@router.get("/v1/models")
@router.get("/api/v1/models")
async def list_models():
    """Return the catalogue of permitted models in OpenAI list format."""
    # NOTE(review): ALLOWED_MODELS is not defined in this chunk — presumably a
    # module-level list of {"id": ...} dicts defined elsewhere in the file; confirm.
    return dict(object="list", data=ALLOWED_MODELS)

@router.post("/v1/chat/completions")
@router.post("/api/v1/chat/completions")
async def chat_completions(
    request: ChatRequest, app_secret: str = Depends(verify_app_secret)
):
    """Handle an OpenAI-style chat completion request.

    Args:
        request: Parsed chat request (model, messages, stream flag, ...).
        app_secret: Injected by `verify_app_secret`; rejects unauthorized callers.

    Returns:
        A `StreamingResponse` of server-sent events when `request.stream` is
        true, otherwise the awaited non-streaming response object.

    Raises:
        HTTPException: 400 when the requested model is not in ALLOWED_MODELS.
    """
    logger.info("Entering chat_completions route")
    logger.info(f"Processing chat completion request for model: {request.model}")

    # Compute the permitted id list once instead of rebuilding it for both
    # the membership test and the error message.
    allowed_ids = [model["id"] for model in ALLOWED_MODELS]
    if request.model not in allowed_ids:
        raise HTTPException(
            status_code=400,
            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(allowed_ids)}",
        )

    if request.stream:
        logger.info("Streaming response")
        return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")
    else:
        logger.info("Non-streaming response")
        return await process_non_streaming_response(request)


# GizAI Routes
gizai_router = APIRouter(prefix="/gizai", tags=["GizAI"])

@gizai_router.options("/v1/chat/completions")
async def gizai_chat_completions_options():
    """Answer CORS preflight requests for the GizAI chat endpoint.

    Returns:
        A bodyless 200 response carrying permissive CORS headers.
    """
    # Use a plain `Response` rather than `JSONResponse`: JSONResponse with no
    # content serialized a literal `null` body, which a preflight should not
    # carry. This also matches the main router's OPTIONS handler.
    return Response(
        status_code=200,
        headers={
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "POST, OPTIONS",
            "Access-Control-Allow-Headers": "Content-Type, Authorization",
        },
    )

@gizai_router.post("/v1/chat/completions")
async def gizai_chat_completions(
    request: ChatRequest, app_secret: str = Depends(verify_app_secret)
):
    """Proxy a chat completion request to the GizAI backend.

    Args:
        request: Parsed chat request; `request.model` is resolved through
            `GizAI.get_model` before validation.
        app_secret: Injected by `verify_app_secret`.

    Returns:
        A `StreamingResponse` wrapping GizAI's async generator output.

    Raises:
        HTTPException: 400 for unsupported models, 500 on backend failure.
    """
    logger.info("Entering GizAI chat_completions route")
    logger.info(f"Processing chat completion request for model: {request.model}")

    # Validate model after resolving aliases via the provider's own mapping.
    model = GizAI.get_model(request.model)
    if model not in GizAI.models:
        raise HTTPException(
            status_code=400,
            detail=f"Model {request.model} is not supported by GizAI. Supported models are: {', '.join(GizAI.models)}",
        )

    try:
        async_generator = GizAI.create_async_generator(model=model, messages=request.messages)
        return StreamingResponse(async_generator, media_type="application/json")
    except Exception as e:
        logger.error(f"Error in GizAI chat_completions: {e}", exc_info=True)
        # Chain the original exception so the traceback shows the root cause.
        raise HTTPException(status_code=500, detail=str(e)) from e

# Mount the /gizai sub-router onto the main router so both are exposed
# when the application includes `router`.
router.include_router(gizai_router)


@router.get('/')
@router.get('/healthz')
@router.get('/ready')
@router.get('/alive')
@router.get('/status')
@router.get("/health")
def health_check(request: Request):
    """Liveness/readiness probe; always reports OK.

    Returns:
        JSON body ``{"status": "ok"}`` with a 200 status.
    """
    # Previously used the deprecated Starlette-style `@router.route(...)`
    # decorator (mixed inconsistently with `@router.get`) and an unimported
    # `Response` plus manual json.dumps. `route()` defaults to GET, so
    # registering all paths with `@router.get` preserves probe behavior,
    # and JSONResponse handles serialization and the content-type header.
    return JSONResponse(content={"status": "ok"})