from fastapi import FastAPI, Depends # Depends might be used by root endpoint
# from fastapi.responses import JSONResponse # Not used
from fastapi.middleware.cors import CORSMiddleware
# import asyncio # Not used
# import os # Not used
# Local module imports
from auth import get_api_key # Potentially for root endpoint
from credentials_manager import CredentialManager
from vertex_ai_init import init_vertex_ai
# Routers
from routes import models_api
from routes import chat_api
# import config as app_config # Not directly used in main.py

app = FastAPI(title="OpenAI to Gemini Adapter")

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

credential_manager = CredentialManager()
app.state.credential_manager = credential_manager  # Store manager on app state

# Include API routers
app.include_router(models_api.router)
app.include_router(chat_api.router)


@app.on_event("startup")
async def startup_event():
    if await init_vertex_ai(credential_manager):  # Added await
        print("INFO: Vertex AI credential and model config initialization check completed successfully.")
    else:
        print("ERROR: Failed to initialize a fallback Vertex AI client. API will likely fail.")


@app.get("/")
async def root():
    return {
        "status": "ok",
        "message": "OpenAI to Gemini Adapter is running."
    }
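

# --- Local run sketch (not part of the original file) ---
# A minimal, hedged example of serving this app directly with uvicorn; in a
# deployed Space the server command usually lives outside this module. The
# module name "main" and port 7860 are assumptions, not taken from the source.
if __name__ == "__main__":
    import uvicorn

    # Serve the FastAPI app on all interfaces; 7860 is a commonly used default port.
    uvicorn.run(app, host="0.0.0.0", port=7860)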