import uvicorn
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import JSONResponse
from service_config import ServiceConfig
from session import SessionStore
from llm_model import LLMModel, Message
from chat_handler import handle_chat
from log import log

# Global objects
service_config = ServiceConfig()
session_store = SessionStore()
llm_models = {}  # project_name -> LLMModel instance

# FastAPI application
app = FastAPI()

@app.get("/health")
def health():
    # Simple liveness probe.
    return {"status": "ok"}

@app.post("/start_chat")
def start_chat(request: Request):
    # Create a new chat session bound to a configured project.
    project_name = request.query_params.get("project_name")
    if not project_name:
        raise HTTPException(status_code=400, detail="The project_name parameter is required.")
    if project_name not in service_config.projects:
        raise HTTPException(status_code=400, detail="Invalid project_name value.")
    session = session_store.create_session(project_name)
    log(f"🆕 New session started: {session.session_id} (project: {project_name})")
    return {"session_id": session.session_id}

def load_models():
    # Instantiate and cache an LLMModel for every configured project.
    for project_name in service_config.projects:
        llm_config = service_config.get_project_llm_config(project_name)
        model_base = llm_config["model_base"]
        model_instance = LLMModel()
        model_instance.setup(model_base)
        llm_models[project_name] = model_instance
        log(f"✅ Model loaded for '{project_name}'.")

@app.post("/reload_config")
def reload_config():
    try:
        service_config.load(is_reload=True)
        log("🔁 Reloading models for all projects after config reload...")
        load_models()
        return {"status": "ok", "message": "Configuration and models reloaded."}
    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)

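# Example (a sketch; the /reload_config path above is assumed from the
# handler name): trigger a config and model reload on a running instance:
#
#   curl -X POST "http://localhost:7860/reload_config"
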
@app.post("/chat")
async def chat(msg: Message, request: Request):
    # Resolve the session from the request header, then dispatch to the
    # project's model via the shared chat handler.
    session_id = request.headers.get("X-Session-ID")
    if not session_id:
        return JSONResponse(content={"error": "Missing session ID."}, status_code=400)
    session = session_store.get_session(session_id)
    if not session:
        return JSONResponse(content={"error": "Invalid or expired session."}, status_code=400)
    project_name = session.project_name
    llm_model = llm_models.get(project_name)
    if llm_model is None:
        return JSONResponse(content={"error": f"No model loaded for {project_name}."}, status_code=500)
    return await handle_chat(msg, request, app, service_config, session, llm_model)

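# Example client flow (a sketch: the route paths are assumed from the handler
# names, "demo-project" is a hypothetical project name, and the JSON body
# shape depends on the Message model defined in llm_model):
#
#   curl -X POST "http://localhost:7860/start_chat?project_name=demo-project"
#   # -> {"session_id": "<id>"}
#
#   curl -X POST "http://localhost:7860/chat" \
#        -H "X-Session-ID: <id>" \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Hello!"}'
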
if __name__ == "__main__":
    log("🌐 Starting service...")
    service_config.load(is_reload=False)
    load_models()
    uvicorn.run(app, host="0.0.0.0", port=7860)