File size: 3,193 Bytes
16134a9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import os
import threading
import uvicorn
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import JSONResponse
from service_config import ServiceConfig
from session import SessionStore
from llm_model import LLMModel, Message
from chat_handler import handle_chat
from log import log

# Global singletons shared by every request handler in this module.
service_config = ServiceConfig()  # project configuration; (re)loaded via load()
session_store = SessionStore()  # session registry keyed by session id
llm_models = {}  # project_name -> LLMModel instance

# FastAPI application object that the route decorators below attach to.
app = FastAPI()

@app.get("/")
def health():
    """Liveness probe: confirm the service process is up and responding."""
    payload = {"status": "ok"}
    return payload

@app.post("/start_chat")
def start_chat(request: Request):
    """Open a new chat session for a configured project.

    Reads the project name from the ``project_name`` query parameter,
    rejects missing or unknown projects with HTTP 400, and returns the
    id of the freshly created session.
    """
    requested_project = request.query_params.get("project_name")

    # Guard clauses: parameter must be present and must name a known project.
    if not requested_project:
        raise HTTPException(status_code=400, detail="project_name parametresi gereklidir.")
    if requested_project not in service_config.projects:
        raise HTTPException(status_code=400, detail="Geçersiz project_name değeri.")

    new_session = session_store.create_session(requested_project)
    log(f"🆕 Yeni session başlatıldı: {new_session.session_id} (proje: {requested_project})")
    return {"session_id": new_session.session_id}

@app.post("/reload_config")
def reload_config():
    """Reload the service configuration and rebuild all project models.

    All replacement models are built into a staging dict first; the shared
    ``llm_models`` registry is only swapped once every model has loaded.
    This fixes two defects of the previous incremental update: a failure
    part-way through no longer leaves the registry mixing models from the
    old and new configuration, and projects removed from the configuration
    no longer linger as stale entries.

    Returns:
        dict with status/message on success, or a JSONResponse with HTTP 500
        carrying the error text on failure.
    """
    try:
        service_config.load(is_reload=True)
        log("🔁 Config reload sonrası tüm projeler için modeller yeniden yükleniyor...")

        # Stage new models separately; do not touch llm_models until all succeed.
        fresh_models = {}
        for project_name in service_config.projects:
            llm_config = service_config.get_project_llm_config(project_name)
            model_base = llm_config["model_base"]
            model_instance = LLMModel()
            model_instance.setup(model_base)
            fresh_models[project_name] = model_instance
            log(f"✅ '{project_name}' için model yüklendi.")

        # Swap contents in place so any external references to the
        # llm_models dict object keep seeing the updated registry.
        llm_models.clear()
        llm_models.update(fresh_models)
        return {"status": "ok", "message": "Konfigürasyon ve modeller yeniden yüklendi."}
    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)

@app.post("/chat")
async def chat(msg: Message, request: Request):
    """Route an incoming chat message to the model bound to the caller's session.

    The session is identified by the ``X-Session-ID`` header; its project
    determines which preloaded model handles the message. Delegates the
    actual work to ``handle_chat``.
    """
    session_id = request.headers.get("X-Session-ID")
    if not session_id:
        return JSONResponse(content={"error": "Session ID eksik."}, status_code=400)

    active_session = session_store.get_session(session_id)
    if not active_session:
        return JSONResponse(content={"error": "Geçersiz veya süresi dolmuş session."}, status_code=400)

    owning_project = active_session.project_name
    project_model = llm_models.get(owning_project)
    if project_model is None:
        return JSONResponse(content={"error": f"{owning_project} için model yüklenmemiş."}, status_code=500)

    return await handle_chat(msg, request, app, service_config, active_session, project_model)

if __name__ == "__main__":
    log("🌐 Servis başlatılıyor...")
    service_config.load(is_reload=False)

    # Eagerly build one model per configured project before serving traffic.
    for project_name in service_config.projects:
        base = service_config.get_project_llm_config(project_name)["model_base"]
        model = LLMModel()
        model.setup(base)
        llm_models[project_name] = model
        log(f"✅ '{project_name}' için model yüklendi.")

    uvicorn.run(app, host="0.0.0.0", port=7860)