ciyidogan committed on
Commit
f1a2f26
·
verified ·
1 Parent(s): 16134a9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -80
app.py CHANGED
@@ -1,80 +1,80 @@
1
- import os
2
- import threading
3
- import uvicorn
4
- from fastapi import FastAPI, Request, HTTPException
5
- from fastapi.responses import JSONResponse
6
- from service_config import ServiceConfig
7
- from session import SessionStore
8
- from llm_model import LLMModel, Message
9
- from chat_handler import handle_chat
10
- from log import log
11
-
12
- # Global nesneler
13
- service_config = ServiceConfig()
14
- session_store = SessionStore()
15
- llm_models = {} # project_name -> LLMModel instance
16
-
17
- # FastAPI uygulaması
18
- app = FastAPI()
19
-
20
- @app.get("/")
21
- def health():
22
- return {"status": "ok"}
23
-
24
- @app.post("/start_chat")
25
- def start_chat(request: Request):
26
- project_name = request.query_params.get("project_name")
27
- if not project_name:
28
- raise HTTPException(status_code=400, detail="project_name parametresi gereklidir.")
29
-
30
- if project_name not in service_config.projects:
31
- raise HTTPException(status_code=400, detail="Geçersiz project_name değeri.")
32
-
33
- session = session_store.create_session(project_name)
34
- log(f"🆕 Yeni session başlatıldı: {session.session_id} (proje: {project_name})")
35
- return {"session_id": session.session_id}
36
-
37
- @app.post("/reload_config")
38
- def reload_config():
39
- try:
40
- service_config.load(is_reload=True)
41
- log("🔁 Config reload sonrası tüm projeler için modeller yeniden yükleniyor...")
42
- for project_name in service_config.projects:
43
- llm_config = service_config.get_project_llm_config(project_name)
44
- model_base = llm_config["model_base"]
45
- model_instance = LLMModel()
46
- model_instance.setup(model_base)
47
- llm_models[project_name] = model_instance
48
- log(f"✅ '{project_name}' için model yüklendi.")
49
- return {"status": "ok", "message": "Konfigürasyon ve modeller yeniden yüklendi."}
50
- except Exception as e:
51
- return JSONResponse(content={"error": str(e)}, status_code=500)
52
-
53
- @app.post("/chat")
54
- async def chat(msg: Message, request: Request):
55
- session_id = request.headers.get("X-Session-ID")
56
- if not session_id:
57
- return JSONResponse(content={"error": "Session ID eksik."}, status_code=400)
58
-
59
- session = session_store.get_session(session_id)
60
- if not session:
61
- return JSONResponse(content={"error": "Geçersiz veya süresi dolmuş session."}, status_code=400)
62
-
63
- project_name = session.project_name
64
- llm_model = llm_models.get(project_name)
65
- if llm_model is None:
66
- return JSONResponse(content={"error": f"{project_name} için model yüklenmemiş."}, status_code=500)
67
-
68
- return await handle_chat(msg, request, app, service_config, session, llm_model)
69
-
70
- if __name__ == "__main__":
71
- log("🌐 Servis başlatılıyor...")
72
- service_config.load(is_reload=False)
73
- for project_name in service_config.projects:
74
- llm_config = service_config.get_project_llm_config(project_name)
75
- model_base = llm_config["model_base"]
76
- model_instance = LLMModel()
77
- model_instance.setup(model_base)
78
- llm_models[project_name] = model_instance
79
- log(f"✅ '{project_name}' için model yüklendi.")
80
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
1
+ import os
2
+ import threading
3
+ import uvicorn
4
+ from fastapi import FastAPI, Request, HTTPException
5
+ from fastapi.responses import JSONResponse
6
+ from service_config import ServiceConfig
7
+ from session import SessionStore
8
+ from llm_model import LLMModel, Message
9
+ from chat_handler import handle_chat
10
+ from log import log
11
+
12
+ # Global nesneler
13
+ service_config = ServiceConfig()
14
+ session_store = SessionStore()
15
+ llm_models = {} # project_name -> LLMModel instance
16
+
17
+ # FastAPI uygulaması
18
+ app = FastAPI()
19
+
20
+ @app.get("/")
21
+ def health():
22
+ return {"status": "ok"}
23
+
24
@app.post("/start_chat")
def start_chat(request: Request):
    """Create a new chat session for the requested project.

    Expects a ``project_name`` query parameter; returns the new session id.
    Raises HTTP 400 when the parameter is missing or names an unknown project.
    """
    requested_project = request.query_params.get("project_name")

    # Guard clauses: reject missing or unknown project names up front.
    if not requested_project:
        raise HTTPException(status_code=400, detail="project_name parametresi gereklidir.")
    if requested_project not in service_config.projects:
        raise HTTPException(status_code=400, detail="Geçersiz project_name değeri.")

    new_session = session_store.create_session(requested_project)
    log(f"🆕 Yeni session başlatıldı: {new_session.session_id} (proje: {requested_project})")
    return {"session_id": new_session.session_id}
36
+
37
@app.post("/reload_config")
def reload_config():
    """Reload the service configuration and rebuild every project's model.

    Returns a success payload on completion, or a 500 JSON response carrying
    the error text if configuration loading or any model setup fails.
    """
    try:
        service_config.load(is_reload=True)
        log("🔁 Config reload sonrası tüm projeler için modeller yeniden yükleniyor...")
        for project_name in service_config.projects:
            llm_config = service_config.get_project_llm_config(project_name)
            # A fresh instance per project so a failed setup() cannot leave a
            # half-initialized model registered in llm_models.
            # (Dropped the unused `model_base = llm_config["model_base"]`
            # local left over from the old setup() signature.)
            model_instance = LLMModel()
            model_instance.setup(service_config, llm_config)
            llm_models[project_name] = model_instance
            log(f"✅ '{project_name}' için model yüklendi.")
        return {"status": "ok", "message": "Konfigürasyon ve modeller yeniden yüklendi."}
    except Exception as e:
        # Surface the failure to the caller instead of crashing the worker.
        return JSONResponse(content={"error": str(e)}, status_code=500)
52
+
53
@app.post("/chat")
async def chat(msg: Message, request: Request):
    """Route an incoming chat message to the session's project model.

    Resolves the session from the ``X-Session-ID`` header, looks up the
    loaded model for that session's project, and delegates the request to
    handle_chat. Returns a JSON error (400/500) when any lookup fails.
    """
    sid = request.headers.get("X-Session-ID")
    if not sid:
        return JSONResponse(content={"error": "Session ID eksik."}, status_code=400)

    active_session = session_store.get_session(sid)
    if not active_session:
        return JSONResponse(content={"error": "Geçersiz veya süresi dolmuş session."}, status_code=400)

    project = active_session.project_name
    model = llm_models.get(project)
    if model is None:
        return JSONResponse(content={"error": f"{project} için model yüklenmemiş."}, status_code=500)

    return await handle_chat(msg, request, app, service_config, active_session, model)
69
+
70
if __name__ == "__main__":
    log("🌐 Servis başlatılıyor...")
    service_config.load(is_reload=False)
    # Pre-load one model per configured project before accepting traffic,
    # mirroring the rebuild loop in reload_config().
    # (Dropped the unused `model_base = llm_config["model_base"]` local left
    # over from the old setup() signature.)
    for project_name in service_config.projects:
        llm_config = service_config.get_project_llm_config(project_name)
        model_instance = LLMModel()
        model_instance.setup(service_config, llm_config)
        llm_models[project_name] = model_instance
        log(f"✅ '{project_name}' için model yüklendi.")
    uvicorn.run(app, host="0.0.0.0", port=7860)