import os, threading, uvicorn, time, traceback, random, json, asyncio, uuid
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, JSONResponse
from transformers import AutoTokenizer, AutoModelForSequenceClassification

import intent_test_runner
from service_config import ServiceConfig
import intent, llm_model
from log import log
from chat_handler_debug import handle_chat

s_config = ServiceConfig()
s_config.setup_environment()

# === FastAPI ===
app = FastAPI()
chat_history = []
@app.get("/health")  # route path assumed for a standard health-check endpoint
def health():
    return {"status": "ok"}
@app.post("/run_tests")  # method assumed; path taken from the log message below
def run_tests():
    log("🚦 /run_tests çağrıldı. Testler başlatılıyor...")
    threading.Thread(target=intent_test_runner.run_all_tests, daemon=True).start()
    return {"status": "running", "message": "Test süreci başlatıldı."}
@app.get("/", response_class=HTMLResponse)  # path assumed for the HTML entry page
def root():
    # Generate a new session ID
    session_id = str(uuid.uuid4())
    session_info = {
        "session_id": session_id,
        "variables": {},
        "auth_tokens": {},
        "last_intent": None,
        "awaiting_variable": None
    }
    # Create the session store on first use, then register the new session
    if not hasattr(app.state, "session_store"):
        app.state.session_store = {}
    app.state.session_store[session_id] = session_info
    log(f"🌐 /start ile yeni session başlatıldı: {session_id}")
    # HTML page with the session_id embedded
    return f"""
    <html><body>
    <h2>Turkcell LLM Chat</h2>
    <textarea id='input' rows='4' cols='60'></textarea><br>
    <button onclick='send()'>Gönder</button><br><br>
    <label>Model Cevabı:</label><br>
    <textarea id='output' rows='10' cols='80' readonly style='white-space: pre-wrap;'></textarea>
    <script>
    const sessionId = "{session_id}";
    localStorage.setItem("session_id", sessionId);
    async function send() {{
        const input = document.getElementById("input").value;
        const res = await fetch('/chat', {{
            method: 'POST',
            headers: {{
                'Content-Type': 'application/json',
                'X-Session-ID': sessionId
            }},
            body: JSON.stringify({{ user_input: input }})
        }});
        const data = await res.json();
        document.getElementById('output').value = data.reply || data.response || data.error || 'Hata oluştu.';
    }}
    </script>
    </body></html>
    """
@app.post("/start")  # path assumed from the function name and the log in root()
def start_chat():
    if llm_model.model is None or llm_model.tokenizer is None:
        return {"error": "Model yüklenmedi."}
    if not hasattr(app.state, "session_store"):
        app.state.session_store = {}
    session_id = str(uuid.uuid4())
    session_info = {
        "session_id": session_id,
        "variables": {},
        "auth_tokens": {},
        "last_intent": None,
        "awaiting_variable": None
    }
    app.state.session_store[session_id] = session_info
    log(f"🆕 Yeni session başlatıldı: {session_id}")
    return {"session_id": session_id}
@app.post("/train_intents")  # method and path taken from the log message below
def train_intents(train_input: intent.TrainInput):
    log("📥 POST /train_intents çağrıldı.")
    intents = train_input.intents
    # Use a distinct loop variable so the `intent` module is not shadowed
    s_config.INTENT_DEFINITIONS = {item["name"]: item for item in intents}
    threading.Thread(target=lambda: intent.background_training(intents, s_config), daemon=True).start()
    return {"status": "accepted", "message": "Intent eğitimi arka planda başlatıldı."}
@app.post("/load_intent_model")  # method and path assumed
def load_intent_model():
    try:
        intent.INTENT_TOKENIZER = AutoTokenizer.from_pretrained(s_config.INTENT_MODEL_PATH)
        intent.INTENT_MODEL = AutoModelForSequenceClassification.from_pretrained(s_config.INTENT_MODEL_PATH)
        with open(os.path.join(s_config.INTENT_MODEL_PATH, "label2id.json")) as f:
            intent.LABEL2ID = json.load(f)
        return {"status": "ok", "message": "Intent modeli yüklendi."}
    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)
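
# Reloads the fine-tuned intent classifier from INTENT_MODEL_PATH into the
# intent module's globals; label2id.json presumably maps intent labels to the
# classifier's output class ids.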
@app.post("/chat")  # path and method taken from the fetch() call in the page above
async def chat(msg: llm_model.Message, request: Request):
    return await handle_chat(msg, request, app, s_config)
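
# Startup: the LLM is loaded in a background thread and uvicorn is run in a
# daemon thread, while the main thread sleeps forever to keep the process
# alive (a common pattern for hosted Spaces-style deployments).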
threading.Thread(target=llm_model.setup_model, kwargs={"s_config": s_config}, daemon=True).start()
threading.Thread(target=lambda: uvicorn.run(app, host="0.0.0.0", port=7860), daemon=True).start()

while True:
    time.sleep(60)