Spaces:
Running
Running
Delete inference_test.py
Browse files- inference_test.py +0 -133
inference_test.py
DELETED
@@ -1,133 +0,0 @@
|
|
1 |
-
import os, threading, uvicorn, time, traceback, random, json, asyncio, uuid

from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, JSONResponse
from transformers import AutoTokenizer, AutoModelForSequenceClassification

import intent_test_runner
from service_config import ServiceConfig
# FIX: original line was "import intent, intent, llm_model" -- 'intent' was
# listed twice; the duplicate is harmless at runtime but misleading.
import intent, llm_model
from log import log
from chat_handler_debug import handle_chat
from llm_model import get_model, get_tokenizer

# Shared service configuration: model paths, intent definitions, env setup.
s_config = ServiceConfig()
s_config.setup_environment()

# === FastAPI application and module-level chat history buffer ===
app = FastAPI()
chat_history = []
19 |
-
@app.get("/")
def health():
    """Liveness probe: confirm the service process is up and responding."""
    return {"status": "ok"}
|
22 |
-
|
23 |
-
import uuid  # redundant: uuid is already imported at the top of this file
|
24 |
-
|
25 |
-
@app.post("/run_tests", status_code=202)
def run_tests():
    """Launch the full intent test suite on a background daemon thread.

    Fire-and-forget: responds 202 immediately; test progress is only
    visible through the log output.
    """
    log("🚦 /run_tests çağrıldı. Testler başlatılıyor...")
    worker = threading.Thread(target=intent_test_runner.run_all_tests, daemon=True)
    worker.start()
    return {"status": "running", "message": "Test süreci başlatıldı."}
|
30 |
-
|
31 |
-
@app.get("/start", response_class=HTMLResponse)
def root():
    """Serve a minimal chat UI bound to a freshly created session.

    Generates a new session id, registers an empty session record in
    ``app.state.session_store``, and returns an HTML page with the id
    embedded so the browser echoes it back via the ``X-Session-ID`` header.
    """
    # Generate a new session ID
    session_id = str(uuid.uuid4())
    session_info = {
        "session_id": session_id,
        "variables": {},
        "auth_tokens": {},
        "last_intent": None,
        "awaiting_variable": None
    }

    # Lazily create the session store on first use
    if not hasattr(app.state, "session_store"):
        app.state.session_store = {}
    app.state.session_store[session_id] = session_info

    log(f"🌐 /start ile yeni session başlatıldı: {session_id}")

    # HTML page with the session_id embedded (doubled braces escape JS blocks
    # inside the f-string)
    return f"""
    <html><body>
    <h2>Turkcell LLM Chat</h2>
    <textarea id='input' rows='4' cols='60'></textarea><br>
    <button onclick='send()'>Gönder</button><br><br>
    <label>Model Cevabı:</label><br>
    <textarea id='output' rows='10' cols='80' readonly style='white-space: pre-wrap;'></textarea>
    <script>
    const sessionId = "{session_id}";
    localStorage.setItem("session_id", sessionId);
    async function send() {{
        const input = document.getElementById("input").value;
        const res = await fetch('/chat', {{
            method: 'POST',
            headers: {{
                'Content-Type': 'application/json',
                'X-Session-ID': sessionId
            }},
            body: JSON.stringify({{ user_input: input }})
        }});
        const data = await res.json();
        document.getElementById('output').value = data.reply || data.response || data.error || 'Hata oluştu.';
    }}
    </script>
    </body></html>
    """
|
77 |
-
|
78 |
-
@app.post("/start_chat")
def start_chat():
    """Create a new chat session and return its id.

    Refuses with an error payload while the model/tokenizer are still
    loading; otherwise registers an empty session record in
    ``app.state.session_store``.
    """
    # The LLM loads on a background thread at startup -- guard against
    # sessions being opened before it is ready.
    if get_model() is None or get_tokenizer() is None:
        return {"error": "Model yüklenmedi."}

    store = getattr(app.state, "session_store", None)
    if store is None:
        store = {}
        app.state.session_store = store

    new_session_id = str(uuid.uuid4())
    store[new_session_id] = {
        "session_id": new_session_id,
        "variables": {},
        "auth_tokens": {},
        "last_intent": None,
        "awaiting_variable": None,
    }
    log(f"🆕 Yeni session başlatıldı: {new_session_id}")
    return {"session_id": new_session_id}
|
97 |
-
|
98 |
-
@app.post("/train_intents", status_code=202)
def train_intents(train_input: intent.TrainInput):
    """Accept intent definitions and start training in the background.

    Stores the submitted intent definitions (and optional data formats) on
    the shared service config, spawns a daemon thread running
    ``intent.background_training``, and responds 202 immediately.
    """
    log("📥 POST /train_intents çağrıldı.")

    intents = train_input.intents
    # data_formats is optional on the request model; default to empty.
    data_formats = getattr(train_input, "data_formats", [])

    # FIX: loop variable renamed from 'intent' to 'item' -- the original
    # shadowed the imported 'intent' module inside the comprehension.
    s_config.INTENT_DEFINITIONS = {item["name"]: item for item in intents}
    s_config.DATA_FORMATS = data_formats  # DATA_FORMATS is set here

    threading.Thread(
        target=lambda: intent.background_training(intents, s_config),
        daemon=True
    ).start()

    return {"status": "accepted", "message": "Intent eğitimi arka planda başlatıldı."}
|
114 |
-
|
115 |
-
@app.post("/load_intent_model")
def load_intent_model():
    """(Re)load the fine-tuned intent classifier from disk.

    Populates ``intent.INTENT_TOKENIZER``, ``intent.INTENT_MODEL`` and
    ``intent.LABEL2ID`` from ``s_config.INTENT_MODEL_PATH``; returns a 500
    JSON error payload if any part of the load fails.
    """
    model_path = s_config.INTENT_MODEL_PATH
    try:
        intent.INTENT_TOKENIZER = AutoTokenizer.from_pretrained(model_path)
        intent.INTENT_MODEL = AutoModelForSequenceClassification.from_pretrained(model_path)
        label_map_file = os.path.join(model_path, "label2id.json")
        with open(label_map_file) as f:
            intent.LABEL2ID = json.load(f)
    except Exception as e:
        # Surface the load failure to the caller rather than crashing the app.
        return JSONResponse(content={"error": str(e)}, status_code=500)
    return {"status": "ok", "message": "Intent modeli yüklendi."}
|
125 |
-
|
126 |
-
@app.post("/chat")
async def chat(msg: llm_model.Message, request: Request):
    """Chat endpoint: delegate full request handling to ``handle_chat``."""
    return await handle_chat(msg, request, app, s_config)
|
129 |
-
|
130 |
-
# Load the LLM on a background thread so the HTTP server starts immediately.
threading.Thread(target=llm_model.setup_model, kwargs={"s_config": s_config}, daemon=True).start()
# Run uvicorn on a daemon thread; port 7860 is the Hugging Face Spaces default.
threading.Thread(target=lambda: uvicorn.run(app, host="0.0.0.0", port=7860), daemon=True).start()
# Keep the main thread alive -- both worker threads are daemons and would be
# killed if the main thread exited.
while True:
    time.sleep(60)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|