# NOTE(review): non-Python page-scrape residue (Spaces UI header, commit hashes,
# line-number gutter) was present here and has been commented out — it broke the
# module's syntax and carried no program content.
from fastapi import APIRouter, Request
from core import service_config, llm_models
from llm_model import LLMModel
from intent_utils import background_training
from log import log
import json, os, shutil, threading
router = APIRouter()


def _train_and_load_intents(project_name, temp_instance, intents, llm_config, temp_path):
    """Train the intent classifier into *temp_path*, then load it into *temp_instance*.

    Extracted because this sequence was duplicated verbatim for the
    new-project and changed-intents paths.
    """
    intent_model_path = os.path.join(temp_path, "intent", "trained_model")
    background_training(
        project_name,
        intents,
        llm_config["intent_model_id"],
        intent_model_path,
        # NOTE(review): "treshold" (sic) is the actual config key used
        # project-wide — do not "fix" the spelling here.
        llm_config["train_confidence_treshold"],
    )
    temp_instance.load_intent_model(intent_model_path)


def _activate_project(project_name, temp_path, project_path, temp_instance, incoming_project):
    """Swap the prepared temp directory into place and register the new
    instance/config in memory.

    Extracted because this sequence was duplicated verbatim for the
    new-project and update paths.
    """
    if os.path.exists(project_path):
        shutil.rmtree(project_path)
    shutil.copytree(temp_path, project_path)
    llm_models[project_name] = temp_instance
    service_config.projects[project_name] = incoming_project
    # Fix: previously the temp dir was left behind on success and only
    # reclaimed by the *next* reload; clean it up eagerly (best effort).
    shutil.rmtree(temp_path, ignore_errors=True)


@router.post("/reload_config")
async def reload_config(request: Request):
    """Accept a new service config and reload one project in the background.

    Expects a JSON body with ``project_name`` and ``service_config`` keys.
    Returns immediately with ``status: accepted``; the heavy lifting
    (model setup, intent training, directory swap) runs in a daemon thread
    and reports progress via ``log``.
    """
    body = await request.json()
    project_name = body.get("project_name")
    new_config_data = body.get("service_config")
    if not project_name or not new_config_data:
        return {"error": "project_name ve service_config gereklidir."}

    def background_reload():
        # Runs off the event loop; all outcomes are reported through log().
        try:
            current_project = service_config.projects.get(project_name)
            incoming_project = new_config_data.get("projects", {}).get(project_name)
            if not incoming_project:
                log(f"❌ '{project_name}' yeni config içinde bulunamadı, işlem durduruldu.")
                return

            project_path = f"/data/projects/{project_name}"
            temp_path = f"/data/projects/{project_name}_temp"
            # Start from a clean staging directory (a stale one may remain
            # from a previously failed reload).
            if os.path.exists(temp_path):
                shutil.rmtree(temp_path)
            os.makedirs(temp_path, exist_ok=True)

            llm_config = incoming_project["llm"]
            intents = incoming_project["intents"]
            temp_instance = LLMModel()

            # New project: full setup + intent training, then activate.
            if current_project is None:
                log(f"🆕 Yeni proje '{project_name}' tespit edildi, yükleme başlatılıyor...")
                temp_instance.setup(service_config, llm_config, temp_path)
                _train_and_load_intents(project_name, temp_instance, intents, llm_config, temp_path)
                _activate_project(project_name, temp_path, project_path, temp_instance, incoming_project)
                log(f"✅ Yeni proje '{project_name}' başarıyla yüklendi ve belleğe alındı.")
                return

            # Existing project: skip entirely when nothing changed.
            if current_project == incoming_project:
                log(f"ℹ️ '{project_name}' için değişiklik bulunamadı, işlem atlandı.")
                return

            log(f"🔄 '{project_name}' güncellemesi tespit edildi, güncelleme başlatılıyor...")

            # Reload the base model only if it changed; otherwise reuse the
            # already-loaded model/tokenizer from the live instance.
            if current_project["llm"]["model_base"] != llm_config["model_base"]:
                temp_instance.setup(service_config, llm_config, temp_path)
            else:
                temp_instance.model = llm_models[project_name].model
                temp_instance.tokenizer = llm_models[project_name].tokenizer

            # Retrain intents only if they changed; otherwise reuse the live
            # intent model, tokenizer and label map.
            if current_project["intents"] != intents:
                _train_and_load_intents(project_name, temp_instance, intents, llm_config, temp_path)
            else:
                temp_instance.intent_model = llm_models[project_name].intent_model
                temp_instance.intent_tokenizer = llm_models[project_name].intent_tokenizer
                temp_instance.intent_label2id = llm_models[project_name].intent_label2id

            _activate_project(project_name, temp_path, project_path, temp_instance, incoming_project)
            log(f"✅ '{project_name}' güncellemesi tamamlandı ve belleğe alındı.")
        except Exception as e:
            log(f"❌ reload_config background hatası: {e}")

    # Kick off the reload without blocking the HTTP response.
    threading.Thread(target=background_reload, daemon=True).start()
    return {
        "status": "accepted",
        "message": f"'{project_name}' için güncelleme arka planda başlatıldı. İşlem loglardan takip edilebilir."
    }