# flare / app.py
from fastapi import FastAPI
from controllers import chat_controller, test_controller, admin_controller, health_controller
from core import service_config, session_store, llm_models, INTENT_MODELS
from llm_model import LLMModel
from log import log
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import json
import os

app = FastAPI()

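# Register the API routers.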
app.include_router(health_controller.router)
app.include_router(chat_controller.router)
app.include_router(test_controller.router)
app.include_router(admin_controller.router)


def load_intent_model_on_startup(project_name, model_path):
    """Load a project's intent model, tokenizer, and label map into INTENT_MODELS."""
    log(f"🔧 Loading intent model: {project_name} ({model_path})")
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        model = AutoModelForSequenceClassification.from_pretrained(model_path)
        with open(os.path.join(model_path, "label2id.json")) as f:
            label2id = json.load(f)
        INTENT_MODELS[project_name] = {
            "model": model,
            "tokenizer": tokenizer,
            "label2id": label2id,
        }
        log(f"✅ Intent model loaded: {project_name}")
    except Exception as e:
        log(f"❌ Failed to load intent model: {project_name}, error: {e}")


if __name__ == "__main__":
    log("🌐 Starting service...")
    service_config.load(is_reload=False)

    # Build one LLM instance per configured project, plus its optional
    # intent-classification model.
    for project_name in service_config.projects:
        llm_config = service_config.get_project_llm_config(project_name)
        model_instance = LLMModel()
        model_instance.setup(service_config, llm_config)
        llm_models[project_name] = model_instance
        log(f"✅ LLM model loaded for '{project_name}'.")
        intent_model_path = llm_config.get("intent_model_path")
        if intent_model_path:
            load_intent_model_on_startup(project_name, intent_model_path)

    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)  # 7860 is the standard Hugging Face Spaces port