import os
import sys
import threading
import zipfile
from datetime import datetime

import torch
import uvicorn
from datasets import load_dataset
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from huggingface_hub import hf_hub_download
from peft import PeftModel
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline


def log(message: str) -> None:
    """Print a timestamped log line and flush stdout immediately.

    flush=True replaces the original ``os.sys.stdout.flush()`` — ``os.sys``
    only works by accident (the ``os`` module happens to import ``sys``) and
    is not a public API.
    """
    timestamp = datetime.now().strftime("%H:%M:%S")
    print(f"[{timestamp}] {message}", flush=True)


# --- Configuration constants ---------------------------------------------
# HF_TOKEN comes from the environment; may be None if unset.
HF_TOKEN = os.environ.get("HF_TOKEN")
MODEL_BASE = "UcsTurkey/kanarya-750m-fixed"
FINE_TUNE_ZIP = "trained_model_000_100.zip"
FINE_TUNE_REPO = "UcsTurkey/trained-zips"
RAG_DATA_FILE = "merged_dataset_000_100.parquet"
RAG_DATA_REPO = "UcsTurkey/turkish-general-culture-tokenized"

app = FastAPI()
chat_history = []  # list of {"user": ..., "bot": ...} turns (see /chat)
pipe = None  # global text-generation pipeline; populated after model load


class Message(BaseModel):
    # Request body schema for POST /chat.
    user_input: str


@app.get("/", response_class=HTMLResponse)
def root():
    """Serve a minimal HTML chat page that posts to ``/chat``.

    NOTE(review): the original HTML body was destroyed by whitespace
    mangling in the source; only the page title ("Fine-Tune Chat") and the
    heading ("📘 Fine-tune Chat Test") survived. This is a minimal
    reconstruction with equivalent functionality — confirm against the
    original page if it can be recovered.
    """
    return """
    <!DOCTYPE html>
    <html lang="tr">
    <head>
        <meta charset="utf-8">
        <title>Fine-Tune Chat</title>
    </head>
    <body>
        <h2>📘 Fine-tune Chat Test</h2>
        <div id="chat"></div>
        <input id="msg" type="text" size="60" placeholder="Mesajınızı yazın...">
        <button onclick="send()">Gönder</button>
        <script>
        async function send() {
            const input = document.getElementById("msg");
            const text = input.value.trim();
            if (!text) return;
            input.value = "";
            const resp = await fetch("/chat", {
                method: "POST",
                headers: {"Content-Type": "application/json"},
                body: JSON.stringify({user_input: text})
            });
            const data = await resp.json();
            const chat = document.getElementById("chat");
            chat.innerHTML += "<p><b>Sen:</b> " + text + "</p>";
            chat.innerHTML += "<p><b>Asistan:</b> " +
                (data.response || data.error || "") + "</p>";
        }
        </script>
    </body>
    </html>
    """

@app.post("/chat")
def chat(msg: Message):
    try:
        global pipe
        if pipe is None:
            log("🚫 Hata: Model henüz yüklenmedi.")
            return {"error": "Model yüklenmedi. Lütfen birkaç saniye sonra tekrar deneyin."}

        user_input = msg.user_input.strip()
        if not user_input:
            return {"error": "Boş giriş"}

        full_prompt = ""
        for turn in chat_history:
            full_prompt += f"Kullanıcı: {turn['user']}\nAsistan: {turn['bot]()_