import os
import time
import threading
import zipfile
import random
from datetime import datetime

import torch
import uvicorn
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
from datasets import load_dataset
from peft import PeftModel
from huggingface_hub import hf_hub_download

# ✅ Timestamped log helper (flushed immediately so output appears in real time)
def log(message):
    timestamp = datetime.now().strftime("%H:%M:%S")
    print(f"[{timestamp}] {message}", flush=True)

# ✅ Constants
HF_TOKEN = os.environ.get("HF_TOKEN")
MODEL_BASE = "UcsTurkey/kanarya-750m-fixed"
FINE_TUNE_ZIP = "trained_model_002_005.zip"
FINE_TUNE_REPO = "UcsTurkey/trained-zips"
RAG_DATA_FILE = "merged_dataset_000_100.parquet"
RAG_DATA_REPO = "UcsTurkey/turkish-general-culture-tokenized"
USE_RAG = False  # ✅ optional RAG mode (see the sketch below); not yet wired into the request flow
CONFIDENCE_THRESHOLD = -1.5  # ✅ minimum acceptable top logit score for the first generated token
FALLBACK_ANSWERS = [  # user-facing Turkish fallback replies, kept verbatim
    "Bu konuda maalesef bilgim yok.",          # "Unfortunately I have no knowledge on this topic."
    "Ne demek istediğinizi tam anlayamadım.",  # "I couldn't quite understand what you meant."
    "Bu soruya şu an yanıt veremiyorum."       # "I can't answer this question right now."
]
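
# NOTE: USE_RAG and the RAG_* constants above are declared but never used in this
# file. Below is a minimal sketch of how the tokenized dataset could be fetched if
# RAG were enabled (a hypothetical helper, assuming the parquet file lives in a
# dataset repo; it is not part of the original request flow):
def load_rag_dataset():
    if not USE_RAG:
        return None
    log("📚 Downloading RAG dataset...")
    path = hf_hub_download(
        repo_id=RAG_DATA_REPO,
        filename=RAG_DATA_FILE,
        repo_type="dataset",  # assumption: dataset repo, unlike the model repo used below
        token=HF_TOKEN,
    )
    # Load the downloaded parquet file as a single training split
    return load_dataset("parquet", data_files=path, split="train")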

app = FastAPI()
chat_history = []
model = None
tokenizer = None

class Message(BaseModel):
    user_input: str

@app.get("/")
def health():
    return {"status": "ok"}

@app.get("/start", response_class=HTMLResponse)
def root():
    return """
    <html>
    <head><title>Fine-Tune Chat</title></head>
    <body>
        <h2>📘 Fine-tune Chat Test</h2>
        <textarea id=\"input\" rows=\"4\" cols=\"60\" placeholder=\"Bir şeyler yaz...\"></textarea><br><br>
        <button onclick=\"send()\">Gönder</button>
        <pre id=\"output\"></pre>
        <script>
            async function send() {
                const input = document.getElementById(\"input\").value;
                const res = await fetch("/chat", {
                    method: "POST",
                    headers: { "Content-Type": "application/json" },
                    body: JSON.stringify({ user_input: input })
                });
                const data = await res.json();
                document.getElementById("output").innerText = data.answer || data.error || "Hata oluştu.";
            }
        </script>
    </body>
    </html>
    """

@app.post("/chat")
def chat(msg: Message):
    try:
        log(f"📦 User message received: {msg}")
        global model, tokenizer
        if model is None or tokenizer is None:
            log("🚫 Error: model is not loaded yet.")
            # User-facing message kept in Turkish: "Model not loaded. Please try again in a few seconds."
            return {"error": "Model yüklenmedi. Lütfen birkaç saniye sonra tekrar deneyin."}

        user_input = msg.user_input.strip()
        if not user_input:
            return {"error": "Boş giriş"}  # "Empty input"

        full_prompt = f"SORU: {user_input}\nCEVAP:"  # Turkish "QUESTION: ... / ANSWER:" prompt template
        log(f"📨 Prompt: {full_prompt}")

        inputs = tokenizer(full_prompt, return_tensors="pt")
        inputs = {k: v.to(model.device) for k, v in inputs.items()}

        with torch.no_grad():
            output = model.generate(
                **inputs,
                max_new_tokens=200,
                do_sample=True,
                temperature=0.7,
                return_dict_in_generate=True,
                output_scores=True
            )

        generated_ids = output.sequences[0]
        generated_text = tokenizer.decode(generated_ids, skip_special_tokens=True)
        answer = generated_text[len(full_prompt):].strip()

        if output.scores:
            # Logits for the first generated token (batch element 0)
            first_token_logit = output.scores[0][0]
            if torch.isnan(first_token_logit).any() or torch.isinf(first_token_logit).any():
                log("⚠️ Invalid logits (NaN/Inf) detected; using a fallback answer.")
                answer = random.choice(FALLBACK_ANSWERS)
            else:
                top_logit_score = torch.max(first_token_logit).item()
                log(f"🔎 Top logit score of first token: {top_logit_score:.4f}")

                # Crude confidence gate: if even the best first-token logit falls
                # below the threshold, treat the generation as unreliable.
                if top_logit_score < CONFIDENCE_THRESHOLD:
                    answer = random.choice(FALLBACK_ANSWERS)
                    log(f"⚠️ Low confidence; using fallback answer: {answer}")

        chat_history.append({"user": user_input, "bot": answer})
        log(f"🗨️ Soru: {user_input} → Yanıt: {answer[:60]}...")
        return {"answer": answer, "chat_history": chat_history}
    except Exception as e:
        log(f"❌ /chat sırasında hata oluştu: {e}")
        return {"error": str(e)}

def setup_model():
    try:
        global model, tokenizer

        log("📦 Fine-tune zip indiriliyor...")
        zip_path = hf_hub_download(
            repo_id=FINE_TUNE_REPO,
            filename=FINE_TUNE_ZIP,
            repo_type="model",
            token=HF_TOKEN
        )

        extract_dir = "/app/extracted"
        os.makedirs(extract_dir, exist_ok=True)

        with zipfile.ZipFile(zip_path, "r") as zip_ref:
            zip_ref.extractall(extract_dir)
        log("📂 Zip başarıyla açıldı.")

        log("🔁 Tokenizer yükleniyor...")
        tokenizer = AutoTokenizer.from_pretrained(os.path.join(extract_dir, "output"))

        log("🧠 Base model indiriliyor...")
        base_model = AutoModelForCausalLM.from_pretrained(
            MODEL_BASE,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
        )

        log("➕ LoRA adapter uygulanıyor...")
        peft_model = PeftModel.from_pretrained(base_model, os.path.join(extract_dir, "output"))

        # Unwrap to the underlying transformers module; the injected LoRA layers
        # are preserved (calling generate on peft_model directly would also work).
        model = peft_model.model
        model.eval()

        log("✅ Model başarıyla yüklendi.")
    except Exception as e:
        log(f"❌ setup_model() sırasında hata oluştu: {e}")

def run_server():
    log("🚀 Starting uvicorn server...")
    uvicorn.run(app, host="0.0.0.0", port=7860)  # 7860 is the default port for Hugging Face Spaces

# ✅ Application startup: load the model in the background while the API comes up
threading.Thread(target=setup_model, daemon=True).start()
threading.Thread(target=run_server, daemon=True).start()

log("⌛ Loading model; API server is starting up...")
# Keep the main thread alive (both worker threads are daemons)
while True:
    try:
        time.sleep(60)
    except Exception as e:
        log(f"❌ Error in main wait loop: {e}")