import os
import threading
import uvicorn
from fastapi import FastAPI
from fastapi.responses import HTMLResponse, JSONResponse
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline
from datasets import load_dataset
from peft import PeftModel
import torch
from huggingface_hub import hf_hub_download
import zipfile
from datetime import datetime
# ✅ Timestamped log helper (flushes immediately so messages appear in real time)
def log(message):
    timestamp = datetime.now().strftime("%H:%M:%S")
    print(f"[{timestamp}] {message}", flush=True)
# ✅ Constants
HF_TOKEN = os.environ.get("HF_TOKEN")
MODEL_BASE = "UcsTurkey/kanarya-750m-fixed"
FINE_TUNE_ZIP = "trained_model_002_005.zip"
FINE_TUNE_REPO = "UcsTurkey/trained-zips"
RAG_DATA_FILE = "merged_dataset_000_100.parquet"
RAG_DATA_REPO = "UcsTurkey/turkish-general-culture-tokenized"
USE_RAG = False  # ✅ Flag that makes RAG usage optional
app = FastAPI()
chat_history = []
pipe = None # global text-generation pipeline
class Message(BaseModel):
    user_input: str

@app.get("/health")
def health():
    return {"status": "ok"}
@app.get("/start", response_class=HTMLResponse)
def root():
return """
<html>
<head><title>Fine-Tune Chat</title></head>
<body>
<h2>📘 Fine-tune Chat Test</h2>
<textarea id=\"input\" rows=\"4\" cols=\"60\" placeholder=\"Bir şeyler yaz...\"></textarea><br><br>
<button onclick=\"send()\">Gönder</button>
<pre id=\"output\"></pre>
<script>
async function send() {
const input = document.getElementById(\"input\").value;
const res = await fetch("/chat", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ user_input: input })
});
const data = await res.json();
document.getElementById("output").innerText = data.answer || data.error || "Hata oluştu.";
}
</script>
</body>
</html>
"""
@app.post("/chat")
def chat(msg: Message):
try:
global pipe
if pipe is None:
log("🚫 Hata: Model henüz yüklenmedi.")
return {"error": "Model yüklenmedi. Lütfen birkaç saniye sonra tekrar deneyin."}
user_input = msg.user_input.strip()
if not user_input:
return {"error": "Boş giriş"}
full_prompt = ""
for turn in chat_history:
full_prompt += f"Kullanıcı: {turn['user']}\nAsistan: {turn['bot']}\n"
full_prompt += f"Kullanıcı: {user_input}\nAsistan:"
result = pipe(full_prompt, max_new_tokens=200, do_sample=True, temperature=0.7)
answer = result[0]["generated_text"][len(full_prompt):].strip()
chat_history.append({"user": user_input, "bot": answer})
log(f"🗨️ Soru: {user_input} → Yanıt: {answer[:60]}...")
return {"answer": answer, "chat_history": chat_history}
except Exception as e:
log(f"❌ /chat sırasında hata oluştu: {e}")
return {"error": str(e)}
def setup_model():
    try:
        global pipe
        log("📦 Downloading fine-tune zip...")
        zip_path = hf_hub_download(
            repo_id=FINE_TUNE_REPO,
            filename=FINE_TUNE_ZIP,
            repo_type="model",
            token=HF_TOKEN
        )
        extract_dir = "/app/extracted"
        os.makedirs(extract_dir, exist_ok=True)
        with zipfile.ZipFile(zip_path, "r") as zip_ref:
            zip_ref.extractall(extract_dir)
        log("📂 Zip extracted successfully.")
        log("🔁 Loading tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained(os.path.join(extract_dir, "output"))
        log("🧠 Downloading base model...")
        base_model = AutoModelForCausalLM.from_pretrained(
            MODEL_BASE,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
        )
        log("➕ Applying LoRA adapter...")
        peft_model = PeftModel.from_pretrained(base_model, os.path.join(extract_dir, "output"))
        if USE_RAG:
            log("📚 Loading RAG dataset...")
            # The dataset is only loaded and its size logged; nothing queries
            # it yet (see the retrieval sketch after this function).
            rag = load_dataset(
                RAG_DATA_REPO,
                data_files=RAG_DATA_FILE,
                split="train",
                token=HF_TOKEN
            )
            log(f"🔍 RAG size: {len(rag)}")
log("🚀 Pipeline oluşturuluyor...")
pipe = TextGenerationPipeline(
model=peft_model.model,
tokenizer=tokenizer,
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
device=0 if torch.cuda.is_available() else -1
)
log("✅ Model ve pipeline başarıyla yüklendi.")
except Exception as e:
log(f"❌ setup_model() sırasında hata oluştu: {e}")
# ✅ Application startup: load the model in a background thread, then serve
# the API. The original busy-wait loop (time.sleep inside while True) never
# started the server even though uvicorn is imported; uvicorn.run() is what
# actually exposes the FastAPI app. Port 7860 follows the Hugging Face Spaces
# convention; adjust it for other deployments.
if __name__ == "__main__":
    threading.Thread(target=setup_model, daemon=True).start()
    log("⌛ Model loading in the background; starting the API server...")
    uvicorn.run(app, host="0.0.0.0", port=7860)
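
# Example session against a running instance (assuming port 7860 as above):
#   curl http://localhost:7860/health
#   curl -X POST http://localhost:7860/chat \
#        -H "Content-Type: application/json" \
#        -d '{"user_input": "Merhaba"}'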