Upload fine-tune inference test code
Browse files
- Dockerfile +22 -0
- fine_tune_inference_test.py +80 -0
- requirements.txt +7 -0
Dockerfile
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
FROM python:3.10-slim

WORKDIR /app

# System tools needed at runtime (git for HF hub ops, unzip for model archives).
# --no-install-recommends and removing the apt lists keep the slim image small.
RUN apt-get update \
    && apt-get install -y --no-install-recommends git unzip \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Hugging Face cache directories and write permissions
# (chmod 777 so the app also works when run as a non-root user, e.g. on Spaces)
RUN mkdir -p /app/.cache /app/output /app/extracted && chmod -R 777 /app

ENV HF_HOME=/app/.cache \
    HF_DATASETS_CACHE=/app/.cache \
    HF_HUB_CACHE=/app/.cache \
    TRANSFORMERS_CACHE=/app/.cache \
    OUTPUT_DIR=/app/output

# Install dependencies before copying code so this layer is cached across
# source-only changes; --no-cache-dir avoids persisting pip's wheel cache.
COPY requirements.txt ./
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt

COPY fine_tune_inference_test.py ./

CMD ["python", "fine_tune_inference_test.py"]
fine_tune_inference_test.py
ADDED
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import threading
|
3 |
+
import uvicorn
|
4 |
+
from fastapi import FastAPI, Request
|
5 |
+
from pydantic import BaseModel
|
6 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
7 |
+
from datasets import load_dataset
|
8 |
+
from fastapi.responses import JSONResponse
|
9 |
+
|
# ✅ Constants — model/data locations; the HF token is supplied via environment
HF_TOKEN = os.environ.get("HF_TOKEN")
MODEL_BASE = "UcsTurkey/kanarya-750m-fixed"
FINE_TUNE_ZIP = "trained_model_000_099.zip"  # 👈 changeable
FINE_TUNE_REPO = "UcsTurkey/trained-zips"
RAG_DATA_FILE = "merged_dataset_000_099.parquet"  # 👈 changeable
RAG_DATA_REPO = "UcsTurkey/turkish-general-culture-tokenized"

# ✅ FastAPI app
app = FastAPI()
# In-memory conversation state shared by all clients of this process;
# grows without bound (no trimming anywhere in this file).
chat_history = []
class Message(BaseModel):
    # Request body schema for POST /chat; the raw message is stripped and
    # validated for emptiness inside the endpoint itself.
    user_input: str
|
24 |
+
|
@app.get("/")
def health():
    """Liveness probe: report that the service process is up."""
    payload = {"status": "ok"}
    return payload
|
28 |
+
|
@app.post("/chat")
def chat(msg: Message):
    """Generate an assistant reply for the user's message.

    Builds a flat "Kullanıcı:/Asistan:" transcript from the shared in-memory
    history, feeds it to the text-generation pipeline, appends the new turn,
    and returns the answer together with the full history.
    """
    user_input = msg.user_input.strip()
    if not user_input:
        return {"error": "Boş giriş"}

    # The pipeline is created asynchronously by setup_model() in a background
    # thread; before it finishes, `pipe` does not exist yet. Guard so an early
    # request returns a clear error instead of an unhandled NameError (500).
    if "pipe" not in globals():
        return {"error": "Model henüz yüklenmedi, lütfen tekrar deneyin."}

    # Assemble the prompt with str.join instead of += in a loop.
    turns = [f"Kullanıcı: {turn['user']}\nAsistan: {turn['bot']}\n" for turn in chat_history]
    turns.append(f"Kullanıcı: {user_input}\nAsistan:")
    full_prompt = "".join(turns)

    result = pipe(full_prompt, max_new_tokens=200, do_sample=True, temperature=0.7)
    # The pipeline echoes the prompt; keep only the newly generated suffix.
    answer = result[0]["generated_text"][len(full_prompt):].strip()

    chat_history.append({"user": user_input, "bot": answer})
    return {"answer": answer, "chat_history": chat_history}
|
45 |
+
|
46 |
+
|
# ✅ Model and RAG loading
def setup_model():
    """Download the fine-tuned model zip, then build tokenizer/model/pipeline.

    Runs once in a background daemon thread at startup. Publishes the
    text-generation pipeline as the module-global ``pipe`` and the RAG
    dataset as the module-global ``rag`` (the original bound the dataset to
    a local variable and discarded it right after printing its size).
    """
    global pipe, rag
    from huggingface_hub import hf_hub_download
    import zipfile

    print("📦 Fine-tune zip indiriliyor...")
    zip_path = hf_hub_download(
        repo_id=FINE_TUNE_REPO,
        filename=FINE_TUNE_ZIP,
        repo_type="model",
        token=HF_TOKEN
    )
    extract_dir = "/app/extracted"
    os.makedirs(extract_dir, exist_ok=True)
    with zipfile.ZipFile(zip_path, "r") as zip_ref:
        zip_ref.extractall(extract_dir)

    print("🔁 Tokenizer ve model yükleniyor...")
    # The archive is expected to contain an "output/" folder with the
    # saved tokenizer and model — TODO confirm against the zip layout.
    tokenizer = AutoTokenizer.from_pretrained(os.path.join(extract_dir, "output"))
    model = AutoModelForCausalLM.from_pretrained(os.path.join(extract_dir, "output"))

    print("📚 RAG dataseti yükleniyor...")
    rag = load_dataset(RAG_DATA_REPO, data_files=RAG_DATA_FILE, split="train", token=HF_TOKEN)
    print(f"🔍 RAG boyutu: {len(rag)}")

    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
74 |
+
|
# ✅ Load the model when the application starts (daemon thread so it never
# blocks process shutdown; requests arriving before it finishes will not
# find `pipe` defined yet)
threading.Thread(target=setup_model, daemon=True).start()

# 🧘 Serve forever so the app does not exit/restart after setup finishes
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
|
requirements.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Web server
fastapi
uvicorn
# Model inference / data loading
transformers
torch
huggingface_hub
datasets
# Request validation
pydantic