Update fine_tune_inference_test.py
fine_tune_inference_test.py
CHANGED
@@ -106,7 +106,12 @@ def chat(msg: Message):
     answer = generated_text[len(full_prompt):].strip()
 
     if output.scores and len(output.scores) > 0:
-        first_token_logit = output.scores[0][0]
+        first_token_logit = output.scores[0][0]
+        if torch.isnan(first_token_logit).any() or torch.isinf(first_token_logit).any():
+            log("⚠️ Geçersiz logit (NaN/Inf) tespit edildi, fallback cevabı gönderiliyor.")
+            fallback = random.choice(FALLBACK_ANSWERS)
+            answer = fallback
+            return {"answer": answer, "chat_history": chat_history}  # ilk tokenin logits
         top_logit_score = torch.max(first_token_logit).item()
         log(f"🔎 İlk token logit skoru: {top_logit_score:.4f}")
 
@@ -174,4 +179,4 @@ while True:
         import time
         time.sleep(60)
     except Exception as e:
-        log(f"❌ Ana bekleme döngüsünde hata: {e}")
+        log(f"❌ Ana bekleme döngüsünde hata: {e}")