Update app.py
Browse files
app.py
CHANGED
@@ -39,25 +39,25 @@ def predict_relevance(question, answer):
|
|
39 |
return "Irrelevant"
|
40 |
|
41 |
# confidence analysis
|
42 |
-
def predict_confidence(question, answer):
|
43 |
-
if not answer.strip():
|
44 |
return "Not Confident"
|
45 |
|
|
|
46 |
inputs = confidence_tokenizer(question, answer, return_tensors="pt", padding=True, truncation=True)
|
|
|
|
|
47 |
confidence_model.eval()
|
48 |
|
49 |
with torch.no_grad():
|
50 |
outputs = confidence_model(**inputs)
|
51 |
logits = outputs.logits
|
52 |
probabilities = torch.softmax(logits, dim=-1)
|
53 |
-
|
54 |
-
print("Logits:", logits)
|
55 |
-
print("Probabilities:", probabilities)
|
56 |
-
|
57 |
-
confidence_prob = probabilities[0, 1] # Probability for Confident class
|
58 |
|
59 |
-
#
|
60 |
-
|
|
|
|
|
61 |
|
62 |
# Questions from questions.py
|
63 |
def fetch_questions():
|
|
|
39 |
return "Irrelevant"
|
40 |
|
41 |
# confidence analysis
|
def predict_confidence(question, answer, threshold=0.4):
    """Classify whether *answer* sounds confident for the given *question*.

    Feeds the (question, answer) pair through the module-level
    ``confidence_tokenizer`` / ``confidence_model`` and compares the softmax
    probability of class index 1 against ``threshold``.

    Args:
        question: The question text.
        answer: The answer text; non-string or blank/whitespace-only values
            short-circuit to "Not Confident" without invoking the model.
        threshold: Probability cutoff above which the answer is labelled
            "Confident" (default 0.4).

    Returns:
        The string "Confident" or "Not Confident".
    """
    # Guard clause: reject anything that is not a non-empty string up front.
    if not isinstance(answer, str) or not answer.strip():
        return "Not Confident"

    # Tokenize input
    inputs = confidence_tokenizer(
        question, answer, return_tensors="pt", padding=True, truncation=True
    )

    # Set model to evaluation mode; disable autograd for inference.
    confidence_model.eval()
    with torch.no_grad():
        outputs = confidence_model(**inputs)
        logits = outputs.logits
        probabilities = torch.softmax(logits, dim=-1)

    # NOTE(review): assumes class index 1 is the "Confident" label — confirm
    # against the model's id2label mapping.
    confidence_prob = probabilities[0, 1].item()  # Extract probability for "Confident" class

    return "Confident" if confidence_prob > threshold else "Not Confident"
61 |
|
62 |
# Questions from questions.py
|
63 |
def fetch_questions():
|