import torch
from transformers import AutoTokenizer
from evo_model import EvoTransformerForClassification

# Load the tokenizer and the fine-tuned Evo classifier, then switch to eval mode
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = EvoTransformerForClassification.from_pretrained("trained_model")
model.eval()

def generate_response(goal, option1, option2):
    """Score each option against the goal and return the better-aligned one."""
    # Build one prompt per option so the classifier can score them independently
    prompt1 = f"Goal: {goal}\nOption: {option1}"
    prompt2 = f"Goal: {goal}\nOption: {option2}"

    # Tokenize each prompt (padding/truncation keep inputs within the model's max length)
    inputs1 = tokenizer(prompt1, return_tensors="pt", padding=True, truncation=True)
    inputs2 = tokenizer(prompt2, return_tensors="pt", padding=True, truncation=True)

    # Run inference without tracking gradients
    with torch.no_grad():
        output1 = model(**inputs1)
        output2 = model(**inputs2)

    logits1 = output1["logits"]
    logits2 = output2["logits"]

    # Softmax over the two classes; index 1 is treated as the "aligned with goal" class
    prob1 = torch.softmax(logits1, dim=1)[0][1].item()
    prob2 = torch.softmax(logits2, dim=1)[0][1].item()

    # Compare the two alignment probabilities and report the stronger option
    if prob1 > prob2:
        suggestion = "✅ Option 1 is more aligned with the goal."
    elif prob2 > prob1:
        suggestion = "✅ Option 2 is more aligned with the goal."
    else:
        suggestion = "⚖️ Both options appear equally aligned with the goal."

    return suggestion
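

# Example usage: a minimal sketch to show how generate_response is called.
# The goal/option strings below are made-up placeholders, not values from the
# original script.
if __name__ == "__main__":
    goal = "Reduce household energy consumption"
    option1 = "Switch to LED light bulbs throughout the house"
    option2 = "Leave the heating on all day to avoid restart costs"
    print(generate_response(goal, option1, option2))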