# generate.py
import torch
from transformers import AutoTokenizer
from evo_model import EvoDecoderModel

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
vocab_size = tokenizer.vocab_size

model = EvoDecoderModel(vocab_size=vocab_size).to(device)
model.load_state_dict(torch.load("evo_decoder_model.pt", map_location=device))
model.eval()

def generate_response(prompt, max_new_tokens=50, use_web=False):
    """Greedily generate up to max_new_tokens after the prompt (use_web is accepted but unused here)."""
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=128)
    input_ids = inputs["input_ids"].to(device)
    prompt_len = input_ids.shape[1]  # remember where the prompt ends

    for _ in range(max_new_tokens):
        with torch.no_grad():
            logits = model(input_ids)

        next_token_logits = logits[:, -1, :]  # shape (B, vocab_size)
        next_token_id = torch.argmax(next_token_logits, dim=-1, keepdim=True)  # greedy pick, shape (B, 1)

        # Append to input
        input_ids = torch.cat([input_ids, next_token_id], dim=1)

        # Stop as soon as the model emits any special token (BERT's vocab has
        # no dedicated EOS token; [SEP] usually plays that role)
        if next_token_id.item() in tokenizer.all_special_ids:
            break

    # Decode only the newly generated tokens: slicing the decoded string by
    # len(prompt) is unreliable because bert-base-uncased normalizes text
    # (e.g. lowercasing), so the decoded prompt may not match the raw input.
    output_text = tokenizer.decode(input_ids[0, prompt_len:], skip_special_tokens=True)
    return output_text.strip()
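

# Minimal usage sketch: a hedged example assuming evo_decoder_model.pt exists
# and matches the EvoDecoderModel configuration loaded above; the prompt text
# is illustrative only.
if __name__ == "__main__":
    print(generate_response("What is a decoder-only transformer?", max_new_tokens=50))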