import torch

from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/pythia-410m-chatbot")
model = AutoModelForCausalLM.from_pretrained("AventIQ-AI/pythia-410m-chatbot")
model.eval()  # inference only: disable dropout

# Pythia's tokenizer has no pad token by default, so reuse the EOS token.
tokenizer.pad_token = tokenizer.eos_token

def chat_with_model(model, tokenizer, question, max_length=256):
    """Generate a response to a question."""
    # Tokenize the prompt, truncating it to at most 512 tokens.
    inputs = tokenizer(question, return_tensors="pt", padding=True, truncation=True, max_length=512)

    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=max_length,  # total length cap, prompt tokens included
            num_return_sequences=1,
            temperature=1.0,
            do_sample=True,  # sample rather than greedy-decode
            pad_token_id=tokenizer.pad_token_id,
        )

    # Decode the full sequence (prompt + completion) back to text.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Example usage
test_question = "What is the capital of France?"
response = chat_with_model(model, tokenizer, test_question)
print("Answer", response)