File size: 630 Bytes
5a36579
b985953
3d98aa9
 
622f41b
b985953
3d98aa9
 
 
 
5a36579
 
3d98aa9
 
 
 
5a36579
622f41b
5a36579
3d98aa9
698aba0
622f41b
b985953
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Hugging Face checkpoint used for both the tokenizer and the seq2seq model;
# factored into one constant so the two stay in sync.
_MODEL_NAME = "google/flan-t5-base"

# Loaded eagerly at import time so the first call to generate_answer is fast.
tokenizer = AutoTokenizer.from_pretrained(_MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(_MODEL_NAME)

def generate_answer(context, question, *, max_new_tokens=100, max_input_tokens=512):
    """Generate an answer to *question* grounded in *context*.

    Builds an instruction-style prompt containing the context and question,
    tokenizes it (truncating to *max_input_tokens*), runs greedy decoding on
    the module-level seq2seq model, and returns the decoded text.

    Args:
        context: Background text the answer should be based on.
        question: The user's question.
        max_new_tokens: Upper bound on the number of generated tokens
            (default 100, matching the original hard-coded value).
        max_input_tokens: Maximum prompt length fed to the model; longer
            prompts are truncated (default 512 — T5's customary input limit).

    Returns:
        The generated answer as a string, with special tokens removed and
        surrounding whitespace stripped.
    """
    prompt = f"""
You are a helpful AI assistant.

Context:
{context}

Question: {question}

Answer as a helpful paragraph:"""

    # Truncate rather than error on long contexts; anything past the limit
    # is silently dropped from the prompt.
    inputs = tokenizer(prompt, return_tensors='pt', truncation=True, max_length=max_input_tokens)
    # do_sample=False -> greedy decoding, so output is deterministic per input.
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        do_sample=False,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()