Scraper_search / llm.py
gaur3009's picture
Update llm.py
3d98aa9 verified
raw
history blame
630 Bytes
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# Load the FLAN-T5 base checkpoint once at import time; the tokenizer and the
# seq2seq model are module-level singletons shared by generate_answer() below.
# NOTE(review): this downloads/loads weights on first import — confirm that is
# acceptable for the hosting environment's startup time.
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
def generate_answer(context, question):
    """Generate an answer to *question* grounded in *context*.

    Builds a single instruction prompt, tokenizes it (truncated to at most
    512 tokens), runs deterministic greedy decoding (``do_sample=False``)
    for up to 100 new tokens on the module-level FLAN-T5 model, and returns
    the decoded text with surrounding whitespace stripped.

    NOTE(review): truncation keeps only the first 512 tokens, so a very long
    context can push the question out of the prompt — confirm inputs fit.
    """
    prompt = f"""
You are a helpful AI assistant.
Context:
{context}
Question: {question}
Answer as a helpful paragraph:"""
    # Tokenize the prompt; truncation guards against over-long contexts.
    encoded = tokenizer(prompt, return_tensors='pt', truncation=True, max_length=512)
    # Greedy (non-sampled) generation keeps the output deterministic.
    generated = model.generate(**encoded, max_new_tokens=100, do_sample=False)
    answer = tokenizer.decode(generated[0], skip_special_tokens=True)
    return answer.strip()