# Scaper_search/llm.py
from transformers import pipeline
import torch

# Run on the first GPU when one is available, otherwise fall back to CPU.
device = 0 if torch.cuda.is_available() else -1

qa_pipeline = pipeline(
    "text2text-generation",
    model="google/flan-t5-base",
    device=device,
)
def generate_answer(context, question):
    # Fall back to the model's general knowledge when retrieval found nothing.
    if context == "No relevant context found.":
        prompt = f"""
You are a helpful AI assistant. Answer the question based on your general knowledge.
Question: {question}
Answer as a helpful paragraph:
"""
    else:
        prompt = f"""
You are a helpful AI assistant. Use the context to answer the question.
Context:
{context}
Question: {question}
Answer as a comprehensive paragraph with key details:
"""

    # Sample a moderately varied answer; max_length caps the output in tokens.
    result = qa_pipeline(
        prompt,
        max_length=400,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )
    return result[0]["generated_text"].strip()
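

# Minimal usage sketch, not part of the original module: the sample context
# and questions below are hypothetical placeholders for whatever the scraper
# or retriever in the rest of this Space would supply.
if __name__ == "__main__":
    sample_context = (
        "FLAN-T5 is an instruction-tuned sequence-to-sequence model "
        "released by Google."
    )
    # Grounded answer: the model is steered by the retrieved context.
    print(generate_answer(sample_context, "What is FLAN-T5?"))
    # No retrieved context: the function switches to the general-knowledge prompt.
    print(generate_answer("No relevant context found.", "What is FLAN-T5?"))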