import torch
from transformers import pipeline

# Text-to-text generation pipeline backed by FLAN-T5; falls back to CPU
# (device=-1) when no GPU is available.
qa_pipeline = pipeline(
    "text2text-generation",
    model="google/flan-t5-large",
    device=0 if torch.cuda.is_available() else -1,
)


def generate_answer(context, question):
    # Instruction-style prompt: the model is asked to answer the question
    # using only the supplied context.
    prompt = f"""
You are a helpful research assistant. Use the context to answer the question.

Context: {context}

Question: {question}

Answer in a comprehensive paragraph with key details:
"""
    result = qa_pipeline(
        prompt,
        max_length=512,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )
    # The pipeline returns a list of dicts; 'generated_text' holds the answer.
    return result[0]["generated_text"].strip()
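
# A minimal usage sketch. The context and question strings below are
# hypothetical, included only to illustrate the call.
if __name__ == "__main__":
    context = (
        "FLAN-T5 is an instruction-tuned variant of T5 that was fine-tuned "
        "on a mixture of tasks phrased as natural-language instructions."
    )
    question = "What is FLAN-T5 and how was it trained?"
    print(generate_answer(context, question))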