# studymate / rag_model.py
# Provenance (Hugging Face file page): uploaded by saranya19b,
# commit 3f7f9d9 (verified), "Upload 5 files", 830 Bytes.
from transformers import RagTokenizer, RagRetriever, RagTokenForGeneration
from transformers import pipeline
import torch
def setup_retriever_and_qa():
    """Load the RAG tokenizer, retriever, and model, and build a QA pipeline.

    Uses the dummy wiki_dpr index (``use_dummy_dataset=True``), so retrieval
    quality is placeholder-grade — suitable for demos only.

    Returns:
        tuple: ``(retriever, qa_pipeline)`` where ``retriever`` is the
        :class:`RagRetriever` and ``qa_pipeline`` is a
        ``text2text-generation`` pipeline wrapping the RAG model.
    """
    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
    retriever = RagRetriever.from_pretrained(
        "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
    )
    # Bug fix: the retriever must be attached to the model via the
    # ``retriever=`` kwarg; without it the RAG model has no document index
    # to retrieve from and the retriever built above is never used.
    rag_model = RagTokenForGeneration.from_pretrained(
        "facebook/rag-token-base", retriever=retriever
    )
    # device=0 selects the first GPU when available; -1 forces CPU.
    device = 0 if torch.cuda.is_available() else -1
    qa_pipeline = pipeline(
        "text2text-generation",
        model=rag_model,
        tokenizer=tokenizer,
        device=device,
    )
    return retriever, qa_pipeline
def get_answer(context: str, question: str, retriever, qa_pipeline):
    """Generate an answer for *question* grounded in *context*.

    The question and context are folded into a single T5-style prompt and
    handed to ``qa_pipeline``. Note: ``retriever`` is accepted for interface
    symmetry with ``setup_retriever_and_qa`` but is not used here — retrieval
    happens inside the pipeline's model, if at all.

    Args:
        context: Supporting text to ground the answer in.
        question: The user's question.
        retriever: Unused (kept for call-site compatibility).
        qa_pipeline: A text2text-generation pipeline (or compatible callable).

    Returns:
        str: The generated answer text.
    """
    prompt = f"question: {question} context: {context}"
    # Sampling is enabled, so repeated calls may yield different answers.
    outputs = qa_pipeline(prompt, max_length=200, do_sample=True)
    first = outputs[0]
    return first['generated_text']