from sentence_transformers import SentenceTransformer
import faiss
import numpy as np
import pandas as pd
from transformers import pipeline


class RAGPipeline:
    def __init__(self, dataset_path):
        # Embedding model for retrieval and a seq2seq model for answer generation.
        self.embedder = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
        self.generator = pipeline("text2text-generation", model="google/flan-t5-base")
        # The CSV is expected to contain 'context' and 'question' columns.
        self.data = pd.read_csv(dataset_path)
        self.documents = self.data['context'].tolist()
        self.questions = self.data['question'].tolist()
        self.index = self.build_faiss_index()

    def build_faiss_index(self):
        # Encode every context passage and store the vectors in a flat L2 index.
        embeddings = self.embedder.encode(self.documents, convert_to_numpy=True)
        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(embeddings.astype(np.float32))
        return index

    def retrieve(self, query, top_k=5):
        # Embed the query and return the top_k closest context passages.
        query_embedding = self.embedder.encode([query], convert_to_numpy=True)
        distances, indices = self.index.search(query_embedding.astype(np.float32), top_k)
        # FAISS pads with -1 when fewer than top_k documents are indexed.
        return [self.documents[i] for i in indices[0] if i != -1]

    def generate_answer(self, query):
        # Retrieve supporting passages, then condition the generator on them.
        docs = self.retrieve(query)
        context = " ".join(docs)
        prompt = (
            "Answer the following question using the provided context:\n"
            f"Context: {context}\nQuestion: {query}"
        )
        result = self.generator(prompt, max_length=200, do_sample=True)
        return result[0]['generated_text']
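
A minimal usage sketch follows. The CSV path "data/squad_sample.csv" and the example question are hypothetical; the file only needs the 'context' and 'question' columns the class above expects.

# Usage sketch; "data/squad_sample.csv" is an assumed path, not part of the original code.
if __name__ == "__main__":
    rag = RAGPipeline("data/squad_sample.csv")
    answer = rag.generate_answer("What is retrieval-augmented generation?")
    print(answer)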