from datasets import load_dataset
from sentence_transformers import SentenceTransformer
import faiss
from transformers import pipeline


class RAGPipeline:
    def __init__(self):
        # Embedding model for retrieval and a seq2seq model for answer generation
        self.embedder = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
        self.generator = pipeline("text2text-generation", model="google/flan-t5-base")

        # Load the first 500 labeled PubMedQA examples directly from the Hub
        ds = load_dataset("pubmed_qa", "pqa_labeled", split="train[:500]")
        # Each "context" entry is a dict whose "contexts" field is a list of passages;
        # join them into one string per example so they can be embedded
        self.documents = [" ".join(c["contexts"]) for c in ds["context"]]
        self.questions = ds["question"]
        self.index = self.build_faiss_index()

    def build_faiss_index(self):
        # Embed all documents and index them for exact L2 nearest-neighbor search
        embeddings = self.embedder.encode(self.documents, convert_to_numpy=True)
        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(embeddings)
        return index

    def retrieve(self, query, top_k=5):
        # Embed the query and return the top_k closest documents
        query_embedding = self.embedder.encode([query], convert_to_numpy=True)
        scores, indices = self.index.search(query_embedding, top_k)
        return [self.documents[i] for i in indices[0]]

    def generate_answer(self, query):
        # Retrieve supporting passages and condition the generator on them
        docs = self.retrieve(query)
        context = " ".join(docs)
        prompt = (
            "Answer the following medical question using the context:\n"
            f"Context: {context}\nQuestion: {query}"
        )
        result = self.generator(prompt, max_length=200, do_sample=True)
        return result[0]["generated_text"]
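

# A minimal usage sketch, assuming the script is run directly; the sample
# question below is illustrative and not taken from the dataset.
if __name__ == "__main__":
    rag = RAGPipeline()
    answer = rag.generate_answer("Does metformin reduce cardiovascular risk?")
    print(answer)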