Create rag.py
utils/rag.py
ADDED
from langchain_community.vectorstores import Milvus


def query_pipeline(question, texts, tables, pictures, embeddings_model, llm_model):
    """
    Process a question through the RAG pipeline.

    Args:
        question: The user's question
        texts: List of text documents
        tables: List of table documents
        pictures: List of image-description documents
        embeddings_model: Embedding model used to index the documents
        llm_model: Chat model used to generate the answer

    Returns:
        str: The generated answer
    """
    # Combine all document types into a single corpus
    all_docs = texts + tables + pictures

    # Index the documents in Milvus (assumes a server at 127.0.0.1:19530)
    vectorstore = Milvus.from_documents(
        all_docs,
        embeddings_model,
        connection_args={"host": "127.0.0.1", "port": "19530"},
    )

    retriever = vectorstore.as_retriever()

    # Retrieve the documents most relevant to the question
    relevant_docs = retriever.invoke(question)

    # Concatenate the retrieved chunks into a single context string
    context = "\n\n".join(doc.page_content for doc in relevant_docs)

    # Build the grounding prompt
    prompt = f"""
You are an AI assistant answering questions based on the provided context.
Use only the information from the context to answer the question.
If you don't know the answer, say "I don't know".

Context:
{context}

Question: {question}
Answer:
"""

    # Invoke the chat model; .content assumes a message-returning chat model
    response = llm_model.invoke(prompt)

    return response.content
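
For context, here is a minimal sketch of how query_pipeline might be called. It is not part of the commit: the toy documents, the embedding model, and the chat model below are illustrative assumptions, and a Milvus server must already be reachable at 127.0.0.1:19530.

# Usage sketch (hypothetical caller, not part of utils/rag.py)
from langchain_core.documents import Document
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_openai import ChatOpenAI

from utils.rag import query_pipeline

# Toy documents standing in for parsed text, table, and image-description chunks
texts = [Document(page_content="Milvus is an open-source vector database.")]
tables = [Document(page_content="| metric | value |\n| recall | 0.98 |")]
pictures = [Document(page_content="Diagram: documents flow into the vector store.")]

# Placeholder model choices; any LangChain embedding and chat model should work
embeddings_model = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2"
)
llm_model = ChatOpenAI(model="gpt-4o-mini")

answer = query_pipeline(
    "What is Milvus?", texts, tables, pictures, embeddings_model, llm_model
)
print(answer)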