# Swap pysqlite3 in as the stdlib sqlite3 module before chromadb is imported.
# ChromaDB needs a newer SQLite build than some hosts (e.g. Streamlit Community Cloud)
# provide, and the pysqlite3-binary wheel ships its own.
__import__('pysqlite3')
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
import streamlit as st
from huggingface_hub import InferenceClient
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, PromptTemplate, StorageContext
from llama_index.vector_stores.chroma import ChromaVectorStore
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.memory import ConversationBufferMemory
import chromadb
st.set_page_config(page_title="RAG Chatbot", page_icon="🤖", layout="wide")

# Hugging Face Inference API token, read from Streamlit secrets (.streamlit/secrets.toml)
HF_TOKEN = st.secrets["HF_TOKEN"]
@st.cache_resource
def init_chroma():
    """Create (or open) the persistent Chroma client and the shared collection."""
    persist_directory = "chroma_db"
    chroma_client = chromadb.PersistentClient(path=persist_directory)
    chroma_collection = chroma_client.get_or_create_collection("my_collection")
    return chroma_client, chroma_collection
@st.cache_resource
def init_vectorstore():
    """LangChain Chroma wrapper over the same on-disk collection used by init_chroma."""
    persist_directory = "chroma_db"
    embeddings = HuggingFaceEmbeddings()
    vectorstore = Chroma(
        persist_directory=persist_directory,
        embedding_function=embeddings,
        collection_name="my_collection",
    )
    return vectorstore
# Hosted Mistral-7B-Instruct via the Hugging Face Inference API
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3", token=HF_TOKEN)
chroma_client, chroma_collection = init_chroma()
vectorstore = init_vectorstore()

# Rolling chat history that gets folded into each prompt.
# Note: module-level objects are rebuilt on every Streamlit rerun, so this buffer
# persists only within a single run unless it is moved into st.session_state.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
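# Hypothetical ingestion sketch (assumption, not code this page runs): the
# "Store Document" page presumably chunks uploaded text with CharacterTextSplitter
# and writes the chunks into the same "my_collection" store that rag_query searches.
# The name ingest_text and the chunk sizes below are illustrative only.
def ingest_text(raw_text: str) -> None:
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_text(raw_text)
    # Embeds the chunks with the configured HuggingFaceEmbeddings and stores them
    # in chroma_db (older langchain versions may also need vectorstore.persist()).
    vectorstore.add_texts(chunks)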
def rag_query(query):
    """Answer a query with RAG: retrieve context from Chroma, then ask Mistral."""
    # Top-3 most similar chunks from the vector store
    retrieved_docs = vectorstore.similarity_search(query, k=3)
    context = "\n".join(doc.page_content for doc in retrieved_docs) if retrieved_docs else ""

    # Record the user turn and pull the conversation so far into the prompt
    memory.chat_memory.add_user_message(query)
    past_interactions = memory.load_memory_variables({})[memory.memory_key]
    context_with_memory = f"{context}\n\nConversation History:\n{past_interactions}"

    messages = [
        {
            "role": "user",
            "content": (
                f"Context: {context_with_memory}\n\n"
                f"Question: {query}, it is not mandatory to use the context\n\nAnswer:"
            ),
        }
    ]
    response_content = client.chat_completion(messages=messages, max_tokens=500, stream=False)
    response = response_content.choices[0].message.content.split("Answer:")[-1].strip()

    # Fall back to a plain (non-RAG) completion when nothing was retrieved
    # or the grounded answer looks too thin (< 35 words).
    if not retrieved_docs or not context or len(response.split()) < 35:
        messages = [{"role": "user", "content": query}]
        response_content = client.chat_completion(messages=messages, max_tokens=500, stream=False)
        response = response_content.choices[0].message.content

    memory.chat_memory.add_ai_message(response)
    return response
def process_feedback(query, response, feedback):
    """Handle thumbs-up/down: keep the answer, or regenerate and display a new one."""
    if feedback:
        memory.chat_memory.add_ai_message(response)
    else:
        # Negative feedback: ask for an improved answer and show it
        new_query = f"{query}. Give better response"
        new_response = rag_query(new_query)
        st.markdown(new_response)
        memory.chat_memory.add_ai_message(new_response)
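# Illustrative wiring (assumption): pages/chatbot.py presumably collects the user's
# prompt, calls rag_query, renders the answer, and routes a thumbs-up/down into
# process_feedback. The widget labels below are hypothetical, not this app's code.
# prompt = st.chat_input("Ask a question")
# if prompt:
#     answer = rag_query(prompt)
#     st.markdown(answer)
#     helpful = st.button("Helpful?")
#     process_feedback(prompt, answer, helpful)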
# Landing page UI
st.title("Welcome to our RAG-Based Chatbot")
st.markdown("***")
st.info('''
To chat with our Mistral-powered chatbot, click Chat.

To add documents to the knowledge base, click Store Document.
''')
col1, col2 = st.columns(2)

with col1:
    if st.button("Chat"):
        st.switch_page("pages/chatbot.py")

with col2:
    if st.button("Store Document"):
        st.switch_page("pages/management.py")

st.markdown("<div style='text-align:center;'></div>", unsafe_allow_html=True)