# chromadb needs a newer SQLite than many hosts ship, so swap in the pysqlite3 binary
# before anything imports the standard sqlite3 module.
__import__('pysqlite3')
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')

import streamlit as st
from huggingface_hub import InferenceClient
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
import chromadb
from langchain.memory import ConversationBufferMemory

# Set page config
st.set_page_config(page_title="RAG Chatbot", page_icon="πŸ€–", layout="wide")

# Hugging Face API token, read from Streamlit secrets (.streamlit/secrets.toml or the Space's secrets)
HF_TOKEN = st.secrets["HF_TOKEN"]

# Cached initializers for the Chroma client and the LangChain vector store wrapper
@st.cache_resource
def init_chroma():
    # Persistent Chroma client backed by the local "chroma_db" directory
    persist_directory = "chroma_db"
    chroma_client = chromadb.PersistentClient(path=persist_directory)
    chroma_collection = chroma_client.get_or_create_collection("my_collection")
    return chroma_client, chroma_collection

@st.cache_resource
def init_vectorstore():
    # LangChain Chroma wrapper over the same "my_collection" collection, used for similarity search
    persist_directory = "chroma_db"
    embeddings = HuggingFaceEmbeddings()
    vectorstore = Chroma(persist_directory=persist_directory, embedding_function=embeddings, collection_name="my_collection")
    return vectorstore

# Initialize components
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3", token=HF_TOKEN)  # hosted Mistral via the HF Inference API
chroma_client, chroma_collection = init_chroma()  # ensures "my_collection" exists on disk
vectorstore = init_vectorstore()
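
# The CharacterTextSplitter imported above is not referenced by the chat flow below;
# document ingestion presumably happens in the "Store Document" page (pages/management.py,
# not shown here). The helper below is only a minimal sketch of that step, assuming
# plain-text input; its name, chunk sizes, and metadata are illustrative, not taken from the app.
def ingest_text(raw_text, source="upload"):
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_text(raw_text)
    # Embed the chunks with HuggingFaceEmbeddings and persist them into "my_collection"
    vectorstore.add_texts(chunks, metadatas=[{"source": source}] * len(chunks))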

# Initialize memory buffer
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

def rag_query(query):
    # Retrieve relevant documents using similarity search
    retrieved_docs = vectorstore.similarity_search(query, k=3)

    # Prepare retrieved context for the model
    if retrieved_docs:
        context = "\n".join([doc.page_content for doc in retrieved_docs])
    else:
        context = ""

    # Append new interaction to memory
    memory.chat_memory.add_user_message(query)

    # Retrieve past interactions for context
    past_interactions = memory.load_memory_variables({})[memory.memory_key]
    context_with_memory = f"{context}\n\nConversation History:\n{past_interactions}"

    # Debugging: Display context and past interactions
    # st.write("Debugging Info:")
    # st.write("Context Sent to Model:", context_with_memory)
    # st.write("Retrieved Documents:", [doc.page_content for doc in retrieved_docs])
    # st.write("Past Interactions:", past_interactions)

    # Generate response using Mistral via the HF Inference API
    messages = [
        {"role": "user", "content": f"Context: {context_with_memory}\n\nQuestion: {query}\n(It is not mandatory to use the context.)\n\nAnswer:"}
    ]

    # Get the response from the client
    response_content = client.chat_completion(messages=messages, max_tokens=500, stream=False)

    # Process the response content
    response = response_content.choices[0].message.content.split("Answer:")[-1].strip()

    # Fall back to the model's own knowledge when nothing was retrieved or the grounded answer is very short
    if not context or len(response.split()) < 35 or not retrieved_docs:
        messages = [{"role": "user", "content": query}]
        response_content = client.chat_completion(messages=messages, max_tokens=500, stream=False)
        response = response_content.choices[0].message.content

    # Append the response to memory
    memory.chat_memory.add_ai_message(response)

    return response

def process_feedback(query, response, feedback):
    # st.write(f"Feedback received: {'πŸ‘' if feedback else 'πŸ‘Ž'} for query: {query}")
    if feedback:
        # Thumbs up: rag_query already stored this response in memory, so nothing more to do
        pass
    else:
        # Thumbs down: ask for a better answer; rag_query stores the new response in memory
        # memory.chat_memory.messages = [msg for msg in memory.chat_memory.messages if msg.get("content") != response]
        new_query = f"{query}. Give a better response"
        new_response = rag_query(new_query)
        st.markdown(new_response)

# Streamlit interface

st.title("Welcome to our RAG-Based Chatbot")
st.markdown("***")
st.info('''
        To chat with our Mistral-powered chatbot, click Chat.

        To add a document to the knowledge base, click Store Document.
        ''')

col1, col2 = st.columns(2)

with col1:
    chat = st.button("Chat")
    if chat:
        st.switch_page("pages/chatbot.py")

with col2:
    rag = st.button("Store Document")
    if rag:
        st.switch_page("pages/management.py")

st.markdown("<div style='text-align:center;'></div>", unsafe_allow_html=True)