__import__('pysqlite3')
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
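# ChromaDB requires a newer sqlite3 than many hosted Python environments provide;
# the swap above lets the pysqlite3-binary package stand in for the stdlib sqlite3 module.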
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
import streamlit as st
from huggingface_hub import InferenceClient
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, PromptTemplate
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.docstore.document import Document
from langchain.memory import ConversationBufferMemory
import chromadb
import pandas as pd  # pandas and Document are used in setup_vector() below
# Set page config
st.set_page_config(page_title="MBAL Chatbot", page_icon="🛡️", layout="wide")
# Hugging Face token is read from Streamlit secrets (.streamlit/secrets.toml)
HF_TOKEN = st.secrets["HF_TOKEN"]
# Initialize your models, databases, and other components here
# @st.cache_resource
# def init_chroma():
# persist_directory = "chroma_db"
# chroma_client = chromadb.PersistentClient(path=persist_directory)
# chroma_collection = chroma_client.get_or_create_collection("my_collection")
# return chroma_client, chroma_collection
# @st.cache_resource
# def init_vectorstore():
# persist_directory = "chroma_db"
# embeddings = HuggingFaceEmbeddings()
# vectorstore = Chroma(persist_directory=persist_directory, embedding_function=embeddings, collection_name="my_collection")
# return vectorstore
@st.cache_resource
def setup_vector():
    # Read the chunk data from the Excel file
    df = pd.read_excel("chunk_metadata_template.xlsx")
    chunks = []
    # Build a list of Documents with metadata
    for _, row in df.iterrows():
        chunk_with_metadata = Document(
            page_content=row['page_content'],
            metadata={
                'chunk_id': row['chunk_id'],
                'document_title': row['document_title'],
                'topic': row['topic'],
                'access': row['access']
            }
        )
        chunks.append(chunk_with_metadata)
    # Initialize the embedding model
    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
    # Create, or write into, the existing vectorstore
    persist_directory = "chroma_db"
    collection_name = "my_collection"
    # Build the vectorstore from the documents and write it to Chroma
    vectorstore = Chroma.from_documents(
        documents=chunks,
        embedding=embeddings,
        persist_directory=persist_directory,
        collection_name=collection_name
    )
    # Persist to disk so the data is saved
    vectorstore.persist()
    return vectorstore
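# Because of @st.cache_resource, setup_vector() runs once per server process:
# the Excel chunks are re-embedded on that first call and persisted to chroma_db/.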
# Initialize components (init_chroma/init_vectorstore are commented out above; setup_vector builds the vectorstore)
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3", token=HF_TOKEN)
vectorstore = setup_vector()
# Initialize memory buffer
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
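# The buffer keeps the full chat history for the current process and is injected
# into the prompt as "Conversation History" inside rag_query().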
def rag_query(query):
    # Retrieve relevant documents using similarity search
    retrieved_docs = vectorstore.similarity_search(query, k=3)
    # Prepare the retrieved context for the model
    if retrieved_docs:
        context = "\n".join([doc.page_content for doc in retrieved_docs])
    else:
        context = ""
    # Append the new user message to memory
    memory.chat_memory.add_user_message(query)
    # Retrieve past interactions for conversational context
    past_interactions = memory.load_memory_variables({})[memory.memory_key]
    context_with_memory = f"{context}\n\nConversation History:\n{past_interactions}"
    # Debugging: display context and past interactions
    # st.write("Debugging Info:")
    # st.write("Context Sent to Model:", context_with_memory)
    # st.write("Retrieved Documents:", [doc.page_content for doc in retrieved_docs])
    # st.write("Past Interactions:", past_interactions)
    # Generate a response with the instruction-tuned model.
    # The prompt (in Vietnamese) tells the model it is a customer consultant for MB Ageas Life
    # insurance products in Vietnam: answer professionally and accurately, give information before
    # asking the next question, stay within MBAL's scope, and only invite the customer to register
    # for a consultation at https://www.mbageas.life/ once enough customer details are collected.
    messages = [
        {
            "role": "user",
            "content": (
                "Bạn là một chuyên viên tư vấn cho khách hàng về sản phẩm bảo hiểm của công ty MB Ageas Life tại Việt Nam. "
                "Hãy trả lời chuyên nghiệp, chính xác, cung cấp thông tin trước rồi hỏi câu tiếp theo. "
                "Tất cả các thông tin cung cấp đều trong phạm vi MBAL. "
                "Khi có đủ thông tin khách hàng thì mới mời khách hàng đăng ký để nhận tư vấn trên https://www.mbageas.life/\n"
                f"{context_with_memory} \nCâu hỏi: {query} \nTrả lời:"
            )
        }
    ]
    # Get the response from the client
    response_content = client.chat_completion(messages=messages, max_tokens=500, stream=False)
    # Keep only the text after the answer marker used in the prompt
    response = response_content.choices[0].message.content.split("Trả lời:")[-1].strip()
    # If the response is empty or very short, or no relevant documents were found,
    # fall back to the model's default knowledge without the retrieved context
    if not context or len(response.split()) < 35 or not retrieved_docs:
        messages = [{"role": "user", "content": query}]
        response_content = client.chat_completion(messages=messages, max_tokens=500, stream=False)
        response = response_content.choices[0].message.content
    # Append the response to memory
    memory.chat_memory.add_ai_message(response)
    return response
def process_feedback(query, response, feedback):
    # st.write(f"Feedback received: {'👍' if feedback else '👎'} for query: {query}")
    if feedback:
        # Thumbs up: keep the response in the memory buffer
        memory.chat_memory.add_ai_message(response)
    else:
        # Thumbs down: regenerate the response and store the new one
        # memory.chat_memory.messages = [msg for msg in memory.chat_memory.messages if msg.get("content") != response]
        new_query = f"{query}. Give a better response"
        new_response = rag_query(new_query)
        st.markdown(new_response)
        memory.chat_memory.add_ai_message(new_response)
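# Note: process_feedback is not invoked in this file; it appears intended to be wired to
# thumbs-up/down feedback widgets on the chat page (presumably pages/chatbot.py).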
# Streamlit interface
st.title("Welcome to our RAG-Based Chatbot")
st.markdown("***")
st.info('''
To use our Mistral-powered chatbot, click Chat.
To push data, click Store Document.
''')
col1, col2 = st.columns(2)
with col1:
    chat = st.button("Chat")
    if chat:
        st.switch_page("pages/chatbot.py")
st.markdown("<div style='text-align:center;'></div>", unsafe_allow_html=True)