ngcanh committed on
Commit
9a4c66f
·
verified ·
1 Parent(s): 1c06151

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -30
app.py CHANGED
@@ -2,12 +2,6 @@ __import__('pysqlite3')
2
  import sys
3
  sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
4
 
5
- # DATABASES = {
6
- # 'default': {
7
- # 'ENGINE': 'django.db.backends.sqlite3',
8
- # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
9
- # }
10
- # }
11
  import streamlit as st
12
  from huggingface_hub import InferenceClient
13
  from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, PromptTemplate
@@ -119,7 +113,7 @@ def rag_query(query):
119
  ]
120
 
121
  # Get the response from the client
122
- response_content = client.chat_completion(messages=messages, max_tokens=500, stream=False)
123
 
124
  # Process the response content
125
  response = response_content.choices[0].message.content.split("Answer:")[-1].strip()
@@ -127,7 +121,7 @@ def rag_query(query):
127
  # If the response is empty or very short, or if no relevant documents were found, use the LLM's default knowledge
128
  if not context or len(response.split()) < 35 or not retrieved_docs:
129
  messages = [{"role": "user", "content": query}]
130
- response_content = client.chat_completion(messages=messages, max_tokens=500, stream=False)
131
  response = response_content.choices[0].message.content
132
 
133
  # Append the response to memory
@@ -135,35 +129,32 @@ def rag_query(query):
135
 
136
  return response
137
 
138
- def process_feedback(query, response, feedback):
139
- # st.write(f"Feedback received: {'👍' if feedback else '👎'} for query: {query}")
140
- if feedback:
141
- # If thumbs up, store the response in memory buffer
142
- memory.chat_memory.add_ai_message(response)
143
- else:
144
- # If thumbs down, remove the response from memory buffer and regenerate the response
145
- # memory.chat_memory.messages = [msg for msg in memory.chat_memory.messages if msg.get("content") != response]
146
- new_query=f"{query}. Give better response"
147
- new_response = rag_query(new_query)
148
- st.markdown(new_response)
149
- memory.chat_memory.add_ai_message(new_response)
150
 
151
  # Streamlit interface
152
 
153
- st.title("Welcome to our RAG-Based Chatbot")
154
  st.markdown("***")
155
  st.info('''
156
- To use Our Mistral supported Chatbot, click Chat.
157
-
158
- To push data, click on Store Document.
159
- ''')
160
 
161
- col1, col2 = st.columns(2)
162
 
163
- with col1:
164
- chat = st.button("Chat")
165
- if chat:
166
- st.switch_page("pages/chatbot.py")
167
 
168
 
169
  st.markdown("<div style='text-align:center;'></div>", unsafe_allow_html=True)
 
2
  import sys
3
  sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
4
 
 
 
 
 
 
 
5
  import streamlit as st
6
  from huggingface_hub import InferenceClient
7
  from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, PromptTemplate
 
113
  ]
114
 
115
  # Get the response from the client
116
+ response_content = client.chat_completion(messages=messages, max_tokens=1024, stream=False)
117
 
118
  # Process the response content
119
  response = response_content.choices[0].message.content.split("Answer:")[-1].strip()
 
121
  # If the response is empty or very short, or if no relevant documents were found, use the LLM's default knowledge
122
  if not context or len(response.split()) < 35 or not retrieved_docs:
123
  messages = [{"role": "user", "content": query}]
124
+ response_content = client.chat_completion(messages=messages, max_tokens=1024, stream=False)
125
  response = response_content.choices[0].message.content
126
 
127
  # Append the response to memory
 
129
 
130
  return response
131
 
132
+ # def process_feedback(query, response, feedback):
133
+ # # st.write(f"Feedback received: {'👍' if feedback else '👎'} for query: {query}")
134
+ # if feedback:
135
+ # # If thumbs up, store the response in memory buffer
136
+ # memory.chat_memory.add_ai_message(response)
137
+ # else:
138
+ # # If thumbs down, remove the response from memory buffer and regenerate the response
139
+ # # memory.chat_memory.messages = [msg for msg in memory.chat_memory.messages if msg.get("content") != response]
140
+ # new_query=f"{query}. Give better response"
141
+ # new_response = rag_query(new_query)
142
+ # st.markdown(new_response)
143
+ # memory.chat_memory.add_ai_message(new_response)
144
 
145
  # Streamlit interface
146
 
147
+ st.title("Chào mừng bạn đã đến với MBAL Chatbot")
148
  st.markdown("***")
149
  st.info('''
150
+ Tôi sẽ giải đáp các thắc mắc của bạn liên quan đến các sản phẩm bảo hiểm nhân thọ của MB Ageas Life''')
 
 
 
151
 
152
+ # col1, col2 = st.columns(2)
153
 
154
+ # with col1:
155
+ # chat = st.button("Chat")
156
+ # if chat:
157
+ st.switch_page("pages/chatbot.py")
158
 
159
 
160
  st.markdown("<div style='text-align:center;'></div>", unsafe_allow_html=True)