Update app.py
app.py CHANGED
@@ -1,3 +1,6 @@
+import time
+print('1')
+print(time.time())
 
 #__import__('pysqlite3')
 #import sys
@@ -66,7 +69,7 @@ print(len(docs))
 def load_model():
     #embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-large",model_kwargs={"device":DEVICE})
     embeddings = HuggingFaceInstructEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",model_kwargs={"device":DEVICE})
-
+    print(DEVICE)
 
     text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=256)
     texts = text_splitter.split_documents(docs)
@@ -134,7 +137,7 @@ def load_model():
     qa_chain = RetrievalQA.from_chain_type(
         llm=llm,
         chain_type="stuff",
-        retriever=db.as_retriever(search_kwargs={"k":
+        retriever=db.as_retriever(search_kwargs={"k": 5}),
         return_source_documents=True,
         chain_type_kwargs={"prompt": prompt,
                            "verbose": False,
@@ -181,6 +184,8 @@ def get_message_history():
 
 qa_chain = load_model()
 
+print('2')
+print(time.time())
 if prompt := st.chat_input("How can I help you today?"):
     st.session_state.messages.append({"role": "user", "content": prompt})
     with st.chat_message("user"):
@@ -192,7 +197,11 @@ if prompt := st.chat_input("How can I help you today?"):
     logger.info(f"{user_session_id} Message History: {message_history}")
     # question = st.text_input("Ask your question", placeholder="Try to include context in your question",
     #                          disabled=not uploaded_file,)
+    print('3')
+    print(time.time())
     result = qa_chain(prompt)
+    print('4')
+    print(time.time())
     sound_file = BytesIO()
     tts = gTTS(result['result'], lang='en')
     tts.write_to_fp(sound_file)
@@ -205,7 +214,8 @@
     #st.write(repr(result['source_documents'][0].metadata['page']))
     #st.write(repr(result['source_documents'][0]))
 
-
+    print('5')
+    print(time.time())
     ### READ IN PDF
     page_number = int(result['source_documents'][0].metadata['page'])
     doc = fitz.open(str(result['source_documents'][0].metadata['source']))
@@ -296,18 +306,18 @@
     # """
     # message_placeholder.markdown(html_string, unsafe_allow_html=True) # will display a st.audio with the sound you specified in the "src" of the html_string and autoplay it
     # #time.sleep(5) # wait for 2 seconds to finish the playing of the audio
-    response_sentiment = st.radio(
-        "How was the Assistant's response?",
-        ["π", "π", "π’"],
-        key="response_sentiment",
-        disabled=st.session_state.disabled,
-        horizontal=True,
-        index=1,
-        help="This helps us improve the model.",
-        # hide the radio button on click
-        on_change=on_select(),
-    )
-    logger.info(f"{user_session_id} | {full_response} | {response_sentiment}")
+    #response_sentiment = st.radio(
+    #    "How was the Assistant's response?",
+    #    ["π", "π", "π’"],
+    #    key="response_sentiment",
+    #    disabled=st.session_state.disabled,
+    #    horizontal=True,
+    #    index=1,
+    #    help="This helps us improve the model.",
+    #    # hide the radio button on click
+    #    on_change=on_select(),
+    #)
+    #logger.info(f"{user_session_id} | {full_response} | {response_sentiment}")
 
     # # Logging to FastAPI Endpoint
     # headers = {"Authorization": f"Bearer {secret_token}"}
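A note on the retriever change: search_kwargs={"k": 5} caps how many similar chunks the vector store returns per query, which the "stuff" chain type then packs into the prompt. A minimal sketch of that behavior, assuming db is the LangChain vector store built in load_model(); the query string is made up for illustration:

# Sketch, not part of the commit: `db` is assumed to be the existing
# LangChain vector store; the query is illustrative.
retriever = db.as_retriever(search_kwargs={"k": 5})
docs = retriever.get_relevant_documents("How do I reset the device?")
print(len(docs))  # at most 5 chunks are handed to the "stuff" chain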
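The numbered checkpoints (print('1') through print('5'), each paired with print(time.time())) bracket startup, load_model(), the qa_chain call, and the text-to-speech/PDF steps. A minimal sketch of the same timing pattern as a reusable helper; this is an illustrative alternative using only the standard library, not code from the commit:

import time
from contextlib import contextmanager

@contextmanager
def timed(stage):
    # Print elapsed wall-clock time for one stage, replacing a pair of
    # print('N') / print(time.time()) checkpoints with a single named reading.
    start = time.perf_counter()
    try:
        yield
    finally:
        print(f"{stage} took {time.perf_counter() - start:.2f}s")

# Usage, mirroring the stages instrumented in the diff:
# with timed("qa_chain"):
#     result = qa_chain(prompt)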