Siyuan0730 commited on
Commit
a79769a
·
1 Parent(s): 71782c8

小迭代,user_question

Browse files
Files changed (1) hide show
  1. app.py +19 -20
app.py CHANGED
@@ -336,26 +336,25 @@ def app():
336
  st.markdown(message["content"][0])
337
 
338
  #这里的session.state就是保存了这个对话会话的一些基本信息和设置
339
- if user_question:
340
- retrieved_chunks_for_user = searchVDB(user_question, st.session_state.embeddings_df, st.session_state.faiss_index)
341
- #retrieved_chunks_for_user = []
342
- prompt = decorate_user_question(user_question, retrieved_chunks_for_user)
343
- st.session_state.messages.append({"role": "user", "content": [user_question, prompt]})
344
- with st.chat_message("user"):
345
- st.markdown(user_question)
346
- # Display assistant response in chat message container
347
- with st.chat_message("assistant"):
348
- message_placeholder = st.empty()
349
- full_response = ""
350
- for response in openai.ChatCompletion.create(
351
- model=st.session_state["openai_model"],
352
- messages=[{"role": m["role"], "content": m["content"][1]} for m in st.session_state.messages],
353
- stream=True,
354
- ):
355
- full_response += response.choices[0].delta.get("content", "")
356
- message_placeholder.markdown(full_response + "▌")
357
- message_placeholder.markdown(full_response)
358
- st.session_state.messages.append({"role": "assistant", "content": [full_response,1]})
359
 
360
 
361
 
 
336
  st.markdown(message["content"][0])
337
 
338
  #这里的session.state就是保存了这个对话会话的一些基本信息和设置
339
+ retrieved_chunks_for_user = searchVDB(user_question, st.session_state.embeddings_df, st.session_state.faiss_index)
340
+ #retrieved_chunks_for_user = []
341
+ prompt = decorate_user_question(user_question, retrieved_chunks_for_user)
342
+ st.session_state.messages.append({"role": "user", "content": [user_question, prompt]})
343
+ with st.chat_message("user"):
344
+ st.markdown(user_question)
345
+ # Display assistant response in chat message container
346
+ with st.chat_message("assistant"):
347
+ message_placeholder = st.empty()
348
+ full_response = ""
349
+ for response in openai.ChatCompletion.create(
350
+ model=st.session_state["openai_model"],
351
+ messages=[{"role": m["role"], "content": m["content"][1]} for m in st.session_state.messages],
352
+ stream=True,
353
+ ):
354
+ full_response += response.choices[0].delta.get("content", "")
355
+ message_placeholder.markdown(full_response + "▌")
356
+ message_placeholder.markdown(full_response)
357
+ st.session_state.messages.append({"role": "assistant", "content": [full_response,1]})
 
358
 
359
 
360