MilanM committed
Commit c811446 · verified · 1 Parent(s): 163e457

Update neo_sages4.py

Files changed (1)
  1. neo_sages4.py +23 -7
neo_sages4.py CHANGED
@@ -268,13 +268,13 @@ def fetch_response(user_input, milvus_client, emb, vector_index_properties, vect
     )
     prompt_data = prompt_data.replace("__grounding__", grounding)
 
-    # Add debug information to column 1 if enabled
-    if genparam.INPUT_DEBUG_VIEW == 1:
-        with col1: # Access first column
-            bot_name = genparam.BOT_2_NAME if chat_history == st.session_state.chat_history_2 else genparam.BOT_3_NAME
-            bot_avatar = genparam.BOT_2_AVATAR if chat_history == st.session_state.chat_history_2 else genparam.BOT_3_AVATAR
-            st.markdown(f"**{bot_avatar} {bot_name} Prompt Data:**")
-            st.code(prompt_data, language="text")
+    # # Add debug information to column 1 if enabled
+    # if genparam.INPUT_DEBUG_VIEW == 1:
+    #     with col1: # Access first column
+    #         bot_name = genparam.BOT_2_NAME if chat_history == st.session_state.chat_history_2 else genparam.BOT_3_NAME
+    #         bot_avatar = genparam.BOT_2_AVATAR if chat_history == st.session_state.chat_history_2 else genparam.BOT_3_AVATAR
+    #         st.markdown(f"**{bot_avatar} {bot_name} Prompt Data:**")
+    #         st.code(prompt_data, language="text")
 
     # Continue with normal processing for columns 2 and 3
     watsonx_llm = ModelInference(
@@ -503,6 +503,14 @@ def main():
         system_prompt,
         st.session_state.chat_history_2
     )
+
+    if genparam.INPUT_DEBUG_VIEW == 1:
+        with col1: # Access first column
+            bot_name = genparam.BOT_2_NAME if chat_history == st.session_state.chat_history_2 else genparam.BOT_3_NAME
+            bot_avatar = genparam.BOT_2_AVATAR if chat_history == st.session_state.chat_history_2 else genparam.BOT_3_AVATAR
+            st.markdown(f"**{bot_avatar} {bot_name} Prompt Data:**")
+            st.code(prompt_data, language="text")
+
     st.session_state.chat_history_2.append({"role": genparam.BOT_2_NAME, "content": response, "avatar": genparam.BOT_2_AVATAR})
     st.markdown("</div></div>", unsafe_allow_html=True)
 
@@ -534,6 +542,14 @@
         system_prompt,
         st.session_state.chat_history_3
     )
+
+    if genparam.INPUT_DEBUG_VIEW == 1:
+        with col1: # Access first column
+            bot_name = genparam.BOT_2_NAME if chat_history == st.session_state.chat_history_2 else genparam.BOT_3_NAME
+            bot_avatar = genparam.BOT_2_AVATAR if chat_history == st.session_state.chat_history_2 else genparam.BOT_3_AVATAR
+            st.markdown(f"**{bot_avatar} {bot_name} Prompt Data:**")
+            st.code(prompt_data, language="text")
+
     st.session_state.chat_history_3.append({"role": genparam.BOT_3_NAME, "content": response, "avatar": genparam.BOT_3_AVATAR})
     st.markdown("</div></div>", unsafe_allow_html=True)
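
The relocated block follows a simple pattern: gate a Streamlit debug panel on a genparam flag and render the assembled prompt into the first column with st.markdown and st.code. Below is a minimal, self-contained sketch of that pattern; the local genparam stand-in, the three-column layout, and the placeholder prompt_data value are assumptions for illustration only, not code from this repository.

# Minimal sketch of the config-gated debug panel pattern used in this commit.
# The genparam stand-in and the sample prompt_data below are illustrative only.
import streamlit as st

class genparam:                    # stand-in for the real genparam module
    INPUT_DEBUG_VIEW = 1           # 1 = show prompt data in column 1
    BOT_2_NAME = "Bot 2"
    BOT_2_AVATAR = "🤖"

col1, col2, col3 = st.columns(3)   # column 1 is reserved for debug output

# Placeholder for the prompt text that fetch_response() would assemble.
prompt_data = "system: ...\nuser: example question"

# Same gating as the added code: render the panel only when the flag is set.
if genparam.INPUT_DEBUG_VIEW == 1:
    with col1:
        st.markdown(f"**{genparam.BOT_2_AVATAR} {genparam.BOT_2_NAME} Prompt Data:**")
        st.code(prompt_data, language="text")

Run as a normal Streamlit app (streamlit run app.py, where app.py is this sketch); setting INPUT_DEBUG_VIEW to 0 hides the panel without affecting the other columns.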