MilanM committed · Commit 5f3ffea · verified · 1 Parent(s): 41f2f58

Update neo_sages.py

Files changed (1)
  1. neo_sages.py +80 -32
neo_sages.py CHANGED

@@ -217,6 +217,7 @@ def generate_response(watsonx_llm, prompt_data, params):
         yield chunk
 
 def fetch_response(user_input, milvus_client, emb, vector_index_properties, vector_store_schema, system_prompt, chat_history):
+    # Get grounding documents
     grounding = proximity_search(
         question=user_input,
         milvus_client=milvus_client,
@@ -224,17 +225,54 @@ def fetch_response(user_input, milvus_client, emb, vector_index_properties, vect
         vector_index_properties=vector_index_properties,
         vector_store_schema=vector_store_schema
     )
-    prompt = prepare_prompt(user_input, chat_history)
 
-    prompt_data = apply_prompt_syntax(
-        prompt,
-        system_prompt,
-        get_active_prompt_template(),
-        genparam.BAKE_IN_PROMPT_SYNTAX
-    )
-
-    prompt_data = prompt_data.replace("__grounding__", grounding)
-
+    # Special handling for PATH-er B. (first column)
+    if chat_history == st.session_state.chat_history_1:
+        # Display user question first
+        with st.chat_message("user", avatar=genparam.USER_AVATAR):
+            st.markdown(user_input)
+
+        # Parse and display each document from the grounding
+        documents = grounding.split("\n\n")[2:]  # Skip the count line and first newline
+        for doc in documents:
+            if doc.strip():  # Only process non-empty strings
+                parts = doc.split("\n")
+                doc_name = parts[0].replace("Document: ", "")
+                content = parts[1].replace("Content: ", "")
+
+                # Display document with delay
+                time.sleep(0.5)
+                st.markdown(f"**{doc_name}**")
+                st.code(content)
+
+        # Store in chat history
+        return grounding
+
+    # For MOD-ther S. (second column)
+    elif chat_history == st.session_state.chat_history_2:
+        prompt = prepare_prompt(user_input, chat_history)
+        prompt_data = apply_prompt_syntax(
+            prompt,
+            system_prompt,
+            get_active_prompt_template(),
+            genparam.BAKE_IN_PROMPT_SYNTAX
+        )
+        prompt_data = prompt_data.replace("__grounding__", grounding)
+
+    # For SYS-ter V. (third column)
+    else:
+        # Get chat history from MOD-ther S.
+        mod_ther_history = st.session_state.chat_history_2
+        prompt = prepare_prompt(user_input, mod_ther_history)
+        prompt_data = apply_prompt_syntax(
+            prompt,
+            system_prompt,
+            get_active_prompt_template(),
+            genparam.BAKE_IN_PROMPT_SYNTAX
+        )
+        prompt_data = prompt_data.replace("__grounding__", grounding)
+
+    # Continue with normal processing for columns 2 and 3
     watsonx_llm = ModelInference(
         api_client=client,
         model_id=get_active_model(),
@@ -264,9 +302,11 @@ def fetch_response(user_input, milvus_client, emb, vector_index_properties, vect
     with st.chat_message(bot_name, avatar=bot_avatar):
         if genparam.TOKEN_CAPTURE_ENABLED:
             st.code(prompt_data, line_numbers=True, wrap_lines=True)
-        stream = generate_response(watsonx_llm, prompt_data, params)
-        response = st.write_stream(stream)
-        # response = st.write_stream(stream, f"<span style='color: {color};'>", unsafe_allow_html=True)
+        if chat_history != st.session_state.chat_history_1:  # Only generate responses for columns 2 and 3
+            stream = generate_response(watsonx_llm, prompt_data, params)
+            response = st.write_stream(stream)
+        else:
+            response = grounding  # For column 1, we already displayed the content
 
         if genparam.TOKEN_CAPTURE_ENABLED:
             chat_number = len(chat_history) // 2
@@ -324,23 +364,35 @@ def main():
     # Create three columns
     col1, col2, col3 = st.columns(3)
 
+    ###-------------START
+    # First column - PATH-er B. (Document Display)
     with col1:
         st.markdown("<div class='chat-container'>", unsafe_allow_html=True)
         st.subheader(f"{genparam.BOT_1_AVATAR} {genparam.BOT_1_NAME}")
         st.markdown("<div class='chat-messages'>", unsafe_allow_html=True)
 
-        # Display only bot responses
+        # Display previous messages
         for message in st.session_state.chat_history_1:
-            if message["role"] != "user":  # Only show bot messages
-                with st.chat_message(message["role"], avatar=genparam.BOT_1_AVATAR):
+            if message["role"] == "user":
+                with st.chat_message(message["role"], avatar=genparam.USER_AVATAR):
                    st.markdown(message['content'])
+            else:
+                # Parse and display stored documents
+                documents = message['content'].split("\n\n")[2:]  # Skip count line
+                for doc in documents:
+                    if doc.strip():
+                        parts = doc.split("\n")
+                        doc_name = parts[0].replace("Document: ", "")
+                        content = parts[1].replace("Content: ", "")
+                        st.markdown(f"**{doc_name}**")
+                        st.code(content)
 
-        # Add user message to history but don't display
+        # Add user message and get new response
        st.session_state.chat_history_1.append({"role": "user", "content": user_input, "avatar": genparam.USER_AVATAR})
        milvus_client, emb, vector_index_properties, vector_store_schema = setup_vector_index(
            client,
            wml_credentials,
-           get_active_vector_index()
+           st.secrets["vector_index_id_1"]  # Use first vector index
        )
        system_prompt = genparam.BOT_1_PROMPT
 
@@ -355,24 +407,23 @@ def main():
        )
        st.session_state.chat_history_1.append({"role": genparam.BOT_1_NAME, "content": response, "avatar": genparam.BOT_1_AVATAR})
        st.markdown("</div></div>", unsafe_allow_html=True)
-
+
+    # Second column - MOD-ther S. (Uses documents from first vector index)
     with col2:
        st.markdown("<div class='chat-container'>", unsafe_allow_html=True)
        st.subheader(f"{genparam.BOT_2_AVATAR} {genparam.BOT_2_NAME}")
        st.markdown("<div class='chat-messages'>", unsafe_allow_html=True)
 
-        # Display only bot responses
        for message in st.session_state.chat_history_2:
-            if message["role"] != "user":  # Only show bot messages
+            if message["role"] != "user":
                with st.chat_message(message["role"], avatar=genparam.BOT_2_AVATAR):
                    st.markdown(message['content'])
 
-        # Add user message to history but don't display
        st.session_state.chat_history_2.append({"role": "user", "content": user_input, "avatar": genparam.USER_AVATAR})
        milvus_client, emb, vector_index_properties, vector_store_schema = setup_vector_index(
            client,
            wml_credentials,
-           get_active_vector_index()
+           st.secrets["vector_index_id_1"]  # Use first vector index
        )
        system_prompt = genparam.BOT_2_PROMPT
 
@@ -388,23 +439,22 @@ def main():
        st.session_state.chat_history_2.append({"role": genparam.BOT_2_NAME, "content": response, "avatar": genparam.BOT_2_AVATAR})
        st.markdown("</div></div>", unsafe_allow_html=True)
 
+    # Third column - SYS-ter V. (Uses second vector index and chat history from second column)
     with col3:
        st.markdown("<div class='chat-container'>", unsafe_allow_html=True)
        st.subheader(f"{genparam.BOT_3_AVATAR} {genparam.BOT_3_NAME}")
        st.markdown("<div class='chat-messages'>", unsafe_allow_html=True)
 
-        # Display only bot responses
        for message in st.session_state.chat_history_3:
-            if message["role"] != "user":  # Only show bot messages
+            if message["role"] != "user":
                with st.chat_message(message["role"], avatar=genparam.BOT_3_AVATAR):
                    st.markdown(message['content'])
 
-        # Add user message to history but don't display
        st.session_state.chat_history_3.append({"role": "user", "content": user_input, "avatar": genparam.USER_AVATAR})
        milvus_client, emb, vector_index_properties, vector_store_schema = setup_vector_index(
            client,
            wml_credentials,
-           st.secrets["vector_index_id_2"]
+           st.secrets["vector_index_id_2"]  # Use second vector index
        )
        system_prompt = genparam.BOT_3_PROMPT
 
@@ -419,11 +469,9 @@ def main():
        )
        st.session_state.chat_history_3.append({"role": genparam.BOT_3_NAME, "content": response, "avatar": genparam.BOT_3_AVATAR})
        st.markdown("</div></div>", unsafe_allow_html=True)
+    ###-------------END
 
     # Update sidebar with new question
-    st.sidebar.markdown("---")  # Add divider
+    st.sidebar.markdown("---")
     st.sidebar.markdown("**Latest Question:**")
-    st.sidebar.markdown(f"_{user_input}_")
-
-if __name__ == "__main__":
-    main()
+    st.sidebar.markdown(f"_{user_input}_")
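
The new PATH-er B. branch assumes `proximity_search` returns one flat string: two `"\n\n"`-separated preamble chunks (hence the `[2:]` slice), followed by one `Document: <name>\nContent: <text>` block per hit. Here is a minimal sketch of that parsing under an assumed grounding layout; the sample string is made up, the real format is whatever `proximity_search` in neo_sages.py emits, and `time` must already be imported at the top of the file for the `time.sleep` call to work:

```python
import time

# Hypothetical grounding string -- the exact layout produced by
# proximity_search is an assumption. The first two "\n\n"-separated
# chunks are preamble, which is why the diff slices with [2:].
grounding = (
    "Retrieved 2 documents\n\n"
    "Sources:\n\n"
    "Document: handbook.pdf\nContent: Benefits enrollment opens in May.\n\n"
    "Document: policy.pdf\nContent: Remote work requires manager approval."
)

documents = grounding.split("\n\n")[2:]  # Skip the count line and header
for doc in documents:
    if doc.strip():
        parts = doc.split("\n")
        doc_name = parts[0].replace("Document: ", "")
        content = parts[1].replace("Content: ", "")
        time.sleep(0.5)  # Staggered display, as in the diff
        print(f"{doc_name}: {content}")
```

Note that the loop only reads `parts[1]`, so any `Content:` value that itself contains a newline would be silently truncated.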
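
One caveat with the new dispatch in `fetch_response`: `chat_history == st.session_state.chat_history_1` compares list contents, so two histories that happen to hold identical messages satisfy the first branch even when the caller passed a different column's list. Comparing object identity with `is` avoids that; a two-line illustration:

```python
# Two distinct but equal histories: "==" cannot tell them apart, "is" can.
chat_history_1: list = []
chat_history_2: list = []

chat_history = chat_history_2           # caller passed the second column's list
print(chat_history == chat_history_1)   # True  -- equal contents, wrong branch
print(chat_history is chat_history_1)   # False -- different objects
```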