masadonline committed
Commit 7df18b4 · verified · 1 Parent(s): 5f6c08a

Update app.py

Files changed (1):
  1. app.py +19 -6
app.py CHANGED
@@ -438,7 +438,17 @@ if st.session_state.get("app_started") and st.session_state.get("rag_pipeline_re
         with st.chat_message(chat_entry["role"]):
             st.markdown(chat_entry["content"])
             if "context" in chat_entry and chat_entry["context"]:
-                with st.expander("Retrieved Context"): st.json(chat_entry["context"])
+                with st.expander("Retrieved Context"):
+                    try:
+                        # Attempt to parse as JSON only if it looks like a JSON string
+                        if isinstance(chat_entry["context"], str) and (chat_entry["context"].strip().startswith('{') or chat_entry["context"].strip().startswith('[')):
+                            st.json(json.loads(chat_entry["context"]))
+                        else:
+                            # Otherwise, display as plain text
+                            st.text(str(chat_entry["context"]))
+                    except (json.JSONDecodeError, TypeError):
+                        # Fallback for any other parsing errors
+                        st.text(str(chat_entry["context"]))
 
     user_query_manual = st.chat_input("Ask a question:")
     if user_query_manual:
@@ -461,10 +471,10 @@ if st.session_state.get("app_started") and st.session_state.get("rag_pipeline_re
         elif intent == "PRODUCT_INFO":
             raw_context_data = get_product_info(user_query_manual, st.session_state.products_data)
             context_for_llm = f"Product Information: {raw_context_data}"
-        elif intent == "GENERAL_POLICY_FAQ" or intent == "UNKNOWN":
+        elif intent == "GENERAL_POLICY_FAQ" or intent == "UNKNOWN":
             if st.session_state.faiss_index_pdfs and st.session_state.embedding_model:
                 k_val = 2 if intent == "GENERAL_POLICY_FAQ" else 1
-                retrieved_chunks = search_faiss_index(st.session_state.faiss_index_pdfs, user_query_manual,
+                retrieved_chunks = search_faiss_index(st.session_state.faiss_index_pdfs, user_query_manual,
                                                       st.session_state.embedding_model, st.session_state.indexed_pdf_chunks, k=k_val)
                 if retrieved_chunks:
                     context_for_llm = "\n\n".join(retrieved_chunks)
@@ -475,17 +485,20 @@ if st.session_state.get("app_started") and st.session_state.get("rag_pipeline_re
             else:
                 context_for_llm = "Policy/FAQ documents unavailable."
                 raw_context_data = {"message": "PDF index not ready."}
-
+
         llm_response = generate_response_groq(st.session_state.groq_client, user_query_manual, context_for_llm)
         with st.chat_message("assistant"):
             st.markdown(llm_response)
             if raw_context_data:
                 with st.expander("Retrieved Context"):
+                    # This block is the one that needs the fix
                     try:
                         if isinstance(raw_context_data, str) and (raw_context_data.strip().startswith('{') or raw_context_data.strip().startswith('[')):
                             st.json(json.loads(raw_context_data))
-                        else: st.json(raw_context_data)
-                    except (json.JSONDecodeError, TypeError): st.text(str(raw_context_data))
+                        else:
+                            st.text(str(raw_context_data))
+                    except (json.JSONDecodeError, TypeError):
+                        st.text(str(raw_context_data))
         st.session_state.manual_chat_history.append({"role": "assistant", "content": llm_response, "context": raw_context_data})
 
 # --- Twilio Bot Polling Logic ---
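
Note: the same parse-or-fallback rendering now appears twice in app.py (once when replaying manual_chat_history, once for the live assistant response). A minimal sketch of how it could be shared, assuming a hypothetical render_context_expander helper that is not part of this commit:

import json
import streamlit as st

def render_context_expander(context):
    # Show retrieved context in an expander: pretty-print JSON strings,
    # otherwise fall back to plain text (mirrors the logic added in this commit).
    with st.expander("Retrieved Context"):
        try:
            if isinstance(context, str) and context.strip().startswith(("{", "[")):
                st.json(json.loads(context))
            else:
                st.text(str(context))
        except (json.JSONDecodeError, TypeError):
            st.text(str(context))

Both call sites (chat_entry["context"] and raw_context_data) could then call render_context_expander(...) instead of repeating the try/except block.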