Jeremy Live committed on
Commit
8717362
·
1 Parent(s): 9f67f77

Revert "upgrade memoria + UX"

Browse files

This reverts commit f4fa5e0ca87199aac17eb8dc77f24ade018e6cce.

Files changed (1) hide show
  1. app.py +32 -87
app.py CHANGED
@@ -435,17 +435,13 @@ def convert_to_messages_format(chat_history):
435
 
436
  return messages
437
 
438
- async def stream_agent_response(question: str, chat_history: List[List[str]]) -> Tuple[str, Optional["go.Figure"], Optional[Dict[str, Any]]]:
439
- """Procesa la pregunta del usuario y devuelve (respuesta, figura, chart_state).
440
-
441
- chart_state schema: {"data": List[Dict], "x_col": str, "y_col": str, "title": str, "chart_type": str}
442
- """
443
  global agent # Make sure we can modify the agent's memory
444
 
445
  # Initialize response
446
  response_text = ""
447
  chart_fig = None
448
- chart_state: Optional[Dict[str, Any]] = None
449
  messages = []
450
 
451
  # Add previous chat history in the correct format for the agent
@@ -512,6 +508,7 @@ async def stream_agent_response(question: str, chat_history: List[List[str]]) ->
512
  sql_query = extract_sql_query(response_text)
513
  if sql_query and looks_like_sql(sql_query):
514
  logger.info(f"Detected SQL query: {sql_query}")
 
515
  db_connection, _ = setup_database_connection()
516
  if db_connection:
517
  query_result = execute_sql_query(sql_query, db_connection)
@@ -553,26 +550,24 @@ async def stream_agent_response(question: str, chat_history: List[List[str]]) ->
553
 
554
  # Choose x/y columns (assume first is category, second numeric)
555
  x_col = columns[0]
556
- # pick first numeric column different to x
557
- y_col = None
558
- for col in columns[1:]:
 
559
  try:
560
- pd.to_numeric(data[0][col])
561
- y_col = col
562
- break
563
  except Exception:
564
- continue
565
- if y_col:
566
- chart_fig = generate_chart(
567
- data=data,
568
- chart_type=desired_type,
569
- x=x_col,
570
- y=y_col,
571
- title=f"{y_col} por {x_col}"
572
- )
573
- if chart_fig is not None:
574
- logger.info(f"Chart generated from SQL table: type={desired_type}, x={x_col}, y={y_col}, rows={len(data)}")
575
- chart_state = {"data": data, "x_col": x_col, "y_col": y_col, "title": f"{y_col} por {x_col}", "chart_type": desired_type}
576
  except Exception as e:
577
  logger.error(f"Error generating chart: {str(e)}", exc_info=True)
578
  # Don't fail the whole request if chart generation fails
@@ -635,7 +630,6 @@ async def stream_agent_response(question: str, chat_history: List[List[str]]) ->
635
  )
636
  if chart_fig is not None:
637
  logger.info("Chart generated from second-pass SQL execution.")
638
- chart_state = {"data": data, "x_col": x_col, "y_col": y_col, "title": f"{y_col} por {x_col}", "chart_type": desired_type}
639
  else:
640
  logger.info("No DB connection on second pass; skipping.")
641
  except Exception as e:
@@ -693,7 +687,6 @@ async def stream_agent_response(question: str, chat_history: List[List[str]]) ->
693
  )
694
  if chart_fig is not None:
695
  logger.info(f"Chart generated from text fallback: type={desired_type}, items={len(data)}")
696
- chart_state = {"data": data, "x_col": "label", "y_col": "value", "title": "Distribución", "chart_type": desired_type}
697
 
698
  # Update the assistant's message with the response
699
  assistant_message["content"] = response_text
@@ -720,7 +713,7 @@ async def stream_agent_response(question: str, chat_history: List[List[str]]) ->
720
  logger.info("No chart generated for this turn.")
721
  else:
722
  logger.info("Returning a chart figure to UI.")
723
- return message_content, chart_fig, chart_state
724
 
725
  except Exception as e:
726
  error_msg = f"## ❌ Error\n\nOcurrió un error al procesar tu solicitud:\n\n```\n{str(e)}\n```"
@@ -931,16 +924,13 @@ def create_ui():
931
 
932
  # Hidden component for streaming output
933
  streaming_output_display = gr.Textbox(visible=False)
934
-
935
- # Session state to persist last chart data for follow-ups
936
- chart_state = gr.State(value=None)
937
 
938
- return demo, chatbot, chart_display, question_input, submit_button, streaming_output_display, chart_state
939
 
940
  def create_application():
941
  """Create and configure the Gradio application."""
942
  # Create the UI components
943
- demo, chatbot, chart_display, question_input, submit_button, streaming_output_display, chart_state = create_ui()
944
 
945
  def user_message(user_input: str, chat_history: List[Dict[str, str]]) -> Tuple[str, List[Dict[str, str]]]:
946
  """Add user message to chat history (messages format) and clear input."""
@@ -957,14 +947,10 @@ def create_application():
957
 
958
  return "", chat_history
959
 
960
- async def bot_response(chat_history: List[Dict[str, str]], chart_state_value: Optional[Dict[str, Any]]) -> Tuple[List[Dict[str, str]], Optional[go.Figure], Optional[Dict[str, Any]]]:
961
- """Generate bot response and optional chart figure using messages-format chat history.
962
-
963
- Also accepts and returns chart_state (dict) to support follow-up prompts like
964
- 'muestra los mismos datos en barras'.
965
- """
966
  if not chat_history:
967
- return chat_history, None, chart_state_value
968
 
969
  # Ensure last message is a user turn awaiting assistant reply
970
  last = chat_history[-1]
@@ -975,44 +961,6 @@ def create_application():
975
  question = last["content"]
976
  logger.info(f"Processing question: {question}")
977
 
978
- # Detect quick follow-up re-plot using the same data
979
- ql = question.lower()
980
- wants_reuse = any(kw in ql for kw in [
981
- "mismos datos", "estos mismos", "los mismos", "igual data", "misma data", "con los mismos"
982
- ])
983
- # Determine requested chart type if any
984
- desired_type = None
985
- if any(k in ql for k in ["barras", "bar", "columnas"]):
986
- desired_type = "bar"
987
- elif any(k in ql for k in ["línea", "linea", "line"]):
988
- desired_type = "line"
989
- elif any(k in ql for k in ["pastel", "pie", "circular"]):
990
- desired_type = "pie"
991
- elif "scatter" in ql or "dispersión" in ql or "dispersion" in ql:
992
- desired_type = "scatter"
993
- elif "histograma" in ql or "histogram" in ql:
994
- desired_type = "histogram"
995
-
996
- if wants_reuse and chart_state_value and isinstance(chart_state_value, dict) and chart_state_value.get("data"):
997
- # Re-plot with same data using requested type (or keep previous if not specified)
998
- new_type = desired_type or chart_state_value.get("chart_type", "bar")
999
- fig = generate_chart(
1000
- data=chart_state_value.get("data"),
1001
- chart_type=new_type,
1002
- x=chart_state_value.get("x_col", "label"),
1003
- y=chart_state_value.get("y_col", "value"),
1004
- title=chart_state_value.get("title", "Distribución")
1005
- )
1006
- if fig is not None:
1007
- friendly = f"He actualizado la visualización a {('gráfico de ' + new_type) if new_type != 'pie' else 'gráfico circular'} usando los mismos datos."
1008
- chat_history.append({"role": "assistant", "content": friendly})
1009
- # Save new type in state
1010
- chart_state_value = {
1011
- **chart_state_value,
1012
- "chart_type": new_type,
1013
- }
1014
- return chat_history, fig, chart_state_value
1015
-
1016
  # Convert prior messages to pair history for stream_agent_response()
1017
  pair_history: List[List[str]] = []
1018
  i = 0
@@ -1031,23 +979,20 @@ def create_application():
1031
  i += 1
1032
 
1033
  # Call the agent for this new user question
1034
- assistant_message, chart_fig, new_state = await stream_agent_response(question, pair_history)
1035
 
1036
  # Append assistant message back into messages history
1037
  chat_history.append({"role": "assistant", "content": assistant_message})
1038
 
1039
- # Update chart_state if provided by the agent path
1040
- chart_state_value = new_state if new_state is not None else chart_state_value
1041
-
1042
  logger.info("Response generation complete")
1043
- return chat_history, chart_fig, chart_state_value
1044
 
1045
  except Exception as e:
1046
  error_msg = f"## ❌ Error\n\nError al procesar la solicitud:\n\n```\n{str(e)}\n```"
1047
  logger.error(error_msg, exc_info=True)
1048
  # Ensure we add an assistant error message for the UI
1049
  chat_history.append({"role": "assistant", "content": error_msg})
1050
- return chat_history, None, chart_state_value
1051
 
1052
  # Event handlers
1053
  with demo:
@@ -1059,8 +1004,8 @@ def create_application():
1059
  queue=True
1060
  ).then(
1061
  fn=bot_response,
1062
- inputs=[chatbot, chart_state],
1063
- outputs=[chatbot, chart_display, chart_state],
1064
  api_name="ask"
1065
  )
1066
 
@@ -1072,8 +1017,8 @@ def create_application():
1072
  queue=True
1073
  ).then(
1074
  fn=bot_response,
1075
- inputs=[chatbot, chart_state],
1076
- outputs=[chatbot, chart_display, chart_state]
1077
  )
1078
 
1079
  return demo
 
435
 
436
  return messages
437
 
438
+ async def stream_agent_response(question: str, chat_history: List[List[str]]) -> Tuple[str, Optional["go.Figure"]]:
439
+ """Procesa la pregunta del usuario y devuelve la respuesta del agente con memoria de conversación."""
 
 
 
440
  global agent # Make sure we can modify the agent's memory
441
 
442
  # Initialize response
443
  response_text = ""
444
  chart_fig = None
 
445
  messages = []
446
 
447
  # Add previous chat history in the correct format for the agent
 
508
  sql_query = extract_sql_query(response_text)
509
  if sql_query and looks_like_sql(sql_query):
510
  logger.info(f"Detected SQL query: {sql_query}")
511
+ # Execute the query and update the response
512
  db_connection, _ = setup_database_connection()
513
  if db_connection:
514
  query_result = execute_sql_query(sql_query, db_connection)
 
550
 
551
  # Choose x/y columns (assume first is category, second numeric)
552
  x_col = columns[0]
553
+ y_col = columns[1]
554
+
555
+ # Coerce numeric values for y
556
+ for row in data:
557
  try:
558
+ row[y_col] = float(re.sub(r"[^0-9.\-]", "", str(row[y_col])))
 
 
559
  except Exception:
560
+ pass
561
+
562
+ chart_fig = generate_chart(
563
+ data=data,
564
+ chart_type=desired_type,
565
+ x=x_col,
566
+ y=y_col,
567
+ title=f"{y_col} por {x_col}"
568
+ )
569
+ if chart_fig is not None:
570
+ logger.info(f"Chart generated from SQL table: type={desired_type}, x={x_col}, y={y_col}, rows={len(data)}")
 
571
  except Exception as e:
572
  logger.error(f"Error generating chart: {str(e)}", exc_info=True)
573
  # Don't fail the whole request if chart generation fails
 
630
  )
631
  if chart_fig is not None:
632
  logger.info("Chart generated from second-pass SQL execution.")
 
633
  else:
634
  logger.info("No DB connection on second pass; skipping.")
635
  except Exception as e:
 
687
  )
688
  if chart_fig is not None:
689
  logger.info(f"Chart generated from text fallback: type={desired_type}, items={len(data)}")
 
690
 
691
  # Update the assistant's message with the response
692
  assistant_message["content"] = response_text
 
713
  logger.info("No chart generated for this turn.")
714
  else:
715
  logger.info("Returning a chart figure to UI.")
716
+ return message_content, chart_fig
717
 
718
  except Exception as e:
719
  error_msg = f"## ❌ Error\n\nOcurrió un error al procesar tu solicitud:\n\n```\n{str(e)}\n```"
 
924
 
925
  # Hidden component for streaming output
926
  streaming_output_display = gr.Textbox(visible=False)
 
 
 
927
 
928
+ return demo, chatbot, chart_display, question_input, submit_button, streaming_output_display
929
 
930
  def create_application():
931
  """Create and configure the Gradio application."""
932
  # Create the UI components
933
+ demo, chatbot, chart_display, question_input, submit_button, streaming_output_display = create_ui()
934
 
935
  def user_message(user_input: str, chat_history: List[Dict[str, str]]) -> Tuple[str, List[Dict[str, str]]]:
936
  """Add user message to chat history (messages format) and clear input."""
 
947
 
948
  return "", chat_history
949
 
950
+ async def bot_response(chat_history: List[Dict[str, str]]) -> Tuple[List[Dict[str, str]], Optional[go.Figure]]:
951
+ """Generate bot response for messages-format chat history and return optional chart figure."""
 
 
 
 
952
  if not chat_history:
953
+ return chat_history, None
954
 
955
  # Ensure last message is a user turn awaiting assistant reply
956
  last = chat_history[-1]
 
961
  question = last["content"]
962
  logger.info(f"Processing question: {question}")
963
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
964
  # Convert prior messages to pair history for stream_agent_response()
965
  pair_history: List[List[str]] = []
966
  i = 0
 
979
  i += 1
980
 
981
  # Call the agent for this new user question
982
+ assistant_message, chart_fig = await stream_agent_response(question, pair_history)
983
 
984
  # Append assistant message back into messages history
985
  chat_history.append({"role": "assistant", "content": assistant_message})
986
 
 
 
 
987
  logger.info("Response generation complete")
988
+ return chat_history, chart_fig
989
 
990
  except Exception as e:
991
  error_msg = f"## ❌ Error\n\nError al procesar la solicitud:\n\n```\n{str(e)}\n```"
992
  logger.error(error_msg, exc_info=True)
993
  # Ensure we add an assistant error message for the UI
994
  chat_history.append({"role": "assistant", "content": error_msg})
995
+ return chat_history, None
996
 
997
  # Event handlers
998
  with demo:
 
1004
  queue=True
1005
  ).then(
1006
  fn=bot_response,
1007
+ inputs=[chatbot],
1008
+ outputs=[chatbot, chart_display],
1009
  api_name="ask"
1010
  )
1011
 
 
1017
  queue=True
1018
  ).then(
1019
  fn=bot_response,
1020
+ inputs=[chatbot],
1021
+ outputs=[chatbot, chart_display]
1022
  )
1023
 
1024
  return demo