Jeremy Live committed on
Commit
707e65a
·
1 Parent(s): cf30c20

upgrade fix chatgpt5

Browse files
Files changed (1) hide show
  1. app.py +67 -66
app.py CHANGED
@@ -43,21 +43,21 @@ def generate_chart(data: Union[Dict, List[Dict], pd.DataFrame],
43
  y: str = None,
44
  title: str = "",
45
  x_label: str = None,
46
- y_label: str = None) -> str:
47
  """
48
- Generate a chart from data and return it as a base64 encoded image.
49
 
50
  Args:
51
  data: The data to plot (can be a list of dicts or a pandas DataFrame)
52
  chart_type: Type of chart to generate (bar, line, pie, scatter, histogram)
53
- x: Column name for x-axis
54
- y: Column name for y-axis (not needed for pie charts)
55
  title: Chart title
56
  x_label: Label for x-axis
57
  y_label: Label for y-axis
58
 
59
  Returns:
60
- Markdown string with embedded image
61
  """
62
  try:
63
  # Convert data to DataFrame if it's a list of dicts
@@ -69,7 +69,7 @@ def generate_chart(data: Union[Dict, List[Dict], pd.DataFrame],
69
  df = data
70
 
71
  if not isinstance(df, pd.DataFrame):
72
- return "Error: Data must be a dictionary, list of dictionaries, or pandas DataFrame"
73
 
74
  # Generate the appropriate chart type
75
  fig = None
@@ -78,13 +78,13 @@ def generate_chart(data: Union[Dict, List[Dict], pd.DataFrame],
78
  elif chart_type == 'line':
79
  fig = px.line(df, x=x, y=y, title=title)
80
  elif chart_type == 'pie':
81
- fig = px.pie(df, names=x, values=y, title=title)
82
  elif chart_type == 'scatter':
83
  fig = px.scatter(df, x=x, y=y, title=title)
84
  elif chart_type == 'histogram':
85
  fig = px.histogram(df, x=x, title=title)
86
  else:
87
- return "Error: Unsupported chart type. Use 'bar', 'line', 'pie', 'scatter', or 'histogram'"
88
 
89
  # Update layout
90
  fig.update_layout(
@@ -96,24 +96,13 @@ def generate_chart(data: Union[Dict, List[Dict], pd.DataFrame],
96
  height=400
97
  )
98
 
99
- # Save the figure to a temporary file
100
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
101
- fig.write_image(temp_file.name, format='png', engine='kaleido')
102
-
103
- # Read the image file and encode as base64
104
- with open(temp_file.name, 'rb') as img_file:
105
- img_base64 = base64.b64encode(img_file.read()).decode('utf-8')
106
-
107
- # Clean up the temporary file
108
- os.unlink(temp_file.name)
109
-
110
- # Return as markdown image
111
- return f'<img src="data:image/png;base64,{img_base64}" style="max-width:100%;"/>'
112
 
113
  except Exception as e:
114
  error_msg = f"Error generating chart: {str(e)}"
115
  logger.error(error_msg, exc_info=True)
116
- return f"<div style='color: red;'>{error_msg}</div>"
 
117
  logger = logging.getLogger(__name__)
118
 
119
  def check_environment():
@@ -423,12 +412,13 @@ def convert_to_messages_format(chat_history):
423
 
424
  return messages
425
 
426
- async def stream_agent_response(question: str, chat_history: List[List[str]]) -> str:
427
  """Procesa la pregunta del usuario y devuelve la respuesta del agente con memoria de conversación."""
428
  global agent # Make sure we can modify the agent's memory
429
 
430
  # Initialize response
431
  response_text = ""
 
432
  messages = []
433
 
434
  # Add previous chat history in the correct format for the agent
@@ -505,7 +495,7 @@ async def stream_agent_response(question: str, chat_history: List[List[str]]) ->
505
  # Add the query and its result to the response
506
  response_text += f"\n\n### 🔍 Resultado de la consulta:\n```sql\n{sql_query}\n```\n\n{query_result}"
507
 
508
- # Try to generate a chart if the result is tabular
509
  try:
510
  if isinstance(query_result, str) and '|' in query_result and '---' in query_result:
511
  # Convert markdown table to DataFrame
@@ -526,32 +516,37 @@ async def stream_agent_response(question: str, chat_history: List[List[str]]) ->
526
  data.append(dict(zip(columns, values)))
527
 
528
  if data and len(columns) >= 2:
529
- # Generate a chart based on the data
530
- chart_type = 'bar' # Default chart type
531
- if len(columns) == 2:
532
- # Simple bar chart for two columns
533
- chart_html = generate_chart(
534
- data=data,
535
- chart_type=chart_type,
536
- x=columns[0],
537
- y=columns[1],
538
- title=f"{columns[1]} por {columns[0]}",
539
- x_label=columns[0],
540
- y_label=columns[1]
541
- )
542
- response_text += f"\n\n### 📊 Visualización:\n{chart_html}"
543
- elif len(columns) > 2:
544
- # For multiple columns, create a line chart
545
- chart_html = generate_chart(
546
- data=data,
547
- chart_type='line',
548
- x=columns[0],
549
- y=columns[1],
550
- title=f"{', '.join(columns[1:])} por {columns[0]}",
551
- x_label=columns[0],
552
- y_label=", ".join(columns[1:])
553
- )
554
- response_text += f"\n\n### 📊 Visualización:\n{chart_html}"
 
 
 
 
 
555
  except Exception as e:
556
  logger.error(f"Error generating chart: {str(e)}", exc_info=True)
557
  # Don't fail the whole request if chart generation fails
@@ -579,15 +574,14 @@ async def stream_agent_response(question: str, chat_history: List[List[str]]) ->
579
  else:
580
  message_content = str(assistant_message)
581
 
582
- # Return the assistant's response in the format expected by the bot_response function
583
- # The bot_response function will handle updating the chat history
584
- return message_content
585
 
586
  except Exception as e:
587
  error_msg = f"## ❌ Error\n\nOcurrió un error al procesar tu solicitud:\n\n```\n{str(e)}\n```"
588
  logger.error(f"Error in stream_agent_response: {str(e)}", exc_info=True)
589
- # Ensure we return in the correct format: [(user_msg, bot_msg)]
590
- return [(None, error_msg)]
591
 
592
  # Custom CSS for the app
593
  custom_css = """
@@ -703,6 +697,13 @@ def create_ui():
703
  layout="panel" # Better layout for messages
704
  )
705
 
 
 
 
 
 
 
 
706
  # Input area
707
  with gr.Row():
708
  question_input = gr.Textbox(
@@ -788,12 +789,12 @@ def create_ui():
788
  # Hidden component for streaming output
789
  streaming_output_display = gr.Textbox(visible=False)
790
 
791
- return demo, chatbot, question_input, submit_button, streaming_output_display
792
 
793
  def create_application():
794
  """Create and configure the Gradio application."""
795
  # Create the UI components
796
- demo, chatbot, question_input, submit_button, streaming_output_display = create_ui()
797
 
798
  def user_message(user_input: str, chat_history: List[List[str]]) -> Tuple[str, List[List[str]]]:
799
  """Add user message to chat history and clear input."""
@@ -812,34 +813,34 @@ def create_application():
812
  # Clear the input
813
  return "", chat_history
814
 
815
- async def bot_response(chat_history: List[List[str]]) -> List[List[str]]:
816
- """Get bot response and update chat history."""
817
  if not chat_history:
818
- return chat_history
819
 
820
  # Get the last user message (first element of the last list if it exists)
821
  if not chat_history[-1][0] or chat_history[-1][1] is not None:
822
- return chat_history
823
 
824
  try:
825
  question = chat_history[-1][0]
826
  logger.info(f"Processing question: {question}")
827
 
828
  # Call the agent and get the response
829
- assistant_message = await stream_agent_response(question, chat_history[:-1])
830
 
831
  # Update the assistant's message in the chat history
832
  chat_history[-1] = [question, assistant_message]
833
 
834
  logger.info("Response generation complete")
835
- return chat_history
836
 
837
  except Exception as e:
838
  error_msg = f"## ❌ Error\n\nError al procesar la solicitud:\n\n```\n{str(e)}\n```"
839
  logger.error(error_msg, exc_info=True)
840
  if chat_history and len(chat_history[-1]) == 2 and chat_history[-1][1] is None:
841
  chat_history[-1] = [chat_history[-1][0], error_msg]
842
- return chat_history
843
 
844
  # Event handlers
845
  with demo:
@@ -852,7 +853,7 @@ def create_application():
852
  ).then(
853
  fn=bot_response,
854
  inputs=[chatbot],
855
- outputs=[chatbot],
856
  api_name="ask"
857
  )
858
 
@@ -865,7 +866,7 @@ def create_application():
865
  ).then(
866
  fn=bot_response,
867
  inputs=[chatbot],
868
- outputs=[chatbot]
869
  )
870
 
871
  return demo
 
43
  y: str = None,
44
  title: str = "",
45
  x_label: str = None,
46
+ y_label: str = None):
47
  """
48
+ Generate an interactive Plotly figure from data.
49
 
50
  Args:
51
  data: The data to plot (can be a list of dicts or a pandas DataFrame)
52
  chart_type: Type of chart to generate (bar, line, pie, scatter, histogram)
53
+ x: Column name for x-axis (names for pie)
54
+ y: Column name for y-axis (values for pie)
55
  title: Chart title
56
  x_label: Label for x-axis
57
  y_label: Label for y-axis
58
 
59
  Returns:
60
+ A Plotly Figure object (interactive) or None on error
61
  """
62
  try:
63
  # Convert data to DataFrame if it's a list of dicts
 
69
  df = data
70
 
71
  if not isinstance(df, pd.DataFrame):
72
+ return None
73
 
74
  # Generate the appropriate chart type
75
  fig = None
 
78
  elif chart_type == 'line':
79
  fig = px.line(df, x=x, y=y, title=title)
80
  elif chart_type == 'pie':
81
+ fig = px.pie(df, names=x, values=y, title=title, hole=0)
82
  elif chart_type == 'scatter':
83
  fig = px.scatter(df, x=x, y=y, title=title)
84
  elif chart_type == 'histogram':
85
  fig = px.histogram(df, x=x, title=title)
86
  else:
87
+ return None
88
 
89
  # Update layout
90
  fig.update_layout(
 
96
  height=400
97
  )
98
 
99
+ return fig
 
 
 
 
 
 
 
 
 
 
 
 
100
 
101
  except Exception as e:
102
  error_msg = f"Error generating chart: {str(e)}"
103
  logger.error(error_msg, exc_info=True)
104
+ return None
105
+
106
  logger = logging.getLogger(__name__)
107
 
108
  def check_environment():
 
412
 
413
  return messages
414
 
415
+ async def stream_agent_response(question: str, chat_history: List[List[str]]) -> Tuple[str, Optional["go.Figure"]]:
416
  """Procesa la pregunta del usuario y devuelve la respuesta del agente con memoria de conversación."""
417
  global agent # Make sure we can modify the agent's memory
418
 
419
  # Initialize response
420
  response_text = ""
421
+ chart_fig = None
422
  messages = []
423
 
424
  # Add previous chat history in the correct format for the agent
 
495
  # Add the query and its result to the response
496
  response_text += f"\n\n### 🔍 Resultado de la consulta:\n```sql\n{sql_query}\n```\n\n{query_result}"
497
 
498
+ # Try to generate an interactive chart if the result is tabular
499
  try:
500
  if isinstance(query_result, str) and '|' in query_result and '---' in query_result:
501
  # Convert markdown table to DataFrame
 
516
  data.append(dict(zip(columns, values)))
517
 
518
  if data and len(columns) >= 2:
519
+ # Determine chart type from user's question (supports pie chart)
520
+ q_lower = question.lower()
521
+ if any(k in q_lower for k in ["gráfico circular", "grafico circular", "pie", "pastel"]):
522
+ desired_type = 'pie'
523
+ elif any(k in q_lower for k in ["línea", "linea", "line"]):
524
+ desired_type = 'line'
525
+ elif any(k in q_lower for k in ["dispersión", "dispersion", "scatter"]):
526
+ desired_type = 'scatter'
527
+ elif any(k in q_lower for k in ["histograma", "histogram"]):
528
+ desired_type = 'histogram'
529
+ else:
530
+ desired_type = 'bar'
531
+
532
+ # Choose x/y columns (assume first is category, second numeric)
533
+ x_col = columns[0]
534
+ y_col = columns[1]
535
+
536
+ # Coerce numeric values for y
537
+ for row in data:
538
+ try:
539
+ row[y_col] = float(re.sub(r"[^0-9.\-]", "", str(row[y_col])))
540
+ except Exception:
541
+ pass
542
+
543
+ chart_fig = generate_chart(
544
+ data=data,
545
+ chart_type=desired_type,
546
+ x=x_col,
547
+ y=y_col,
548
+ title=f"{y_col} por {x_col}"
549
+ )
550
  except Exception as e:
551
  logger.error(f"Error generating chart: {str(e)}", exc_info=True)
552
  # Don't fail the whole request if chart generation fails
 
574
  else:
575
  message_content = str(assistant_message)
576
 
577
+ # Return the assistant's response and an optional interactive chart figure
578
+ return message_content, chart_fig
 
579
 
580
  except Exception as e:
581
  error_msg = f"## ❌ Error\n\nOcurrió un error al procesar tu solicitud:\n\n```\n{str(e)}\n```"
582
  logger.error(f"Error in stream_agent_response: {str(e)}", exc_info=True)
583
+ # Return error message and no chart
584
+ return error_msg, None
585
 
586
  # Custom CSS for the app
587
  custom_css = """
 
697
  layout="panel" # Better layout for messages
698
  )
699
 
700
+ # Chart display area (interactive Plotly figure)
701
+ chart_display = gr.Plot(
702
+ label="📊 Visualización",
703
+ height=420,
704
+ interactive=True,
705
+ )
706
+
707
  # Input area
708
  with gr.Row():
709
  question_input = gr.Textbox(
 
789
  # Hidden component for streaming output
790
  streaming_output_display = gr.Textbox(visible=False)
791
 
792
+ return demo, chatbot, chart_display, question_input, submit_button, streaming_output_display
793
 
794
  def create_application():
795
  """Create and configure the Gradio application."""
796
  # Create the UI components
797
+ demo, chatbot, chart_display, question_input, submit_button, streaming_output_display = create_ui()
798
 
799
  def user_message(user_input: str, chat_history: List[List[str]]) -> Tuple[str, List[List[str]]]:
800
  """Add user message to chat history and clear input."""
 
813
  # Clear the input
814
  return "", chat_history
815
 
816
+ async def bot_response(chat_history: List[List[str]]) -> Tuple[List[List[str]], Optional[go.Figure]]:
817
+ """Get bot response and update chat history and return optional chart figure."""
818
  if not chat_history:
819
+ return chat_history, None
820
 
821
  # Get the last user message (first element of the last list if it exists)
822
  if not chat_history[-1][0] or chat_history[-1][1] is not None:
823
+ return chat_history, None
824
 
825
  try:
826
  question = chat_history[-1][0]
827
  logger.info(f"Processing question: {question}")
828
 
829
  # Call the agent and get the response
830
+ assistant_message, chart_fig = await stream_agent_response(question, chat_history[:-1])
831
 
832
  # Update the assistant's message in the chat history
833
  chat_history[-1] = [question, assistant_message]
834
 
835
  logger.info("Response generation complete")
836
+ return chat_history, chart_fig
837
 
838
  except Exception as e:
839
  error_msg = f"## ❌ Error\n\nError al procesar la solicitud:\n\n```\n{str(e)}\n```"
840
  logger.error(error_msg, exc_info=True)
841
  if chat_history and len(chat_history[-1]) == 2 and chat_history[-1][1] is None:
842
  chat_history[-1] = [chat_history[-1][0], error_msg]
843
+ return chat_history, None
844
 
845
  # Event handlers
846
  with demo:
 
853
  ).then(
854
  fn=bot_response,
855
  inputs=[chatbot],
856
+ outputs=[chatbot, chart_display],
857
  api_name="ask"
858
  )
859
 
 
866
  ).then(
867
  fn=bot_response,
868
  inputs=[chatbot],
869
+ outputs=[chatbot, chart_display]
870
  )
871
 
872
  return demo