Jeremy Live committed on
Commit 3decc02 · 1 Parent(s): 707e65a
Files changed (1)
  1. app.py +56 -12
app.py CHANGED
@@ -441,22 +441,19 @@ async def stream_agent_response(question: str, chat_history: List[List[str]]) ->
             f"3. El modelo de lenguaje esté disponible\n\n"
             f"Error: {agent_error}"
         )
-        assistant_message = {"role": "assistant", "content": error_msg}
-        return [assistant_message]
+        return error_msg, None
 
     # Update the agent's memory with the full conversation history
     try:
-        # Clear existing memory
+        # Rebuild agent memory from chat history pairs
         if hasattr(agent, 'memory') and agent.memory is not None:
             agent.memory.clear()
-
-        # Add all messages to memory
-        for i in range(0, len(messages)-1, 2):  # Process in pairs (user, assistant)
-            if i+1 < len(messages):
-                agent.memory.save_context(
-                    {"input": messages[i].content},
-                    {"output": messages[i+1].content}
-                )
+            for i in range(0, len(messages)-1, 2):  # (user, assistant)
+                if i+1 < len(messages):
+                    agent.memory.save_context(
+                        {"input": messages[i].content},
+                        {"output": messages[i+1].content}
+                    )
     except Exception as e:
         logger.error(f"Error updating agent memory: {str(e)}", exc_info=True)
 
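Note on the memory rebuild in the hunk above: the loop assumes `messages` is a flat, chronologically ordered list of LangChain message objects alternating user/assistant, and it replays each (user, assistant) pair into the agent's memory. Below is a minimal standalone sketch of the same pairing logic, assuming the memory is LangChain's ConversationBufferMemory (the diff only shows clear() and save_context(), so the concrete memory class is an assumption) and using plain strings instead of message objects:

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
memory.clear()

# Hypothetical prior turns, flattened as [user, assistant, user, assistant, ...]
turns = ["Hola", "Hola, ¿en qué puedo ayudarte?", "Ventas por región", "Norte: 120, Sur: 80"]

# Same pairing logic as the diff: walk the list two at a time (user, assistant)
for i in range(0, len(turns) - 1, 2):
    if i + 1 < len(turns):
        memory.save_context({"input": turns[i]}, {"output": turns[i + 1]})

print(memory.load_memory_variables({})["chat_history"])  # four BaseMessage objects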
@@ -467,7 +464,8 @@ async def stream_agent_response(question: str, chat_history: List[List[str]]) ->
 
     # Execute the agent with proper error handling
     try:
-        response = await agent.ainvoke({"input": question, "chat_history": chat_history})
+        # Let the agent use its memory; don't pass raw chat_history
+        response = await agent.ainvoke({"input": question})
         logger.info(f"Agent response type: {type(response)}")
         logger.info(f"Agent response content: {str(response)[:500]}...")
 
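Why chat_history is dropped from the ainvoke() call: once the executor owns a memory keyed as "chat_history", it loads that variable itself and injects it into the prompt, so the caller only supplies the new input. A rough sketch of that mechanism (the prompt text and variable names below are illustrative, not the actual prompt in app.py), simulating the merge an AgentExecutor with memory performs on each call:

from langchain.memory import ConversationBufferMemory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
memory.save_context({"input": "Ventas por región"}, {"output": "Norte: 120, Sur: 80"})

prompt = ChatPromptTemplate.from_messages([
    ("system", "Eres un asistente de datos."),          # illustrative system prompt
    MessagesPlaceholder(variable_name="chat_history"),   # filled from memory, not by the caller
    ("human", "{input}"),
])

# Roughly what an executor with this memory does on every ainvoke({"input": ...}):
variables = {"input": "Ahora un gráfico de pastel", **memory.load_memory_variables({})}
print(prompt.format_messages(**variables))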
@@ -554,6 +552,52 @@ async def stream_agent_response(question: str, chat_history: List[List[str]]) ->
     else:
         response_text += "\n\n⚠️ No se pudo conectar a la base de datos para ejecutar la consulta."
 
+    # Fallback: if user asked for a chart (e.g., pie) and we didn't get SQL or chart yet,
+    # parse the most recent assistant text for lines like "LABEL: NUMBER" (bulleted or plain).
+    if chart_fig is None:
+        q_lower = question.lower()
+        wants_chart = any(k in q_lower for k in ["gráfico", "grafico", "chart", "graph", "pastel", "pie"])
+        if wants_chart and chat_history:
+            # Find the most recent assistant message with usable numeric pairs
+            candidate_text = ""
+            for pair in reversed(chat_history):
+                if len(pair) >= 2 and isinstance(pair[1], str) and pair[1].strip():
+                    candidate_text = pair[1]
+                    break
+            if candidate_text:
+                raw_lines = candidate_text.split('\n')
+                # Normalize lines: strip bullets and markdown symbols
+                norm_lines = []
+                for l in raw_lines:
+                    s = l.strip()
+                    if not s:
+                        continue
+                    s = s.lstrip("•*-\t ")
+                    # Remove surrounding markdown emphasis from labels later
+                    norm_lines.append(s)
+                data = []
+                for l in norm_lines:
+                    # Accept patterns like "**LABEL**: 123" or "LABEL: 1,234"
+                    m = re.match(r"^(.+?):\s*([0-9][0-9.,]*)$", l)
+                    if m:
+                        label = m.group(1).strip()
+                        # Strip common markdown emphasis
+                        label = re.sub(r"[*_`]+", "", label).strip()
+                        try:
+                            val = float(m.group(2).replace(',', ''))
+                        except Exception:
+                            continue
+                        data.append({"label": label, "value": val})
+                if len(data) >= 2:
+                    desired_type = 'pie' if any(k in q_lower for k in ["gráfico circular", "grafico circular", "pie", "pastel"]) else 'bar'
+                    chart_fig = generate_chart(
+                        data=data,
+                        chart_type=desired_type,
+                        x="label",
+                        y="value",
+                        title="Distribución"
+                    )
+
     # Update the assistant's message with the response
     assistant_message["content"] = response_text
 
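The core of the chart fallback above is the "LABEL: NUMBER" line parser. Pulled out as a standalone helper (hypothetical name, not part of app.py) it is easy to sanity-check against typical assistant output:

import re

# Mirrors the fallback in app.py: strip bullets/markdown, match "LABEL: NUMBER",
# drop thousands separators, and return chartable {"label", "value"} pairs.
def parse_label_value_lines(text: str) -> list[dict]:
    data = []
    for line in text.split("\n"):
        s = line.strip().lstrip("•*-\t ")
        if not s:
            continue
        m = re.match(r"^(.+?):\s*([0-9][0-9.,]*)$", s)
        if not m:
            continue
        label = re.sub(r"[*_`]+", "", m.group(1)).strip()
        try:
            value = float(m.group(2).replace(",", ""))
        except ValueError:
            continue
        data.append({"label": label, "value": value})
    return data

sample = "Distribución por región:\n- **Norte**: 1,200\n- **Sur**: 800\n- **Centro**: 450"
print(parse_label_value_lines(sample))
# [{'label': 'Norte', 'value': 1200.0}, {'label': 'Sur', 'value': 800.0}, {'label': 'Centro', 'value': 450.0}]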
 
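generate_chart() is defined elsewhere in app.py and is not part of this diff. Judging only from the call site above (data as a list of {"label", "value"} dicts, plus chart_type, x, y and title), a plausible Plotly Express sketch of such a helper could look like the following; this is purely illustrative, not the repository's implementation:

import pandas as pd
import plotly.express as px

def generate_chart(data, chart_type="bar", x="label", y="value", title=""):
    # Build a small DataFrame from the parsed {"label", "value"} records.
    df = pd.DataFrame(data)
    if chart_type == "pie":
        # Pie charts map the x column to slice names and the y column to slice sizes.
        return px.pie(df, names=x, values=y, title=title)
    return px.bar(df, x=x, y=y, title=title)

fig = generate_chart(
    data=[{"label": "Norte", "value": 1200}, {"label": "Sur", "value": 800}],
    chart_type="pie",
    title="Distribución",
)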