SpencerCPurdy committed
Commit f923db1 · verified · 1 Parent(s): 5291705

Update app.py

Files changed (1)
  1. app.py +79 -96
app.py CHANGED
@@ -105,7 +105,7 @@ class Task:
         created_at (datetime): The timestamp when the task was created.
         completed_at (Optional[datetime]): The timestamp when the task was completed or failed.
         performance_metrics (Dict[str, float]): Metrics related to the task's performance, like execution time.
-        metadata (Dict[str, Any]): A dictionary for storing arbitrary data related to the task, such as the suggested agent role.
+        context (str): A string containing the results of previous tasks to provide context for the current one.
     """
     id: str
     description: str
@@ -117,7 +117,7 @@ class Task:
     created_at: datetime = field(default_factory=datetime.now)
     completed_at: Optional[datetime] = None
     performance_metrics: Dict[str, float] = field(default_factory=dict)
-    metadata: Dict[str, Any] = field(default_factory=dict)
+    context: str = ""


 # ==============================================================================
@@ -129,8 +129,8 @@ class BaseAgent:
     An abstract base class for all AI agents in the system.

     This class provides the fundamental structure for agents, including task
-    processing logic, memory management, and collaboration protocols. Each
-    specialized agent extends this class to implement role-specific behaviors.
+    processing logic. Each specialized agent extends this class to implement
+    role-specific behaviors.
     """
     def __init__(self, name: str, role: AgentRole, llm: Optional[ChatOpenAI] = None):
         """
@@ -146,7 +146,6 @@ class BaseAgent:
         self.role = role
         self.llm = llm
         self.current_task: Optional[Task] = None
-        self.completed_tasks: List[Task] = []

     async def process_task(self, task: Task) -> Task:
         """
@@ -175,7 +174,6 @@

             task.result = result
             task.status = TaskStatus.COMPLETED
-            task.completed_at = datetime.now()
             task.confidence = self._calculate_confidence(result)

         except Exception as e:
@@ -186,11 +184,9 @@

         finally:
             # Record performance metrics regardless of success or failure
-            if task.status != TaskStatus.IN_PROGRESS:
-                task.completed_at = task.completed_at or datetime.now()
-                execution_time = (task.completed_at - start_time).total_seconds()
-                task.performance_metrics['execution_time'] = execution_time
-                self.completed_tasks.append(task)
+            task.completed_at = datetime.now()
+            execution_time = (task.completed_at - start_time).total_seconds()
+            task.performance_metrics['execution_time'] = execution_time
             self.current_task = None

         return task
@@ -250,8 +246,8 @@ class AnalystAgent(BaseAgent):

     async def _execute_task(self, task: Task) -> Any:
         prompt = ChatPromptTemplate.from_messages([
-            SystemMessage(content="You are an expert analyst. Your role is to dissect information, identify underlying patterns, trends, and correlations, and present actionable insights. Your analysis must be logical and evidence-based."),
-            HumanMessage(content=f"Please analyze the following information and provide a detailed breakdown: {task.description}")
+            SystemMessage(content="You are an expert analyst. Your role is to dissect information, identify underlying patterns, trends, and correlations, and present actionable insights. Use the provided context as the basis for your analysis."),
+            HumanMessage(content=f"Based on the following context, please perform your analysis.\n\nCONTEXT:\n{task.context}\n\nTASK:\n{task.description}")
         ])
         response = await self.llm.ainvoke(prompt.format_messages())
         return {"analysis": response.content, "patterns": "Identified key performance indicators and trends."}
@@ -263,8 +259,8 @@ class CriticAgent(BaseAgent):

     async def _execute_task(self, task: Task) -> Any:
         prompt = ChatPromptTemplate.from_messages([
-            SystemMessage(content="You are a meticulous critic. Your function is to rigorously evaluate the provided information, identifying logical fallacies, biases, assumptions, and gaps. Provide constructive feedback for improvement."),
-            HumanMessage(content=f"Please provide a critical evaluation of the following: {task.description}")
+            SystemMessage(content="You are a meticulous critic. Your function is to rigorously evaluate the provided information, identifying logical fallacies, biases, and gaps. Provide constructive feedback for improvement. Base your critique on the context provided."),
+            HumanMessage(content=f"Based on the following context, please perform your critique.\n\nCONTEXT:\n{task.context}\n\nTASK:\n{task.description}")
         ])
         response = await self.llm.ainvoke(prompt.format_messages())
         return {"evaluation": response.content, "strengths": "Identified robust arguments.", "weaknesses": "Flagged potential biases."}
@@ -276,8 +272,8 @@ class SynthesizerAgent(BaseAgent):

     async def _execute_task(self, task: Task) -> Any:
         prompt = ChatPromptTemplate.from_messages([
-            SystemMessage(content="You are an expert synthesizer. Your task is to integrate disparate pieces of information, resolve contradictions, and formulate a single, coherent, and comprehensive narrative or strategic plan."),
-            HumanMessage(content=f"Please synthesize the following inputs into a unified conclusion: {task.description}")
+            SystemMessage(content="You are an expert synthesizer. Your task is to integrate all the provided information from the context (research, analysis, critique) into a single, coherent, and comprehensive final report with actionable recommendations."),
+            HumanMessage(content=f"Please synthesize the following inputs from the context into a unified conclusion.\n\nCONTEXT:\n{task.context}\n\nTASK:\n{task.description}")
         ])
         response = await self.llm.ainvoke(prompt.format_messages())
         return {"synthesis": response.content, "recommendations": "Formulated final strategic recommendations."}
@@ -292,8 +288,8 @@ class CoordinatorAgent(BaseAgent):
     The central agent responsible for managing the entire workflow.

     The Coordinator decomposes the main problem, creates and assigns tasks,
-    builds a dependency graph, and orchestrates the execution of the workflow
-    by the specialized agents.
+    builds a dependency graph, and orchestrates the execution of the workflow,
+    ensuring information flows correctly between agents.
     """
     def __init__(self, name: str, llm: Optional[ChatOpenAI] = None):
         super().__init__(name, AgentRole.COORDINATOR, llm)
@@ -317,12 +313,11 @@
         Returns:
             List[Task]: A list of Task objects ready for execution.
         """
-        # A standard workflow template ensures consistency.
         tasks = [
-            Task(id="task_1", description=f"Conduct foundational research on: {problem}", metadata={"suggested_role": "Researcher"}),
-            Task(id="task_2", description=f"Analyze the research findings for patterns and insights related to: {problem}", metadata={"suggested_role": "Analyst"}),
-            Task(id="task_3", description="Critically evaluate the research and analysis for quality, bias, and completeness.", metadata={"suggested_role": "Critic"}),
-            Task(id="task_4", description="Synthesize all findings into a final report with actionable recommendations.", metadata={"suggested_role": "Synthesizer"})
+            Task(id="task_1", description=f"Conduct foundational research on: {problem}"),
+            Task(id="task_2", description=f"Analyze the research findings for patterns and insights related to: {problem}"),
+            Task(id="task_3", description="Critically evaluate the research and analysis for quality, bias, and completeness."),
+            Task(id="task_4", description="Synthesize all findings into a final report with actionable recommendations.")
         ]
         self._build_dependency_graph(tasks)
         return tasks
@@ -334,10 +329,8 @@

     async def execute_workflow(self, tasks: List[Task]) -> Dict[str, Any]:
         """
-        Executes a list of tasks according to their dependencies.
-
-        This method uses a thread pool to execute tasks in parallel where possible,
-        respecting the predefined dependency graph.
+        Executes a list of tasks according to their dependencies, managing the
+        flow of information between them.

         Args:
             tasks (List[Task]): The list of tasks to execute.
@@ -349,6 +342,7 @@
         self._update_workflow_graph_with_tasks(tasks)

         completed_task_ids = set()
+        task_results = {}
         task_dict = {t.id: t for t in tasks}

         while len(completed_task_ids) < len(tasks):
@@ -356,45 +350,48 @@
                 t for t in tasks if t.status == TaskStatus.PENDING and all(dep in completed_task_ids for dep in t.dependencies)
             ]
             if not ready_tasks:
-                # Break if no tasks are ready to run to prevent infinite loops
-                failed_tasks = [t for t in tasks if t.status == TaskStatus.FAILED]
-                if len(completed_task_ids) + len(failed_tasks) == len(tasks):
+                if any(t.status == TaskStatus.FAILED for t in tasks):
+                    logger.warning("Workflow halted due to a failed task.")
                     break
-                # If there are still pending tasks but none are ready, it indicates a dependency issue.
-                logger.error("Workflow stalled: circular dependency or unresolved failed dependency.")
+                logger.error("Workflow stalled: No ready tasks found. Check for circular dependencies.")
                 break

+            # Prepare context for the ready tasks
+            for task in ready_tasks:
+                context_parts = []
+                for dep_id in task.dependencies:
+                    if dep_id in task_results:
+                        context_parts.append(f"--- Result of {dep_id} ---\n{json.dumps(task_results[dep_id], indent=2)}")
+                task.context = "\n\n".join(context_parts)
+
             with ThreadPoolExecutor(max_workers=len(self.agents)) as executor:
-                future_to_task = {}
-                for task in ready_tasks:
-                    agent = self._select_agent_for_task(task)
-                    if agent:
-                        task.status = TaskStatus.IN_PROGRESS
-                        future = executor.submit(asyncio.run, agent.process_task(task))
-                        future_to_task[future] = task.id
+                future_to_task = {executor.submit(asyncio.run, self._select_agent_for_task(t).process_task(t)): t for t in ready_tasks}

                 for future in as_completed(future_to_task):
-                    task_id = future_to_task[future]
-                    try:
-                        completed_task = future.result()
-                        task_dict[task_id] = completed_task
-                        if completed_task.status == TaskStatus.COMPLETED:
-                            completed_task_ids.add(task_id)
-                        # Update the graph with the final status
-                        self.workflow_graph.nodes[task_id]['status'] = completed_task.status.value
-                    except Exception as exc:
-                        logger.error(f"Task {task_id} generated an exception: {exc}")
-                        task_dict[task_id].status = TaskStatus.FAILED
-                        self.workflow_graph.nodes[task_id]['status'] = TaskStatus.FAILED.value
+                    completed_task = future.result()
+                    task_id = completed_task.id
+                    task_dict[task_id] = completed_task
+
+                    if completed_task.status == TaskStatus.COMPLETED:
+                        completed_task_ids.add(task_id)
+                        task_results[task_id] = completed_task.result
+
+                    self.workflow_graph.nodes[task_id]['status'] = completed_task.status.value

         final_tasks = list(task_dict.values())
         return self._compile_workflow_results(final_tasks, start_time)

     def _select_agent_for_task(self, task: Task) -> Optional[BaseAgent]:
         """Selects an available agent best suited for a given task."""
-        suggested_role = task.metadata.get("suggested_role")
+        role_map = {
+            "task_1": AgentRole.RESEARCHER,
+            "task_2": AgentRole.ANALYST,
+            "task_3": AgentRole.CRITIC,
+            "task_4": AgentRole.SYNTHESIZER,
+        }
+        target_role = role_map.get(task.id)
         for agent in self.agents.values():
-            if agent.role.value == suggested_role:
+            if agent.role == target_role:
                 return agent
         return None

@@ -404,7 +401,6 @@
             self.workflow_graph.add_node(task.id, task_description=task.description, status=task.status.value)
             for dep_id in task.dependencies:
                 self.workflow_graph.add_edge(dep_id, task.id)
-            # Link agent to the task it will perform
             agent = self._select_agent_for_task(task)
             if agent:
                 self.workflow_graph.add_edge(agent.name, task.id)
@@ -445,7 +441,6 @@ class WorkflowVisualizer:
         if not G.nodes():
             return self._create_empty_figure("No workflow data available.")

-        # Use a hierarchical layout
         pos = nx.spring_layout(G, k=0.9, iterations=50, seed=42)

         edge_x, edge_y = [], []
@@ -469,7 +464,7 @@
                 node_text.append(f"<b>{node}</b><br>{role}")
                 node_colors.append(self.color_map.get(role, '#ccc'))
                 node_sizes.append(35)
-            else: # is task
+            else:
                 status = G.nodes[node].get('status', 'Pending')
                 node_text.append(f"<b>{node}</b><br>Status: {status}")
                 node_colors.append('#6c757d' if status == 'Pending' else '#28a745' if status == 'Completed' else '#dc3545')
@@ -486,13 +481,13 @@

     def create_task_timeline(self, tasks: List[Task]) -> go.Figure:
         """Creates a Plotly timeline (Gantt chart) of task execution."""
-        if not tasks or not any(t.created_at and t.completed_at for t in tasks):
+        if not any(t.created_at and t.completed_at for t in tasks):
             return self._create_empty_figure("No task execution data to display.")

-        df_data = []
-        for task in tasks:
-            if task.created_at and task.completed_at:
-                df_data.append(dict(Task=task.id, Start=task.created_at, Finish=task.completed_at, Agent=task.assigned_to or "Unassigned"))
+        df_data = [
+            dict(Task=task.id, Start=task.created_at, Finish=task.completed_at, Agent=task.assigned_to or "Unassigned")
+            for task in tasks if task.created_at and task.completed_at
+        ]

         if not df_data:
             return self._create_empty_figure("No completed tasks with timing data.")
@@ -552,7 +547,7 @@ class ReportGenerator:
             str: A formatted HTML string representing the report.
         """
         tasks = workflow_result.get('tasks', [])
-        synthesis_task = next((t for t in reversed(tasks) if t.status == TaskStatus.COMPLETED and t.metadata.get("suggested_role") == "Synthesizer"), None)
+        synthesis_task = next((t for t in reversed(tasks) if t.status == TaskStatus.COMPLETED and self._get_task_role(t) == AgentRole.SYNTHESIZER), None)

         report = f"""
         <div style="font-family: sans-serif; color: #333;">
@@ -568,6 +563,11 @@
         """
         return report

+    def _get_task_role(self, task: Task) -> Optional[AgentRole]:
+        """Helper to determine the role associated with a task."""
+        role_map = { "task_1": AgentRole.RESEARCHER, "task_2": AgentRole.ANALYST, "task_3": AgentRole.CRITIC, "task_4": AgentRole.SYNTHESIZER }
+        return role_map.get(task.id)
+
     def _generate_summary(self, result: Dict[str, Any]) -> str:
         """Generates the executive summary section of the report."""
         return f"""
@@ -579,10 +579,10 @@

     def _generate_recommendations(self, synth_task: Optional[Task]) -> str:
         """Generates the key recommendations section from the Synthesizer's output."""
-        content = "<p>No synthesized recommendations were produced.</p>"
+        content = "<p>No synthesized recommendations were produced due to workflow incompletion or error.</p>"
         if synth_task and isinstance(synth_task.result, dict):
             recommendations = synth_task.result.get('synthesis', 'No specific recommendations provided in the synthesis.')
-            content = f"<p>{recommendations}</p>"
+            content = f"<div>{recommendations}</div>"

         return f"""
         <div style="margin-bottom: 20px;">
@@ -635,12 +635,7 @@ def create_gradio_interface():
     visualizer = WorkflowVisualizer()
     report_generator = ReportGenerator()

-    # Use a dictionary for shared state to avoid global variables
-    state = {
-        "coordinator": None,
-        "current_workflow": None,
-        "current_problem": ""
-    }
+    state = { "coordinator": None }

     def initialize_system(api_key: str, model: str, demo_mode: bool) -> str:
         """Initializes the coordinator and specialized agents."""
@@ -665,19 +660,18 @@

     async def run_analysis(problem: str) -> Tuple[str, go.Figure, go.Figure, go.Figure, str]:
         """Runs the full analysis workflow for a given problem."""
-        if not state["coordinator"]:
-            return "Error: System not initialized. Please initialize first.", None, None, None, ""
+        coordinator = state.get("coordinator")
+        if not coordinator:
+            empty_fig = visualizer._create_empty_figure("System not initialized.")
+            return "Error: System not initialized.", empty_fig, empty_fig, empty_fig, ""
         if not problem:
-            return "Error: Problem statement cannot be empty.", None, None, None, ""
-
-        state["current_problem"] = problem
+            empty_fig = visualizer._create_empty_figure("No problem statement provided.")
+            return "Error: Problem statement cannot be empty.", empty_fig, empty_fig, empty_fig, ""

         try:
-            tasks = state["coordinator"].decompose_problem(problem)
-            workflow = await state["coordinator"].execute_workflow(tasks)
-            state["current_workflow"] = workflow
+            tasks = coordinator.decompose_problem(problem)
+            workflow = await coordinator.execute_workflow(tasks)

-            # Generate outputs
             status_text = f"Analysis complete. Success Rate: {workflow['success_rate']:.0%}. Total Time: {workflow['execution_time']:.1f}s."
             graph_fig = visualizer.create_workflow_graph(workflow['workflow_graph'])
             timeline_fig = visualizer.create_task_timeline(workflow['tasks'])
@@ -686,10 +680,10 @@

             return status_text, graph_fig, timeline_fig, perf_fig, report_html
         except Exception as e:
-            logger.error(f"An error occurred during analysis: {e}")
-            return f"An unexpected error occurred: {e}", None, None, None, ""
+            logger.error(f"An error occurred during analysis: {e}", exc_info=True)
+            empty_fig = visualizer._create_empty_figure(f"An error occurred: {e}")
+            return f"An unexpected error occurred: {e}", empty_fig, empty_fig, empty_fig, ""

-    # Define CSS for a professional look and feel
     custom_css = """
     .gradio-container { max-width: 1400px !important; margin: auto !important; }
     h1 { color: #003366; font-family: sans-serif; text-align: center; }
@@ -702,7 +696,6 @@

         with gr.Row():
             with gr.Column(scale=1):
-                # Configuration Panel
                 with gr.Accordion("System Configuration", open=True):
                     api_key_input = gr.Textbox(label="OpenAI API Key", type="password", info="Required for live mode.")
                     model_select = gr.Dropdown(choices=["gpt-4", "gpt-4-turbo", "gpt-3.5-turbo"], value=Config.DEFAULT_MODEL, label="Language Model")
@@ -711,7 +704,6 @@
                 init_status = gr.Textbox(label="System Status", interactive=False)

             with gr.Column(scale=3):
-                # Main Analysis Panel
                 with gr.Group():
                     problem_input = gr.Textbox(label="Problem Statement", placeholder="Enter a complex problem for the multi-agent system to analyze...", lines=3)
                     analyze_button = gr.Button("Run Analysis", variant="primary")
@@ -728,17 +720,8 @@
                     with gr.TabItem("Generated Report"):
                         report_output = gr.HTML()

-        # Event Handlers
-        init_button.click(
-            fn=initialize_system,
-            inputs=[api_key_input, model_select, demo_mode_checkbox],
-            outputs=init_status
-        )
-        analyze_button.click(
-            fn=lambda p: asyncio.run(run_analysis(p)),
-            inputs=[problem_input],
-            outputs=[analysis_status, workflow_graph, timeline_chart, performance_chart, report_output]
-        )
+        init_button.click(fn=initialize_system, inputs=[api_key_input, model_select, demo_mode_checkbox], outputs=init_status)
+        analyze_button.click(fn=lambda p: asyncio.run(run_analysis(p)), inputs=[problem_input], outputs=[analysis_status, workflow_graph, timeline_chart, performance_chart, report_output])

     return interface
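
A minimal, self-contained sketch of the context-threading pattern this commit introduces; the sample task results below are hypothetical, and the joining logic mirrors the new code in execute_workflow:

    import json

    # Hypothetical results from already-completed upstream tasks.
    task_results = {
        "task_1": {"research": "Key findings about the problem domain."},
        "task_2": {"analysis": "Demand appears to be growing year over year."},
    }
    dependencies = ["task_1", "task_2"]

    # Serialize each dependency's result and join them into one context string,
    # as execute_workflow now does before dispatching a ready task to an agent.
    context_parts = [
        f"--- Result of {dep_id} ---\n{json.dumps(task_results[dep_id], indent=2)}"
        for dep_id in dependencies
        if dep_id in task_results
    ]
    context = "\n\n".join(context_parts)
    print(context)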