blackopsrepl commited on
Commit
2004c79
Β·
1 Parent(s): 1c4ca89

feat!: add constraint analyzer service and refactor all systems

Browse files
src/app.py CHANGED
@@ -13,7 +13,7 @@ if not os.getenv("NEBIUS_API_KEY") or not os.getenv("NEBIUS_MODEL"):
13
  load_secrets("tests/secrets/creds.py")
14
 
15
 
16
- from handlers import (
17
  load_data,
18
  show_solved,
19
  start_timer,
@@ -21,7 +21,7 @@ from handlers import (
21
  show_mock_project_content,
22
  )
23
 
24
- from mcp_handlers import process_message_and_attached_file
25
 
26
  from services import MockProjectService
27
 
 
13
  load_secrets("tests/secrets/creds.py")
14
 
15
 
16
+ from handlers.web_backend import (
17
  load_data,
18
  show_solved,
19
  start_timer,
 
21
  show_mock_project_content,
22
  )
23
 
24
+ from handlers.mcp_backend import process_message_and_attached_file
25
 
26
  from services import MockProjectService
27
 
src/constraint_solvers/timetable/__init__.py CHANGED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Timetable constraint solver module.
3
+
4
+ This module contains the domain models, constraints, and solver logic
5
+ for employee scheduling optimization.
6
+ """
7
+
8
+ try:
9
+ from .domain import (
10
+ Employee,
11
+ Task,
12
+ EmployeeSchedule,
13
+ ScheduleInfo,
14
+ )
15
+ from .solver import solver_manager, solution_manager
16
+ from .constraints import (
17
+ get_slot_overlap,
18
+ get_slot_date,
19
+ tasks_violate_sequence_order,
20
+ define_constraints,
21
+ )
22
+
23
+ _TIMEFOLD_AVAILABLE = True
24
+ except ImportError as e:
25
+ # Handle missing timefold dependency gracefully
26
+ _TIMEFOLD_AVAILABLE = False
27
+ Employee = None
28
+ Task = None
29
+ EmployeeSchedule = None
30
+ ScheduleInfo = None
31
+ solver_manager = None
32
+ solution_manager = None
33
+ get_slot_overlap = None
34
+ get_slot_date = None
35
+ tasks_violate_sequence_order = None
36
+ define_constraints = None
37
+
38
+ __all__ = [
39
+ # Domain models
40
+ "Employee",
41
+ "Task",
42
+ "EmployeeSchedule",
43
+ "ScheduleInfo",
44
+ # Solver managers
45
+ "solver_manager",
46
+ "solution_manager",
47
+ # Constraint functions
48
+ "get_slot_overlap",
49
+ "get_slot_date",
50
+ "tasks_violate_sequence_order",
51
+ "define_constraints",
52
+ ]
src/constraint_solvers/timetable/analysis/__init__.py DELETED
@@ -1,9 +0,0 @@
1
- """
2
- Constraint violation analysis module.
3
-
4
- This module provides tools for analyzing constraint violations in Timefold solver results.
5
- """
6
-
7
- from .violation_analyzer import ConstraintViolationAnalyzer
8
-
9
- __all__ = ["ConstraintViolationAnalyzer"]
 
 
 
 
 
 
 
 
 
 
src/constraint_solvers/timetable/analysis/violation_analyzer.py DELETED
@@ -1,185 +0,0 @@
1
- from typing import Dict, List, Set
2
- from ..domain import EmployeeSchedule, Task, Employee
3
-
4
-
5
- class ConstraintViolationAnalyzer:
6
- """
7
- Service for analyzing constraint violations in scheduling solutions.
8
-
9
- This service implements automatic detection of infeasible scheduling problems.
10
- When the Timefold solver cannot satisfy all hard constraints, it returns a
11
- solution with a negative hard score. This service analyzes such solutions to
12
- provide users with specific, actionable feedback about why their scheduling
13
- problem cannot be solved.
14
- """
15
-
16
- @staticmethod
17
- def analyze_constraint_violations(schedule: EmployeeSchedule) -> str:
18
- """
19
- Analyze constraint violations in a schedule and provide detailed feedback.
20
-
21
- Args:
22
- schedule: The schedule to analyze
23
-
24
- Returns:
25
- Detailed string describing constraint violations and suggestions
26
- """
27
- if not schedule.score or schedule.score.hard_score >= 0:
28
- return "No constraint violations detected."
29
-
30
- violations = []
31
-
32
- # Check for missing skills
33
- skill_violations = ConstraintViolationAnalyzer._check_skill_violations(schedule)
34
- if skill_violations:
35
- violations.extend(skill_violations)
36
-
37
- # Check for insufficient time
38
- time_violations = ConstraintViolationAnalyzer._check_time_violations(schedule)
39
- if time_violations:
40
- violations.extend(time_violations)
41
-
42
- # Check for availability conflicts
43
- availability_violations = (
44
- ConstraintViolationAnalyzer._check_availability_violations(schedule)
45
- )
46
- if availability_violations:
47
- violations.extend(availability_violations)
48
-
49
- # Check for sequencing issues
50
- sequence_violations = ConstraintViolationAnalyzer._check_sequence_violations(
51
- schedule
52
- )
53
- if sequence_violations:
54
- violations.extend(sequence_violations)
55
-
56
- if not violations:
57
- violations.append("Unknown constraint violations detected.")
58
-
59
- return "\n".join(violations)
60
-
61
- @staticmethod
62
- def _check_skill_violations(schedule: EmployeeSchedule) -> List[str]:
63
- """Check for tasks that require skills not available in the employee pool"""
64
- violations = []
65
-
66
- # Get all available skills
67
- available_skills: Set[str] = set()
68
- for employee in schedule.employees:
69
- available_skills.update(employee.skills)
70
-
71
- # Check for tasks requiring unavailable skills
72
- unassigned_tasks = [task for task in schedule.tasks if not task.employee]
73
- missing_skills: Set[str] = set()
74
-
75
- for task in unassigned_tasks:
76
- if task.required_skill not in available_skills:
77
- missing_skills.add(task.required_skill)
78
-
79
- if missing_skills:
80
- violations.append(
81
- f"β€’ Missing Skills: No employees have these required skills: {', '.join(sorted(missing_skills))}"
82
- )
83
-
84
- return violations
85
-
86
- @staticmethod
87
- def _check_time_violations(schedule: EmployeeSchedule) -> List[str]:
88
- """Check for insufficient time to complete all tasks"""
89
- violations = []
90
-
91
- total_task_slots = sum(task.duration_slots for task in schedule.tasks)
92
- total_available_slots = (
93
- len(schedule.employees) * schedule.schedule_info.total_slots
94
- )
95
-
96
- if total_task_slots > total_available_slots:
97
- total_task_hours = total_task_slots / 2 # Convert slots to hours
98
- total_available_hours = total_available_slots / 2
99
- violations.append(
100
- f"β€’ Insufficient Time: Tasks require {total_task_hours:.1f} hours total, "
101
- f"but only {total_available_hours:.1f} hours available across all employees"
102
- )
103
-
104
- return violations
105
-
106
- @staticmethod
107
- def _check_availability_violations(schedule: EmployeeSchedule) -> List[str]:
108
- """Check for tasks scheduled during employee unavailable periods"""
109
- violations = []
110
-
111
- for task in schedule.tasks:
112
- if task.employee and hasattr(task.employee, "unavailable_dates"):
113
- # This would need actual date calculation based on start_slot
114
- # For now, we'll just note if there are unassigned tasks with availability constraints
115
- pass
116
-
117
- unassigned_count = len([task for task in schedule.tasks if not task.employee])
118
- if unassigned_count > 0:
119
- violations.append(
120
- f"β€’ Unassigned Tasks: {unassigned_count} task(s) could not be assigned to any employee"
121
- )
122
-
123
- return violations
124
-
125
- @staticmethod
126
- def _check_sequence_violations(schedule: EmployeeSchedule) -> List[str]:
127
- """Check for project sequencing constraint violations"""
128
- violations = []
129
-
130
- # Group tasks by project
131
- project_tasks: Dict[str, List[Task]] = {}
132
- for task in schedule.tasks:
133
- project_id = getattr(task, "project_id", "")
134
- if project_id:
135
- if project_id not in project_tasks:
136
- project_tasks[project_id] = []
137
- project_tasks[project_id].append(task)
138
-
139
- # Check sequencing within each project
140
- for project_id, tasks in project_tasks.items():
141
- if len(tasks) > 1:
142
- # Sort by sequence number
143
- sorted_tasks = sorted(
144
- tasks, key=lambda t: getattr(t, "sequence_number", 0)
145
- )
146
-
147
- # Check if tasks are assigned and properly sequenced
148
- for i in range(len(sorted_tasks) - 1):
149
- current_task = sorted_tasks[i]
150
- next_task = sorted_tasks[i + 1]
151
-
152
- if not current_task.employee or not next_task.employee:
153
- continue # Skip unassigned tasks
154
-
155
- # Check if next task starts after current task ends
156
- if next_task.start_slot < (
157
- current_task.start_slot + current_task.duration_slots
158
- ):
159
- violations.append(
160
- f"β€’ Sequence Violation: In project '{project_id}', task sequence is violated"
161
- )
162
- break
163
-
164
- return violations
165
-
166
- @staticmethod
167
- def generate_suggestions(schedule: EmployeeSchedule) -> List[str]:
168
- """Generate actionable suggestions for fixing constraint violations"""
169
- suggestions = []
170
-
171
- if not schedule.score or schedule.score.hard_score >= 0:
172
- return suggestions
173
-
174
- # Basic suggestions based on common issues
175
- suggestions.extend(
176
- [
177
- "Add more employees with required skills",
178
- "Increase the scheduling time window (more days)",
179
- "Reduce task requirements or durations",
180
- "Check employee availability constraints",
181
- "Review project sequencing requirements",
182
- ]
183
- )
184
-
185
- return suggestions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/constraint_solvers/timetable/domain.py CHANGED
@@ -1,7 +1,8 @@
1
  from timefold.solver import SolverStatus
2
  from timefold.solver.domain import *
3
  from timefold.solver.score import HardSoftDecimalScore
4
- from datetime import datetime, date
 
5
  from typing import Annotated
6
  from dataclasses import dataclass, field
7
 
 
1
  from timefold.solver import SolverStatus
2
  from timefold.solver.domain import *
3
  from timefold.solver.score import HardSoftDecimalScore
4
+
5
+ from datetime import date
6
  from typing import Annotated
7
  from dataclasses import dataclass, field
8
 
src/constraint_solvers/timetable/solver.py CHANGED
@@ -2,8 +2,8 @@ from timefold.solver import SolverManager, SolverFactory, SolutionManager
2
  from timefold.solver.config import (
3
  SolverConfig,
4
  ScoreDirectorFactoryConfig,
5
- TerminationConfig,
6
- Duration,
7
  )
8
 
9
  from .domain import *
 
2
  from timefold.solver.config import (
3
  SolverConfig,
4
  ScoreDirectorFactoryConfig,
5
+ # TerminationConfig,
6
+ # Duration,
7
  )
8
 
9
  from .domain import *
src/factory/__init__.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Factory module for data creation - both algorithmic and AI-powered.
3
+
4
+ This module contains all data creation, generation, and formatting logic
5
+ for the Yuga Planner scheduling system, organized into:
6
+ - data: Algorithmic data generation and formatting
7
+ - agents: AI-powered task composition
8
+ """
9
+
10
+ # Import from data submodule
11
+ from .data.formatters import schedule_to_dataframe, employees_to_dataframe
12
+ from .data.generators import (
13
+ generate_employees,
14
+ generate_employee_availability,
15
+ earliest_monday_on_or_after,
16
+ )
17
+ from .data.provider import generate_agent_data, generate_mcp_data
18
+
19
+ # Import from agents submodule
20
+ from .agents.task_composer_agent import TaskComposerAgent
21
+
22
+ __all__ = [
23
+ # Data formatters - convert domain objects to DataFrames
24
+ "schedule_to_dataframe",
25
+ "employees_to_dataframe",
26
+ # Data generators - create domain objects
27
+ "generate_employees",
28
+ "generate_employee_availability",
29
+ "earliest_monday_on_or_after",
30
+ # Data providers - orchestrate data creation
31
+ "generate_agent_data",
32
+ "generate_mcp_data",
33
+ # AI agents - intelligent task composition
34
+ "TaskComposerAgent",
35
+ ]
src/factory/agents/__init__.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Agents module for AI-powered data creation.
3
+
4
+ This module contains all AI-based task composition and data generation logic
5
+ for the Yuga Planner scheduling system.
6
+ """
7
+
8
+ from .task_composer_agent import TaskComposerAgent
9
+ from .task_processing import (
10
+ remove_markdown_code_blocks,
11
+ remove_markdown_list_elements,
12
+ unwrap_tasks_from_generated,
13
+ log_task_duration_breakdown,
14
+ log_total_time,
15
+ )
16
+
17
+ __all__ = [
18
+ # Main agent class
19
+ "TaskComposerAgent",
20
+ # Task processing utilities
21
+ "remove_markdown_code_blocks",
22
+ "remove_markdown_list_elements",
23
+ "unwrap_tasks_from_generated",
24
+ "log_task_duration_breakdown",
25
+ "log_total_time",
26
+ ]
src/{agents β†’ factory/agents}/task_composer_agent.py RENAMED
@@ -1,4 +1,4 @@
1
- import os, asyncio
2
  from typing import Optional, List
3
 
4
  from llama_index.llms.nebius import NebiusLLM
@@ -12,7 +12,7 @@ from llama_index.core.workflow import (
12
  )
13
 
14
  from utils.markdown_analyzer import MarkdownAnalyzer
15
- from agents.task_processing import (
16
  remove_markdown_code_blocks,
17
  remove_markdown_list_elements,
18
  unwrap_tasks_from_generated,
@@ -89,6 +89,34 @@ class TaskComposerAgent:
89
  input=query, skills=skills or [], context=context
90
  )
91
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
 
93
  class TaskSplitter(Event):
94
  task_splitter_output: str
@@ -200,34 +228,14 @@ class TaskComposerWorkflow(Workflow):
200
  async def evaluate_tasks_dependencies(
201
  self, event: TaskEvaluator
202
  ) -> TaskDependencyMatcher:
203
- logger.info("=== Step 3: Task Dependencies ===")
204
- logger.info("Matching tasks with available skills")
205
-
206
- # Get skills and context from the event
207
- skills = event.skills
208
- context = event.context
209
-
210
- if not skills:
211
- logger.warning("No skills provided, skipping dependency matching")
212
- # Convert to dependency format with empty skill
213
- task_dependencies = [
214
- (task, duration, "") for task, duration in event.task_evaluator_output
215
- ]
216
- return TaskDependencyMatcher(task_dependency_output=task_dependencies)
217
-
218
- skills_str = "\n".join([f"- {skill}" for skill in skills])
219
- logger.info(f"Available skills: {skills}")
220
- logger.info(f"Context: {context}")
221
-
222
- task_dependencies: list[tuple[str, str, str]] = []
223
- logger.info(
224
- f"Processing {len(event.task_evaluator_output)} tasks for skill matching..."
225
- )
226
 
 
227
  for i, (task, duration) in enumerate(event.task_evaluator_output, 1):
228
  try:
229
  formatted_prompt: str = self._task_deps_matcher_template.format(
230
- task=task, skills=skills_str, context=context
231
  )
232
 
233
  response = await asyncio.wait_for(
@@ -236,36 +244,35 @@ class TaskComposerWorkflow(Workflow):
236
  )
237
 
238
  matched_skill = response.text.strip()
239
- task_dependencies.append((task, duration, matched_skill))
240
  logger.info(
241
- f"Completed skill matching {i}/{len(event.task_evaluator_output)}: {task[:50]}... -> {matched_skill}"
242
  )
243
 
244
  except asyncio.TimeoutError:
245
  logger.warning(f"Skill matching timeout for task {i}: {task[:50]}...")
246
 
247
- # Use first available skill as fallback
248
- fallback_skill = skills[0] if skills else ""
249
- task_dependencies.append((task, duration, fallback_skill))
250
 
251
  except Exception as e:
252
  logger.error(f"Error matching skill for task {i}: {e}")
253
 
254
- # Use first available skill as fallback
255
- fallback_skill = skills[0] if skills else ""
256
- task_dependencies.append((task, duration, fallback_skill))
 
 
257
 
258
- return TaskDependencyMatcher(task_dependency_output=task_dependencies)
259
 
260
  @step
261
  async def result_output(self, event: TaskDependencyMatcher) -> StopEvent:
262
- logger.info("=== Step 4: Final Result ===")
 
263
 
264
- # Log the final breakdown with dependencies
265
  for task, duration, skill in event.task_dependency_output:
266
- logger.info(f"Task: {task}")
267
- logger.info(f" Duration: {duration} units")
268
- logger.info(f" Matched Skill: {skill}")
269
- logger.info("-" * 50)
270
 
271
  return StopEvent(result=event.task_dependency_output)
 
1
+ import asyncio
2
  from typing import Optional, List
3
 
4
  from llama_index.llms.nebius import NebiusLLM
 
12
  )
13
 
14
  from utils.markdown_analyzer import MarkdownAnalyzer
15
+ from factory.agents.task_processing import (
16
  remove_markdown_code_blocks,
17
  remove_markdown_list_elements,
18
  unwrap_tasks_from_generated,
 
89
  input=query, skills=skills or [], context=context
90
  )
91
 
92
+ async def compose_tasks(self, input_text: str, parameters) -> List:
93
+ """
94
+ Compose tasks from input text using the task composer workflow.
95
+
96
+ Args:
97
+ input_text: The input text to compose tasks from
98
+ parameters: TimeTableDataParameters containing skill information
99
+
100
+ Returns:
101
+ List of task tuples (description, duration, skill)
102
+ """
103
+ try:
104
+ # Extract skills from parameters
105
+ skills = list(parameters.skill_set.required_skills) + list(
106
+ parameters.skill_set.optional_skills
107
+ )
108
+
109
+ # Run the workflow
110
+ result = await self.run_workflow(input_text, skills=skills, context="")
111
+
112
+ # The workflow returns a list of tuples (description, duration, skill)
113
+ logger.debug(f"Task composer workflow result: {result}")
114
+ return result
115
+
116
+ except Exception as e:
117
+ logger.error(f"Error in compose_tasks: {e}")
118
+ return []
119
+
120
 
121
  class TaskSplitter(Event):
122
  task_splitter_output: str
 
228
  async def evaluate_tasks_dependencies(
229
  self, event: TaskEvaluator
230
  ) -> TaskDependencyMatcher:
231
+ logger.info("=== Step 3: Skill Matching ===")
232
+ logger.info("Matching tasks with skills...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
233
 
234
+ final_tasks: list[tuple[str, str, str]] = []
235
  for i, (task, duration) in enumerate(event.task_evaluator_output, 1):
236
  try:
237
  formatted_prompt: str = self._task_deps_matcher_template.format(
238
+ task=task, skills=", ".join(event.skills), context=event.context
239
  )
240
 
241
  response = await asyncio.wait_for(
 
244
  )
245
 
246
  matched_skill = response.text.strip()
247
+ final_tasks.append((task, duration, matched_skill))
248
  logger.info(
249
+ f"Completed skill matching {i}/{len(event.task_evaluator_output)}"
250
  )
251
 
252
  except asyncio.TimeoutError:
253
  logger.warning(f"Skill matching timeout for task {i}: {task[:50]}...")
254
 
255
+ # Use a default skill
256
+ default_skill = event.skills[0] if event.skills else "General"
257
+ final_tasks.append((task, duration, default_skill))
258
 
259
  except Exception as e:
260
  logger.error(f"Error matching skill for task {i}: {e}")
261
 
262
+ # Use a default skill
263
+ default_skill = event.skills[0] if event.skills else "General"
264
+ final_tasks.append((task, duration, default_skill))
265
+
266
+ logger.info(f"Skill matching completed for {len(final_tasks)} tasks")
267
 
268
+ return TaskDependencyMatcher(task_dependency_output=final_tasks)
269
 
270
  @step
271
  async def result_output(self, event: TaskDependencyMatcher) -> StopEvent:
272
+ logger.info("=== Final Result ===")
273
+ logger.info(f"Generated {len(event.task_dependency_output)} tasks with skills")
274
 
 
275
  for task, duration, skill in event.task_dependency_output:
276
+ logger.info(f"- {task[:50]}... | Duration: {duration} | Skill: {skill}")
 
 
 
277
 
278
  return StopEvent(result=event.task_dependency_output)
src/{agents β†’ factory/agents}/task_processing.py RENAMED
@@ -1,5 +1,3 @@
1
- import re
2
-
3
  from utils.markdown_analyzer import MarkdownAnalyzer
4
  from utils.logging_config import setup_logging, get_logger
5
 
 
 
 
1
  from utils.markdown_analyzer import MarkdownAnalyzer
2
  from utils.logging_config import setup_logging, get_logger
3
 
src/factory/data/__init__.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Data module for data generation, transformation, and formatting.
3
+
4
+ This module contains all algorithmic data creation, generation, and formatting logic
5
+ for the Yuga Planner scheduling system.
6
+ """
7
+
8
+ from .formatters import schedule_to_dataframe, employees_to_dataframe
9
+ from .generators import (
10
+ generate_employees,
11
+ generate_employee_availability,
12
+ earliest_monday_on_or_after,
13
+ )
14
+ from .provider import generate_agent_data, generate_mcp_data
15
+
16
+ __all__ = [
17
+ # Data formatters - convert domain objects to DataFrames
18
+ "schedule_to_dataframe",
19
+ "employees_to_dataframe",
20
+ # Data generators - create domain objects
21
+ "generate_employees",
22
+ "generate_employee_availability",
23
+ "earliest_monday_on_or_after",
24
+ # Data providers - orchestrate data creation
25
+ "generate_agent_data",
26
+ "generate_mcp_data",
27
+ ]
src/{helpers.py β†’ factory/data/formatters.py} RENAMED
@@ -1,6 +1,8 @@
1
- from datetime import datetime, timedelta
2
  import pandas as pd
3
 
 
 
4
 
5
  def schedule_to_dataframe(schedule) -> pd.DataFrame:
6
  """
@@ -21,9 +23,6 @@ def schedule_to_dataframe(schedule) -> pd.DataFrame:
21
 
22
  # Calculate start and end times based on 30-minute slots
23
  # Schedule starts from next Monday at 8 AM
24
- from datetime import date
25
- from factory.data_generators import earliest_monday_on_or_after
26
-
27
  base_date = earliest_monday_on_or_after(date.today())
28
  base_datetime = datetime.combine(
29
  base_date, datetime.min.time().replace(hour=8)
 
1
+ from datetime import datetime, timedelta, date
2
  import pandas as pd
3
 
4
+ from factory.data.generators import earliest_monday_on_or_after
5
+
6
 
7
  def schedule_to_dataframe(schedule) -> pd.DataFrame:
8
  """
 
23
 
24
  # Calculate start and end times based on 30-minute slots
25
  # Schedule starts from next Monday at 8 AM
 
 
 
26
  base_date = earliest_monday_on_or_after(date.today())
27
  base_datetime = datetime.combine(
28
  base_date, datetime.min.time().replace(hour=8)
src/factory/{data_generators.py β†’ data/generators.py} RENAMED
@@ -1,8 +1,9 @@
1
  from datetime import date, timedelta
 
2
  from random import Random
3
  from itertools import product
4
 
5
- from factory.data_models import *
6
  from constraint_solvers.timetable.domain import *
7
 
8
 
@@ -200,6 +201,7 @@ def generate_tasks(
200
  required_skill=required_skill,
201
  )
202
  )
 
203
  return tasks
204
 
205
 
@@ -209,51 +211,35 @@ def generate_tasks_from_calendar(
209
  calendar_entries: list[dict],
210
  ) -> list[Task]:
211
  """
212
- Given a list of calendar entry dicts, generate Task objects with randomized required_skill.
213
- Output format matches generate_tasks.
214
  """
215
- from datetime import datetime
216
-
217
  tasks: list[Task] = []
218
  ids = generate_task_ids()
219
 
220
  for entry in calendar_entries:
221
- try:
222
- summary = entry.get("summary", "Event")
223
- dtstart = entry.get("dtstart", "").replace("Z", "+00:00")
224
- dtend = entry.get("dtend", "").replace("Z", "+00:00")
225
- start_dt = datetime.fromisoformat(dtstart) if dtstart else None
226
- end_dt = datetime.fromisoformat(dtend) if dtend else None
227
- if start_dt and end_dt:
228
- duration_minutes = int((end_dt - start_dt).total_seconds() // 60)
229
- duration_slots = max(1, duration_minutes // 30)
230
- else:
231
- duration_slots = 2 # Default 1 hour
232
-
233
- # Randomize required_skill as in generate_tasks
234
  if random.random() >= 0.5:
235
  required_skill = random.choice(parameters.skill_set.required_skills)
236
-
237
  else:
238
  required_skill = random.choice(parameters.skill_set.optional_skills)
239
 
240
- tasks.append(
241
- Task(
242
- id=next(ids),
243
- description=summary,
244
- duration_slots=duration_slots,
245
- start_slot=0, # This will be assigned by the solver
246
- required_skill=required_skill,
247
- )
248
  )
249
-
250
- except Exception:
251
- continue
252
 
253
  return tasks
254
 
255
 
256
  def generate_task_ids():
 
257
  current_id = 0
258
  while True:
259
  yield str(current_id)
@@ -279,18 +265,16 @@ def weights(distributions: tuple[CountDistribution, ...]) -> tuple[float, ...]:
279
 
280
  def earliest_monday_on_or_after(target_date: date) -> date:
281
  """
282
- Returns the date of the next Monday on or after the given date.
283
- If the date is already Monday, returns the same date.
284
  """
285
- days = (7 - target_date.weekday()) % 7
286
- return target_date + timedelta(days=days)
287
 
288
 
289
  def tasks_from_agent_output(agent_output, parameters, project_id: str = ""):
290
  """
291
  Convert task_composer_agent output (list of (description, duration, skill)) to Task objects.
292
  """
293
- from constraint_solvers.timetable.domain import Task
294
 
295
  ids = generate_task_ids()
296
  tasks = []
@@ -302,12 +286,13 @@ def tasks_from_agent_output(agent_output, parameters, project_id: str = ""):
302
  elif len(task_data) == 2:
303
  description, duration = task_data
304
  # Fallback to random assignment if no skill provided
305
- import random
 
306
 
307
- if random.random() >= 0.5:
308
- required_skill = random.choice(parameters.skill_set.required_skills)
309
  else:
310
- required_skill = random.choice(parameters.skill_set.optional_skills)
311
  else:
312
  continue # skip invalid task data
313
 
@@ -325,19 +310,20 @@ def tasks_from_agent_output(agent_output, parameters, project_id: str = ""):
325
  )
326
  if required_skill not in all_skills:
327
  # If skill doesn't match exactly, try to find closest match or fallback to random
328
- import random
329
 
330
- required_skill = random.choice(parameters.skill_set.required_skills)
331
 
332
  tasks.append(
333
  Task(
334
  id=next(ids),
335
  description=description,
336
  duration_slots=duration_int,
337
- start_slot=0,
338
  required_skill=required_skill,
339
  project_id=project_id,
340
  sequence_number=sequence_num,
341
  )
342
  )
 
343
  return tasks
 
1
  from datetime import date, timedelta
2
+ import random
3
  from random import Random
4
  from itertools import product
5
 
6
+ from factory.data.models import *
7
  from constraint_solvers.timetable.domain import *
8
 
9
 
 
201
  required_skill=required_skill,
202
  )
203
  )
204
+
205
  return tasks
206
 
207
 
 
211
  calendar_entries: list[dict],
212
  ) -> list[Task]:
213
  """
214
+ Generate Task objects from calendar entries with Skills.
 
215
  """
 
 
216
  tasks: list[Task] = []
217
  ids = generate_task_ids()
218
 
219
  for entry in calendar_entries:
220
+ # Get skill from entry or randomly assign
221
+ required_skill = entry.get("skill")
222
+ if not required_skill:
 
 
 
 
 
 
 
 
 
 
223
  if random.random() >= 0.5:
224
  required_skill = random.choice(parameters.skill_set.required_skills)
 
225
  else:
226
  required_skill = random.choice(parameters.skill_set.optional_skills)
227
 
228
+ tasks.append(
229
+ Task(
230
+ id=next(ids),
231
+ description=entry["summary"],
232
+ duration_slots=entry.get("duration_slots", 2), # Default 1 hour
233
+ start_slot=entry.get("start_slot", 0),
234
+ required_skill=required_skill,
 
235
  )
236
+ )
 
 
237
 
238
  return tasks
239
 
240
 
241
  def generate_task_ids():
242
+ """Generate sequential task IDs starting from 0."""
243
  current_id = 0
244
  while True:
245
  yield str(current_id)
 
265
 
266
  def earliest_monday_on_or_after(target_date: date) -> date:
267
  """
268
+ Returns the earliest Monday on or after the given date.
 
269
  """
270
+ days_until_monday = (7 - target_date.weekday()) % 7
271
+ return target_date + timedelta(days=days_until_monday)
272
 
273
 
274
  def tasks_from_agent_output(agent_output, parameters, project_id: str = ""):
275
  """
276
  Convert task_composer_agent output (list of (description, duration, skill)) to Task objects.
277
  """
 
278
 
279
  ids = generate_task_ids()
280
  tasks = []
 
286
  elif len(task_data) == 2:
287
  description, duration = task_data
288
  # Fallback to random assignment if no skill provided
289
+ # Use a new Random instance for compatibility
290
+ rng = random.Random()
291
 
292
+ if rng.random() >= 0.5:
293
+ required_skill = rng.choice(parameters.skill_set.required_skills)
294
  else:
295
+ required_skill = rng.choice(parameters.skill_set.optional_skills)
296
  else:
297
  continue # skip invalid task data
298
 
 
310
  )
311
  if required_skill not in all_skills:
312
  # If skill doesn't match exactly, try to find closest match or fallback to random
313
+ rng = random.Random()
314
 
315
+ required_skill = rng.choice(parameters.skill_set.required_skills)
316
 
317
  tasks.append(
318
  Task(
319
  id=next(ids),
320
  description=description,
321
  duration_slots=duration_int,
322
+ start_slot=0, # Will be assigned by solver
323
  required_skill=required_skill,
324
  project_id=project_id,
325
  sequence_number=sequence_num,
326
  )
327
  )
328
+
329
  return tasks
src/factory/{data_models.py β†’ data/models.py} RENAMED
File without changes
src/factory/{data_provider.py β†’ data/provider.py} RENAMED
@@ -1,18 +1,16 @@
1
  import os
2
  import pandas as pd
3
 
4
- pd.set_option("display.max_columns", None)
5
- from helpers import schedule_to_dataframe
6
-
7
  from datetime import date
8
  from random import Random
9
 
10
- from domain import AGENTS_CONFIG
 
11
 
12
- from factory.data_generators import *
13
- from factory.data_models import *
14
 
15
- from agents.task_composer_agent import TaskComposerAgent
16
 
17
  from constraint_solvers.timetable.domain import *
18
 
@@ -173,8 +171,6 @@ async def generate_mcp_data(
173
  # --- LLM TASKS ---
174
  llm_tasks = []
175
  if user_message:
176
- from factory.data_provider import run_task_composer_agent
177
-
178
  agent_output = await run_task_composer_agent(user_message, parameters)
179
  llm_tasks = tasks_from_agent_output(agent_output, parameters, "PROJECT")
180
  for t in llm_tasks:
@@ -274,32 +270,18 @@ async def generate_mcp_data(
274
  async def run_task_composer_agent(
275
  input_str: str, parameters: TimeTableDataParameters
276
  ) -> list:
277
- agent = TaskComposerAgent(AGENTS_CONFIG)
278
- available_skills = list(parameters.skill_set.required_skills) + list(
279
- parameters.skill_set.optional_skills
280
- )
281
- context = f"Project scheduling for {parameters.employee_count} employees over {parameters.days_in_schedule} days"
282
-
283
- logger.info(
284
- "Starting task composer workflow - timeout: %ds, input length: %d chars",
285
- AGENTS_CONFIG.workflow_timeout,
286
- len(input_str),
287
- )
288
-
289
- logger.debug("Available skills: %s", available_skills)
290
-
291
  try:
292
- agent_output = await agent.run_workflow(
293
- query=input_str, skills=available_skills, context=context
294
- )
295
 
296
- logger.info(
297
- "Task composer workflow completed successfully - generated %d tasks",
298
- len(agent_output),
299
- )
300
 
301
- return agent_output
 
302
 
303
  except Exception as e:
304
- logger.error("Task composer workflow failed: %s", e)
305
- raise
 
 
1
  import os
2
  import pandas as pd
3
 
 
 
 
4
  from datetime import date
5
  from random import Random
6
 
7
+ pd.set_option("display.max_columns", None)
8
+ from factory.data.formatters import schedule_to_dataframe
9
 
10
+ from factory.data.generators import *
11
+ from factory.data.models import *
12
 
13
+ from factory.agents.task_composer_agent import TaskComposerAgent
14
 
15
  from constraint_solvers.timetable.domain import *
16
 
 
171
  # --- LLM TASKS ---
172
  llm_tasks = []
173
  if user_message:
 
 
174
  agent_output = await run_task_composer_agent(user_message, parameters)
175
  llm_tasks = tasks_from_agent_output(agent_output, parameters, "PROJECT")
176
  for t in llm_tasks:
 
270
  async def run_task_composer_agent(
271
  input_str: str, parameters: TimeTableDataParameters
272
  ) -> list:
273
+ """Runs the task composition agent with the given input and parameters."""
 
 
 
 
 
 
 
 
 
 
 
 
 
274
  try:
275
+ # Initialize the agent
276
+ agent = TaskComposerAgent()
 
277
 
278
+ # Run the agent with the input
279
+ output = await agent.compose_tasks(input_str, parameters)
 
 
280
 
281
+ logger.debug("Agent output: %s", output)
282
+ return output
283
 
284
  except Exception as e:
285
+ logger.error("Error running task composer agent: %s", e)
286
+ # Return empty list on error
287
+ return []
src/handlers/__init__.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Handlers module for Yuga Planner.
3
+
4
+ This module contains handlers for web UI interactions and MCP API endpoints.
5
+ """
6
+
7
+ from .web_backend import (
8
+ load_data,
9
+ show_solved,
10
+ start_timer,
11
+ auto_poll,
12
+ show_mock_project_content,
13
+ )
14
+
15
+ from .mcp_backend import (
16
+ process_message_and_attached_file,
17
+ )
18
+
19
+ __all__ = [
20
+ "load_data",
21
+ "show_solved",
22
+ "start_timer",
23
+ "auto_poll",
24
+ "show_mock_project_content",
25
+ "process_message_and_attached_file",
26
+ ]
src/{mcp_handlers.py β†’ handlers/mcp_backend.py} RENAMED
@@ -32,9 +32,9 @@ import time
32
 
33
  from utils.extract_calendar import extract_ical_entries
34
 
35
- from factory.data_provider import generate_mcp_data
36
  from services import ScheduleService, StateService
37
- from helpers import schedule_to_dataframe
38
 
39
  from utils.logging_config import setup_logging, get_logger, is_debug_enabled
40
 
@@ -143,7 +143,7 @@ async def process_message_and_attached_file(file_path: str, message_body: str) -
143
  solved_schedule = StateService.get_solved_schedule(job_id)
144
 
145
  # Check if we have a valid solution
146
- if solved_schedule and solved_schedule.score is not None:
147
  processing_time = time.time() - start_time
148
  logger.info(
149
  "Schedule solved after %d polls! (Total time: %.2fs)",
@@ -151,35 +151,58 @@ async def process_message_and_attached_file(file_path: str, message_body: str) -
151
  processing_time,
152
  )
153
 
154
- # Convert to final dataframe
155
- final_df = schedule_to_dataframe(solved_schedule)
156
-
157
- # Generate status message
158
- status_message = ScheduleService.generate_status_message(
159
- solved_schedule
160
- )
161
-
162
- logger.info("Final Status: %s", status_message)
163
-
164
- # Return comprehensive JSON response
165
- return {
166
- "status": "success",
167
- "message": "Schedule solved successfully",
168
- "file_info": {
169
- "path": file_path,
170
- "calendar_entries_count": len(calendar_entries),
171
- },
172
- "calendar_entries": calendar_entries,
173
- "solution_status": status_message,
174
- "schedule": final_df.to_dict(
175
- orient="records"
176
- ), # Convert to list of dicts for JSON
177
- "job_id": job_id,
178
- "polls_required": poll_count + 1,
179
- "processing_time_seconds": processing_time,
180
- "timestamp": time.time(),
181
- "debug_mode": debug_mode,
182
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
183
 
184
  if debug_mode:
185
  logger.debug("Poll %d/%d: Still solving...", poll_count + 1, max_polls)
 
32
 
33
  from utils.extract_calendar import extract_ical_entries
34
 
35
+ from factory.data.provider import generate_mcp_data
36
  from services import ScheduleService, StateService
37
+ from factory.data.formatters import schedule_to_dataframe
38
 
39
  from utils.logging_config import setup_logging, get_logger, is_debug_enabled
40
 
 
143
  solved_schedule = StateService.get_solved_schedule(job_id)
144
 
145
  # Check if we have a valid solution
146
+ if solved_schedule is not None:
147
  processing_time = time.time() - start_time
148
  logger.info(
149
  "Schedule solved after %d polls! (Total time: %.2fs)",
 
151
  processing_time,
152
  )
153
 
154
+ try:
155
+ # Convert to final dataframe
156
+ final_df = schedule_to_dataframe(solved_schedule)
157
+
158
+ # Generate status message
159
+ status_message = ScheduleService.generate_status_message(
160
+ solved_schedule
161
+ )
162
+
163
+ logger.info("Final Status: %s", status_message)
164
+
165
+ # Return comprehensive JSON response
166
+ response_data = {
167
+ "status": "success",
168
+ "message": "Schedule solved successfully",
169
+ "file_info": {
170
+ "path": file_path,
171
+ "calendar_entries_count": len(calendar_entries),
172
+ },
173
+ "calendar_entries": calendar_entries,
174
+ "solution_status": status_message,
175
+ "schedule": final_df.to_dict(
176
+ orient="records"
177
+ ), # Convert to list of dicts for JSON
178
+ "job_id": job_id,
179
+ "polls_required": poll_count + 1,
180
+ "processing_time_seconds": processing_time,
181
+ "timestamp": time.time(),
182
+ "debug_mode": debug_mode,
183
+ }
184
+
185
+ logger.debug(
186
+ "Returning JSON response with %d schedule entries",
187
+ len(response_data["schedule"]),
188
+ )
189
+ return response_data
190
+
191
+ except Exception as e:
192
+ logger.error(
193
+ "Error converting schedule to JSON: %s",
194
+ e,
195
+ exc_info=debug_mode,
196
+ )
197
+ # Return error response instead of raising
198
+ return {
199
+ "error": f"Error converting schedule to JSON: {str(e)}",
200
+ "status": "conversion_failed",
201
+ "job_id": job_id,
202
+ "processing_time_seconds": processing_time,
203
+ "timestamp": time.time(),
204
+ "debug_mode": debug_mode,
205
+ }
206
 
207
  if debug_mode:
208
  logger.debug("Poll %d/%d: Still solving...", poll_count + 1, max_polls)
src/{handlers.py β†’ handlers/web_backend.py} RENAMED
@@ -1,9 +1,10 @@
1
  from typing import Tuple
 
2
 
3
  import pandas as pd
4
  import gradio as gr
5
 
6
- from utils.logging_config import setup_logging, get_logger, is_debug_enabled
7
 
8
  # Initialize logging
9
  setup_logging()
@@ -247,8 +248,6 @@ def _ensure_log_streaming_setup(debug: bool = False) -> None:
247
  """
248
  if debug:
249
  # Force debug mode setup if explicitly requested
250
- import os
251
-
252
  os.environ["YUGA_DEBUG"] = "true"
253
  setup_logging("DEBUG")
254
 
 
1
  from typing import Tuple
2
+ import os
3
 
4
  import pandas as pd
5
  import gradio as gr
6
 
7
+ from utils.logging_config import setup_logging, get_logger
8
 
9
  # Initialize logging
10
  setup_logging()
 
248
  """
249
  if debug:
250
  # Force debug mode setup if explicitly requested
 
 
251
  os.environ["YUGA_DEBUG"] = "true"
252
  setup_logging("DEBUG")
253
 
src/services/__init__.py CHANGED
@@ -4,11 +4,11 @@ Services module for Yuga Planner business logic.
4
  This module contains all the business logic separated from the UI handlers.
5
  """
6
 
7
- from .logging_service import LoggingService
8
- from .schedule_service import ScheduleService
9
- from .data_service import DataService
10
- from .mock_projects_service import MockProjectService
11
- from .state_service import StateService
12
 
13
  __all__ = [
14
  "LoggingService",
 
4
  This module contains all the business logic separated from the UI handlers.
5
  """
6
 
7
+ from .logging import LoggingService
8
+ from .schedule import ScheduleService
9
+ from .data import DataService
10
+ from .mock_projects import MockProjectService
11
+ from .state import StateService
12
 
13
  __all__ = [
14
  "LoggingService",
src/services/constraint_analyzer.py ADDED
@@ -0,0 +1,278 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, List, Any
2
+ from constraint_solvers.timetable.domain import EmployeeSchedule
3
+ from constraint_solvers.timetable.solver import solution_manager
4
+
5
+
6
class ConstraintAnalyzerService:
    """
    Service for analyzing scheduling solutions using Timefold's native constraint analysis.

    This service provides methods to analyze constraint violations, generate suggestions,
    and understand solution quality using Timefold's built-in ScoreAnalysis and SolutionManager APIs.
    """

    @staticmethod
    def analyze_constraint_violations(schedule: EmployeeSchedule) -> str:
        """
        Analyze constraint violations in a schedule using Timefold's native score analysis.

        Args:
            schedule: The schedule to analyze

        Returns:
            Detailed string describing constraint violations and their breakdown
        """
        # NOTE(review): only hard-score violations trigger an analysis; a
        # schedule with soft-score penalties still reports "no violations".
        # Confirm that soft violations are intentionally ignored here.
        if not schedule.score or schedule.score.hard_score >= 0:
            return "No constraint violations detected."

        # Delegate to Timefold's solution manager and return its built-in summary.
        score_analysis = solution_manager.analyze(schedule)
        return score_analysis.summary

    @staticmethod
    def get_detailed_analysis(schedule: EmployeeSchedule) -> Dict[str, Any]:
        """
        Get detailed constraint analysis as a structured dictionary.

        Args:
            schedule: The schedule to analyze

        Returns:
            Dictionary containing the overall scores plus a per-constraint
            breakdown (score, match count, and per-match justifications).
        """
        score_analysis = solution_manager.analyze(schedule)

        analysis_result: Dict[str, Any] = {
            "total_score": str(score_analysis.score),
            "hard_score": score_analysis.score.hard_score,
            "soft_score": score_analysis.score.soft_score,
            "constraints": {},
        }

        # Analyze each constraint
        for (
            constraint_ref,
            constraint_analysis,
        ) in score_analysis.constraint_map.items():
            constraint_info: Dict[str, Any] = {
                "score": str(constraint_analysis.score),
                "match_count": constraint_analysis.match_count,
                "matches": [
                    {
                        "score": str(match_analysis.score),
                        # Justification may be absent; keep None for JSON clients.
                        "justification": str(match_analysis.justification)
                        if match_analysis.justification
                        else None,
                    }
                    for match_analysis in constraint_analysis.matches
                ],
            }
            analysis_result["constraints"][constraint_ref.constraint_id] = (
                constraint_info
            )

        return analysis_result

    @staticmethod
    def get_broken_constraints(schedule: EmployeeSchedule) -> List[Dict[str, Any]]:
        """
        Get a list of broken constraints with their details.

        Args:
            schedule: The schedule to analyze

        Returns:
            List of dictionaries, each containing information about a broken constraint
        """
        score_analysis = solution_manager.analyze(schedule)
        broken_constraints = []

        for (
            constraint_ref,
            constraint_analysis,
        ) in score_analysis.constraint_map.items():
            # Only include constraints that have a negative impact on the score
            if (
                constraint_analysis.score.hard_score < 0
                or constraint_analysis.score.soft_score < 0
            ):
                broken_constraints.append(
                    {
                        "constraint_id": constraint_ref.constraint_id,
                        "score": str(constraint_analysis.score),
                        "hard_score": constraint_analysis.score.hard_score,
                        "soft_score": constraint_analysis.score.soft_score,
                        "match_count": constraint_analysis.match_count,
                        "constraint_name": constraint_ref.constraint_name,
                    }
                )

        return broken_constraints

    @staticmethod
    def compare_solutions(
        old_schedule: EmployeeSchedule, new_schedule: EmployeeSchedule
    ) -> Dict[str, Any]:
        """
        Compare two solutions and identify what changed between them.

        Args:
            old_schedule: The previous schedule solution
            new_schedule: The new schedule solution

        Returns:
            Dictionary containing the differences between the two solutions
        """
        old_analysis = solution_manager.analyze(old_schedule)
        new_analysis = solution_manager.analyze(new_schedule)

        # ScoreAnalysis arithmetic: subtraction yields a per-constraint diff.
        diff = old_analysis - new_analysis

        comparison_result: Dict[str, Any] = {
            "old_score": str(old_analysis.score),
            "new_score": str(new_analysis.score),
            "score_difference": str(diff.score),
            # "Improved" means a better hard score, or an equal hard score
            # with a better soft score (lexicographic score comparison).
            "improved": (
                new_analysis.score.hard_score > old_analysis.score.hard_score
                or (
                    new_analysis.score.hard_score == old_analysis.score.hard_score
                    and new_analysis.score.soft_score > old_analysis.score.soft_score
                )
            ),
            "changed_constraints": {},
        }

        # Analyze changes in constraints
        for constraint_ref, constraint_analysis in diff.constraint_map.items():
            comparison_result["changed_constraints"][constraint_ref.constraint_id] = {
                "score_difference": str(constraint_analysis.score),
                "match_count": constraint_analysis.match_count,
                "changes": [
                    str(match.justification)
                    for match in constraint_analysis.matches
                    if match.justification
                ],
            }

        return comparison_result

    @staticmethod
    def _indictment_summary(indictment) -> Dict[str, Any]:
        """Convert a single Timefold indictment into a JSON-friendly dict."""
        return {
            "total_score": str(indictment.score),
            "hard_score": indictment.score.hard_score,
            "soft_score": indictment.score.soft_score,
            "constraint_matches": [
                {
                    "constraint_name": match.constraint_name,
                    "score": str(match.score),
                }
                for match in indictment.constraint_match_set
            ],
        }

    @staticmethod
    def get_heat_map_data(schedule: EmployeeSchedule) -> Dict[Any, Dict[str, Any]]:
        """
        Get heat map data showing which planning entities have the most constraint violations.

        Args:
            schedule: The schedule to analyze

        Returns:
            Dictionary mapping planning entities (tasks and employees) to
            their constraint impact
        """
        score_explanation = solution_manager.explain(schedule)
        indictment_map = score_explanation.indictment_map

        heat_map_data: Dict[Any, Dict[str, Any]] = {}

        # Tasks first, then employees — same construction for both, so the
        # shared formatting lives in _indictment_summary.
        for entity in (*schedule.tasks, *schedule.employees):
            indictment = indictment_map.get(entity)
            if indictment is not None:
                heat_map_data[entity] = ConstraintAnalyzerService._indictment_summary(
                    indictment
                )

        return heat_map_data

    @staticmethod
    def generate_improvement_suggestions(schedule: EmployeeSchedule) -> List[str]:
        """
        Generate improvement suggestions based on constraint analysis.

        Args:
            schedule: The schedule to analyze

        Returns:
            List of actionable suggestions for improving the schedule
        """
        if not schedule.score or schedule.score.hard_score >= 0:
            return [
                "Schedule is feasible. Consider optimizing soft constraints for better quality."
            ]

        broken_constraints = ConstraintAnalyzerService.get_broken_constraints(schedule)
        suggestions = []

        # Generate suggestions based on broken constraint types.
        # (A plain if/elif chain replaces the previous match statement whose
        # capture patterns rebound the subject variable — same semantics,
        # clearer intent.)
        for constraint in broken_constraints:
            constraint_id = constraint["constraint_id"].lower()

            if "skill" in constraint_id:
                suggestions.append(
                    f"Skill constraint violation: Consider adding employees with required skills "
                    f"or reassigning tasks ({constraint['match_count']} violations)"
                )
            elif "availability" in constraint_id or "time" in constraint_id:
                suggestions.append(
                    f"Time/Availability constraint violation: Check employee schedules and "
                    f"task timing ({constraint['match_count']} violations)"
                )
            elif "sequence" in constraint_id or "order" in constraint_id:
                suggestions.append(
                    f"Sequencing constraint violation: Review task dependencies and ordering "
                    f"({constraint['match_count']} violations)"
                )
            elif "capacity" in constraint_id or "workload" in constraint_id:
                suggestions.append(
                    f"Capacity constraint violation: Distribute workload more evenly or "
                    f"add more resources ({constraint['match_count']} violations)"
                )
            else:
                suggestions.append(
                    f"Constraint '{constraint['constraint_id']}' violated "
                    f"({constraint['match_count']} times) - review constraint definition"
                )

        # Reached when hard score is negative but no constraint reported a
        # negative score (e.g. analysis mismatch) — give a generic hint.
        if not suggestions:
            suggestions.append(
                "Hard constraints violated. Review constraint definitions and problem data."
            )

        return suggestions
src/services/{data_service.py β†’ data.py} RENAMED
@@ -5,7 +5,7 @@ from typing import Dict, List, Tuple, Union, Optional, Any
5
 
6
  import pandas as pd
7
 
8
- from factory.data_provider import (
9
  generate_agent_data,
10
  DATA_PARAMS,
11
  TimeTableDataParameters,
@@ -19,8 +19,8 @@ from constraint_solvers.timetable.domain import (
19
  Employee,
20
  )
21
 
22
- from helpers import schedule_to_dataframe, employees_to_dataframe
23
- from .mock_projects_service import MockProjectService
24
  from utils.logging_config import setup_logging, get_logger
25
 
26
  # Initialize logging
 
5
 
6
  import pandas as pd
7
 
8
+ from factory.data.provider import (
9
  generate_agent_data,
10
  DATA_PARAMS,
11
  TimeTableDataParameters,
 
19
  Employee,
20
  )
21
 
22
+ from factory.data.formatters import schedule_to_dataframe, employees_to_dataframe
23
+ from .mock_projects import MockProjectService
24
  from utils.logging_config import setup_logging, get_logger
25
 
26
  # Initialize logging
src/services/{logging_service.py β†’ logging.py} RENAMED
@@ -1,8 +1,8 @@
1
- import logging
2
- import threading
3
  from datetime import datetime
4
  from typing import List
5
 
 
 
6
  from utils.logging_config import setup_logging, get_logger, is_debug_enabled
7
 
8
  # Initialize logging
 
 
 
1
  from datetime import datetime
2
  from typing import List
3
 
4
+ import logging, threading
5
+
6
  from utils.logging_config import setup_logging, get_logger, is_debug_enabled
7
 
8
  # Initialize logging
src/services/{mock_projects_service.py β†’ mock_projects.py} RENAMED
File without changes
src/services/{schedule_service.py β†’ schedule.py} RENAMED
@@ -5,21 +5,27 @@ from typing import Tuple, Dict, Any, Optional
5
  import pandas as pd
6
  import gradio as gr
7
 
8
- from .state_service import StateService
9
  from constraint_solvers.timetable.solver import solver_manager
10
- from factory.data_provider import (
11
- generate_employees,
12
- generate_employee_availability,
13
  DATA_PARAMS,
14
  TimeTableDataParameters,
15
  SLOTS_PER_DAY,
16
  )
17
 
 
 
 
 
 
 
 
18
  from constraint_solvers.timetable.domain import EmployeeSchedule, ScheduleInfo
19
 
20
- from helpers import schedule_to_dataframe, employees_to_dataframe
21
- from .data_service import DataService
22
- from constraint_solvers.timetable.analysis import ConstraintViolationAnalyzer
23
  from utils.logging_config import setup_logging, get_logger
24
 
25
  # Initialize logging
@@ -333,16 +339,22 @@ class ScheduleService:
333
  if hard_score < 0:
334
  # Hard constraints are violated - the problem is infeasible
335
  violation_count = abs(int(hard_score))
 
336
  violation_details = (
337
- ConstraintViolationAnalyzer.analyze_constraint_violations(schedule)
338
  )
339
- suggestions = ConstraintViolationAnalyzer.generate_suggestions(schedule)
 
 
 
 
340
  suggestion_text = "\n".join(f"β€’ {s}" for s in suggestions)
341
 
342
  status_message = (
343
  f"⚠️ CONSTRAINTS VIOLATED: {violation_count} hard constraint(s) could not be satisfied. "
344
  f"The schedule is not feasible.\n\n{violation_details}\n\nSuggestions:\n{suggestion_text}"
345
  )
 
346
  logger.warning(
347
  f"Infeasible solution detected. Hard score: {hard_score}"
348
  )
 
5
  import pandas as pd
6
  import gradio as gr
7
 
8
+ from .state import StateService
9
  from constraint_solvers.timetable.solver import solver_manager
10
+
11
+ from factory.data.provider import (
 
12
  DATA_PARAMS,
13
  TimeTableDataParameters,
14
  SLOTS_PER_DAY,
15
  )
16
 
17
+ from factory.data.generators import (
18
+ generate_employees,
19
+ generate_employee_availability,
20
+ )
21
+
22
+ from factory.data.formatters import schedule_to_dataframe, employees_to_dataframe
23
+
24
  from constraint_solvers.timetable.domain import EmployeeSchedule, ScheduleInfo
25
 
26
+ from .data import DataService
27
+ from .constraint_analyzer import ConstraintAnalyzerService
28
+
29
  from utils.logging_config import setup_logging, get_logger
30
 
31
  # Initialize logging
 
339
  if hard_score < 0:
340
  # Hard constraints are violated - the problem is infeasible
341
  violation_count = abs(int(hard_score))
342
+
343
  violation_details = (
344
+ ConstraintAnalyzerService.analyze_constraint_violations(schedule)
345
  )
346
+
347
+ suggestions = (
348
+ ConstraintAnalyzerService.generate_improvement_suggestions(schedule)
349
+ )
350
+
351
  suggestion_text = "\n".join(f"β€’ {s}" for s in suggestions)
352
 
353
  status_message = (
354
  f"⚠️ CONSTRAINTS VIOLATED: {violation_count} hard constraint(s) could not be satisfied. "
355
  f"The schedule is not feasible.\n\n{violation_details}\n\nSuggestions:\n{suggestion_text}"
356
  )
357
+
358
  logger.warning(
359
  f"Infeasible solution detected. Hard score: {hard_score}"
360
  )
src/services/{state_service.py β†’ state.py} RENAMED
@@ -1,6 +1,9 @@
1
  from typing import Optional
 
2
  from state import app_state
 
3
  from constraint_solvers.timetable.domain import EmployeeSchedule
 
4
  from utils.logging_config import setup_logging, get_logger
5
 
6
  # Initialize logging
 
1
  from typing import Optional
2
+
3
  from state import app_state
4
+
5
  from constraint_solvers.timetable.domain import EmployeeSchedule
6
+
7
  from utils.logging_config import setup_logging, get_logger
8
 
9
  # Initialize logging
src/utils/__init__.py CHANGED
@@ -1,3 +1,24 @@
1
  """
2
- Utils package initialization.
 
 
 
3
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  """
2
+ Utils package for common utilities and helper functions.
3
+
4
+ This module contains logging configuration, secret loading, calendar extraction,
5
+ and markdown analysis utilities.
6
  """
7
+
8
+ from .logging_config import setup_logging, get_logger, is_debug_enabled
9
+ from .load_secrets import load_secrets
10
+ from .extract_calendar import extract_ical_entries
11
+ from .markdown_analyzer import MarkdownAnalyzer
12
+
13
+ __all__ = [
14
+ # Logging utilities
15
+ "setup_logging",
16
+ "get_logger",
17
+ "is_debug_enabled",
18
+ # Configuration utilities
19
+ "load_secrets",
20
+ # Calendar utilities
21
+ "extract_ical_entries",
22
+ # Markdown utilities
23
+ "MarkdownAnalyzer",
24
+ ]
src/utils/load_secrets.py CHANGED
@@ -1,4 +1,5 @@
1
  import os
 
2
 
3
  from utils.logging_config import setup_logging, get_logger
4
 
@@ -19,8 +20,6 @@ def load_secrets(secrets_file: str):
19
  """
20
  try:
21
  # Import secrets from the specified file
22
- import importlib.util
23
-
24
  spec = importlib.util.spec_from_file_location("secrets", secrets_file)
25
  secrets = importlib.util.module_from_spec(spec)
26
  spec.loader.exec_module(secrets)
 
1
  import os
2
+ import importlib.util
3
 
4
  from utils.logging_config import setup_logging, get_logger
5
 
 
20
  """
21
  try:
22
  # Import secrets from the specified file
 
 
23
  spec = importlib.util.spec_from_file_location("secrets", secrets_file)
24
  secrets = importlib.util.module_from_spec(spec)
25
  spec.loader.exec_module(secrets)
tests/test_factory.py CHANGED
@@ -7,7 +7,7 @@ from icalendar import Calendar, vDDDTypes
7
  # Load environment variables for agent (if needed)
8
  load_secrets("tests/secrets/creds.py")
9
 
10
- import factory.data_provider as data_provider
11
  from src.utils.extract_calendar import extract_ical_entries
12
 
13
 
 
7
  # Load environment variables for agent (if needed)
8
  load_secrets("tests/secrets/creds.py")
9
 
10
+ import factory.data.provider as data_provider
11
  from src.utils.extract_calendar import extract_ical_entries
12
 
13