brickfrog committed
Commit 313f83b · verified · 1 Parent(s): 56fd459

Upload folder using huggingface_hub

ankigen_core/agents/__init__.py CHANGED
@@ -16,15 +16,13 @@ from .judges import (
     JudgeCoordinator,
 )
 from .enhancers import RevisionAgent, EnhancementAgent
-from .feature_flags import AgentFeatureFlags
-from .metrics import AgentMetrics
 from .config import AgentConfigManager
 
 __all__ = [
     "BaseAgentWrapper",
     "AgentConfig",
     "SubjectExpertAgent",
-    "PedagogicalAgent",
+    "PedagogicalAgent",
     "ContentStructuringAgent",
     "GenerationCoordinator",
     "ContentAccuracyJudge",
@@ -35,7 +33,5 @@ __all__ = [
     "JudgeCoordinator",
     "RevisionAgent",
     "EnhancementAgent",
-    "AgentFeatureFlags",
-    "AgentMetrics",
     "AgentConfigManager",
-]
+]
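For orientation, a quick sketch of the package surface after this change (names taken from the updated __all__ above; the metrics and feature-flag exports are gone):

# Sketch: importing the trimmed public API after this commit.
from ankigen_core.agents import (
    AgentConfig,
    AgentConfigManager,
    BaseAgentWrapper,
)

manager = AgentConfigManager()
print(manager.list_configs())  # agent names loaded from the bundled Jinja templates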
ankigen_core/agents/base.py CHANGED
@@ -1,30 +1,50 @@
 # Base agent wrapper and configuration classes
 
-from typing import Dict, Any, Optional, List, Type
+from typing import Dict, Any, Optional, List
 from dataclasses import dataclass
 from pydantic import BaseModel
 import asyncio
-import time
+import json
 from openai import AsyncOpenAI
-from agents import Agent, Runner
+from agents import Agent, Runner, ModelSettings
 
 from ankigen_core.logging import logger
-from ankigen_core.models import Card
+from .token_tracker import track_usage_from_agents_sdk
+
+
+def parse_agent_json_response(response: Any) -> Dict[str, Any]:
+    """Parse agent response, handling markdown code blocks if present"""
+    if isinstance(response, str):
+        # Strip markdown code blocks
+        response = response.strip()
+        if response.startswith("```json"):
+            response = response[7:]  # Remove ```json
+        if response.startswith("```"):
+            response = response[3:]  # Remove ```
+        if response.endswith("```"):
+            response = response[:-3]  # Remove trailing ```
+        response = response.strip()
+
+        return json.loads(response)
+    else:
+        return response
 
 
 @dataclass
 class AgentConfig:
     """Configuration for individual agents"""
+
     name: str
     instructions: str
-    model: str = "gpt-4o"
+    model: str = "gpt-4.1"
     temperature: float = 0.7
     max_tokens: Optional[int] = None
     timeout: float = 30.0
     retry_attempts: int = 3
     enable_tracing: bool = True
     custom_prompts: Optional[Dict[str, str]] = None
-
+    output_type: Optional[type] = None  # For structured outputs
+
     def __post_init__(self):
         if self.custom_prompts is None:
             self.custom_prompts = {}
@@ -32,162 +52,154 @@ class AgentConfig:
 
 class BaseAgentWrapper:
     """Base wrapper for OpenAI Agents SDK integration"""
-
+
     def __init__(self, config: AgentConfig, openai_client: AsyncOpenAI):
         self.config = config
         self.openai_client = openai_client
         self.agent = None
         self.runner = None
-        self._performance_metrics = {
-            "total_calls": 0,
-            "successful_calls": 0,
-            "average_response_time": 0.0,
-            "error_count": 0,
-        }
-
+
     async def initialize(self):
-        """Initialize the OpenAI agent"""
+        """Initialize the OpenAI agent with structured output support"""
         try:
-            self.agent = Agent(
-                name=self.config.name,
-                instructions=self.config.instructions,
-                model=self.config.model,
-                temperature=self.config.temperature,
-            )
-
-            # Initialize runner with the OpenAI client
-            self.runner = Runner(
-                agent=self.agent,
-                client=self.openai_client,
-            )
-
-            logger.info(f"Initialized agent: {self.config.name}")
-
+            # Create model settings with temperature
+            model_settings = ModelSettings(temperature=self.config.temperature)
+
+            # Use clean instructions without JSON formatting hacks
+            clean_instructions = self.config.instructions
+
+            # Create agent with structured output if output_type is provided
+            if self.config.output_type:
+                self.agent = Agent(
+                    name=self.config.name,
+                    instructions=clean_instructions,
+                    model=self.config.model,
+                    model_settings=model_settings,
+                    output_type=self.config.output_type,
+                )
+                logger.info(
+                    f"Initialized agent with structured output: {self.config.name} -> {self.config.output_type}"
+                )
+            else:
+                self.agent = Agent(
+                    name=self.config.name,
+                    instructions=clean_instructions,
+                    model=self.config.model,
+                    model_settings=model_settings,
+                )
+                logger.info(
+                    f"Initialized agent (no structured output): {self.config.name}"
+                )
+
         except Exception as e:
             logger.error(f"Failed to initialize agent {self.config.name}: {e}")
             raise
-
-    async def execute(self, user_input: str, context: Dict[str, Any] = None) -> Any:
+
+    async def execute(
+        self, user_input: str, context: Optional[Dict[str, Any]] = None
+    ) -> tuple[Any, Dict[str, Any]]:
         """Execute the agent with user input and optional context"""
-        if not self.runner:
+        if not self.agent:
             await self.initialize()
-
-        start_time = time.time()
-        self._performance_metrics["total_calls"] += 1
-
+
         try:
             # Add context to the user input if provided
             enhanced_input = user_input
             if context is not None:
                 context_str = "\n".join([f"{k}: {v}" for k, v in context.items()])
                 enhanced_input = f"{user_input}\n\nContext:\n{context_str}"
-
-            # Execute the agent
+
+            # Execute the agent using Runner.run()
+            if self.agent is None:
+                raise ValueError("Agent not initialized")
+
+            logger.info(f"🤖 EXECUTING AGENT: {self.config.name}")
+            logger.info(f"📝 INPUT: {enhanced_input[:200]}...")
+
             result = await asyncio.wait_for(
-                self._run_agent(enhanced_input),
-                timeout=self.config.timeout
+                Runner.run(
+                    starting_agent=self.agent,
+                    input=enhanced_input,
+                ),
+                timeout=self.config.timeout,
             )
-
-            # Update metrics
-            response_time = time.time() - start_time
-            self._update_performance_metrics(response_time, success=True)
-
-            logger.debug(f"Agent {self.config.name} executed successfully in {response_time:.2f}s")
-            return result
-
+
+            logger.info(f"Agent {self.config.name} executed successfully")
+
+            # Extract usage information from raw_responses
+            total_usage = {
+                "input_tokens": 0,
+                "output_tokens": 0,
+                "total_tokens": 0,
+                "requests": 0,
+            }
+
+            if hasattr(result, "raw_responses") and result.raw_responses:
+                for response in result.raw_responses:
+                    if hasattr(response, "usage") and response.usage:
+                        total_usage["input_tokens"] += response.usage.input_tokens
+                        total_usage["output_tokens"] += response.usage.output_tokens
+                        total_usage["total_tokens"] += response.usage.total_tokens
+                        total_usage["requests"] += response.usage.requests
+
+                # Track usage with the token tracker
+                track_usage_from_agents_sdk(total_usage, self.config.model)
+                logger.info(f"💰 AGENT USAGE: {total_usage}")
+
+            # Extract the final output from the result
+            if hasattr(result, "new_items") and result.new_items:
+                # Get the last message content
+                from agents.items import ItemHelpers
+
+                text_output = ItemHelpers.text_message_outputs(result.new_items)
+
+                # If we have structured output, the response should already be parsed
+                if self.config.output_type and self.config.output_type is not str:
+                    logger.info(
+                        f"✅ STRUCTURED OUTPUT: {type(text_output)} -> {self.config.output_type}"
+                    )
+                    # The agents SDK should return the structured object directly
+                    return text_output, total_usage
+                else:
+                    return text_output, total_usage
+            else:
+                return str(result), total_usage
+
         except asyncio.TimeoutError:
-            self._performance_metrics["error_count"] += 1
-            logger.error(f"Agent {self.config.name} timed out after {self.config.timeout}s")
+            logger.error(
+                f"Agent {self.config.name} timed out after {self.config.timeout}s"
+            )
             raise
         except Exception as e:
-            self._performance_metrics["error_count"] += 1
             logger.error(f"Agent {self.config.name} execution failed: {e}")
             raise
-
-    async def _run_agent(self, input_text: str) -> Any:
-        """Run the agent with retry logic"""
-        last_exception = None
-
-        for attempt in range(self.config.retry_attempts):
-            try:
-                # Create a new run
-                run = await self.runner.create_run(messages=[
-                    {"role": "user", "content": input_text}
-                ])
-
-                # Wait for completion
-                while run.status in ["queued", "in_progress"]:
-                    await asyncio.sleep(0.1)
-                    run = await self.runner.get_run(run.id)
-
-                if run.status == "completed":
-                    # Get the final message
-                    messages = await self.runner.get_messages(run.thread_id)
-                    if messages and messages[-1].role == "assistant":
-                        return messages[-1].content
-                    else:
-                        raise ValueError("No assistant response found")
-                else:
-                    raise ValueError(f"Run failed with status: {run.status}")
-
-            except Exception as e:
-                last_exception = e
-                if attempt < self.config.retry_attempts - 1:
-                    wait_time = 2 ** attempt
-                    logger.warning(f"Agent {self.config.name} attempt {attempt + 1} failed, retrying in {wait_time}s: {e}")
-                    await asyncio.sleep(wait_time)
-                else:
-                    logger.error(f"Agent {self.config.name} failed after {self.config.retry_attempts} attempts")
-
-        raise last_exception
-
-    def _update_performance_metrics(self, response_time: float, success: bool):
-        """Update performance metrics"""
-        if success:
-            self._performance_metrics["successful_calls"] += 1
-
-            # Update average response time
-            total_successful = self._performance_metrics["successful_calls"]
-            if total_successful > 0:
-                current_avg = self._performance_metrics["average_response_time"]
-                self._performance_metrics["average_response_time"] = (
-                    (current_avg * (total_successful - 1) + response_time) / total_successful
-                )
-
-    def get_performance_metrics(self) -> Dict[str, Any]:
-        """Get performance metrics for this agent"""
-        return {
-            **self._performance_metrics,
-            "success_rate": (
-                self._performance_metrics["successful_calls"] /
-                max(1, self._performance_metrics["total_calls"])
-            ),
-            "agent_name": self.config.name,
-        }
-
-    async def handoff_to(self, target_agent: "BaseAgentWrapper", context: Dict[str, Any]) -> Any:
+
+    async def handoff_to(
+        self, target_agent: "BaseAgentWrapper", context: Dict[str, Any]
+    ) -> Any:
         """Hand off execution to another agent with context"""
-        logger.info(f"Handing off from {self.config.name} to {target_agent.config.name}")
-
+        logger.info(
+            f"Handing off from {self.config.name} to {target_agent.config.name}"
+        )
+
         # Prepare handoff context
         handoff_context = {
             "from_agent": self.config.name,
             "handoff_reason": context.get("reason", "Standard workflow handoff"),
-            **context
+            **context,
         }
-
+
         # Execute the target agent
         return await target_agent.execute(
-            context.get("user_input", "Continue processing"),
-            handoff_context
+            context.get("user_input", "Continue processing"), handoff_context
        )
 
 
 class AgentResponse(BaseModel):
     """Standard response format for agents"""
+
     success: bool
     data: Any
     agent_name: str
-    execution_time: float
     metadata: Dict[str, Any] = {}
-    errors: List[str] = []
+    errors: List[str] = []
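The new parse_agent_json_response helper is self-contained, so its behavior can be checked directly; a minimal sketch with illustrative values:

# Illustrative use of parse_agent_json_response: fenced JSON is unwrapped,
# and non-string inputs (e.g., already-structured output) pass through as-is.
from ankigen_core.agents.base import parse_agent_json_response

fenced = '```json\n{"answer": "42"}\n```'
assert parse_agent_json_response(fenced) == {"answer": "42"}
assert parse_agent_json_response({"already": "parsed"}) == {"already": "parsed"}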
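Note the changed contract: execute() now returns an (output, usage) tuple instead of a bare result, so existing callers must unpack it. A caller sketch under that assumption (the "demo" config values are placeholders, and an OPENAI_API_KEY is assumed to be set):

# Caller sketch for the new execute() contract; config values are placeholders.
import asyncio
from openai import AsyncOpenAI
from ankigen_core.agents.base import AgentConfig, BaseAgentWrapper

async def main() -> None:
    config = AgentConfig(name="demo", instructions="Answer briefly.")
    agent = BaseAgentWrapper(config, AsyncOpenAI())
    output, usage = await agent.execute("What is spaced repetition?")
    print(output)
    print(usage["total_tokens"])  # aggregated from result.raw_responses

asyncio.run(main())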
ankigen_core/agents/config.py CHANGED
@@ -1,11 +1,10 @@
 # Agent configuration management system
 
 import json
-import yaml
-import os
 from typing import Dict, Any, Optional, List
 from pathlib import Path
 from dataclasses import dataclass, asdict
+from jinja2 import Environment, FileSystemLoader
 
 from ankigen_core.logging import logger
 from .base import AgentConfig
@@ -14,14 +13,15 @@ from .base import AgentConfig
 @dataclass
 class AgentPromptTemplate:
     """Template for agent prompts with variables"""
+
     system_prompt: str
     user_prompt_template: str
     variables: Optional[Dict[str, str]] = None
-
+
     def __post_init__(self):
         if self.variables is None:
             self.variables = {}
-
+
     def render_system_prompt(self, **kwargs) -> str:
         """Render system prompt with provided variables"""
         try:
@@ -30,7 +30,7 @@ class AgentPromptTemplate:
         except KeyError as e:
             logger.error(f"Missing variable in system prompt template: {e}")
             return self.system_prompt
-
+
     def render_user_prompt(self, **kwargs) -> str:
         """Render user prompt template with provided variables"""
         try:
@@ -42,98 +42,139 @@ class AgentPromptTemplate:
 
 
 class AgentConfigManager:
-    """Manages agent configurations from files and runtime updates"""
-
-    def __init__(self, config_dir: Optional[str] = None):
-        self.config_dir = Path(config_dir) if config_dir else Path("config/agents")
+    """Manages agent configurations using Jinja templates and runtime updates"""
+
+    def __init__(
+        self,
+        model_overrides: Optional[Dict[str, str]] = None,
+        template_vars: Optional[Dict[str, Any]] = None,
+    ):
+        self.model_overrides = model_overrides or {}
+        self.template_vars = template_vars or {}
         self.configs: Dict[str, AgentConfig] = {}
         self.prompt_templates: Dict[str, AgentPromptTemplate] = {}
-        self._ensure_config_dir()
-        self._load_default_configs()
-
-    def _ensure_config_dir(self):
-        """Ensure config directory exists"""
-        self.config_dir.mkdir(parents=True, exist_ok=True)
-
-        # Create default config files if they don't exist
-        defaults_dir = self.config_dir / "defaults"
-        defaults_dir.mkdir(exist_ok=True)
-
-        if not (defaults_dir / "generators.yaml").exists():
-            self._create_default_generator_configs()
-
-        if not (defaults_dir / "judges.yaml").exists():
-            self._create_default_judge_configs()
-
-        if not (defaults_dir / "enhancers.yaml").exists():
-            self._create_default_enhancer_configs()
-
+
+        # Set up Jinja2 environment with templates directory
+        template_dir = Path(__file__).parent / "templates"
+        self.jinja_env = Environment(loader=FileSystemLoader(template_dir))
+        self._load_default_configs()
+
+    def update_models(self, model_overrides: Dict[str, str]):
+        """Update model selections and regenerate configs"""
+        self.model_overrides = model_overrides
+        self._load_default_configs()
+        logger.info(f"Updated model overrides: {model_overrides}")
+
+    def update_template_vars(self, template_vars: Dict[str, Any]):
+        """Update template variables and regenerate configs"""
+        self.template_vars = template_vars
+        self._load_default_configs()
+        logger.info(f"Updated template variables: {template_vars}")
+
     def _load_default_configs(self):
-        """Load all default configurations"""
-        try:
-            self._load_configs_from_file("defaults/generators.yaml")
-            self._load_configs_from_file("defaults/judges.yaml")
-            self._load_configs_from_file("defaults/enhancers.yaml")
-            logger.info(f"Loaded {len(self.configs)} agent configurations")
-        except Exception as e:
-            logger.error(f"Failed to load default agent configurations: {e}")
-
-    def _load_configs_from_file(self, filename: str):
-        """Load configurations from a YAML/JSON file"""
-        file_path = self.config_dir / filename
-
-        if not file_path.exists():
-            logger.warning(f"Agent config file not found: {file_path}")
-            return
-
+        """Load all default configurations from Jinja templates"""
+        try:
+            self._load_configs_from_template("generators.j2")
+            self._load_configs_from_template("judges.j2")
+            self._load_configs_from_template("enhancers.j2")
+            self._load_prompt_templates_from_template("prompts.j2")
+            logger.info(
+                f"Loaded {len(self.configs)} agent configurations from Jinja templates"
+            )
+        except Exception as e:
+            logger.error(f"Failed to load agent configurations from templates: {e}")
+
+    def _get_model_for_agent(self, agent_name: str, default_model: str) -> str:
+        """Get model for agent, using override if available"""
+        return self.model_overrides.get(agent_name, default_model)
+
+    def _load_configs_from_template(self, template_name: str):
+        """Load agent configurations from a Jinja template"""
         try:
-            with open(file_path, 'r') as f:
-                if filename.endswith('.yaml') or filename.endswith('.yml'):
-                    data = yaml.safe_load(f)
-                else:
-                    data = json.load(f)
-
-            # Load agent configs
-            if 'agents' in data:
-                for agent_name, agent_data in data['agents'].items():
-                    config = AgentConfig(
-                        name=agent_name,
-                        instructions=agent_data.get('instructions', ''),
-                        model=agent_data.get('model', 'gpt-4o'),
-                        temperature=agent_data.get('temperature', 0.7),
-                        max_tokens=agent_data.get('max_tokens'),
-                        timeout=agent_data.get('timeout', 30.0),
-                        retry_attempts=agent_data.get('retry_attempts', 3),
-                        enable_tracing=agent_data.get('enable_tracing', True),
-                        custom_prompts=agent_data.get('custom_prompts', {})
-                    )
-                    self.configs[agent_name] = config
-
-            # Load prompt templates
-            if 'prompt_templates' in data:
-                for template_name, template_data in data['prompt_templates'].items():
-                    template = AgentPromptTemplate(
-                        system_prompt=template_data.get('system_prompt', ''),
-                        user_prompt_template=template_data.get('user_prompt_template', ''),
-                        variables=template_data.get('variables', {})
-                    )
-                    self.prompt_templates[template_name] = template
-
+            template = self.jinja_env.get_template(template_name)
+
+            # Default models for each agent type
+            default_models = {
+                "subject_expert_model": "gpt-4.1",
+                "pedagogical_agent_model": "gpt-4.1-nano",
+                "content_structuring_model": "gpt-4.1-nano",
+                "generation_coordinator_model": "gpt-4.1",
+                "content_accuracy_judge_model": "gpt-4.1-nano",
+                "pedagogical_judge_model": "gpt-4.1-nano",
+                "clarity_judge_model": "gpt-4.1-nano",
+                "technical_judge_model": "gpt-4.1-nano",
+                "completeness_judge_model": "gpt-4.1-nano",
+                "judge_coordinator_model": "gpt-4.1",
+                "revision_agent_model": "gpt-4.1",
+                "enhancement_agent_model": "gpt-4.1",
+            }
+
+            # Simple mapping: agent_name -> agent_name_model
+            model_vars = {}
+            for agent_name, model in self.model_overrides.items():
+                model_vars[f"{agent_name}_model"] = model
+
+            # Merge all template variables with defaults
+            render_vars = {**default_models, **self.template_vars, **model_vars}
+
+            logger.info(f"Rendering template {template_name} with vars: {render_vars}")
+            rendered_json = template.render(**render_vars)
+            config_data = json.loads(rendered_json)
+
+            # Create AgentConfig objects from the rendered data
+            for agent_name, agent_data in config_data.items():
+                config = AgentConfig(
+                    name=agent_data.get("name", agent_name),
+                    instructions=agent_data.get("instructions", ""),
+                    model=agent_data.get("model", "gpt-4"),
+                    temperature=agent_data.get("temperature", 0.7),
+                    max_tokens=agent_data.get("max_tokens"),
+                    timeout=agent_data.get("timeout", 30.0),
+                    retry_attempts=agent_data.get("retry_attempts", 3),
+                    enable_tracing=agent_data.get("enable_tracing", True),
+                    custom_prompts=agent_data.get("custom_prompts", {}),
+                )
+                self.configs[agent_name] = config
+                logger.info(f"Loaded config for {agent_name}: model={config.model}")
+
         except Exception as e:
-            logger.error(f"Failed to load agent config from {file_path}: {e}")
-
+            logger.error(f"Failed to load configs from template {template_name}: {e}")
+
+    def _load_prompt_templates_from_template(self, template_name: str):
+        """Load prompt templates from a Jinja template"""
+        try:
+            template = self.jinja_env.get_template(template_name)
+
+            # Render with current template variables
+            rendered_json = template.render(**self.template_vars)
+            template_data = json.loads(rendered_json)
+
+            # Create AgentPromptTemplate objects
+            for template_name, template_info in template_data.items():
+                prompt_template = AgentPromptTemplate(
+                    system_prompt=template_info.get("system_prompt", ""),
+                    user_prompt_template=template_info.get("user_prompt_template", ""),
+                    variables=template_info.get("variables", {}),
+                )
+                self.prompt_templates[template_name] = prompt_template
+
+        except Exception as e:
+            logger.error(
+                f"Failed to load prompt templates from template {template_name}: {e}"
+            )
+
     def get_agent_config(self, agent_name: str) -> Optional[AgentConfig]:
         """Get configuration for a specific agent"""
         return self.configs.get(agent_name)
-
+
     def get_config(self, agent_name: str) -> Optional[AgentConfig]:
         """Alias for get_agent_config for compatibility"""
         return self.get_agent_config(agent_name)
-
+
     def get_prompt_template(self, template_name: str) -> Optional[AgentPromptTemplate]:
         """Get a prompt template by name"""
         return self.prompt_templates.get(template_name)
-
+
     def update_agent_config(self, agent_name: str, **kwargs):
         """Update an agent's configuration at runtime"""
         if agent_name in self.configs:
@@ -142,356 +183,114 @@ class AgentConfigManager:
             if hasattr(config, key):
                 setattr(config, key, value)
                 logger.info(f"Updated {agent_name} config: {key} = {value}")
-
-    def update_config(self, agent_name: str, updates: Dict[str, Any]) -> Optional[AgentConfig]:
+
+    def update_config(
+        self, agent_name: str, updates: Dict[str, Any]
+    ) -> Optional[AgentConfig]:
         """Update agent configuration with a dictionary of updates"""
         if agent_name not in self.configs:
             return None
-
+
         config = self.configs[agent_name]
         for key, value in updates.items():
             if hasattr(config, key):
                 setattr(config, key, value)
-
+
         return config
-
+
     def list_configs(self) -> List[str]:
         """List all agent configuration names"""
         return list(self.configs.keys())
-
+
     def list_prompt_templates(self) -> List[str]:
         """List all prompt template names"""
         return list(self.prompt_templates.keys())
-
+
     def load_config_from_dict(self, config_dict: Dict[str, Any]):
         """Load configuration from a dictionary"""
         # Load agent configs
-        if 'agents' in config_dict:
-            for agent_name, agent_data in config_dict['agents'].items():
+        if "agents" in config_dict:
+            for agent_name, agent_data in config_dict["agents"].items():
                 config = AgentConfig(
                     name=agent_name,
-                    instructions=agent_data.get('instructions', ''),
-                    model=agent_data.get('model', 'gpt-4o'),
-                    temperature=agent_data.get('temperature', 0.7),
-                    max_tokens=agent_data.get('max_tokens'),
-                    timeout=agent_data.get('timeout', 30.0),
-                    retry_attempts=agent_data.get('retry_attempts', 3),
-                    enable_tracing=agent_data.get('enable_tracing', True),
-                    custom_prompts=agent_data.get('custom_prompts', {})
+                    instructions=agent_data.get("instructions", ""),
+                    model=agent_data.get("model", "gpt-4.1"),
+                    temperature=agent_data.get("temperature", 0.7),
+                    max_tokens=agent_data.get("max_tokens"),
+                    timeout=agent_data.get("timeout", 30.0),
+                    retry_attempts=agent_data.get("retry_attempts", 3),
+                    enable_tracing=agent_data.get("enable_tracing", True),
+                    custom_prompts=agent_data.get("custom_prompts", {}),
                 )
                 self.configs[agent_name] = config
-
+
         # Load prompt templates
-        if 'prompt_templates' in config_dict:
-            for template_name, template_data in config_dict['prompt_templates'].items():
+        if "prompt_templates" in config_dict:
+            for template_name, template_data in config_dict["prompt_templates"].items():
                 template = AgentPromptTemplate(
-                    system_prompt=template_data.get('system_prompt', ''),
-                    user_prompt_template=template_data.get('user_prompt_template', ''),
-                    variables=template_data.get('variables', {})
+                    system_prompt=template_data.get("system_prompt", ""),
+                    user_prompt_template=template_data.get("user_prompt_template", ""),
+                    variables=template_data.get("variables", {}),
                 )
                 self.prompt_templates[template_name] = template
-
+
     def _validate_config(self, config_data: Dict[str, Any]) -> bool:
         """Validate agent configuration data"""
         # Check required fields
-        if 'name' not in config_data or 'instructions' not in config_data:
+        if "name" not in config_data or "instructions" not in config_data:
             return False
-
+
         # Check temperature range
-        temperature = config_data.get('temperature', 0.7)
+        temperature = config_data.get("temperature", 0.7)
         if not 0.0 <= temperature <= 2.0:
             return False
-
+
         # Check timeout is positive
-        timeout = config_data.get('timeout', 30.0)
+        timeout = config_data.get("timeout", 30.0)
         if timeout <= 0:
             return False
-
+
         return True
-
+
     def save_config_to_file(self, filename: str, agents: List[str] = None):
         """Save current configurations to a file"""
-        file_path = self.config_dir / filename
-
         # Prepare data structure
-        data = {
-            "agents": {},
-            "prompt_templates": {}
-        }
-
+        data = {"agents": {}, "prompt_templates": {}}
+
         # Add agent configs
         agents_to_save = agents if agents else list(self.configs.keys())
         for agent_name in agents_to_save:
             if agent_name in self.configs:
                 config = self.configs[agent_name]
                 data["agents"][agent_name] = asdict(config)
-
+
         # Add prompt templates
         for template_name, template in self.prompt_templates.items():
             data["prompt_templates"][template_name] = asdict(template)
-
+
         try:
-            with open(file_path, 'w') as f:
-                if filename.endswith('.yaml') or filename.endswith('.yml'):
-                    yaml.dump(data, f, default_flow_style=False, indent=2)
-                else:
-                    json.dump(data, f, indent=2)
-            logger.info(f"Saved agent configurations to {file_path}")
+            with open(filename, "w") as f:
+                json.dump(data, f, indent=2)
+            logger.info(f"Saved agent configurations to {filename}")
         except Exception as e:
-            logger.error(f"Failed to save agent config to {file_path}: {e}")
-
-    def _create_default_generator_configs(self):
-        """Create default configuration for generator agents"""
-        config = {
-            "agents": {
-                "subject_expert": {
-                    "instructions": """You are a world-class expert in {subject} with deep pedagogical knowledge.
-                    Your role is to generate high-quality flashcards that demonstrate mastery of {subject} concepts.
-
-                    Key responsibilities:
-                    - Ensure technical accuracy and depth appropriate for the target level
-                    - Use domain-specific terminology correctly
-                    - Include practical applications and real-world examples
-                    - Connect concepts to prerequisite knowledge
-                    - Avoid oversimplification while maintaining clarity
-
-                    Generate cards that test understanding, not just memorization.""",
-                    "model": "gpt-4o",
-                    "temperature": 0.7,
-                    "timeout": 45.0,
-                    "custom_prompts": {
-                        "math": "Focus on problem-solving strategies and mathematical reasoning",
-                        "science": "Emphasize experimental design and scientific method",
-                        "history": "Connect events to broader historical patterns and causation",
-                        "programming": "Include executable examples and best practices"
-                    }
-                },
-                "pedagogical": {
-                    "instructions": """You are an educational specialist focused on learning theory and instructional design.
-                    Your role is to ensure all flashcards follow educational best practices.
-
-                    Apply these frameworks:
-                    - Bloom's Taxonomy: Ensure questions target appropriate cognitive levels
-                    - Spaced Repetition: Design cards for optimal retention
-                    - Cognitive Load Theory: Avoid overwhelming learners
-                    - Active Learning: Encourage engagement and application
-
-                    Review cards for:
-                    - Clear learning objectives
-                    - Appropriate difficulty progression
-                    - Effective use of examples and analogies
-                    - Prerequisite knowledge alignment""",
-                    "model": "gpt-4o",
-                    "temperature": 0.6,
-                    "timeout": 30.0
-                },
-                "content_structuring": {
-                    "instructions": """You are a content organization specialist focused on consistency and structure.
-                    Your role is to format and organize flashcard content for optimal learning.
-
-                    Ensure all cards have:
-                    - Consistent formatting and style
-                    - Proper metadata and tagging
-                    - Clear, unambiguous questions
-                    - Complete, well-structured answers
-                    - Appropriate examples and explanations
-                    - Relevant categorization and difficulty levels
-
-                    Maintain high standards for readability and accessibility.""",
-                    "model": "gpt-4o-mini",
-                    "temperature": 0.5,
-                    "timeout": 25.0
-                },
-                "generation_coordinator": {
-                    "instructions": """You are the generation workflow coordinator.
-                    Your role is to orchestrate the card generation process and manage handoffs between specialized agents.
-
-                    Responsibilities:
-                    - Route requests to appropriate specialist agents
-                    - Coordinate parallel generation tasks
-                    - Manage workflow state and progress
-                    - Handle errors and fallback strategies
-                    - Optimize generation pipelines
-
-                    Make decisions based on content type, user preferences, and system load.""",
-                    "model": "gpt-4o-mini",
-                    "temperature": 0.3,
-                    "timeout": 20.0
-                }
-            },
-            "prompt_templates": {
-                "subject_generation": {
-                    "system_prompt": "You are an expert in {subject}. Generate {num_cards} flashcards covering key concepts.",
-                    "user_prompt_template": "Topic: {topic}\nDifficulty: {difficulty}\nPrerequisites: {prerequisites}\n\nGenerate cards that help learners master this topic.",
-                    "variables": {
-                        "subject": "general",
-                        "num_cards": "5",
-                        "difficulty": "intermediate",
-                        "prerequisites": "none"
-                    }
-                }
-            }
-        }
-
-        with open(self.config_dir / "defaults" / "generators.yaml", 'w') as f:
-            yaml.dump(config, f, default_flow_style=False, indent=2)
-
-    def _create_default_judge_configs(self):
-        """Create default configuration for judge agents"""
-        config = {
-            "agents": {
-                "content_accuracy_judge": {
-                    "instructions": """You are a fact-checking and accuracy specialist.
-                    Your role is to verify the correctness and accuracy of flashcard content.
-
-                    Evaluate cards for:
-                    - Factual accuracy and up-to-date information
-                    - Proper use of terminology and definitions
-                    - Absence of misconceptions or errors
-                    - Appropriate level of detail for the target audience
-                    - Consistency with authoritative sources
-
-                    Rate each card's accuracy and provide specific feedback on any issues found.""",
-                    "model": "gpt-4o",
-                    "temperature": 0.3,
-                    "timeout": 25.0
-                },
-                "pedagogical_judge": {
-                    "instructions": """You are an educational assessment specialist.
-                    Your role is to evaluate flashcards for pedagogical effectiveness.
-
-                    Assess cards for:
-                    - Alignment with learning objectives
-                    - Appropriate difficulty level and cognitive load
-                    - Effective use of educational principles
-                    - Clear prerequisite knowledge requirements
-                    - Potential for promoting deep learning
-
-                    Provide detailed feedback on educational effectiveness and improvement suggestions.""",
-                    "model": "gpt-4o",
-                    "temperature": 0.4,
-                    "timeout": 30.0
-                },
-                "clarity_judge": {
-                    "instructions": """You are a communication and clarity specialist.
-                    Your role is to ensure flashcards are clear, unambiguous, and well-written.
-
-                    Evaluate cards for:
-                    - Question clarity and specificity
-                    - Answer completeness and coherence
-                    - Absence of ambiguity or confusion
-                    - Appropriate language level for target audience
-                    - Effective use of examples and explanations
-
-                    Rate clarity and provide specific suggestions for improvement.""",
-                    "model": "gpt-4o-mini",
-                    "temperature": 0.3,
-                    "timeout": 20.0
-                },
-                "technical_judge": {
-                    "instructions": """You are a technical accuracy specialist for programming and technical content.
-                    Your role is to verify technical correctness and best practices.
-
-                    For technical cards, check:
-                    - Code syntax and functionality
-                    - Best practices and conventions
-                    - Security considerations
-                    - Performance implications
-                    - Tool and framework accuracy
-
-                    Provide detailed technical feedback and corrections.""",
-                    "model": "gpt-4o",
-                    "temperature": 0.2,
-                    "timeout": 35.0
-                },
-                "completeness_judge": {
-                    "instructions": """You are a completeness and quality assurance specialist.
-                    Your role is to ensure flashcards meet all requirements and quality standards.
-
-                    Verify cards have:
-                    - All required fields and metadata
-                    - Proper formatting and structure
-                    - Appropriate tags and categorization
-                    - Complete explanations and examples
-                    - Consistent quality across the set
-
-                    Rate completeness and identify missing elements.""",
-                    "model": "gpt-4o-mini",
-                    "temperature": 0.3,
-                    "timeout": 20.0
-                },
-                "judge_coordinator": {
-                    "instructions": """You are the quality assurance coordinator.
-                    Your role is to orchestrate the judging process and synthesize feedback from specialist judges.
-
-                    Responsibilities:
-                    - Route cards to appropriate specialist judges
-                    - Coordinate parallel judging tasks
-                    - Synthesize feedback from multiple judges
-                    - Make final accept/reject/revise decisions
-                    - Manage judge workload and performance
-
-                    Balance speed with thoroughness in quality assessment.""",
-                    "model": "gpt-4o-mini",
-                    "temperature": 0.3,
-                    "timeout": 15.0
-                }
-            }
-        }
-
-        with open(self.config_dir / "defaults" / "judges.yaml", 'w') as f:
-            yaml.dump(config, f, default_flow_style=False, indent=2)
-
-    def _create_default_enhancer_configs(self):
-        """Create default configuration for enhancement agents"""
-        config = {
-            "agents": {
-                "revision_agent": {
-                    "instructions": """You are a content revision specialist.
-                    Your role is to improve flashcards based on feedback from quality judges.
-
-                    For each revision request:
-                    - Analyze specific feedback provided
-                    - Make targeted improvements to address issues
-                    - Maintain the card's educational intent
-                    - Preserve correct information while fixing problems
-                    - Improve clarity, accuracy, and pedagogical value
-
-                    Focus on iterative improvement rather than complete rewrites.""",
-                    "model": "gpt-4o",
-                    "temperature": 0.6,
-                    "timeout": 40.0
-                },
-                "enhancement_agent": {
-                    "instructions": """You are a content enhancement specialist.
-                    Your role is to add missing elements and enrich flashcard content.
-
-                    Enhancement tasks:
-                    - Add missing explanations or examples
-                    - Improve metadata and tagging
-                    - Generate additional context or background
-                    - Create connections to related concepts
-                    - Enhance visual or structural elements
-
-                    Ensure enhancements add value without overwhelming the learner.""",
-                    "model": "gpt-4o",
-                    "temperature": 0.7,
-                    "timeout": 35.0
-                }
-            }
-        }
-
-        with open(self.config_dir / "defaults" / "enhancers.yaml", 'w') as f:
-            yaml.dump(config, f, default_flow_style=False, indent=2)
+            logger.error(f"Failed to save agent config to {filename}: {e}")
 
 
 # Global config manager instance
 _global_config_manager: Optional[AgentConfigManager] = None
 
 
-def get_config_manager() -> AgentConfigManager:
+def get_config_manager(
+    model_overrides: Optional[Dict[str, str]] = None,
+    template_vars: Optional[Dict[str, Any]] = None,
+) -> AgentConfigManager:
     """Get the global agent configuration manager"""
     global _global_config_manager
     if _global_config_manager is None:
-        _global_config_manager = AgentConfigManager()
-    return _global_config_manager
+        _global_config_manager = AgentConfigManager(model_overrides, template_vars)
+    else:
+        if model_overrides:
+            _global_config_manager.update_models(model_overrides)
+        if template_vars:
+            _global_config_manager.update_template_vars(template_vars)
+    return _global_config_manager
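The loader above assumes each .j2 template renders to a JSON object keyed by agent name. The templates themselves are not part of this diff, so the fragment below is hypothetical, but it demonstrates the contract _load_configs_from_template parses (using an in-memory DictLoader in place of the real templates directory):

# Hypothetical template-contract demo; the template text is illustrative only.
import json
from jinja2 import DictLoader, Environment

template_text = """
{
  "subject_expert": {
    "instructions": "You are a world-class expert...",
    "model": "{{ subject_expert_model }}",
    "temperature": 0.7,
    "timeout": 45.0
  }
}
"""
env = Environment(loader=DictLoader({"generators.j2": template_text}))
rendered = env.get_template("generators.j2").render(subject_expert_model="gpt-4.1")
print(json.loads(rendered)["subject_expert"]["model"])  # gpt-4.1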
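The reworked get_config_manager also accepts per-agent model overrides, which are mapped to <agent_name>_model template variables before rendering. A usage sketch (agent names follow the default_models table above):

# Usage sketch: override one agent's model, then read back the rendered config.
from ankigen_core.agents.config import get_config_manager

manager = get_config_manager(model_overrides={"subject_expert": "gpt-4.1-nano"})
cfg = manager.get_agent_config("subject_expert")
if cfg is not None:
    print(cfg.model)  # reflects the override after the templates re-render

# Subsequent calls update the existing singleton in place.
get_config_manager(model_overrides={"clarity_judge": "gpt-4.1"})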
ankigen_core/agents/enhancers.py CHANGED
@@ -2,108 +2,81 @@
 
 import json
 import asyncio
-from typing import List, Dict, Any, Optional
+from typing import List
 from datetime import datetime
 
 from openai import AsyncOpenAI
 
 from ankigen_core.logging import logger
 from ankigen_core.models import Card, CardFront, CardBack
-from .base import BaseAgentWrapper, AgentConfig
+from .base import BaseAgentWrapper
 from .config import get_config_manager
-from .metrics import record_agent_execution
 from .judges import JudgeDecision
 
 
 class RevisionAgent(BaseAgentWrapper):
     """Agent for revising cards based on judge feedback"""
-
+
     def __init__(self, openai_client: AsyncOpenAI):
         config_manager = get_config_manager()
         base_config = config_manager.get_agent_config("revision_agent")
-
+
         if not base_config:
-            base_config = AgentConfig(
-                name="revision_agent",
-                instructions="""You are a content revision specialist.
-            Improve flashcards based on specific feedback from quality judges.
-            Make targeted improvements while maintaining educational intent.""",
-                model="gpt-4o",
-                temperature=0.6
+            raise ValueError(
+                "revision_agent configuration not found - agent system not properly initialized"
             )
-
+
         super().__init__(base_config, openai_client)
-
+
     async def revise_card(
-        self,
-        card: Card,
-        judge_decisions: List[JudgeDecision],
-        max_iterations: int = 3
+        self, card: Card, judge_decisions: List[JudgeDecision], max_iterations: int = 3
     ) -> Card:
         """Revise a card based on judge feedback"""
-        start_time = datetime.now()
-
+        datetime.now()
+
         try:
             # Collect all feedback and improvements
             all_feedback = []
             all_improvements = []
-
+
             for decision in judge_decisions:
                 if not decision.approved:
                     all_feedback.append(f"{decision.judge_name}: {decision.feedback}")
                     all_improvements.extend(decision.improvements)
-
+
             if not all_feedback:
                 # No revisions needed
                 return card
-
+
             # Build revision prompt
-            user_input = self._build_revision_prompt(card, all_feedback, all_improvements)
-
+            user_input = self._build_revision_prompt(
+                card, all_feedback, all_improvements
+            )
+
             # Execute revision
             response = await self.execute(user_input)
-
+
             # Parse revised card
             revised_card = self._parse_revised_card(response, card)
-
+
             # Record successful execution
-            record_agent_execution(
-                agent_name=self.config.name,
-                start_time=start_time,
-                end_time=datetime.now(),
-                success=True,
-                metadata={
-                    "cards_revised": 1,
-                    "feedback_sources": len(judge_decisions),
-                    "improvements_applied": len(all_improvements)
-                }
-            )
-
-            logger.info(f"RevisionAgent successfully revised card: {card.front.question[:50]}...")
+
+            logger.info(
+                f"RevisionAgent successfully revised card: {card.front.question[:50]}..."
+            )
             return revised_card
-
+
         except Exception as e:
-            record_agent_execution(
-                agent_name=self.config.name,
-                start_time=start_time,
-                end_time=datetime.now(),
-                success=False,
-                error_message=str(e)
-            )
-
             logger.error(f"RevisionAgent failed to revise card: {e}")
             return card  # Return original card on failure
-
+
     def _build_revision_prompt(
-        self,
-        card: Card,
-        feedback: List[str],
-        improvements: List[str]
+        self, card: Card, feedback: List[str], improvements: List[str]
     ) -> str:
         """Build the revision prompt"""
         feedback_str = "\n".join([f"- {fb}" for fb in feedback])
         improvements_str = "\n".join([f"- {imp}" for imp in improvements])
-
+
         return f"""Revise this flashcard based on the provided feedback and improvement suggestions:
 
 Original Card:
@@ -143,7 +116,7 @@ Return the revised card as JSON:
     }},
     "revision_notes": "Summary of changes made based on feedback"
 }}"""
-
+
     def _parse_revised_card(self, response: str, original_card: Card) -> Card:
         """Parse the revised card response"""
         try:
@@ -151,30 +124,30 @@ Return the revised card as JSON:
                 data = json.loads(response)
             else:
                 data = response
-
+
             # Create revised card
             revised_card = Card(
                 card_type=data.get("card_type", original_card.card_type),
-                front=CardFront(
-                    question=data["front"]["question"]
-                ),
+                front=CardFront(question=data["front"]["question"]),
                 back=CardBack(
                     answer=data["back"]["answer"],
                     explanation=data["back"].get("explanation", ""),
-                    example=data["back"].get("example", "")
+                    example=data["back"].get("example", ""),
                 ),
-                metadata=data.get("metadata", original_card.metadata)
+                metadata=data.get("metadata", original_card.metadata),
             )
-
+
             # Add revision tracking to metadata
             if revised_card.metadata is None:
                 revised_card.metadata = {}
-
-            revised_card.metadata["revision_notes"] = data.get("revision_notes", "Revised based on judge feedback")
+
+            revised_card.metadata["revision_notes"] = data.get(
+                "revision_notes", "Revised based on judge feedback"
+            )
            revised_card.metadata["last_revised"] = datetime.now().isoformat()
-
+
            return revised_card
-
+
        except Exception as e:
            logger.error(f"Failed to parse revised card: {e}")
            return original_card
@@ -182,31 +155,24 @@ Return the revised card as JSON:
 
 class EnhancementAgent(BaseAgentWrapper):
     """Agent for enhancing cards with additional content and metadata"""
-
+
     def __init__(self, openai_client: AsyncOpenAI):
         config_manager = get_config_manager()
         base_config = config_manager.get_agent_config("enhancement_agent")
-
+
         if not base_config:
-            base_config = AgentConfig(
-                name="enhancement_agent",
-                instructions="""You are a content enhancement specialist.
-            Add missing elements and enrich flashcard content without overwhelming learners.
-            Enhance metadata, examples, and educational value.""",
-                model="gpt-4o",
-                temperature=0.7
+            raise ValueError(
+                "enhancement_agent configuration not found - agent system not properly initialized"
             )
-
+
         super().__init__(base_config, openai_client)
-
+
     async def enhance_card(
-        self,
-        card: Card,
-        enhancement_targets: List[str] = None
+        self, card: Card, enhancement_targets: List[str] = None
     ) -> Card:
         """Enhance a card with additional content and metadata"""
-        start_time = datetime.now()
-
         try:
             # Default enhancement targets if none specified
             if not enhancement_targets:
@@ -216,53 +182,34 @@ Enhance metadata, examples, and educational value.""",
                     "metadata",
                     "learning_outcomes",
                     "prerequisites",
-                    "related_concepts"
                 ]
-
             user_input = self._build_enhancement_prompt(card, enhancement_targets)
-
             # Execute enhancement
             response = await self.execute(user_input)
-
             # Parse enhanced card
             enhanced_card = self._parse_enhanced_card(response, card)
-
             # Record successful execution
-            record_agent_execution(
-                agent_name=self.config.name,
-                start_time=start_time,
-                end_time=datetime.now(),
-                success=True,
-                metadata={
-                    "cards_enhanced": 1,
-                    "enhancement_targets": enhancement_targets,
-                    "enhancements_applied": len(enhancement_targets)
-                }
             )
-
-            logger.info(f"EnhancementAgent successfully enhanced card: {card.front.question[:50]}...")
             return enhanced_card
-
         except Exception as e:
-            record_agent_execution(
-                agent_name=self.config.name,
-                start_time=start_time,
-                end_time=datetime.now(),
-                success=False,
-                error_message=str(e)
-            )
-
             logger.error(f"EnhancementAgent failed to enhance card: {e}")
             return card  # Return original card on failure
-
     def _build_enhancement_prompt(
-        self,
-        card: Card,
-        enhancement_targets: List[str]
     ) -> str:
         """Build the enhancement prompt"""
         targets_str = ", ".join(enhancement_targets)
-
         return f"""Enhance this flashcard by adding missing elements and enriching the content:
 
 Current Card:
@@ -296,7 +243,7 @@ Return the enhanced card as JSON:
     }},
     "metadata": {{
         "topic": "specific topic",
-        "subject": "subject area",
        "difficulty": "beginner|intermediate|advanced",
        "tags": ["comprehensive", "tag", "list"],
        "learning_outcomes": ["specific learning outcome 1", "outcome 2"],
@@ -309,7 +256,7 @@ Return the enhanced card as JSON:
    }},
    "enhancement_notes": "Summary of enhancements made"
 }}"""
-
    def _parse_enhanced_card(self, response: str, original_card: Card) -> Card:
        """Parse the enhanced card response"""
        try:
@@ -317,86 +264,67 @@ Return the enhanced card as JSON:
                data = json.loads(response)
            else:
                data = response
-
            # Create enhanced card
            enhanced_card = Card(
                card_type=data.get("card_type", original_card.card_type),
-                front=CardFront(
-                    question=data["front"]["question"]
-                ),
                back=CardBack(
                    answer=data["back"]["answer"],
-                    explanation=data["back"].get("explanation", original_card.back.explanation),
-                    example=data["back"].get("example", original_card.back.example)
                ),
-                metadata=data.get("metadata", original_card.metadata)
            )
-
            # Add enhancement tracking to metadata
            if enhanced_card.metadata is None:
                enhanced_card.metadata = {}
-
-            enhanced_card.metadata["enhancement_notes"] = data.get("enhancement_notes", "Enhanced with additional content")
            enhanced_card.metadata["last_enhanced"] = datetime.now().isoformat()
-
            return enhanced_card
-
        except Exception as e:
            logger.error(f"Failed to parse enhanced card: {e}")
            return original_card
-
    async def enhance_card_batch(
-        self,
-        cards: List[Card],
-        enhancement_targets: List[str] = None
    ) -> List[Card]:
        """Enhance multiple cards in batch"""
-        start_time = datetime.now()
-
        try:
            enhanced_cards = []
-
            # Process cards in parallel for efficiency
-            tasks = [
-                self.enhance_card(card, enhancement_targets)
-                for card in cards
-            ]
-
            results = await asyncio.gather(*tasks, return_exceptions=True)
-
            for card, result in zip(cards, results):
                if isinstance(result, Exception):
                    logger.warning(f"Enhancement failed for card: {result}")
                    enhanced_cards.append(card)  # Keep original
                else:
                    enhanced_cards.append(result)
-
            # Record batch execution
-            successful_enhancements = len([r for r in results if not isinstance(r, Exception)])
-
-            record_agent_execution(
-                agent_name=f"{self.config.name}_batch",
-                start_time=start_time,
-                end_time=datetime.now(),
-                success=True,
-                metadata={
-                    "cards_processed": len(cards),
-                    "successful_enhancements": successful_enhancements,
-                    "enhancement_rate": successful_enhancements / len(cards) if cards else 0
-                }
            )
-
-            logger.info(f"EnhancementAgent batch complete: {successful_enhancements}/{len(cards)} cards enhanced")
            return enhanced_cards
-
        except Exception as e:
-            record_agent_execution(
-                agent_name=f"{self.config.name}_batch",
-                start_time=start_time,
-                end_time=datetime.now(),
-                success=False,
-                error_message=str(e)
-            )
-
            logger.error(f"EnhancementAgent batch failed: {e}")
-            return cards  # Return original cards on failure
 
2
 
3
  import json
4
  import asyncio
5
+ from typing import List
6
  from datetime import datetime
7
 
8
  from openai import AsyncOpenAI
9
 
10
  from ankigen_core.logging import logger
11
  from ankigen_core.models import Card, CardFront, CardBack
12
+ from .base import BaseAgentWrapper
13
  from .config import get_config_manager
 
14
  from .judges import JudgeDecision
15
 
16
 
17
  class RevisionAgent(BaseAgentWrapper):
18
  """Agent for revising cards based on judge feedback"""
19
+
20
  def __init__(self, openai_client: AsyncOpenAI):
21
  config_manager = get_config_manager()
22
  base_config = config_manager.get_agent_config("revision_agent")
23
+
24
  if not base_config:
25
+ raise ValueError(
26
+ "revision_agent configuration not found - agent system not properly initialized"
 
 
 
 
 
27
  )
28
+
29
  super().__init__(base_config, openai_client)
30
+
31
  async def revise_card(
32
+ self, card: Card, judge_decisions: List[JudgeDecision], max_iterations: int = 3
33
  ) -> Card:
34
  """Revise a card based on judge feedback"""
35
+ datetime.now()
36
+
37
  try:
38
  # Collect all feedback and improvements
39
  all_feedback = []
40
  all_improvements = []
41
+
42
  for decision in judge_decisions:
43
  if not decision.approved:
44
  all_feedback.append(f"{decision.judge_name}: {decision.feedback}")
45
  all_improvements.extend(decision.improvements)
46
+
47
  if not all_feedback:
48
  # No revisions needed
49
  return card
50
+
51
  # Build revision prompt
52
+ user_input = self._build_revision_prompt(
53
+ card, all_feedback, all_improvements
54
+ )
55
+
56
  # Execute revision
57
  response = await self.execute(user_input)
58
+
59
  # Parse revised card
60
  revised_card = self._parse_revised_card(response, card)
61
+
62
  # Record successful execution
63
+
64
+ logger.info(
65
+ f"RevisionAgent successfully revised card: {card.front.question[:50]}..."
66
  )
67
  return revised_card
68
+
69
  except Exception as e:
70
  logger.error(f"RevisionAgent failed to revise card: {e}")
71
  return card # Return original card on failure
72
+
73
  def _build_revision_prompt(
74
+ self, card: Card, feedback: List[str], improvements: List[str]
75
  ) -> str:
76
  """Build the revision prompt"""
77
  feedback_str = "\n".join([f"- {fb}" for fb in feedback])
78
  improvements_str = "\n".join([f"- {imp}" for imp in improvements])
79
+
80
  return f"""Revise this flashcard based on the provided feedback and improvement suggestions:
81
 
82
  Original Card:
 
116
  }},
117
  "revision_notes": "Summary of changes made based on feedback"
118
  }}"""
119
+
120
  def _parse_revised_card(self, response: str, original_card: Card) -> Card:
121
  """Parse the revised card response"""
122
  try:
123
  if isinstance(response, str):
124
  data = json.loads(response)
125
  else:
126
  data = response
127
+
128
  # Create revised card
129
  revised_card = Card(
130
  card_type=data.get("card_type", original_card.card_type),
131
+ front=CardFront(question=data["front"]["question"]),
132
  back=CardBack(
133
  answer=data["back"]["answer"],
134
  explanation=data["back"].get("explanation", ""),
135
+ example=data["back"].get("example", ""),
136
  ),
137
+ metadata=data.get("metadata", original_card.metadata),
138
  )
139
+
140
  # Add revision tracking to metadata
141
  if revised_card.metadata is None:
142
  revised_card.metadata = {}
143
+
144
+ revised_card.metadata["revision_notes"] = data.get(
145
+ "revision_notes", "Revised based on judge feedback"
146
+ )
147
  revised_card.metadata["last_revised"] = datetime.now().isoformat()
148
+
149
  return revised_card
150
+
151
  except Exception as e:
152
  logger.error(f"Failed to parse revised card: {e}")
153
  return original_card
 
155
 
156
  class EnhancementAgent(BaseAgentWrapper):
157
  """Agent for enhancing cards with additional content and metadata"""
158
+
159
  def __init__(self, openai_client: AsyncOpenAI):
160
  config_manager = get_config_manager()
161
  base_config = config_manager.get_agent_config("enhancement_agent")
162
+
163
  if not base_config:
164
+ raise ValueError(
165
+ "enhancement_agent configuration not found - agent system not properly initialized"
166
  )
167
+
168
  super().__init__(base_config, openai_client)
169
+
170
  async def enhance_card(
171
+ self, card: Card, enhancement_targets: List[str] = None
172
  ) -> Card:
173
  """Enhance a card with additional content and metadata"""
174
+ datetime.now()
175
+
176
  try:
177
  # Default enhancement targets if none specified
178
  if not enhancement_targets:
 
182
  "metadata",
183
  "learning_outcomes",
184
  "prerequisites",
185
+ "related_concepts",
186
  ]
187
+
188
  user_input = self._build_enhancement_prompt(card, enhancement_targets)
189
+
190
  # Execute enhancement
191
  response = await self.execute(user_input)
192
+
193
  # Parse enhanced card
194
  enhanced_card = self._parse_enhanced_card(response, card)
195
+
196
  # Record successful execution
197
+
198
+ logger.info(
199
+ f"EnhancementAgent successfully enhanced card: {card.front.question[:50]}..."
200
  )
201
  return enhanced_card
202
+
203
  except Exception as e:
204
  logger.error(f"EnhancementAgent failed to enhance card: {e}")
205
  return card # Return original card on failure
206
+
207
  def _build_enhancement_prompt(
208
+ self, card: Card, enhancement_targets: List[str]
209
  ) -> str:
210
  """Build the enhancement prompt"""
211
  targets_str = ", ".join(enhancement_targets)
212
+
213
  return f"""Enhance this flashcard by adding missing elements and enriching the content:
214
 
215
  Current Card:
 
243
  }},
244
  "metadata": {{
245
  "topic": "specific topic",
246
+ "subject": "subject area",
247
  "difficulty": "beginner|intermediate|advanced",
248
  "tags": ["comprehensive", "tag", "list"],
249
  "learning_outcomes": ["specific learning outcome 1", "outcome 2"],
 
256
  }},
257
  "enhancement_notes": "Summary of enhancements made"
258
  }}"""
259
+
260
  def _parse_enhanced_card(self, response: str, original_card: Card) -> Card:
261
  """Parse the enhanced card response"""
262
  try:
263
  if isinstance(response, str):
264
  data = json.loads(response)
265
  else:
266
  data = response
267
+
268
  # Create enhanced card
269
  enhanced_card = Card(
270
  card_type=data.get("card_type", original_card.card_type),
271
+ front=CardFront(question=data["front"]["question"]),
272
  back=CardBack(
273
  answer=data["back"]["answer"],
274
+ explanation=data["back"].get(
275
+ "explanation", original_card.back.explanation
276
+ ),
277
+ example=data["back"].get("example", original_card.back.example),
278
  ),
279
+ metadata=data.get("metadata", original_card.metadata),
280
  )
281
+
282
  # Add enhancement tracking to metadata
283
  if enhanced_card.metadata is None:
284
  enhanced_card.metadata = {}
285
+
286
+ enhanced_card.metadata["enhancement_notes"] = data.get(
287
+ "enhancement_notes", "Enhanced with additional content"
288
+ )
289
  enhanced_card.metadata["last_enhanced"] = datetime.now().isoformat()
290
+
291
  return enhanced_card
292
+
293
  except Exception as e:
294
  logger.error(f"Failed to parse enhanced card: {e}")
295
  return original_card
296
+
297
  async def enhance_card_batch(
298
+ self, cards: List[Card], enhancement_targets: List[str] = None
299
  ) -> List[Card]:
300
  """Enhance multiple cards in batch"""
301
+ datetime.now()
302
+
303
  try:
304
  enhanced_cards = []
305
+
306
  # Process cards in parallel for efficiency
307
+ tasks = [self.enhance_card(card, enhancement_targets) for card in cards]
308
+
309
  results = await asyncio.gather(*tasks, return_exceptions=True)
310
+
311
  for card, result in zip(cards, results):
312
  if isinstance(result, Exception):
313
  logger.warning(f"Enhancement failed for card: {result}")
314
  enhanced_cards.append(card) # Keep original
315
  else:
316
  enhanced_cards.append(result)
317
+
318
  # Record batch execution
319
+ successful_enhancements = len(
320
+ [r for r in results if not isinstance(r, Exception)]
321
+ )
322
+
323
+ logger.info(
324
+ f"EnhancementAgent batch complete: {successful_enhancements}/{len(cards)} cards enhanced"
325
  )
326
  return enhanced_cards
327
+
328
  except Exception as e:
329
  logger.error(f"EnhancementAgent batch failed: {e}")
330
+ return cards # Return original cards on failure
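
A minimal usage sketch for the rewritten enhancers (illustrative only, not part of this commit; it assumes the enhancement_agent config above is registered and a real API key — the key, card content, and demo() wrapper below are placeholders):

import asyncio
from openai import AsyncOpenAI
from ankigen_core.models import Card, CardFront, CardBack
from ankigen_core.agents.enhancers import EnhancementAgent

async def demo() -> None:
    client = AsyncOpenAI(api_key="sk-...")  # placeholder key (assumption)
    agent = EnhancementAgent(client)  # raises ValueError if the config is missing
    card = Card(
        card_type="basic",
        front=CardFront(question="What does GIL stand for in CPython?"),
        back=CardBack(answer="Global Interpreter Lock", explanation="", example=""),
        metadata={},
    )
    # enhance_card_batch() keeps the original card whenever enhancement fails
    enhanced = await agent.enhance_card_batch([card])
    print((enhanced[0].metadata or {}).get("enhancement_notes"))

asyncio.run(demo())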
ankigen_core/agents/generators.py CHANGED
@@ -1,7 +1,6 @@
1
  # Specialized generator agents for card generation
2
 
3
  import json
4
- import asyncio
5
  from typing import List, Dict, Any, Optional
6
  from datetime import datetime
7
 
@@ -9,106 +8,71 @@ from openai import AsyncOpenAI
9
 
10
  from ankigen_core.logging import logger
11
  from ankigen_core.models import Card, CardFront, CardBack
12
- from .base import BaseAgentWrapper, AgentConfig
13
  from .config import get_config_manager
14
- from .metrics import record_agent_execution
15
 
16
 
17
  class SubjectExpertAgent(BaseAgentWrapper):
18
  """Subject matter expert agent for domain-specific card generation"""
19
-
20
  def __init__(self, openai_client: AsyncOpenAI, subject: str = "general"):
21
  config_manager = get_config_manager()
22
  base_config = config_manager.get_agent_config("subject_expert")
23
-
24
  if not base_config:
25
- # Fallback config if not found
26
- base_config = AgentConfig(
27
- name="subject_expert",
28
- instructions=f"""You are a world-class expert in {subject} with deep pedagogical knowledge.
29
- Generate high-quality flashcards that demonstrate mastery of {subject} concepts.
30
- Focus on technical accuracy, appropriate depth, and real-world applications.""",
31
- model="gpt-4o",
32
- temperature=0.7
33
  )
34
-
35
  # Customize instructions for the specific subject
36
  if subject != "general" and base_config.custom_prompts:
37
  subject_prompt = base_config.custom_prompts.get(subject.lower(), "")
38
  if subject_prompt:
39
- base_config.instructions += f"\n\nSubject-specific guidance: {subject_prompt}"
40
-
41
  super().__init__(base_config, openai_client)
42
  self.subject = subject
43
-
44
  async def generate_cards(
45
- self,
46
- topic: str,
47
- num_cards: int = 5,
48
- difficulty: str = "intermediate",
49
- prerequisites: List[str] = None,
50
- context: Dict[str, Any] = None
51
  ) -> List[Card]:
52
- """Generate subject-specific flashcards"""
53
- start_time = datetime.now()
54
-
55
  try:
56
- user_input = self._build_generation_prompt(
57
- topic=topic,
58
- num_cards=num_cards,
59
- difficulty=difficulty,
60
- prerequisites=prerequisites or [],
61
- context=context or {}
62
- )
63
-
64
- # Execute the agent
65
- response = await self.execute(user_input, context)
66
-
67
- # Parse the response into Card objects
68
- cards = self._parse_cards_response(response, topic)
69
-
70
- # Record successful execution
71
- record_agent_execution(
72
- agent_name=self.config.name,
73
- start_time=start_time,
74
- end_time=datetime.now(),
75
- success=True,
76
- metadata={
77
- "subject": self.subject,
78
- "topic": topic,
79
- "cards_generated": len(cards),
80
- "difficulty": difficulty
81
- }
82
- )
83
-
84
- logger.info(f"SubjectExpertAgent generated {len(cards)} cards for {topic}")
85
- return cards
86
-
87
  except Exception as e:
88
- # Record failed execution
89
- record_agent_execution(
90
- agent_name=self.config.name,
91
- start_time=start_time,
92
- end_time=datetime.now(),
93
- success=False,
94
- error_message=str(e),
95
- metadata={"subject": self.subject, "topic": topic}
96
- )
97
-
98
- logger.error(f"SubjectExpertAgent failed to generate cards: {e}")
99
  raise
100
-
101
  def _build_generation_prompt(
102
  self,
103
  topic: str,
104
  num_cards: int,
105
  difficulty: str,
106
  prerequisites: List[str],
107
- context: Dict[str, Any]
108
  ) -> str:
109
  """Build the generation prompt"""
110
  prerequisites_str = ", ".join(prerequisites) if prerequisites else "None"
111
-
112
  prompt = f"""Generate {num_cards} high-quality flashcards for the topic: {topic}
113
 
114
  Subject: {self.subject}
@@ -146,145 +110,178 @@ Return your response as a JSON object with this structure:
146
  }}
147
  ]
148
  }}"""
149
-
150
  if context.get("source_text"):
151
  prompt += f"\n\nBase the cards on this source material:\n{context['source_text'][:2000]}..."
152
-
153
  return prompt
154
-
155
- def _parse_cards_response(self, response: str, topic: str) -> List[Card]:
156
  """Parse the agent response into Card objects"""
157
  try:
158
- # Try to parse as JSON
159
- if isinstance(response, str):
160
  data = json.loads(response)
161
  else:
162
- data = response
163
-
164
- if "cards" not in data:
165
- raise ValueError("Response missing 'cards' field")
166
-
167
  cards = []
168
- for i, card_data in enumerate(data["cards"]):
169
  try:
170
- # Validate required fields
171
- if "front" not in card_data or "back" not in card_data:
172
- logger.warning(f"Skipping card {i}: missing front or back")
173
- continue
174
-
175
- front_data = card_data["front"]
176
- back_data = card_data["back"]
177
-
178
- if "question" not in front_data:
179
- logger.warning(f"Skipping card {i}: missing question")
180
- continue
181
-
182
- if "answer" not in back_data:
183
- logger.warning(f"Skipping card {i}: missing answer")
184
  continue
185
-
186
  # Create Card object
187
  card = Card(
188
- card_type=card_data.get("card_type", "basic"),
189
- front=CardFront(question=front_data["question"]),
190
  back=CardBack(
191
- answer=back_data["answer"],
192
- explanation=back_data.get("explanation", ""),
193
- example=back_data.get("example", "")
194
  ),
195
- metadata=card_data.get("metadata", {})
196
  )
197
-
198
  # Ensure metadata includes subject and topic
199
  if card.metadata is not None:
200
  if "subject" not in card.metadata:
201
  card.metadata["subject"] = self.subject
202
  if "topic" not in card.metadata:
203
  card.metadata["topic"] = topic
204
-
205
  cards.append(card)
206
-
207
  except Exception as e:
208
  logger.warning(f"Failed to parse card {i}: {e}")
209
  continue
210
-
 
211
  return cards
212
-
213
  except json.JSONDecodeError as e:
214
- logger.error(f"Failed to parse cards response as JSON: {e}")
215
  raise ValueError(f"Invalid JSON response from agent: {e}")
216
  except Exception as e:
217
- logger.error(f"Failed to parse cards response: {e}")
 
218
  raise
219
 
220
 
221
  class PedagogicalAgent(BaseAgentWrapper):
222
  """Pedagogical specialist for educational effectiveness"""
223
-
224
  def __init__(self, openai_client: AsyncOpenAI):
225
  config_manager = get_config_manager()
226
  base_config = config_manager.get_agent_config("pedagogical")
227
-
228
  if not base_config:
229
- base_config = AgentConfig(
230
- name="pedagogical",
231
- instructions="""You are an educational specialist focused on learning theory and instructional design.
232
- Ensure all flashcards follow educational best practices using Bloom's Taxonomy, Spaced Repetition,
233
- and Cognitive Load Theory. Review for clear learning objectives and appropriate difficulty progression.""",
234
- model="gpt-4o",
235
- temperature=0.6
236
  )
237
-
238
  super().__init__(base_config, openai_client)
239
-
240
  async def review_cards(self, cards: List[Card]) -> List[Dict[str, Any]]:
241
  """Review cards for pedagogical effectiveness"""
242
- start_time = datetime.now()
243
-
244
  try:
245
  reviews = []
246
-
247
  for i, card in enumerate(cards):
248
  user_input = self._build_review_prompt(card, i)
249
- response = await self.execute(user_input)
250
-
251
  try:
252
- review_data = json.loads(response) if isinstance(response, str) else response
253
  reviews.append(review_data)
254
  except Exception as e:
255
  logger.warning(f"Failed to parse review for card {i}: {e}")
256
- reviews.append({
257
- "approved": True,
258
- "feedback": f"Review parsing failed: {e}",
259
- "improvements": []
260
- })
261
-
262
  # Record successful execution
263
- record_agent_execution(
264
- agent_name=self.config.name,
265
- start_time=start_time,
266
- end_time=datetime.now(),
267
- success=True,
268
- metadata={
269
- "cards_reviewed": len(cards),
270
- "approvals": len([r for r in reviews if r.get("approved", False)])
271
- }
272
- )
273
-
274
  return reviews
275
-
276
  except Exception as e:
277
- record_agent_execution(
278
- agent_name=self.config.name,
279
- start_time=start_time,
280
- end_time=datetime.now(),
281
- success=False,
282
- error_message=str(e)
283
- )
284
-
285
  logger.error(f"PedagogicalAgent review failed: {e}")
286
  raise
287
-
288
  def _parse_review_response(self, response) -> Dict[str, Any]:
289
  """Parse the review response into a dictionary"""
290
  try:
@@ -292,21 +289,25 @@ and Cognitive Load Theory. Review for clear learning objectives and appropriate
292
  data = json.loads(response)
293
  else:
294
  data = response
295
-
296
  # Validate required fields
297
- required_fields = ['pedagogical_quality', 'clarity', 'learning_effectiveness']
298
  if not all(field in data for field in required_fields):
299
  raise ValueError("Missing required review fields")
300
-
301
  return data
302
-
303
  except json.JSONDecodeError as e:
304
  logger.error(f"Failed to parse review response as JSON: {e}")
305
  raise ValueError(f"Invalid review response: {e}")
306
  except Exception as e:
307
  logger.error(f"Failed to parse review response: {e}")
308
  raise ValueError(f"Invalid review response: {e}")
309
-
310
  def _build_review_prompt(self, card: Card, index: int) -> str:
311
  """Build the review prompt for a single card"""
312
  return f"""Review this flashcard for pedagogical effectiveness:
@@ -340,68 +341,45 @@ Return your assessment as JSON:
340
 
341
  class ContentStructuringAgent(BaseAgentWrapper):
342
  """Content organization and formatting specialist"""
343
-
344
  def __init__(self, openai_client: AsyncOpenAI):
345
  config_manager = get_config_manager()
346
  base_config = config_manager.get_agent_config("content_structuring")
347
-
348
  if not base_config:
349
- base_config = AgentConfig(
350
- name="content_structuring",
351
- instructions="""You are a content organization specialist focused on consistency and structure.
352
- Format and organize flashcard content for optimal learning with consistent formatting,
353
- proper metadata, clear questions, and appropriate categorization.""",
354
- model="gpt-4o-mini",
355
- temperature=0.5
356
  )
357
-
358
  super().__init__(base_config, openai_client)
359
-
360
  async def structure_cards(self, cards: List[Card]) -> List[Card]:
361
  """Structure and format cards for consistency"""
362
- start_time = datetime.now()
363
-
364
  try:
365
  structured_cards = []
366
-
367
  for i, card in enumerate(cards):
368
  user_input = self._build_structuring_prompt(card, i)
369
- response = await self.execute(user_input)
370
-
371
  try:
372
- structured_data = json.loads(response) if isinstance(response, str) else response
373
  structured_card = self._parse_structured_card(structured_data, card)
374
  structured_cards.append(structured_card)
375
  except Exception as e:
376
  logger.warning(f"Failed to structure card {i}: {e}")
377
  structured_cards.append(card) # Keep original on failure
378
-
379
- # Record successful execution
380
- record_agent_execution(
381
- agent_name=self.config.name,
382
- start_time=start_time,
383
- end_time=datetime.now(),
384
- success=True,
385
- metadata={
386
- "cards_structured": len(cards),
387
- "successful_structures": len([c for c in structured_cards if c != cards[i] for i in range(len(cards))])
388
- }
389
- )
390
-
391
  return structured_cards
392
-
393
  except Exception as e:
394
- record_agent_execution(
395
- agent_name=self.config.name,
396
- start_time=start_time,
397
- end_time=datetime.now(),
398
- success=False,
399
- error_message=str(e)
400
- )
401
-
402
  logger.error(f"ContentStructuringAgent failed: {e}")
403
  raise
404
-
405
  def _build_structuring_prompt(self, card: Card, index: int) -> str:
406
  """Build the structuring prompt for a single card"""
407
  return f"""Structure and format this flashcard for optimal learning:
@@ -444,21 +422,21 @@ Return the improved card as JSON:
444
  "category": "category name"
445
  }}
446
  }}"""
447
-
448
- def _parse_structured_card(self, structured_data: Dict[str, Any], original_card: Card) -> Card:
449
  """Parse structured card data into Card object"""
450
  try:
451
  return Card(
452
  card_type=structured_data.get("card_type", original_card.card_type),
453
- front=CardFront(
454
- question=structured_data["front"]["question"]
455
- ),
456
  back=CardBack(
457
  answer=structured_data["back"]["answer"],
458
  explanation=structured_data["back"].get("explanation", ""),
459
- example=structured_data["back"].get("example", "")
460
  ),
461
- metadata=structured_data.get("metadata", original_card.metadata)
462
  )
463
  except Exception as e:
464
  logger.warning(f"Failed to parse structured card: {e}")
@@ -467,28 +445,23 @@ Return the improved card as JSON:
467
 
468
  class GenerationCoordinator(BaseAgentWrapper):
469
  """Coordinates the multi-agent card generation workflow"""
470
-
471
  def __init__(self, openai_client: AsyncOpenAI):
472
  config_manager = get_config_manager()
473
  base_config = config_manager.get_agent_config("generation_coordinator")
474
-
475
  if not base_config:
476
- base_config = AgentConfig(
477
- name="generation_coordinator",
478
- instructions="""You are the generation workflow coordinator.
479
- Orchestrate the card generation process and manage handoffs between specialized agents.
480
- Make decisions based on content type, user preferences, and system load.""",
481
- model="gpt-4o-mini",
482
- temperature=0.3
483
  )
484
-
485
  super().__init__(base_config, openai_client)
486
-
487
  # Initialize specialized agents
488
  self.subject_expert = None
489
  self.pedagogical = PedagogicalAgent(openai_client)
490
  self.content_structuring = ContentStructuringAgent(openai_client)
491
-
492
  async def coordinate_generation(
493
  self,
494
  topic: str,
@@ -497,73 +470,121 @@ Make decisions based on content type, user preferences, and system load.""",
497
  difficulty: str = "intermediate",
498
  enable_review: bool = True,
499
  enable_structuring: bool = True,
500
- context: Dict[str, Any] = None
501
  ) -> List[Card]:
502
  """Coordinate the full card generation pipeline"""
503
- start_time = datetime.now()
504
-
505
  try:
506
  # Initialize subject expert for the specific subject
507
  if not self.subject_expert or self.subject_expert.subject != subject:
508
  self.subject_expert = SubjectExpertAgent(self.openai_client, subject)
509
-
510
  logger.info(f"Starting coordinated generation: {topic} ({subject})")
511
-
512
  # Step 1: Generate initial cards
513
  cards = await self.subject_expert.generate_cards(
514
- topic=topic,
515
- num_cards=num_cards,
516
- difficulty=difficulty,
517
- context=context
518
  )
519
-
520
  # Step 2: Pedagogical review (optional)
521
  if enable_review and cards:
522
  logger.info("Performing pedagogical review...")
523
  reviews = await self.pedagogical.review_cards(cards)
524
-
525
  # Filter or flag cards based on reviews
526
  approved_cards = []
527
  for card, review in zip(cards, reviews):
528
  if review.get("approved", True):
529
  approved_cards.append(card)
530
  else:
531
- logger.info(f"Card flagged for revision: {card.front.question[:50]}...")
532
-
533
  cards = approved_cards
534
-
535
  # Step 3: Content structuring (optional)
536
  if enable_structuring and cards:
537
  logger.info("Performing content structuring...")
538
  cards = await self.content_structuring.structure_cards(cards)
539
-
540
  # Record successful coordination
541
- record_agent_execution(
542
- agent_name=self.config.name,
543
- start_time=start_time,
544
- end_time=datetime.now(),
545
- success=True,
546
- metadata={
547
- "topic": topic,
548
- "subject": subject,
549
- "cards_generated": len(cards),
550
- "review_enabled": enable_review,
551
- "structuring_enabled": enable_structuring
552
- }
553
- )
554
-
555
  logger.info(f"Generation coordination complete: {len(cards)} cards")
556
  return cards
557
-
558
  except Exception as e:
559
- record_agent_execution(
560
- agent_name=self.config.name,
561
- start_time=start_time,
562
- end_time=datetime.now(),
563
- success=False,
564
- error_message=str(e),
565
- metadata={"topic": topic, "subject": subject}
566
- )
567
-
568
  logger.error(f"Generation coordination failed: {e}")
569
- raise

1
  # Specialized generator agents for card generation
2
 
3
  import json
 
4
  from typing import List, Dict, Any, Optional
5
  from datetime import datetime
6
 
 
8
 
9
  from ankigen_core.logging import logger
10
  from ankigen_core.models import Card, CardFront, CardBack
11
+ from .base import BaseAgentWrapper
12
  from .config import get_config_manager
13
+ from .schemas import CardsGenerationSchema
14
 
15
 
16
  class SubjectExpertAgent(BaseAgentWrapper):
17
  """Subject matter expert agent for domain-specific card generation"""
18
+
19
  def __init__(self, openai_client: AsyncOpenAI, subject: str = "general"):
20
  config_manager = get_config_manager()
21
  base_config = config_manager.get_agent_config("subject_expert")
22
+
23
  if not base_config:
24
+ raise ValueError(
25
+ "subject_expert configuration not found - agent system not properly initialized"
26
  )
27
+
28
+ # Enable structured output for card generation
29
+ base_config.output_type = CardsGenerationSchema
30
+
31
  # Customize instructions for the specific subject
32
  if subject != "general" and base_config.custom_prompts:
33
  subject_prompt = base_config.custom_prompts.get(subject.lower(), "")
34
  if subject_prompt:
35
+ base_config.instructions += (
36
+ f"\n\nSubject-specific guidance: {subject_prompt}"
37
+ )
38
+
39
  super().__init__(base_config, openai_client)
40
  self.subject = subject
41
+
42
  async def generate_cards(
43
+ self, topic: str, num_cards: int = 5, context: Optional[Dict[str, Any]] = None
44
  ) -> List[Card]:
45
+ """Generate flashcards for a given topic"""
46
  try:
47
+ user_input = f"Generate {num_cards} flashcards for the topic: {topic}"
48
+ if context:
49
+ user_input += f"\n\nAdditional context: {context}"
50
+
51
+ response, usage = await self.execute(user_input, context)
52
+
53
+ # Log usage information
54
+ if usage and usage.get("total_tokens", 0) > 0:
55
+ logger.info(
56
+ f"💰 Token Usage: {usage['total_tokens']} tokens (Input: {usage['input_tokens']}, Output: {usage['output_tokens']})"
57
+ )
58
+
59
+ return self._parse_cards_response(response, topic)
60
+
61
  except Exception as e:
62
+ logger.error(f"Card generation failed: {e}")
63
  raise
64
+
65
  def _build_generation_prompt(
66
  self,
67
  topic: str,
68
  num_cards: int,
69
  difficulty: str,
70
  prerequisites: List[str],
71
+ context: Dict[str, Any],
72
  ) -> str:
73
  """Build the generation prompt"""
74
  prerequisites_str = ", ".join(prerequisites) if prerequisites else "None"
75
+
76
  prompt = f"""Generate {num_cards} high-quality flashcards for the topic: {topic}
77
 
78
  Subject: {self.subject}
 
110
  }}
111
  ]
112
  }}"""
113
+
114
  if context.get("source_text"):
115
  prompt += f"\n\nBase the cards on this source material:\n{context['source_text'][:2000]}..."
116
+
117
  return prompt
118
+
119
+ def _parse_cards_response(self, response: Any, topic: str) -> List[Card]:
120
  """Parse the agent response into Card objects"""
121
  try:
122
+ # Handle structured output from CardsGenerationSchema
123
+ if hasattr(response, "cards"):
124
+ # Response is already a CardsGenerationSchema object
125
+ logger.info(f"✅ STRUCTURED OUTPUT RECEIVED: {type(response)}")
126
+ card_data_list = response.cards
127
+ elif isinstance(response, dict) and "cards" in response:
128
+ # Response is a dict with cards
129
+ card_data_list = response["cards"]
130
+ elif isinstance(response, str):
131
+ # Fallback: Clean up the response - remove markdown code blocks if present
132
+ response = response.strip()
133
+ if response.startswith("```json"):
134
+ response = response[7:] # Remove ```json
135
+ if response.startswith("```"):
136
+ response = response[3:] # Remove ```
137
+ if response.endswith("```"):
138
+ response = response[:-3] # Remove trailing ```
139
+ response = response.strip()
140
+
141
  data = json.loads(response)
142
+ if "cards" not in data:
143
+ raise ValueError("Response missing 'cards' field")
144
+ card_data_list = data["cards"]
145
  else:
146
+ raise ValueError(f"Unexpected response format: {type(response)}")
147
+
148
  cards = []
149
+ for i, card_data in enumerate(card_data_list):
150
  try:
151
+ # Handle both Pydantic models and dictionaries
152
+ if hasattr(card_data, "front"):
153
+ # Pydantic model
154
+ front_data = card_data.front
155
+ back_data = card_data.back
156
+ metadata = card_data.metadata
157
+ card_type = card_data.card_type
158
+ else:
159
+ # Dictionary
160
+ if "front" not in card_data or "back" not in card_data:
161
+ logger.warning(f"Skipping card {i}: missing front or back")
162
+ continue
163
+ front_data = card_data["front"]
164
+ back_data = card_data["back"]
165
+ metadata = card_data.get("metadata", {})
166
+ card_type = card_data.get("card_type", "basic")
167
+
168
+ # Extract question and answer
169
+ if hasattr(front_data, "question"):
170
+ question = front_data.question
171
+ else:
172
+ question = front_data.get("question", "")
173
+
174
+ if hasattr(back_data, "answer"):
175
+ answer = back_data.answer
176
+ explanation = back_data.explanation
177
+ example = back_data.example
178
+ else:
179
+ answer = back_data.get("answer", "")
180
+ explanation = back_data.get("explanation", "")
181
+ example = back_data.get("example", "")
182
+
183
+ if not question or not answer:
184
+ logger.warning(f"Skipping card {i}: missing question or answer")
185
  continue
186
+
187
  # Create Card object
188
  card = Card(
189
+ card_type=card_type,
190
+ front=CardFront(question=question),
191
  back=CardBack(
192
+ answer=answer,
193
+ explanation=explanation,
194
+ example=example,
195
  ),
196
+ metadata=metadata
197
+ if isinstance(metadata, dict)
198
+ else metadata.dict()
199
+ if hasattr(metadata, "dict")
200
+ else {},
201
  )
202
+
203
  # Ensure metadata includes subject and topic
204
  if card.metadata is not None:
205
  if "subject" not in card.metadata:
206
  card.metadata["subject"] = self.subject
207
  if "topic" not in card.metadata:
208
  card.metadata["topic"] = topic
209
+
210
  cards.append(card)
211
+
212
  except Exception as e:
213
  logger.warning(f"Failed to parse card {i}: {e}")
214
  continue
215
+
216
+ logger.info(f"✅ PARSED {len(cards)} CARDS FROM STRUCTURED OUTPUT")
217
  return cards
218
+
219
  except json.JSONDecodeError as e:
220
+ logger.error(f"💥 JSON DECODE ERROR: {e}")
221
+ logger.error("💥 RAW RESPONSE THAT FAILED TO PARSE:")
222
+ logger.error("---FAILED RESPONSE START---")
223
+ logger.error(f"{response}")
224
+ logger.error("---FAILED RESPONSE END---")
225
+ logger.error(f"💥 RESPONSE TYPE: {type(response)}")
226
+ if isinstance(response, str):
227
+ logger.error(f"💥 RESPONSE LENGTH: {len(response)}")
228
+ logger.error(f"💥 FIRST 200 CHARS: {repr(response[:200])}")
229
+ logger.error(f"💥 LAST 200 CHARS: {repr(response[-200:])}")
230
  raise ValueError(f"Invalid JSON response from agent: {e}")
231
  except Exception as e:
232
+ logger.error(f"💥 GENERAL PARSING ERROR: {e}")
233
+ logger.error(f"💥 RESPONSE THAT CAUSED ERROR: {response}")
234
  raise
235
 
236
 
237
  class PedagogicalAgent(BaseAgentWrapper):
238
  """Pedagogical specialist for educational effectiveness"""
239
+
240
  def __init__(self, openai_client: AsyncOpenAI):
241
  config_manager = get_config_manager()
242
  base_config = config_manager.get_agent_config("pedagogical")
243
+
244
  if not base_config:
245
+ raise ValueError(
246
+ "pedagogical configuration not found - agent system not properly initialized"
247
  )
248
+
249
  super().__init__(base_config, openai_client)
250
+
251
  async def review_cards(self, cards: List[Card]) -> List[Dict[str, Any]]:
252
  """Review cards for pedagogical effectiveness"""
253
+ datetime.now()
254
+
255
  try:
256
  reviews = []
257
+
258
  for i, card in enumerate(cards):
259
  user_input = self._build_review_prompt(card, i)
260
+ response, usage = await self.execute(user_input)
261
+
262
  try:
263
+ review_data = (
264
+ json.loads(response) if isinstance(response, str) else response
265
+ )
266
  reviews.append(review_data)
267
  except Exception as e:
268
  logger.warning(f"Failed to parse review for card {i}: {e}")
269
+ reviews.append(
270
+ {
271
+ "approved": True,
272
+ "feedback": f"Review parsing failed: {e}",
273
+ "improvements": [],
274
+ }
275
+ )
276
+
277
  # Record successful execution
278
+
279
  return reviews
280
+
281
  except Exception as e:
282
  logger.error(f"PedagogicalAgent review failed: {e}")
283
  raise
284
+
285
  def _parse_review_response(self, response) -> Dict[str, Any]:
286
  """Parse the review response into a dictionary"""
287
  try:
288
  if isinstance(response, str):
289
  data = json.loads(response)
290
  else:
291
  data = response
292
+
293
  # Validate required fields
294
+ required_fields = [
295
+ "pedagogical_quality",
296
+ "clarity",
297
+ "learning_effectiveness",
298
+ ]
299
  if not all(field in data for field in required_fields):
300
  raise ValueError("Missing required review fields")
301
+
302
  return data
303
+
304
  except json.JSONDecodeError as e:
305
  logger.error(f"Failed to parse review response as JSON: {e}")
306
  raise ValueError(f"Invalid review response: {e}")
307
  except Exception as e:
308
  logger.error(f"Failed to parse review response: {e}")
309
  raise ValueError(f"Invalid review response: {e}")
310
+
311
  def _build_review_prompt(self, card: Card, index: int) -> str:
312
  """Build the review prompt for a single card"""
313
  return f"""Review this flashcard for pedagogical effectiveness:
 
341
 
342
  class ContentStructuringAgent(BaseAgentWrapper):
343
  """Content organization and formatting specialist"""
344
+
345
  def __init__(self, openai_client: AsyncOpenAI):
346
  config_manager = get_config_manager()
347
  base_config = config_manager.get_agent_config("content_structuring")
348
+
349
  if not base_config:
350
+ raise ValueError(
351
+ "content_structuring configuration not found - agent system not properly initialized"
352
  )
353
+
354
  super().__init__(base_config, openai_client)
355
+
356
  async def structure_cards(self, cards: List[Card]) -> List[Card]:
357
  """Structure and format cards for consistency"""
358
+ datetime.now()
359
+
360
  try:
361
  structured_cards = []
362
+
363
  for i, card in enumerate(cards):
364
  user_input = self._build_structuring_prompt(card, i)
365
+ response, usage = await self.execute(user_input)
366
+
367
  try:
368
+ structured_data = (
369
+ json.loads(response) if isinstance(response, str) else response
370
+ )
371
  structured_card = self._parse_structured_card(structured_data, card)
372
  structured_cards.append(structured_card)
373
  except Exception as e:
374
  logger.warning(f"Failed to structure card {i}: {e}")
375
  structured_cards.append(card)  # Keep original on failure
376
+
377
  return structured_cards
378
+
379
  except Exception as e:
380
  logger.error(f"ContentStructuringAgent failed: {e}")
381
  raise
382
+
383
  def _build_structuring_prompt(self, card: Card, index: int) -> str:
384
  """Build the structuring prompt for a single card"""
385
  return f"""Structure and format this flashcard for optimal learning:
 
422
  "category": "category name"
423
  }}
424
  }}"""
425
+
426
+ def _parse_structured_card(
427
+ self, structured_data: Dict[str, Any], original_card: Card
428
+ ) -> Card:
429
  """Parse structured card data into Card object"""
430
  try:
431
  return Card(
432
  card_type=structured_data.get("card_type", original_card.card_type),
433
+ front=CardFront(question=structured_data["front"]["question"]),
434
  back=CardBack(
435
  answer=structured_data["back"]["answer"],
436
  explanation=structured_data["back"].get("explanation", ""),
437
+ example=structured_data["back"].get("example", ""),
438
  ),
439
+ metadata=structured_data.get("metadata", original_card.metadata),
440
  )
441
  except Exception as e:
442
  logger.warning(f"Failed to parse structured card: {e}")
 
445
 
446
  class GenerationCoordinator(BaseAgentWrapper):
447
  """Coordinates the multi-agent card generation workflow"""
448
+
449
  def __init__(self, openai_client: AsyncOpenAI):
450
  config_manager = get_config_manager()
451
  base_config = config_manager.get_agent_config("generation_coordinator")
452
+
453
  if not base_config:
454
+ raise ValueError(
455
+ "generation_coordinator configuration not found - agent system not properly initialized"
 
 
 
 
 
456
  )
457
+
458
  super().__init__(base_config, openai_client)
459
+
460
  # Initialize specialized agents
461
  self.subject_expert = None
462
  self.pedagogical = PedagogicalAgent(openai_client)
463
  self.content_structuring = ContentStructuringAgent(openai_client)
464
+
465
  async def coordinate_generation(
466
  self,
467
  topic: str,
 
470
  difficulty: str = "intermediate",
471
  enable_review: bool = True,
472
  enable_structuring: bool = True,
473
+ context: Dict[str, Any] = None,
474
  ) -> List[Card]:
475
  """Coordinate the full card generation pipeline"""
476
+ datetime.now()
477
+
478
  try:
479
  # Initialize subject expert for the specific subject
480
  if not self.subject_expert or self.subject_expert.subject != subject:
481
  self.subject_expert = SubjectExpertAgent(self.openai_client, subject)
482
+
483
  logger.info(f"Starting coordinated generation: {topic} ({subject})")
484
+
485
  # Step 1: Generate initial cards
486
  cards = await self.subject_expert.generate_cards(
487
+ topic=topic, num_cards=num_cards, context=context
488
  )
489
+
490
  # Step 2: Pedagogical review (optional)
491
  if enable_review and cards:
492
  logger.info("Performing pedagogical review...")
493
  reviews = await self.pedagogical.review_cards(cards)
494
+
495
  # Filter or flag cards based on reviews
496
  approved_cards = []
497
  for card, review in zip(cards, reviews):
498
  if review.get("approved", True):
499
  approved_cards.append(card)
500
  else:
501
+ logger.info(
502
+ f"Card flagged for revision: {card.front.question[:50]}..."
503
+ )
504
+
505
  cards = approved_cards
506
+
507
  # Step 3: Content structuring (optional)
508
  if enable_structuring and cards:
509
  logger.info("Performing content structuring...")
510
  cards = await self.content_structuring.structure_cards(cards)
511
+
512
  # Record successful coordination
513
+
514
  logger.info(f"Generation coordination complete: {len(cards)} cards")
515
  return cards
516
+
517
  except Exception as e:
518
  logger.error(f"Generation coordination failed: {e}")
519
+ raise
520
+
521
+ async def generate_structured_cards(
522
+ self,
523
+ topic: str,
524
+ num_cards: int = 5,
525
+ difficulty: str = "intermediate",
526
+ context: Optional[Dict[str, Any]] = None,
527
+ ) -> List[Card]:
528
+ """Generate structured flashcards with enhanced metadata"""
529
+ try:
530
+ user_input = f"""Generate {num_cards} structured flashcards for: {topic}
531
+
532
+ Difficulty: {difficulty}
533
+ Requirements:
534
+ - Include detailed metadata
535
+ - Add learning outcomes
536
+ - Specify prerequisites
537
+ - Include related concepts
538
+ - Estimate study time"""
539
+
540
+ response, usage = await self.execute(user_input)
541
+
542
+ # Log usage information
543
+ if usage and usage.get("total_tokens", 0) > 0:
544
+ logger.info(
545
+ f"💰 Token Usage: {usage['total_tokens']} tokens (Input: {usage['input_tokens']}, Output: {usage['output_tokens']})"
546
+ )
547
+
548
+ # Parse the structured response directly since it should be a CardsGenerationSchema
549
+ if hasattr(response, "cards") and response.cards:
550
+ return response.cards
551
+ else:
552
+ logger.warning("No cards found in structured response")
553
+ return []
554
+
555
+ except Exception as e:
556
+ logger.error(f"Structured card generation failed: {e}")
557
+ raise
558
+
559
+ async def generate_adaptive_cards(
560
+ self,
561
+ topic: str,
562
+ learning_style: str = "visual",
563
+ num_cards: int = 5,
564
+ context: Optional[Dict[str, Any]] = None,
565
+ ) -> List[Card]:
566
+ """Generate cards adapted to specific learning styles"""
567
+ try:
568
+ user_input = f"""Generate {num_cards} flashcards for: {topic}
569
+
570
+ Learning Style: {learning_style}
571
+ Adapt the content format and presentation to match this learning style."""
572
+
573
+ response, usage = await self.execute(user_input)
574
+
575
+ # Log usage information
576
+ if usage and usage.get("total_tokens", 0) > 0:
577
+ logger.info(
578
+ f"💰 Token Usage: {usage['total_tokens']} tokens (Input: {usage['input_tokens']}, Output: {usage['output_tokens']})"
579
+ )
580
+
581
+ # Parse the adaptive response directly since it should be a CardsGenerationSchema
582
+ if hasattr(response, "cards") and response.cards:
583
+ return response.cards
584
+ else:
585
+ logger.warning("No cards found in adaptive response")
586
+ return []
587
+
588
+ except Exception as e:
589
+ logger.error(f"Adaptive card generation failed: {e}")
590
+ raise
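
For reference, a hedged sketch of driving the reworked SubjectExpertAgent on its own (illustrative only; the API key and topic are placeholders, and a registered subject_expert config is assumed):

import asyncio
from openai import AsyncOpenAI
from ankigen_core.agents.generators import SubjectExpertAgent

async def demo() -> None:
    client = AsyncOpenAI(api_key="sk-...")  # placeholder key (assumption)
    expert = SubjectExpertAgent(client, subject="programming")
    # generate_cards() logs token usage internally and returns a List[Card]
    cards = await expert.generate_cards(topic="Python decorators", num_cards=3)
    for card in cards:
        print(card.front.question, "->", card.back.answer)

asyncio.run(demo())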
ankigen_core/agents/integration.py CHANGED
@@ -1,65 +1,61 @@
1
  # Main integration module for AnkiGen agent system
2
 
3
- import asyncio
4
- from typing import List, Dict, Any, Optional, Tuple
5
  from datetime import datetime
6
 
7
- from openai import AsyncOpenAI
8
 
9
  from ankigen_core.logging import logger
10
  from ankigen_core.models import Card
11
  from ankigen_core.llm_interface import OpenAIClientManager
12
 
13
- from .feature_flags import get_feature_flags, AgentMode
14
  from .generators import GenerationCoordinator, SubjectExpertAgent
15
- from .judges import JudgeCoordinator, JudgeDecision
16
  from .enhancers import RevisionAgent, EnhancementAgent
17
- from .metrics import get_metrics, record_agent_execution
18
 
19
 
20
  class AgentOrchestrator:
21
  """Main orchestrator for the AnkiGen agent system"""
22
-
23
  def __init__(self, client_manager: OpenAIClientManager):
24
  self.client_manager = client_manager
25
  self.openai_client = None
26
-
27
  # Initialize coordinators
28
  self.generation_coordinator = None
29
  self.judge_coordinator = None
30
  self.revision_agent = None
31
  self.enhancement_agent = None
32
-
33
- # Feature flags
34
- self.feature_flags = get_feature_flags()
35
-
36
- async def initialize(self, api_key: str):
37
  """Initialize the agent system"""
38
  try:
39
  # Initialize OpenAI client
40
  await self.client_manager.initialize_client(api_key)
41
  self.openai_client = self.client_manager.get_client()
42
-
43
- # Initialize agents based on feature flags
44
- if self.feature_flags.enable_generation_coordinator:
45
- self.generation_coordinator = GenerationCoordinator(self.openai_client)
46
-
47
- if self.feature_flags.enable_judge_coordinator:
48
- self.judge_coordinator = JudgeCoordinator(self.openai_client)
49
-
50
- if self.feature_flags.enable_revision_agent:
51
- self.revision_agent = RevisionAgent(self.openai_client)
52
-
53
- if self.feature_flags.enable_enhancement_agent:
54
- self.enhancement_agent = EnhancementAgent(self.openai_client)
55
-
 
56
  logger.info("Agent system initialized successfully")
57
- logger.info(f"Active agents: {self.feature_flags.get_enabled_agents()}")
58
-
59
  except Exception as e:
60
  logger.error(f"Failed to initialize agent system: {e}")
61
  raise
62
-
63
  async def generate_cards_with_agents(
64
  self,
65
  topic: str,
@@ -67,175 +63,148 @@ class AgentOrchestrator:
67
  num_cards: int = 5,
68
  difficulty: str = "intermediate",
69
  enable_quality_pipeline: bool = True,
70
- context: Dict[str, Any] = None
71
  ) -> Tuple[List[Card], Dict[str, Any]]:
72
  """Generate cards using the agent system"""
73
  start_time = datetime.now()
74
-
75
  try:
76
- # Check if agents should be used
77
- if not self.feature_flags.should_use_agents():
78
- raise ValueError("Agent mode not enabled")
79
-
80
  if not self.openai_client:
81
  raise ValueError("Agent system not initialized")
82
-
83
  logger.info(f"Starting agent-based card generation: {topic} ({subject})")
84
-
85
  # Phase 1: Generation
86
  cards = await self._generation_phase(
87
  topic=topic,
88
  subject=subject,
89
  num_cards=num_cards,
90
  difficulty=difficulty,
91
- context=context
92
  )
93
-
94
- # Phase 2: Quality Assessment (optional)
95
  quality_results = {}
96
- if enable_quality_pipeline and self.feature_flags.enable_judge_coordinator:
97
  cards, quality_results = await self._quality_phase(cards)
98
-
99
- # Phase 3: Enhancement (optional)
100
- if self.feature_flags.enable_enhancement_agent and self.enhancement_agent:
101
  cards = await self._enhancement_phase(cards)
102
-
103
  # Collect metadata
104
  metadata = {
105
  "generation_method": "agent_system",
106
- "agents_used": self.feature_flags.get_enabled_agents(),
107
  "generation_time": (datetime.now() - start_time).total_seconds(),
108
  "cards_generated": len(cards),
109
  "quality_results": quality_results,
110
  "topic": topic,
111
  "subject": subject,
112
- "difficulty": difficulty
113
  }
114
-
115
- # Record overall execution
116
- record_agent_execution(
117
- agent_name="agent_orchestrator",
118
- start_time=start_time,
119
- end_time=datetime.now(),
120
- success=True,
121
- metadata=metadata
122
  )
123
-
124
- logger.info(f"Agent-based generation complete: {len(cards)} cards generated")
125
  return cards, metadata
126
-
127
  except Exception as e:
128
- record_agent_execution(
129
- agent_name="agent_orchestrator",
130
- start_time=start_time,
131
- end_time=datetime.now(),
132
- success=False,
133
- error_message=str(e),
134
- metadata={"topic": topic, "subject": subject}
135
- )
136
-
137
  logger.error(f"Agent-based generation failed: {e}")
138
  raise
139
-
140
  async def _generation_phase(
141
  self,
142
  topic: str,
143
  subject: str,
144
  num_cards: int,
145
  difficulty: str,
146
- context: Dict[str, Any] = None
147
  ) -> List[Card]:
148
  """Execute the card generation phase"""
149
-
150
- if self.generation_coordinator and self.feature_flags.enable_generation_coordinator:
151
  # Use coordinated multi-agent generation
152
  cards = await self.generation_coordinator.coordinate_generation(
153
  topic=topic,
154
  subject=subject,
155
  num_cards=num_cards,
156
  difficulty=difficulty,
157
- enable_review=self.feature_flags.enable_pedagogical_agent,
158
- enable_structuring=self.feature_flags.enable_content_structuring_agent,
159
- context=context
160
  )
161
- elif self.feature_flags.enable_subject_expert_agent:
162
  # Use subject expert agent directly
163
  subject_expert = SubjectExpertAgent(self.openai_client, subject)
164
  cards = await subject_expert.generate_cards(
165
- topic=topic,
166
- num_cards=num_cards,
167
- difficulty=difficulty,
168
- context=context
169
  )
170
- else:
171
- # Fallback to legacy generation (would be implemented separately)
172
- raise ValueError("No generation agents enabled")
173
-
174
  logger.info(f"Generation phase complete: {len(cards)} cards generated")
175
  return cards
176
-
177
  async def _quality_phase(
178
- self,
179
- cards: List[Card]
180
  ) -> Tuple[List[Card], Dict[str, Any]]:
181
  """Execute the quality assessment and improvement phase"""
182
-
183
  if not self.judge_coordinator:
184
  return cards, {"message": "Judge coordinator not available"}
185
-
186
  logger.info(f"Starting quality assessment for {len(cards)} cards")
187
-
188
  # Judge all cards
189
  judge_results = await self.judge_coordinator.coordinate_judgment(
190
  cards=cards,
191
- enable_parallel=self.feature_flags.enable_parallel_judging,
192
- min_consensus=self.feature_flags.min_judge_consensus
193
  )
194
-
195
  # Separate approved and rejected cards
196
  approved_cards = []
197
  rejected_cards = []
198
-
199
  for card, decisions, approved in judge_results:
200
  if approved:
201
  approved_cards.append(card)
202
  else:
203
  rejected_cards.append((card, decisions))
204
-
205
  # Attempt to revise rejected cards
206
  revised_cards = []
207
  if self.revision_agent and rejected_cards:
208
  logger.info(f"Attempting to revise {len(rejected_cards)} rejected cards")
209
-
210
  for card, decisions in rejected_cards:
211
  try:
212
  revised_card = await self.revision_agent.revise_card(
213
  card=card,
214
  judge_decisions=decisions,
215
- max_iterations=self.feature_flags.max_revision_iterations
216
  )
217
-
218
  # Re-judge the revised card
219
- if self.feature_flags.enable_parallel_judging:
220
- revision_results = await self.judge_coordinator.coordinate_judgment(
221
- cards=[revised_card],
222
- enable_parallel=False, # Single card, no need for parallel
223
- min_consensus=self.feature_flags.min_judge_consensus
224
- )
225
-
226
- if revision_results and revision_results[0][2]: # If approved
227
- revised_cards.append(revised_card)
228
- else:
229
- logger.warning(f"Revised card still rejected: {card.front.question[:50]}...")
230
- else:
231
  revised_cards.append(revised_card)
232
-
233
  except Exception as e:
234
  logger.error(f"Failed to revise card: {e}")
235
-
236
  # Combine approved and successfully revised cards
237
  final_cards = approved_cards + revised_cards
238
-
239
  # Prepare quality results
240
  quality_results = {
241
  "total_cards_judged": len(cards),
@@ -243,106 +212,49 @@ class AgentOrchestrator:
243
  "initially_rejected": len(rejected_cards),
244
  "successfully_revised": len(revised_cards),
245
  "final_approval_rate": len(final_cards) / len(cards) if cards else 0,
246
- "judge_decisions": len(judge_results)
247
  }
248
-
249
- logger.info(f"Quality phase complete: {len(final_cards)}/{len(cards)} cards approved")
250
  return final_cards, quality_results
251
-
252
  async def _enhancement_phase(self, cards: List[Card]) -> List[Card]:
253
  """Execute the enhancement phase"""
254
-
255
  if not self.enhancement_agent:
256
  return cards
257
-
258
  logger.info(f"Starting enhancement for {len(cards)} cards")
259
-
260
  enhanced_cards = await self.enhancement_agent.enhance_card_batch(
261
- cards=cards,
262
- enhancement_targets=["explanation", "example", "metadata"]
263
  )
264
-
265
  logger.info(f"Enhancement phase complete: {len(enhanced_cards)} cards enhanced")
266
  return enhanced_cards
267
-
268
  def get_performance_metrics(self) -> Dict[str, Any]:
269
  """Get performance metrics for the agent system"""
270
- metrics = get_metrics()
271
-
272
  return {
273
- "agent_performance": metrics.get_performance_report(hours=24),
274
- "quality_metrics": metrics.get_quality_metrics(),
275
- "feature_flags": self.feature_flags.to_dict(),
276
- "enabled_agents": self.feature_flags.get_enabled_agents()
277
  }
278
 
279
 
280
  async def integrate_with_existing_workflow(
281
- client_manager: OpenAIClientManager,
282
- api_key: str,
283
- **generation_params
284
  ) -> Tuple[List[Card], Dict[str, Any]]:
285
  """Integration point for existing AnkiGen workflow"""
286
-
287
- feature_flags = get_feature_flags()
288
-
289
- # Check if agents should be used
290
- if not feature_flags.should_use_agents():
291
- logger.info("Agents disabled, falling back to legacy generation")
292
- # Would call the existing generation logic here
293
- raise NotImplementedError("Legacy fallback not implemented in this demo")
294
-
295
  # Initialize and use agent system
296
  orchestrator = AgentOrchestrator(client_manager)
297
  await orchestrator.initialize(api_key)
298
-
299
- cards, metadata = await orchestrator.generate_cards_with_agents(**generation_params)
300
-
301
- return cards, metadata
302
303
 
304
- # Example usage function for testing/demo
305
- async def demo_agent_system():
306
- """Demo function showing how to use the agent system"""
307
-
308
- # This would be replaced with actual API key in real usage
309
- api_key = "your-openai-api-key"
310
-
311
- # Initialize client manager
312
- client_manager = OpenAIClientManager()
313
-
314
- try:
315
- # Create orchestrator
316
- orchestrator = AgentOrchestrator(client_manager)
317
- await orchestrator.initialize(api_key)
318
-
319
- # Generate cards with agents
320
- cards, metadata = await orchestrator.generate_cards_with_agents(
321
- topic="Python Functions",
322
- subject="programming",
323
- num_cards=3,
324
- difficulty="intermediate",
325
- enable_quality_pipeline=True
326
- )
327
-
328
- print(f"Generated {len(cards)} cards:")
329
- for i, card in enumerate(cards, 1):
330
- print(f"\nCard {i}:")
331
- print(f"Q: {card.front.question}")
332
- print(f"A: {card.back.answer}")
333
- print(f"Subject: {card.metadata.get('subject', 'Unknown')}")
334
-
335
- print(f"\nMetadata: {metadata}")
336
-
337
- # Get performance metrics
338
- performance = orchestrator.get_performance_metrics()
339
- print(f"\nPerformance: {performance}")
340
-
341
- except Exception as e:
342
- logger.error(f"Demo failed: {e}")
343
- raise
344
-
345
-
346
- if __name__ == "__main__":
347
- # Run the demo
348
- asyncio.run(demo_agent_system())
 
1
  # Main integration module for AnkiGen agent system
2
 
3
+ from typing import List, Dict, Any, Tuple
 
4
  from datetime import datetime
5
 
 
6
 
7
  from ankigen_core.logging import logger
8
  from ankigen_core.models import Card
9
  from ankigen_core.llm_interface import OpenAIClientManager
10
 
 
11
  from .generators import GenerationCoordinator, SubjectExpertAgent
12
+ from .judges import JudgeCoordinator
13
  from .enhancers import RevisionAgent, EnhancementAgent
 
14
 
15
 
16
  class AgentOrchestrator:
17
  """Main orchestrator for the AnkiGen agent system"""
18
+
19
  def __init__(self, client_manager: OpenAIClientManager):
20
  self.client_manager = client_manager
21
  self.openai_client = None
22
+
23
  # Initialize coordinators
24
  self.generation_coordinator = None
25
  self.judge_coordinator = None
26
  self.revision_agent = None
27
  self.enhancement_agent = None
28
+
29
+ # All agents enabled by default
30
+ self.all_agents_enabled = True
31
+
32
+ async def initialize(self, api_key: str, model_overrides: Dict[str, str] = None):
33
  """Initialize the agent system"""
34
  try:
35
  # Initialize OpenAI client
36
  await self.client_manager.initialize_client(api_key)
37
  self.openai_client = self.client_manager.get_client()
38
+
39
+ # Set up model overrides if provided
40
+ if model_overrides:
41
+ from ankigen_core.agents.config import get_config_manager
42
+
43
+ config_manager = get_config_manager()
44
+ config_manager.update_models(model_overrides)
45
+ logger.info(f"Applied model overrides: {model_overrides}")
46
+
47
+ # Initialize all agents
48
+ self.generation_coordinator = GenerationCoordinator(self.openai_client)
49
+ self.judge_coordinator = JudgeCoordinator(self.openai_client)
50
+ self.revision_agent = RevisionAgent(self.openai_client)
51
+ self.enhancement_agent = EnhancementAgent(self.openai_client)
52
+
53
  logger.info("Agent system initialized successfully")
54
+
 
55
  except Exception as e:
56
  logger.error(f"Failed to initialize agent system: {e}")
57
  raise
58
+
59
  async def generate_cards_with_agents(
60
  self,
61
  topic: str,
 
63
  num_cards: int = 5,
64
  difficulty: str = "intermediate",
65
  enable_quality_pipeline: bool = True,
66
+ context: Dict[str, Any] = None,
67
  ) -> Tuple[List[Card], Dict[str, Any]]:
68
  """Generate cards using the agent system"""
69
  start_time = datetime.now()
70
+
71
  try:
72
+ # Agents are always enabled now
73
+
74
  if not self.openai_client:
75
  raise ValueError("Agent system not initialized")
76
+
77
  logger.info(f"Starting agent-based card generation: {topic} ({subject})")
78
+
79
  # Phase 1: Generation
80
  cards = await self._generation_phase(
81
  topic=topic,
82
  subject=subject,
83
  num_cards=num_cards,
84
  difficulty=difficulty,
85
+ context=context,
86
  )
87
+
88
+ # Phase 2: Quality Assessment
89
  quality_results = {}
90
+ if enable_quality_pipeline and self.judge_coordinator:
91
  cards, quality_results = await self._quality_phase(cards)
92
+
93
+ # Phase 3: Enhancement
94
+ if self.enhancement_agent:
95
  cards = await self._enhancement_phase(cards)
96
+
97
  # Collect metadata
98
  metadata = {
99
  "generation_method": "agent_system",
 
100
  "generation_time": (datetime.now() - start_time).total_seconds(),
101
  "cards_generated": len(cards),
102
  "quality_results": quality_results,
103
  "topic": topic,
104
  "subject": subject,
105
+ "difficulty": difficulty,
106
  }
107
+
108
+ logger.info(
109
+ f"Agent-based generation complete: {len(cards)} cards generated"
110
  )
111
  return cards, metadata
112
+
113
  except Exception as e:
114
  logger.error(f"Agent-based generation failed: {e}")
115
  raise
116
+
117
  async def _generation_phase(
118
  self,
119
  topic: str,
120
  subject: str,
121
  num_cards: int,
122
  difficulty: str,
123
+ context: Dict[str, Any] = None,
124
  ) -> List[Card]:
125
  """Execute the card generation phase"""
126
+
127
+ if self.generation_coordinator:
128
  # Use coordinated multi-agent generation
129
  cards = await self.generation_coordinator.coordinate_generation(
130
  topic=topic,
131
  subject=subject,
132
  num_cards=num_cards,
133
  difficulty=difficulty,
134
+ enable_review=True,
135
+ enable_structuring=True,
136
+ context=context,
137
  )
138
+ else:
139
  # Use subject expert agent directly
140
  subject_expert = SubjectExpertAgent(self.openai_client, subject)
141
  cards = await subject_expert.generate_cards(
142
+ topic=topic, num_cards=num_cards, difficulty=difficulty, context=context
143
  )
144
+
145
  logger.info(f"Generation phase complete: {len(cards)} cards generated")
146
  return cards
147
+
148
  async def _quality_phase(
149
+ self, cards: List[Card]
 
150
  ) -> Tuple[List[Card], Dict[str, Any]]:
151
  """Execute the quality assessment and improvement phase"""
152
+
153
  if not self.judge_coordinator:
154
  return cards, {"message": "Judge coordinator not available"}
155
+
156
  logger.info(f"Starting quality assessment for {len(cards)} cards")
157
+
158
  # Judge all cards
159
  judge_results = await self.judge_coordinator.coordinate_judgment(
160
  cards=cards,
161
+ enable_parallel=True,
162
+ min_consensus=0.6,
163
  )
164
+
165
  # Separate approved and rejected cards
166
  approved_cards = []
167
  rejected_cards = []
168
+
169
  for card, decisions, approved in judge_results:
170
  if approved:
171
  approved_cards.append(card)
172
  else:
173
  rejected_cards.append((card, decisions))
174
+
175
  # Attempt to revise rejected cards
176
  revised_cards = []
177
  if self.revision_agent and rejected_cards:
178
  logger.info(f"Attempting to revise {len(rejected_cards)} rejected cards")
179
+
180
  for card, decisions in rejected_cards:
181
  try:
182
  revised_card = await self.revision_agent.revise_card(
183
  card=card,
184
  judge_decisions=decisions,
185
+ max_iterations=2,
186
  )
187
+
188
  # Re-judge the revised card
189
+ revision_results = await self.judge_coordinator.coordinate_judgment(
190
+ cards=[revised_card],
191
+ enable_parallel=False, # Single card, no need for parallel
192
+ min_consensus=0.6,
193
+ )
194
+
195
+ if revision_results and revision_results[0][2]: # If approved
 
 
 
 
 
196
  revised_cards.append(revised_card)
197
+ else:
198
+ logger.warning(
199
+ f"Revised card still rejected: {card.front.question[:50]}..."
200
+ )
201
+
202
  except Exception as e:
203
  logger.error(f"Failed to revise card: {e}")
204
+
205
  # Combine approved and successfully revised cards
206
  final_cards = approved_cards + revised_cards
207
+
208
  # Prepare quality results
209
  quality_results = {
210
  "total_cards_judged": len(cards),
211
  "initially_approved": len(approved_cards),
212
  "initially_rejected": len(rejected_cards),
213
  "successfully_revised": len(revised_cards),
214
  "final_approval_rate": len(final_cards) / len(cards) if cards else 0,
215
+ "judge_decisions": len(judge_results),
216
  }
217
+
218
+ logger.info(
219
+ f"Quality phase complete: {len(final_cards)}/{len(cards)} cards approved"
220
+ )
221
  return final_cards, quality_results
222
+
223
  async def _enhancement_phase(self, cards: List[Card]) -> List[Card]:
224
  """Execute the enhancement phase"""
225
+
226
  if not self.enhancement_agent:
227
  return cards
228
+
229
  logger.info(f"Starting enhancement for {len(cards)} cards")
230
+
231
  enhanced_cards = await self.enhancement_agent.enhance_card_batch(
232
+ cards=cards, enhancement_targets=["explanation", "example", "metadata"]
 
233
  )
234
+
235
  logger.info(f"Enhancement phase complete: {len(enhanced_cards)} cards enhanced")
236
  return enhanced_cards
237
+
238
  def get_performance_metrics(self) -> Dict[str, Any]:
239
  """Get performance metrics for the agent system"""
240
+
241
+ # Basic performance info only
242
  return {
243
+ "agents_enabled": True,
 
 
 
244
  }
245
 
246
 
247
  async def integrate_with_existing_workflow(
248
+ client_manager: OpenAIClientManager, api_key: str, **generation_params
249
  ) -> Tuple[List[Card], Dict[str, Any]]:
250
  """Integration point for existing AnkiGen workflow"""
251
+
252
+ # Agents are always enabled
253
+
254
  # Initialize and use agent system
255
  orchestrator = AgentOrchestrator(client_manager)
256
  await orchestrator.initialize(api_key)
257
 
258
+ cards, metadata = await orchestrator.generate_cards_with_agents(**generation_params)
259
 
260
+ return cards, metadata
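
A minimal usage sketch of this entry point; everything at the call site below (the manager construction, topic values, printing) is illustrative, only `integrate_with_existing_workflow` itself comes from this file:

    import asyncio

    async def main():
        manager = OpenAIClientManager()  # hypothetical construction
        cards, meta = await integrate_with_existing_workflow(
            client_manager=manager,
            api_key="sk-...",
            topic="Python decorators",
            subject="programming",
            num_cards=5,
        )
        print(f"{len(cards)} cards in {meta['generation_time']:.1f}s")

    asyncio.run(main())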
ankigen_core/agents/judges.py CHANGED
@@ -2,8 +2,9 @@
2
 
3
  import json
4
  import asyncio
5
- from typing import List, Dict, Any, Optional, Tuple
6
  from datetime import datetime
 
7
 
8
  from openai import AsyncOpenAI
9
 
@@ -11,124 +12,105 @@ from ankigen_core.logging import logger
11
  from ankigen_core.models import Card
12
  from .base import BaseAgentWrapper, AgentConfig
13
  from .config import get_config_manager
14
- from .metrics import record_agent_execution
15
 
16
 
 
17
  class JudgeDecision:
18
- """Represents a judge's decision on a card"""
19
-
20
- def __init__(
21
- self,
22
- approved: bool,
23
- score: float,
24
- feedback: str,
25
- improvements: List[str] = None,
26
- judge_name: str = "",
27
- metadata: Dict[str, Any] = None
28
- ):
29
- self.approved = approved
30
- self.score = score # 0.0 to 1.0
31
- self.feedback = feedback
32
- self.improvements = improvements or []
33
- self.judge_name = judge_name
34
- self.metadata = metadata or {}
35
 
36
 
37
  class ContentAccuracyJudge(BaseAgentWrapper):
38
  """Judge for factual accuracy and content correctness"""
39
-
40
  def __init__(self, openai_client: AsyncOpenAI):
41
  config_manager = get_config_manager()
42
  base_config = config_manager.get_agent_config("content_accuracy_judge")
43
-
44
  if not base_config:
45
- base_config = AgentConfig(
46
- name="content_accuracy_judge",
47
- instructions="""You are a fact-checking and accuracy specialist.
48
- Verify the correctness and accuracy of flashcard content, checking for factual errors,
49
- misconceptions, and ensuring consistency with authoritative sources.""",
50
- model="gpt-4o",
51
- temperature=0.3
52
  )
53
-
54
  super().__init__(base_config, openai_client)
55
-
56
- async def judge_card(self, card: Card) -> JudgeDecision:
57
- """Judge a single card for content accuracy"""
58
- start_time = datetime.now()
59
-
60
  try:
61
- user_input = self._build_judgment_prompt(card)
62
- response = await self.execute(user_input)
63
-
64
- # Parse the response
65
- decision_data = json.loads(response) if isinstance(response, str) else response
66
- decision = self._parse_decision(decision_data)
67
-
68
- # Record successful execution
69
- record_agent_execution(
70
- agent_name=self.config.name,
71
- start_time=start_time,
72
- end_time=datetime.now(),
73
- success=True,
74
- metadata={
75
- "cards_judged": 1,
76
- "approved": 1 if decision.approved else 0,
77
- "score": decision.score
78
- }
79
- )
80
-
81
- return decision
82
-
 
83
  except Exception as e:
84
- record_agent_execution(
85
- agent_name=self.config.name,
86
- start_time=start_time,
87
- end_time=datetime.now(),
88
- success=False,
89
- error_message=str(e)
90
- )
91
-
92
- logger.error(f"ContentAccuracyJudge failed: {e}")
93
- # Return default approval to avoid blocking workflow
94
- return JudgeDecision(
95
- approved=True,
96
- score=0.5,
97
- feedback=f"Judgment failed: {str(e)}",
98
- judge_name=self.config.name
  )
100
-
101
- def _build_judgment_prompt(self, card: Card) -> str:
102
- """Build the judgment prompt for content accuracy"""
103
- return f"""Evaluate this flashcard for factual accuracy and content correctness:
104
 
105
- Card:
106
- Question: {card.front.question}
107
- Answer: {card.back.answer}
108
- Explanation: {card.back.explanation}
109
- Example: {card.back.example}
110
- Subject: {card.metadata.get('subject', 'Unknown')}
111
- Topic: {card.metadata.get('topic', 'Unknown')}
112
 
113
- Evaluate for:
114
- 1. Factual Accuracy: Are all statements factually correct?
115
- 2. Source Consistency: Does content align with authoritative sources?
116
- 3. Terminology: Is domain-specific terminology used correctly?
117
- 4. Misconceptions: Does the card avoid or address common misconceptions?
118
- 5. Currency: Is the information up-to-date?
119
 
120
- Return your assessment as JSON:
121
- {{
122
- "approved": true/false,
123
- "accuracy_score": 0.0-1.0,
124
- "factual_errors": ["error1", "error2"],
125
- "terminology_issues": ["issue1", "issue2"],
126
- "misconceptions": ["misconception1"],
127
- "suggestions": ["improvement1", "improvement2"],
128
- "confidence": 0.0-1.0,
129
- "detailed_feedback": "Comprehensive assessment of content accuracy"
130
- }}"""
131
-
132
  def _parse_decision(self, decision_data: Dict[str, Any]) -> JudgeDecision:
133
  """Parse the judge response into a JudgeDecision"""
134
  return JudgeDecision(
@@ -141,72 +123,72 @@ Return your assessment as JSON:
141
  "factual_errors": decision_data.get("factual_errors", []),
142
  "terminology_issues": decision_data.get("terminology_issues", []),
143
  "misconceptions": decision_data.get("misconceptions", []),
144
- "confidence": decision_data.get("confidence", 0.5)
145
- }
146
  )
147
 
148
 
149
  class PedagogicalJudge(BaseAgentWrapper):
150
  """Judge for educational effectiveness and pedagogical principles"""
151
-
152
  def __init__(self, openai_client: AsyncOpenAI):
153
  config_manager = get_config_manager()
154
  base_config = config_manager.get_agent_config("pedagogical_judge")
155
-
156
  if not base_config:
157
  base_config = AgentConfig(
158
  name="pedagogical_judge",
159
  instructions="""You are an educational assessment specialist.
160
  Evaluate flashcards for pedagogical effectiveness, learning objectives,
161
  cognitive levels, and educational best practices.""",
162
- model="gpt-4o",
163
- temperature=0.4
164
  )
165
-
166
  super().__init__(base_config, openai_client)
167
-
168
  async def judge_card(self, card: Card) -> JudgeDecision:
169
  """Judge a single card for pedagogical effectiveness"""
170
- start_time = datetime.now()
171
-
172
  try:
173
  user_input = self._build_judgment_prompt(card)
174
  response = await self.execute(user_input)
175
-
176
- decision_data = json.loads(response) if isinstance(response, str) else response
177
- decision = self._parse_decision(decision_data)
178
-
179
- record_agent_execution(
180
- agent_name=self.config.name,
181
- start_time=start_time,
182
- end_time=datetime.now(),
183
- success=True,
184
- metadata={
185
- "cards_judged": 1,
186
- "approved": 1 if decision.approved else 0,
187
- "score": decision.score
188
- }
189
  )
190
-
191
  return decision
192
-
193
  except Exception as e:
194
- record_agent_execution(
195
- agent_name=self.config.name,
196
- start_time=start_time,
197
- end_time=datetime.now(),
198
- success=False,
199
- error_message=str(e)
200
- )
201
-
202
  logger.error(f"PedagogicalJudge failed: {e}")
203
  return JudgeDecision(
204
  approved=True,
205
  score=0.5,
206
  feedback=f"Judgment failed: {str(e)}",
207
- judge_name=self.config.name
208
  )
209
-
210
  def _build_judgment_prompt(self, card: Card) -> str:
211
  """Build the judgment prompt for pedagogical effectiveness"""
212
  return f"""Evaluate this flashcard for pedagogical effectiveness:
@@ -237,7 +219,7 @@ Return your assessment as JSON:
237
  "improvement_suggestions": ["suggestion1", "suggestion2"],
238
  "detailed_feedback": "Comprehensive pedagogical assessment"
239
  }}"""
240
-
241
  def _parse_decision(self, decision_data: Dict[str, Any]) -> JudgeDecision:
242
  """Parse the judge response into a JudgeDecision"""
243
  return JudgeDecision(
@@ -251,72 +233,70 @@ Return your assessment as JSON:
251
  "cognitive_load": decision_data.get("cognitive_load", "medium"),
252
  "learning_objectives": decision_data.get("learning_objectives", []),
253
  "engagement_factors": decision_data.get("engagement_factors", []),
254
- "pedagogical_issues": decision_data.get("pedagogical_issues", [])
255
- }
256
  )
257
 
258
 
259
  class ClarityJudge(BaseAgentWrapper):
260
  """Judge for clarity, readability, and communication effectiveness"""
261
-
262
  def __init__(self, openai_client: AsyncOpenAI):
263
  config_manager = get_config_manager()
264
  base_config = config_manager.get_agent_config("clarity_judge")
265
-
266
  if not base_config:
267
  base_config = AgentConfig(
268
  name="clarity_judge",
269
  instructions="""You are a communication and clarity specialist.
270
  Ensure flashcards are clear, unambiguous, well-written, and accessible
271
  to the target audience.""",
272
- model="gpt-4o-mini",
273
- temperature=0.3
274
  )
275
-
276
  super().__init__(base_config, openai_client)
277
-
278
  async def judge_card(self, card: Card) -> JudgeDecision:
279
  """Judge a single card for clarity and communication"""
280
- start_time = datetime.now()
281
-
282
  try:
283
  user_input = self._build_judgment_prompt(card)
284
  response = await self.execute(user_input)
285
-
286
- decision_data = json.loads(response) if isinstance(response, str) else response
287
- decision = self._parse_decision(decision_data)
288
-
289
- record_agent_execution(
290
- agent_name=self.config.name,
291
- start_time=start_time,
292
- end_time=datetime.now(),
293
- success=True,
294
- metadata={
295
- "cards_judged": 1,
296
- "approved": 1 if decision.approved else 0,
297
- "score": decision.score
298
- }
299
  )
300
-
301
  return decision
302
-
303
  except Exception as e:
304
- record_agent_execution(
305
- agent_name=self.config.name,
306
- start_time=start_time,
307
- end_time=datetime.now(),
308
- success=False,
309
- error_message=str(e)
310
- )
311
-
312
  logger.error(f"ClarityJudge failed: {e}")
313
  return JudgeDecision(
314
  approved=True,
315
  score=0.5,
316
  feedback=f"Judgment failed: {str(e)}",
317
- judge_name=self.config.name
318
  )
319
-
320
  def _build_judgment_prompt(self, card: Card) -> str:
321
  """Build the judgment prompt for clarity assessment"""
322
  return f"""Evaluate this flashcard for clarity and communication effectiveness:
@@ -346,7 +326,7 @@ Return your assessment as JSON:
346
  "improvement_suggestions": ["suggestion1", "suggestion2"],
347
  "detailed_feedback": "Comprehensive clarity assessment"
348
  }}"""
349
-
350
  def _parse_decision(self, decision_data: Dict[str, Any]) -> JudgeDecision:
351
  """Parse the judge response into a JudgeDecision"""
352
  return JudgeDecision(
@@ -360,33 +340,33 @@ Return your assessment as JSON:
360
  "answer_completeness": decision_data.get("answer_completeness", 0.5),
361
  "readability_level": decision_data.get("readability_level", "unknown"),
362
  "ambiguities": decision_data.get("ambiguities", []),
363
- "clarity_issues": decision_data.get("clarity_issues", [])
364
- }
365
  )
366
 
367
 
368
  class TechnicalJudge(BaseAgentWrapper):
369
  """Judge for technical accuracy in programming and technical content"""
370
-
371
  def __init__(self, openai_client: AsyncOpenAI):
372
  config_manager = get_config_manager()
373
  base_config = config_manager.get_agent_config("technical_judge")
374
-
375
  if not base_config:
376
  base_config = AgentConfig(
377
  name="technical_judge",
378
  instructions="""You are a technical accuracy specialist for programming and technical content.
379
  Verify code syntax, best practices, security considerations, and technical correctness.""",
380
- model="gpt-4o",
381
- temperature=0.2
382
  )
383
-
384
  super().__init__(base_config, openai_client)
385
-
386
  async def judge_card(self, card: Card) -> JudgeDecision:
387
  """Judge a single card for technical accuracy"""
388
- start_time = datetime.now()
389
-
390
  try:
391
  # Only judge technical content
392
  if not self._is_technical_content(card):
@@ -394,60 +374,60 @@ Verify code syntax, best practices, security considerations, and technical corre
394
  approved=True,
395
  score=1.0,
396
  feedback="Non-technical content - no technical review needed",
397
- judge_name=self.config.name
398
  )
399
-
400
  user_input = self._build_judgment_prompt(card)
401
  response = await self.execute(user_input)
402
-
403
- decision_data = json.loads(response) if isinstance(response, str) else response
404
- decision = self._parse_decision(decision_data)
405
-
406
- record_agent_execution(
407
- agent_name=self.config.name,
408
- start_time=start_time,
409
- end_time=datetime.now(),
410
- success=True,
411
- metadata={
412
- "cards_judged": 1,
413
- "approved": 1 if decision.approved else 0,
414
- "score": decision.score,
415
- "is_technical": True
416
- }
417
  )
418
-
 
419
  return decision
420
-
421
  except Exception as e:
422
- record_agent_execution(
423
- agent_name=self.config.name,
424
- start_time=start_time,
425
- end_time=datetime.now(),
426
- success=False,
427
- error_message=str(e)
428
- )
429
-
430
  logger.error(f"TechnicalJudge failed: {e}")
431
  return JudgeDecision(
432
  approved=True,
433
  score=0.5,
434
  feedback=f"Technical judgment failed: {str(e)}",
435
- judge_name=self.config.name
436
  )
437
-
438
  def _is_technical_content(self, card: Card) -> bool:
439
  """Determine if card contains technical content requiring technical review"""
440
  technical_keywords = [
441
- "code", "programming", "algorithm", "function", "class", "method",
442
- "syntax", "API", "database", "SQL", "python", "javascript", "java",
443
- "framework", "library", "development", "software", "technical"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
444
  ]
445
-
446
- content = f"{card.front.question} {card.back.answer} {card.back.explanation}".lower()
447
  subject = card.metadata.get("subject", "").lower()
448
-
449
- return any(keyword in content or keyword in subject for keyword in technical_keywords)
450
-
451
  def _build_judgment_prompt(self, card: Card) -> str:
452
  """Build the judgment prompt for technical accuracy"""
453
  return f"""Evaluate this technical flashcard for accuracy and best practices:
@@ -478,7 +458,7 @@ Return your assessment as JSON:
478
  "improvement_suggestions": ["suggestion1", "suggestion2"],
479
  "detailed_feedback": "Comprehensive technical assessment"
480
  }}"""
481
-
482
  def _parse_decision(self, decision_data: Dict[str, Any]) -> JudgeDecision:
483
  """Parse the judge response into a JudgeDecision"""
484
  return JudgeDecision(
@@ -489,75 +469,59 @@ Return your assessment as JSON:
489
  judge_name=self.config.name,
490
  metadata={
491
  "syntax_errors": decision_data.get("syntax_errors", []),
492
- "best_practice_violations": decision_data.get("best_practice_violations", []),
 
 
493
  "security_issues": decision_data.get("security_issues", []),
494
  "performance_concerns": decision_data.get("performance_concerns", []),
495
- "tool_inaccuracies": decision_data.get("tool_inaccuracies", [])
496
- }
497
  )
498
 
499
 
500
  class CompletenessJudge(BaseAgentWrapper):
501
  """Judge for completeness and quality standards"""
502
-
503
  def __init__(self, openai_client: AsyncOpenAI):
504
  config_manager = get_config_manager()
505
  base_config = config_manager.get_agent_config("completeness_judge")
506
-
507
  if not base_config:
508
  base_config = AgentConfig(
509
  name="completeness_judge",
510
  instructions="""You are a completeness and quality assurance specialist.
511
  Ensure flashcards meet all requirements, have complete information,
512
  and maintain consistent quality standards.""",
513
- model="gpt-4o-mini",
514
- temperature=0.3
515
  )
516
-
517
  super().__init__(base_config, openai_client)
518
-
519
  async def judge_card(self, card: Card) -> JudgeDecision:
520
  """Judge a single card for completeness"""
521
- start_time = datetime.now()
522
-
523
  try:
524
  user_input = self._build_judgment_prompt(card)
525
  response = await self.execute(user_input)
526
-
527
- decision_data = json.loads(response) if isinstance(response, str) else response
528
- decision = self._parse_decision(decision_data)
529
-
530
- record_agent_execution(
531
- agent_name=self.config.name,
532
- start_time=start_time,
533
- end_time=datetime.now(),
534
- success=True,
535
- metadata={
536
- "cards_judged": 1,
537
- "approved": 1 if decision.approved else 0,
538
- "score": decision.score
539
- }
540
  )
541
-
 
542
  return decision
543
-
544
  except Exception as e:
545
- record_agent_execution(
546
- agent_name=self.config.name,
547
- start_time=start_time,
548
- end_time=datetime.now(),
549
- success=False,
550
- error_message=str(e)
551
- )
552
-
553
  logger.error(f"CompletenessJudge failed: {e}")
554
  return JudgeDecision(
555
  approved=True,
556
  score=0.5,
557
  feedback=f"Completeness judgment failed: {str(e)}",
558
- judge_name=self.config.name
559
  )
560
-
561
  def _build_judgment_prompt(self, card: Card) -> str:
562
  """Build the judgment prompt for completeness assessment"""
563
  return f"""Evaluate this flashcard for completeness and quality standards:
@@ -588,7 +552,7 @@ Return your assessment as JSON:
588
  "improvement_suggestions": ["suggestion1", "suggestion2"],
589
  "detailed_feedback": "Comprehensive completeness assessment"
590
  }}"""
591
-
592
  def _parse_decision(self, decision_data: Dict[str, Any]) -> JudgeDecision:
593
  """Parse the judge response into a JudgeDecision"""
594
  return JudgeDecision(
@@ -601,54 +565,54 @@ Return your assessment as JSON:
601
  "missing_fields": decision_data.get("missing_fields", []),
602
  "incomplete_sections": decision_data.get("incomplete_sections", []),
603
  "metadata_issues": decision_data.get("metadata_issues", []),
604
- "quality_concerns": decision_data.get("quality_concerns", [])
605
- }
606
  )
607
 
608
 
609
  class JudgeCoordinator(BaseAgentWrapper):
610
  """Coordinates multiple judges and synthesizes their decisions"""
611
-
612
  def __init__(self, openai_client: AsyncOpenAI):
613
  config_manager = get_config_manager()
614
  base_config = config_manager.get_agent_config("judge_coordinator")
615
-
616
  if not base_config:
617
  base_config = AgentConfig(
618
  name="judge_coordinator",
619
  instructions="""You are the quality assurance coordinator.
620
  Orchestrate the judging process and synthesize feedback from specialist judges.
621
  Balance speed with thoroughness in quality assessment.""",
622
- model="gpt-4o-mini",
623
- temperature=0.3
624
  )
625
-
626
  super().__init__(base_config, openai_client)
627
-
628
  # Initialize specialist judges
629
  self.content_accuracy = ContentAccuracyJudge(openai_client)
630
  self.pedagogical = PedagogicalJudge(openai_client)
631
  self.clarity = ClarityJudge(openai_client)
632
  self.technical = TechnicalJudge(openai_client)
633
  self.completeness = CompletenessJudge(openai_client)
634
-
635
  async def coordinate_judgment(
636
  self,
637
  cards: List[Card],
638
  enable_parallel: bool = True,
639
- min_consensus: float = 0.6
640
  ) -> List[Tuple[Card, List[JudgeDecision], bool]]:
641
  """Coordinate judgment of multiple cards"""
642
- start_time = datetime.now()
643
-
644
  try:
645
  results = []
646
-
647
  if enable_parallel:
648
  # Process all cards in parallel
649
  tasks = [self._judge_single_card(card, min_consensus) for card in cards]
650
  card_results = await asyncio.gather(*tasks, return_exceptions=True)
651
-
652
  for card, result in zip(cards, card_results):
653
  if isinstance(result, Exception):
654
  logger.error(f"Parallel judgment failed for card: {result}")
@@ -664,62 +628,41 @@ Balance speed with thoroughness in quality assessment.""",
664
  except Exception as e:
665
  logger.error(f"Sequential judgment failed for card: {e}")
666
  results.append((card, [], False))
667
-
668
- # Calculate summary statistics
669
- total_cards = len(cards)
670
- approved_cards = len([result for _, _, approved in results if approved])
671
-
672
- record_agent_execution(
673
- agent_name=self.config.name,
674
- start_time=start_time,
675
- end_time=datetime.now(),
676
- success=True,
677
- metadata={
678
- "cards_judged": total_cards,
679
- "cards_approved": approved_cards,
680
- "approval_rate": approved_cards / total_cards if total_cards > 0 else 0,
681
- "parallel_processing": enable_parallel
682
- }
683
  )
684
-
685
- logger.info(f"Judge coordination complete: {approved_cards}/{total_cards} cards approved")
686
  return results
687
-
688
  except Exception as e:
689
- record_agent_execution(
690
- agent_name=self.config.name,
691
- start_time=start_time,
692
- end_time=datetime.now(),
693
- success=False,
694
- error_message=str(e)
695
- )
696
-
697
  logger.error(f"Judge coordination failed: {e}")
698
  raise
699
-
700
  async def _judge_single_card(
701
- self,
702
- card: Card,
703
- min_consensus: float
704
  ) -> Tuple[Card, List[JudgeDecision], bool]:
705
  """Judge a single card with all relevant judges"""
706
-
707
  # Determine which judges to use based on card content
708
  judges = [
709
  self.content_accuracy,
710
  self.pedagogical,
711
  self.clarity,
712
- self.completeness
713
  ]
714
-
715
  # Add technical judge only for technical content
716
  if self.technical._is_technical_content(card):
717
  judges.append(self.technical)
718
-
719
  # Execute all judges in parallel
720
  judge_tasks = [judge.judge_card(card) for judge in judges]
721
  decisions = await asyncio.gather(*judge_tasks, return_exceptions=True)
722
-
723
  # Filter out failed decisions
724
  valid_decisions = []
725
  for decision in decisions:
@@ -727,15 +670,35 @@ Balance speed with thoroughness in quality assessment.""",
727
  valid_decisions.append(decision)
728
  else:
729
  logger.warning(f"Judge decision failed: {decision}")
730
-
731
  # Calculate consensus
732
  if not valid_decisions:
733
  return (card, [], False)
734
-
735
  approval_votes = len([d for d in valid_decisions if d.approved])
736
  consensus_score = approval_votes / len(valid_decisions)
737
-
738
  # Determine final approval based on consensus
739
  final_approval = consensus_score >= min_consensus
740
-
741
- return (card, valid_decisions, final_approval)
2
 
3
  import json
4
  import asyncio
5
+ from typing import List, Dict, Any, Tuple, Optional
6
  from datetime import datetime
7
+ from dataclasses import dataclass
8
 
9
  from openai import AsyncOpenAI
10
 
 
12
  from ankigen_core.models import Card
13
  from .base import BaseAgentWrapper, AgentConfig
14
  from .config import get_config_manager
15
+ from .schemas import JudgeDecisionSchema
16
 
17
 
18
+ @dataclass
19
  class JudgeDecision:
20
+ """Decision from a judge agent"""
21
+
22
+ approved: bool
23
+ score: float
24
+ feedback: str
25
+ judge_name: str
26
+ improvements: Optional[List[str]] = None
27
+ metadata: Optional[Dict[str, Any]] = None
28
+
29
+ def __post_init__(self):
30
+ if self.metadata is None:
31
+ self.metadata = {}
32
+ if self.improvements is None:
33
+ self.improvements = []
 
 
35
 
36
  class ContentAccuracyJudge(BaseAgentWrapper):
37
  """Judge for factual accuracy and content correctness"""
38
+
39
  def __init__(self, openai_client: AsyncOpenAI):
40
  config_manager = get_config_manager()
41
  base_config = config_manager.get_agent_config("content_accuracy_judge")
42
+
43
  if not base_config:
44
+ raise ValueError(
45
+ "content_accuracy_judge configuration not found - agent system not properly initialized"
 
 
 
 
 
46
  )
47
+
48
+ # Enable structured output for judge decisions
49
+ base_config.response_format = JudgeDecisionSchema
50
+
51
  super().__init__(base_config, openai_client)
52
+
53
+ async def judge_card(
54
+ self, card: Card, context: Optional[Dict[str, Any]] = None
55
+ ) -> JudgeDecision:
56
+ """Judge a card for content accuracy"""
57
  try:
58
+ user_input = f"""Evaluate this flashcard for factual accuracy:
59
+
60
+ Front: {card.front.content}
61
+ Back: {card.back.content}
62
+
63
+ Assess:
64
+ 1. Factual correctness
65
+ 2. Completeness of information
66
+ 3. Clarity and precision
67
+ 4. Potential misconceptions
68
+
69
+ Provide a score (0-1) and detailed feedback."""
70
+
71
+ response, usage = await self.execute(user_input)
72
+
73
+ # Log usage information
74
+ if usage and usage.get("total_tokens", 0) > 0:
75
+ logger.info(
76
+ f"💰 Token Usage: {usage['total_tokens']} tokens (Input: {usage['input_tokens']}, Output: {usage['output_tokens']})"
77
+ )
78
+
79
+ return self._parse_judge_response(response, "ContentAccuracyJudge")
80
+
81
  except Exception as e:
82
+ logger.error(f"Content accuracy judgment failed: {e}")
83
+ raise
84
+
85
+ def _parse_judge_response(
86
+ self, response: Dict[str, Any], judge_name: str
87
+ ) -> JudgeDecision:
88
+ """Parse the judge response into a JudgeDecision"""
89
+ decision_data = json.loads(response) if isinstance(response, str) else response
90
+ decision = self._parse_decision(decision_data)
91
+
92
+ # Enhanced logging for judge decisions
93
+ logger.info(f"🎯 {judge_name.upper()} DECISION:")
94
+ logger.info(" Card: [Card content]")
95
+ logger.info(f" Approved: {decision.approved}")
96
+ logger.info(f" 📊 Score: {decision.score:.2f}")
97
+ logger.info(f" 💭 Feedback: {decision.feedback}")
98
+
99
+ if decision.metadata.get("factual_errors"):
100
+ logger.info(f" ❌ Factual Errors: {decision.metadata['factual_errors']}")
101
+ if decision.metadata.get("terminology_issues"):
102
+ logger.info(
103
+ f" ⚠️ Terminology Issues: {decision.metadata['terminology_issues']}"
104
  )
105
+ if decision.improvements:
106
+ logger.info(f" 🔧 Suggested Improvements: {decision.improvements}")
 
 
107
 
108
+ logger.info(
109
+ f" 🎯 Judge Confidence: {decision.metadata.get('confidence', 'N/A')}"
110
+ )
111
 
112
+ return decision
113
 
114
  def _parse_decision(self, decision_data: Dict[str, Any]) -> JudgeDecision:
115
  """Parse the judge response into a JudgeDecision"""
116
  return JudgeDecision(
 
123
  "factual_errors": decision_data.get("factual_errors", []),
124
  "terminology_issues": decision_data.get("terminology_issues", []),
125
  "misconceptions": decision_data.get("misconceptions", []),
126
+ "confidence": decision_data.get("confidence", 0.5),
127
+ },
128
  )
129
 
130
 
131
  class PedagogicalJudge(BaseAgentWrapper):
132
  """Judge for educational effectiveness and pedagogical principles"""
133
+
134
  def __init__(self, openai_client: AsyncOpenAI):
135
  config_manager = get_config_manager()
136
  base_config = config_manager.get_agent_config("pedagogical_judge")
137
+
138
  if not base_config:
139
  base_config = AgentConfig(
140
  name="pedagogical_judge",
141
  instructions="""You are an educational assessment specialist.
142
  Evaluate flashcards for pedagogical effectiveness, learning objectives,
143
  cognitive levels, and educational best practices.""",
144
+ model="gpt-4.1",
145
+ temperature=0.4,
146
  )
147
+
148
  super().__init__(base_config, openai_client)
149
+
150
  async def judge_card(self, card: Card) -> JudgeDecision:
151
  """Judge a single card for pedagogical effectiveness"""
152
+ datetime.now()  # no-op: leftover call from the removed start_time/metrics timing; safe to delete
153
+
154
  try:
155
  user_input = self._build_judgment_prompt(card)
156
  response = await self.execute(user_input)
157
+
158
+ decision_data = (
159
+ json.loads(response) if isinstance(response, str) else response
 
 
 
 
 
 
 
 
 
 
 
160
  )
161
+ decision = self._parse_decision(decision_data)
162
+
163
+ # Enhanced logging for pedagogical judge decisions
164
+ logger.info(f"🎓 {self.config.name.upper()} DECISION:")
165
+ logger.info(f" Card: {card.front.question[:80]}...")
166
+ logger.info(f" ✅ Approved: {decision.approved}")
167
+ logger.info(f" 📊 Score: {decision.score:.2f}")
168
+ logger.info(f" 💭 Feedback: {decision.feedback}")
169
+
170
+ if decision.metadata and decision.metadata.get("cognitive_level"):
171
+ logger.info(
172
+ f" 🧠 Cognitive Level: {decision.metadata['cognitive_level']}"
173
+ )
174
+ if decision.metadata and decision.metadata.get("pedagogical_issues"):
175
+ logger.info(
176
+ f" ⚠️ Pedagogical Issues: {decision.metadata['pedagogical_issues']}"
177
+ )
178
+ if decision.improvements:
179
+ logger.info(f" 🔧 Suggested Improvements: {decision.improvements}")
180
+
181
  return decision
182
+
183
  except Exception as e:
184
  logger.error(f"PedagogicalJudge failed: {e}")
185
  return JudgeDecision(
186
  approved=True,
187
  score=0.5,
188
  feedback=f"Judgment failed: {str(e)}",
189
+ judge_name=self.config.name,
190
  )
191
+
192
  def _build_judgment_prompt(self, card: Card) -> str:
193
  """Build the judgment prompt for pedagogical effectiveness"""
194
  return f"""Evaluate this flashcard for pedagogical effectiveness:
 
219
  "improvement_suggestions": ["suggestion1", "suggestion2"],
220
  "detailed_feedback": "Comprehensive pedagogical assessment"
221
  }}"""
222
+
223
  def _parse_decision(self, decision_data: Dict[str, Any]) -> JudgeDecision:
224
  """Parse the judge response into a JudgeDecision"""
225
  return JudgeDecision(
 
233
  "cognitive_load": decision_data.get("cognitive_load", "medium"),
234
  "learning_objectives": decision_data.get("learning_objectives", []),
235
  "engagement_factors": decision_data.get("engagement_factors", []),
236
+ "pedagogical_issues": decision_data.get("pedagogical_issues", []),
237
+ },
238
  )
239
 
240
 
241
  class ClarityJudge(BaseAgentWrapper):
242
  """Judge for clarity, readability, and communication effectiveness"""
243
+
244
  def __init__(self, openai_client: AsyncOpenAI):
245
  config_manager = get_config_manager()
246
  base_config = config_manager.get_agent_config("clarity_judge")
247
+
248
  if not base_config:
249
  base_config = AgentConfig(
250
  name="clarity_judge",
251
  instructions="""You are a communication and clarity specialist.
252
  Ensure flashcards are clear, unambiguous, well-written, and accessible
253
  to the target audience.""",
254
+ model="gpt-4.1-mini",
255
+ temperature=0.3,
256
  )
257
+
258
  super().__init__(base_config, openai_client)
259
+
260
  async def judge_card(self, card: Card) -> JudgeDecision:
261
  """Judge a single card for clarity and communication"""
262
+ datetime.now()  # no-op: leftover call from the removed start_time/metrics timing; safe to delete
263
+
264
  try:
265
  user_input = self._build_judgment_prompt(card)
266
  response = await self.execute(user_input)
267
+
268
+ decision_data = (
269
+ json.loads(response) if isinstance(response, str) else response
270
  )
271
+ decision = self._parse_decision(decision_data)
272
+
273
+ # Enhanced logging for clarity judge decisions
274
+ logger.info(f"✨ {self.config.name.upper()} DECISION:")
275
+ logger.info(f" Card: {card.front.question[:80]}...")
276
+ logger.info(f" ✅ Approved: {decision.approved}")
277
+ logger.info(f" 📊 Score: {decision.score:.2f}")
278
+ logger.info(f" 💭 Feedback: {decision.feedback}")
279
+
280
+ if decision.metadata and decision.metadata.get("readability_level"):
281
+ logger.info(
282
+ f" 📚 Readability: {decision.metadata['readability_level']}"
283
+ )
284
+ if decision.metadata and decision.metadata.get("ambiguities"):
285
+ logger.info(f" ❓ Ambiguities: {decision.metadata['ambiguities']}")
286
+ if decision.improvements:
287
+ logger.info(f" 🔧 Suggested Improvements: {decision.improvements}")
288
+
289
  return decision
290
+
291
  except Exception as e:
 
 
 
 
 
 
 
 
292
  logger.error(f"ClarityJudge failed: {e}")
293
  return JudgeDecision(
294
  approved=True,
295
  score=0.5,
296
  feedback=f"Judgment failed: {str(e)}",
297
+ judge_name=self.config.name,
298
  )
299
+
300
  def _build_judgment_prompt(self, card: Card) -> str:
301
  """Build the judgment prompt for clarity assessment"""
302
  return f"""Evaluate this flashcard for clarity and communication effectiveness:
 
326
  "improvement_suggestions": ["suggestion1", "suggestion2"],
327
  "detailed_feedback": "Comprehensive clarity assessment"
328
  }}"""
329
+
330
  def _parse_decision(self, decision_data: Dict[str, Any]) -> JudgeDecision:
331
  """Parse the judge response into a JudgeDecision"""
332
  return JudgeDecision(
 
340
  "answer_completeness": decision_data.get("answer_completeness", 0.5),
341
  "readability_level": decision_data.get("readability_level", "unknown"),
342
  "ambiguities": decision_data.get("ambiguities", []),
343
+ "clarity_issues": decision_data.get("clarity_issues", []),
344
+ },
345
  )
346
 
347
 
348
  class TechnicalJudge(BaseAgentWrapper):
349
  """Judge for technical accuracy in programming and technical content"""
350
+
351
  def __init__(self, openai_client: AsyncOpenAI):
352
  config_manager = get_config_manager()
353
  base_config = config_manager.get_agent_config("technical_judge")
354
+
355
  if not base_config:
356
  base_config = AgentConfig(
357
  name="technical_judge",
358
  instructions="""You are a technical accuracy specialist for programming and technical content.
359
  Verify code syntax, best practices, security considerations, and technical correctness.""",
360
+ model="gpt-4.1",
361
+ temperature=0.2,
362
  )
363
+
364
  super().__init__(base_config, openai_client)
365
+
366
  async def judge_card(self, card: Card) -> JudgeDecision:
367
  """Judge a single card for technical accuracy"""
368
+ datetime.now()  # no-op: leftover call from the removed start_time/metrics timing; safe to delete
369
+
370
  try:
371
  # Only judge technical content
372
  if not self._is_technical_content(card):
 
374
  approved=True,
375
  score=1.0,
376
  feedback="Non-technical content - no technical review needed",
377
+ judge_name=self.config.name,
378
  )
379
+
380
  user_input = self._build_judgment_prompt(card)
381
  response = await self.execute(user_input)
382
+
383
+ decision_data = (
384
+ json.loads(response) if isinstance(response, str) else response
385
  )
386
+ decision = self._parse_decision(decision_data)
387
+
388
  return decision
389
+
390
  except Exception as e:
391
  logger.error(f"TechnicalJudge failed: {e}")
392
  return JudgeDecision(
393
  approved=True,
394
  score=0.5,
395
  feedback=f"Technical judgment failed: {str(e)}",
396
+ judge_name=self.config.name,
397
  )
398
+
399
  def _is_technical_content(self, card: Card) -> bool:
400
  """Determine if card contains technical content requiring technical review"""
401
  technical_keywords = [
402
+ "code",
403
+ "programming",
404
+ "algorithm",
405
+ "function",
406
+ "class",
407
+ "method",
408
+ "syntax",
409
+ "API",
410
+ "database",
411
+ "SQL",
412
+ "python",
413
+ "javascript",
414
+ "java",
415
+ "framework",
416
+ "library",
417
+ "development",
418
+ "software",
419
+ "technical",
420
  ]
421
+
422
+ content = (
423
+ f"{card.front.question} {card.back.answer} {card.back.explanation}".lower()
424
+ )
425
  subject = card.metadata.get("subject", "").lower()
426
+
427
+ return any(
428
+ keyword in content or keyword in subject for keyword in technical_keywords
429
+ )
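# Sketch of the gate above with hypothetical cards: a question mentioning "SQL"
# or a metadata subject of "software engineering" routes the card to this judge;
# a literature card skips it and passes through with score 1.0 (see judge_card).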
430
+
431
  def _build_judgment_prompt(self, card: Card) -> str:
432
  """Build the judgment prompt for technical accuracy"""
433
  return f"""Evaluate this technical flashcard for accuracy and best practices:
 
458
  "improvement_suggestions": ["suggestion1", "suggestion2"],
459
  "detailed_feedback": "Comprehensive technical assessment"
460
  }}"""
461
+
462
  def _parse_decision(self, decision_data: Dict[str, Any]) -> JudgeDecision:
463
  """Parse the judge response into a JudgeDecision"""
464
  return JudgeDecision(
 
469
  judge_name=self.config.name,
470
  metadata={
471
  "syntax_errors": decision_data.get("syntax_errors", []),
472
+ "best_practice_violations": decision_data.get(
473
+ "best_practice_violations", []
474
+ ),
475
  "security_issues": decision_data.get("security_issues", []),
476
  "performance_concerns": decision_data.get("performance_concerns", []),
477
+ "tool_inaccuracies": decision_data.get("tool_inaccuracies", []),
478
+ },
479
  )
480
 
481
 
482
  class CompletenessJudge(BaseAgentWrapper):
483
  """Judge for completeness and quality standards"""
484
+
485
  def __init__(self, openai_client: AsyncOpenAI):
486
  config_manager = get_config_manager()
487
  base_config = config_manager.get_agent_config("completeness_judge")
488
+
489
  if not base_config:
490
  base_config = AgentConfig(
491
  name="completeness_judge",
492
  instructions="""You are a completeness and quality assurance specialist.
493
  Ensure flashcards meet all requirements, have complete information,
494
  and maintain consistent quality standards.""",
495
+ model="gpt-4.1-mini",
496
+ temperature=0.3,
497
  )
498
+
499
  super().__init__(base_config, openai_client)
500
+
501
  async def judge_card(self, card: Card) -> JudgeDecision:
502
  """Judge a single card for completeness"""
503
+ datetime.now()  # no-op: leftover call from the removed start_time/metrics timing; safe to delete
504
+
505
  try:
506
  user_input = self._build_judgment_prompt(card)
507
  response = await self.execute(user_input)
508
+
509
+ decision_data = (
510
+ json.loads(response) if isinstance(response, str) else response
511
  )
512
+ decision = self._parse_decision(decision_data)
513
+
514
  return decision
515
+
516
  except Exception as e:
517
  logger.error(f"CompletenessJudge failed: {e}")
518
  return JudgeDecision(
519
  approved=True,
520
  score=0.5,
521
  feedback=f"Completeness judgment failed: {str(e)}",
522
+ judge_name=self.config.name,
523
  )
524
+
525
  def _build_judgment_prompt(self, card: Card) -> str:
526
  """Build the judgment prompt for completeness assessment"""
527
  return f"""Evaluate this flashcard for completeness and quality standards:
 
552
  "improvement_suggestions": ["suggestion1", "suggestion2"],
553
  "detailed_feedback": "Comprehensive completeness assessment"
554
  }}"""
555
+
556
  def _parse_decision(self, decision_data: Dict[str, Any]) -> JudgeDecision:
557
  """Parse the judge response into a JudgeDecision"""
558
  return JudgeDecision(
 
565
  "missing_fields": decision_data.get("missing_fields", []),
566
  "incomplete_sections": decision_data.get("incomplete_sections", []),
567
  "metadata_issues": decision_data.get("metadata_issues", []),
568
+ "quality_concerns": decision_data.get("quality_concerns", []),
569
+ },
570
  )
571
 
572
 
573
  class JudgeCoordinator(BaseAgentWrapper):
574
  """Coordinates multiple judges and synthesizes their decisions"""
575
+
576
  def __init__(self, openai_client: AsyncOpenAI):
577
  config_manager = get_config_manager()
578
  base_config = config_manager.get_agent_config("judge_coordinator")
579
+
580
  if not base_config:
581
  base_config = AgentConfig(
582
  name="judge_coordinator",
583
  instructions="""You are the quality assurance coordinator.
584
  Orchestrate the judging process and synthesize feedback from specialist judges.
585
  Balance speed with thoroughness in quality assessment.""",
586
+ model="gpt-4.1-mini",
587
+ temperature=0.3,
588
  )
589
+
590
  super().__init__(base_config, openai_client)
591
+
592
  # Initialize specialist judges
593
  self.content_accuracy = ContentAccuracyJudge(openai_client)
594
  self.pedagogical = PedagogicalJudge(openai_client)
595
  self.clarity = ClarityJudge(openai_client)
596
  self.technical = TechnicalJudge(openai_client)
597
  self.completeness = CompletenessJudge(openai_client)
598
+
599
  async def coordinate_judgment(
600
  self,
601
  cards: List[Card],
602
  enable_parallel: bool = True,
603
+ min_consensus: float = 0.6,
604
  ) -> List[Tuple[Card, List[JudgeDecision], bool]]:
605
  """Coordinate judgment of multiple cards"""
606
+ datetime.now()  # no-op: leftover call from the removed start_time/metrics timing; safe to delete
607
+
608
  try:
609
  results = []
610
+
611
  if enable_parallel:
612
  # Process all cards in parallel
613
  tasks = [self._judge_single_card(card, min_consensus) for card in cards]
614
  card_results = await asyncio.gather(*tasks, return_exceptions=True)
615
+
616
  for card, result in zip(cards, card_results):
617
  if isinstance(result, Exception):
618
  logger.error(f"Parallel judgment failed for card: {result}")
 
628
  except Exception as e:
629
  logger.error(f"Sequential judgment failed for card: {e}")
630
  results.append((card, [], False))
631
+
632
+ # Calculate summary statistics
633
+ total_cards = len(cards)
634
+ approved_cards = len([result for _, _, approved in results if approved])
635
+
636
+ logger.info(
637
+ f"Judge coordination complete: {approved_cards}/{total_cards} cards approved"
 
 
 
 
 
 
 
 
 
638
  )
639
  return results
640
+
641
  except Exception as e:
642
  logger.error(f"Judge coordination failed: {e}")
643
  raise
644
+
645
  async def _judge_single_card(
646
+ self, card: Card, min_consensus: float
647
  ) -> Tuple[Card, List[JudgeDecision], bool]:
648
  """Judge a single card with all relevant judges"""
649
+
650
  # Determine which judges to use based on card content
651
  judges = [
652
  self.content_accuracy,
653
  self.pedagogical,
654
  self.clarity,
655
+ self.completeness,
656
  ]
657
+
658
  # Add technical judge only for technical content
659
  if self.technical._is_technical_content(card):
660
  judges.append(self.technical)
661
+
662
  # Execute all judges in parallel
663
  judge_tasks = [judge.judge_card(card) for judge in judges]
664
  decisions = await asyncio.gather(*judge_tasks, return_exceptions=True)
665
+
666
  # Filter out failed decisions
667
  valid_decisions = []
668
  for decision in decisions:
 
670
  valid_decisions.append(decision)
671
  else:
672
  logger.warning(f"Judge decision failed: {decision}")
673
+
674
  # Calculate consensus
675
  if not valid_decisions:
676
  return (card, [], False)
677
+
678
  approval_votes = len([d for d in valid_decisions if d.approved])
679
  consensus_score = approval_votes / len(valid_decisions)
680
+
681
  # Determine final approval based on consensus
682
  final_approval = consensus_score >= min_consensus
683
+
684
+ # Enhanced logging for judge coordination
685
+ logger.info("🏛️ JUDGE COORDINATION RESULT:")
686
+ logger.info(f" Card: {card.front.question[:80]}...")
687
+ logger.info(f" 👥 Judges Consulted: {len(valid_decisions)}")
688
+ logger.info(f" ✅ Approval Votes: {approval_votes}/{len(valid_decisions)}")
689
+ logger.info(
690
+ f" 📊 Consensus Score: {consensus_score:.2f} (min: {min_consensus:.2f})"
691
+ )
692
+ logger.info(
693
+ f" 🏆 Final Decision: {'APPROVED' if final_approval else 'REJECTED'}"
694
+ )
695
+
696
+ if not final_approval:
697
+ logger.info(" 📝 Rejection Reasons:")
698
+ for decision in valid_decisions:
699
+ if not decision.approved:
700
+ logger.info(
701
+ f" • {decision.judge_name}: {decision.feedback[:100]}..."
702
+ )
703
+
704
+ return (card, valid_decisions, final_approval)
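
The consensus rule above is a plain approval ratio; a worked example with illustrative numbers:

    # 5 judges consulted, 3 approve -> 3/5 = 0.60 >= min_consensus (0.6): APPROVED
    # 4 judges consulted, 2 approve -> 2/4 = 0.50 <  0.60:               REJECTED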
ankigen_core/agents/performance.py CHANGED
@@ -5,27 +5,26 @@ import time
5
  import hashlib
6
  from typing import Dict, Any, List, Optional, Callable, TypeVar, Generic
7
  from dataclasses import dataclass, field
8
- from datetime import datetime, timedelta
9
  from functools import wraps, lru_cache
10
- import pickle
11
  import json
12
 
13
  from ankigen_core.logging import logger
14
  from ankigen_core.models import Card
15
 
16
 
17
- T = TypeVar('T')
18
 
19
 
20
  @dataclass
21
  class CacheConfig:
22
  """Configuration for agent response caching"""
 
23
  enable_caching: bool = True
24
  cache_ttl: int = 3600 # seconds
25
  max_cache_size: int = 1000
26
  cache_backend: str = "memory" # "memory" or "file"
27
  cache_directory: Optional[str] = None
28
-
29
  def __post_init__(self):
30
  if self.cache_backend == "file" and not self.cache_directory:
31
  self.cache_directory = "cache/agents"
@@ -34,6 +33,7 @@ class CacheConfig:
34
  @dataclass
35
  class PerformanceConfig:
36
  """Configuration for performance optimizations"""
 
37
  enable_batch_processing: bool = True
38
  max_batch_size: int = 10
39
  batch_timeout: float = 2.0 # seconds
@@ -47,16 +47,17 @@ class PerformanceConfig:
47
  @dataclass
48
  class CacheEntry(Generic[T]):
49
  """Cache entry with metadata"""
 
50
  value: T
51
  created_at: float
52
  access_count: int = 0
53
  last_accessed: float = field(default_factory=time.time)
54
  cache_key: str = ""
55
-
56
  def is_expired(self, ttl: int) -> bool:
57
  """Check if cache entry is expired"""
58
  return time.time() - self.created_at > ttl
59
-
60
  def touch(self):
61
  """Update access metadata"""
62
  self.access_count += 1
@@ -65,60 +66,56 @@ class CacheEntry(Generic[T]):
65
 
66
  class MemoryCache(Generic[T]):
67
  """In-memory cache with LRU eviction"""
68
-
69
  def __init__(self, config: CacheConfig):
70
  self.config = config
71
  self._cache: Dict[str, CacheEntry[T]] = {}
72
  self._access_order: List[str] = []
73
  self._lock = asyncio.Lock()
74
-
75
  async def get(self, key: str) -> Optional[T]:
76
  """Get value from cache"""
77
  async with self._lock:
78
  entry = self._cache.get(key)
79
  if not entry:
80
  return None
81
-
82
  if entry.is_expired(self.config.cache_ttl):
83
  await self._remove(key)
84
  return None
85
-
86
  entry.touch()
87
  self._update_access_order(key)
88
-
89
  logger.debug(f"Cache hit for key: {key[:20]}...")
90
  return entry.value
91
-
92
  async def set(self, key: str, value: T) -> None:
93
  """Set value in cache"""
94
  async with self._lock:
95
  # Check if we need to evict entries
96
  if len(self._cache) >= self.config.max_cache_size:
97
  await self._evict_lru()
98
-
99
- entry = CacheEntry(
100
- value=value,
101
- created_at=time.time(),
102
- cache_key=key
103
- )
104
-
105
  self._cache[key] = entry
106
  self._update_access_order(key)
107
-
108
  logger.debug(f"Cache set for key: {key[:20]}...")
109
-
110
  async def remove(self, key: str) -> bool:
111
  """Remove entry from cache"""
112
  async with self._lock:
113
  return await self._remove(key)
114
-
115
  async def clear(self) -> None:
116
  """Clear all cache entries"""
117
  async with self._lock:
118
  self._cache.clear()
119
  self._access_order.clear()
120
  logger.info("Cache cleared")
121
-
122
  async def _remove(self, key: str) -> bool:
123
  """Internal remove method"""
124
  if key in self._cache:
@@ -127,25 +124,25 @@ class MemoryCache(Generic[T]):
127
  self._access_order.remove(key)
128
  return True
129
  return False
130
-
131
  async def _evict_lru(self) -> None:
132
  """Evict least recently used entries"""
133
  if not self._access_order:
134
  return
135
-
136
  # Remove oldest entries
137
- to_remove = self._access_order[:len(self._access_order) // 4] # Remove 25%
138
  for key in to_remove:
139
  await self._remove(key)
140
-
141
  logger.debug(f"Evicted {len(to_remove)} cache entries")
142
-
143
  def _update_access_order(self, key: str) -> None:
144
  """Update access order for LRU tracking"""
145
  if key in self._access_order:
146
  self._access_order.remove(key)
147
  self._access_order.append(key)
148
-
149
  def get_stats(self) -> Dict[str, Any]:
150
  """Get cache statistics"""
151
  total_accesses = sum(entry.access_count for entry in self._cache.values())
@@ -153,137 +150,134 @@ class MemoryCache(Generic[T]):
153
  "entries": len(self._cache),
154
  "max_size": self.config.max_cache_size,
155
  "total_accesses": total_accesses,
156
- "hit_rate": total_accesses / max(1, len(self._cache))
157
  }
158
 
159
 
160
  class BatchProcessor:
161
  """Batch processor for agent requests"""
162
-
163
  def __init__(self, config: PerformanceConfig):
164
  self.config = config
165
  self._batches: Dict[str, List[Dict[str, Any]]] = {}
166
  self._batch_timers: Dict[str, asyncio.Task] = {}
167
  self._lock = asyncio.Lock()
168
-
169
  async def add_request(
170
- self,
171
- batch_key: str,
172
- request_data: Dict[str, Any],
173
- processor_func: Callable
174
  ) -> Any:
175
  """Add request to batch for processing"""
176
-
177
  if not self.config.enable_batch_processing:
178
  # Process immediately if batching is disabled
179
  return await processor_func([request_data])
180
-
181
  async with self._lock:
182
  # Initialize batch if needed
183
  if batch_key not in self._batches:
184
  self._batches[batch_key] = []
185
  self._start_batch_timer(batch_key, processor_func)
186
-
187
  # Add request to batch
188
  self._batches[batch_key].append(request_data)
189
-
190
  # Process immediately if batch is full
191
  if len(self._batches[batch_key]) >= self.config.max_batch_size:
192
  return await self._process_batch(batch_key, processor_func)
193
-
194
  # Wait for timer or batch completion
195
- return await self._wait_for_batch_result(batch_key, request_data, processor_func)
196
-
197
  def _start_batch_timer(self, batch_key: str, processor_func: Callable) -> None:
198
  """Start timer for batch processing"""
 
199
  async def timer():
200
  await asyncio.sleep(self.config.batch_timeout)
201
  async with self._lock:
202
  if batch_key in self._batches and self._batches[batch_key]:
203
  await self._process_batch(batch_key, processor_func)
204
-
205
  self._batch_timers[batch_key] = asyncio.create_task(timer())
206
-
207
- async def _process_batch(self, batch_key: str, processor_func: Callable) -> List[Any]:
208
  """Process accumulated batch"""
209
  if batch_key not in self._batches:
210
  return []
211
-
212
  batch = self._batches.pop(batch_key)
213
-
214
  # Cancel timer
215
  if batch_key in self._batch_timers:
216
  self._batch_timers[batch_key].cancel()
217
  del self._batch_timers[batch_key]
218
-
219
  if not batch:
220
  return []
221
-
222
  logger.debug(f"Processing batch {batch_key} with {len(batch)} requests")
223
-
224
  try:
225
  # Process the batch
226
  results = await processor_func(batch)
227
  return results if isinstance(results, list) else [results]
228
-
229
  except Exception as e:
230
  logger.error(f"Batch processing failed for {batch_key}: {e}")
231
  raise
232
-
233
  async def _wait_for_batch_result(
234
- self,
235
- batch_key: str,
236
- request_data: Dict[str, Any],
237
- processor_func: Callable
238
  ) -> Any:
239
  """Wait for batch processing to complete"""
240
  # This is a simplified implementation
241
  # In a real implementation, you'd use events/conditions to coordinate
242
  # between requests in the same batch
243
-
244
  while batch_key in self._batches:
245
  await asyncio.sleep(0.1)
246
-
247
  # For now, process individually as fallback
248
  return await processor_func([request_data])
249
 
250
 
251
  class RequestDeduplicator:
252
  """Deduplicates identical agent requests"""
253
-
254
  def __init__(self):
255
  self._pending_requests: Dict[str, asyncio.Future] = {}
256
  self._lock = asyncio.Lock()
257
-
258
  @lru_cache(maxsize=1000)
259
  def _generate_request_hash(self, request_data: str) -> str:
260
  """Generate hash for request deduplication"""
261
  return hashlib.md5(request_data.encode()).hexdigest()
262
-
263
  async def deduplicate_request(
264
- self,
265
- request_data: Dict[str, Any],
266
- processor_func: Callable
267
  ) -> Any:
268
  """Deduplicate and process request"""
269
-
270
  # Generate hash for deduplication
271
  request_str = json.dumps(request_data, sort_keys=True)
272
  request_hash = self._generate_request_hash(request_str)
273
-
274
  async with self._lock:
275
  # Check if request is already pending
276
  if request_hash in self._pending_requests:
277
  logger.debug(f"Deduplicating request: {request_hash[:16]}...")
278
  return await self._pending_requests[request_hash]
279
-
280
  # Create future for this request
281
- future = asyncio.create_task(self._process_unique_request(
282
- request_hash, request_data, processor_func
283
- ))
284
-
285
  self._pending_requests[request_hash] = future
286
-
287
  try:
288
  result = await future
289
  return result
@@ -291,12 +285,9 @@ class RequestDeduplicator:
291
  # Clean up completed request
292
  async with self._lock:
293
  self._pending_requests.pop(request_hash, None)
294
-
295
  async def _process_unique_request(
296
- self,
297
- request_hash: str,
298
- request_data: Dict[str, Any],
299
- processor_func: Callable
300
  ) -> Any:
301
  """Process unique request"""
302
  logger.debug(f"Processing unique request: {request_hash[:16]}...")
@@ -305,36 +296,41 @@ class RequestDeduplicator:
305
 
306
  class PerformanceOptimizer:
307
  """Main performance optimization coordinator"""
308
-
309
  def __init__(self, config: PerformanceConfig):
310
  self.config = config
311
- self.cache = MemoryCache(config.cache_config) if config.enable_response_caching else None
312
- self.batch_processor = BatchProcessor(config) if config.enable_batch_processing else None
313
- self.deduplicator = RequestDeduplicator() if config.enable_request_deduplication else None
 
 
 
 
 
 
314
  self._semaphore = asyncio.Semaphore(config.max_concurrent_requests)
315
-
316
  async def optimize_agent_call(
317
  self,
318
  agent_name: str,
319
  request_data: Dict[str, Any],
320
  processor_func: Callable,
321
- cache_key_generator: Optional[Callable[[Dict[str, Any]], str]] = None
322
  ) -> Any:
323
  """Optimize agent call with caching, batching, and deduplication"""
324
-
325
  # Generate cache key
326
  cache_key = None
327
  if self.cache and cache_key_generator:
328
  cache_key = cache_key_generator(request_data)
329
-
330
  # Check cache first
331
  cached_result = await self.cache.get(cache_key)
332
  if cached_result is not None:
333
  return cached_result
334
-
335
  # Apply rate limiting
336
  async with self._semaphore:
337
-
338
  # Apply deduplication
339
  if self.deduplicator and self.config.enable_request_deduplication:
340
  result = await self.deduplicator.deduplicate_request(
@@ -342,18 +338,15 @@ class PerformanceOptimizer:
342
  )
343
  else:
344
  result = await processor_func(request_data)
345
-
346
  # Cache result
347
  if self.cache and cache_key and result is not None:
348
  await self.cache.set(cache_key, result)
349
-
350
  return result
351
-
352
  async def optimize_batch_processing(
353
- self,
354
- batch_key: str,
355
- request_data: Dict[str, Any],
356
- processor_func: Callable
357
  ) -> Any:
358
  """Optimize using batch processing"""
359
  if self.batch_processor:
@@ -362,7 +355,7 @@ class PerformanceOptimizer:
362
  )
363
  else:
364
  return await processor_func([request_data])
365
-
366
  def get_performance_stats(self) -> Dict[str, Any]:
367
  """Get performance optimization statistics"""
368
  stats = {
@@ -375,12 +368,12 @@ class PerformanceOptimizer:
375
  "concurrency": {
376
  "max_concurrent": self.config.max_concurrent_requests,
377
  "current_available": self._semaphore._value,
378
- }
379
  }
380
-
381
  if self.cache:
382
  stats["cache"] = self.cache.get_stats()
383
-
384
  return stats
385
 
386
 
@@ -388,7 +381,9 @@ class PerformanceOptimizer:
388
  _global_optimizer: Optional[PerformanceOptimizer] = None
389
 
390
 
391
- def get_performance_optimizer(config: Optional[PerformanceConfig] = None) -> PerformanceOptimizer:
 
 
392
  """Get global performance optimizer instance"""
393
  global _global_optimizer
394
  if _global_optimizer is None:
@@ -399,121 +394,82 @@ def get_performance_optimizer(config: Optional[PerformanceConfig] = None) -> Per
399
  # Decorators for performance optimization
400
  def cache_response(cache_key_func: Callable[[Any], str], ttl: int = 3600):
401
  """Decorator to cache function responses"""
 
402
  def decorator(func):
403
  @wraps(func)
404
  async def wrapper(*args, **kwargs):
405
  optimizer = get_performance_optimizer()
406
  if not optimizer.cache:
407
  return await func(*args, **kwargs)
408
-
409
  # Generate cache key
410
  cache_key = cache_key_func(*args, **kwargs)
411
-
412
  # Check cache
413
  cached_result = await optimizer.cache.get(cache_key)
414
  if cached_result is not None:
415
  return cached_result
416
-
417
  # Execute function
418
  result = await func(*args, **kwargs)
419
-
420
  # Cache result
421
  if result is not None:
422
  await optimizer.cache.set(cache_key, result)
423
-
424
  return result
425
-
426
  return wrapper
 
427
  return decorator
428
 
429
 
430
  def rate_limit(max_concurrent: int = 5):
431
  """Decorator to apply rate limiting"""
432
  semaphore = asyncio.Semaphore(max_concurrent)
433
-
434
  def decorator(func):
435
  @wraps(func)
436
  async def wrapper(*args, **kwargs):
437
  async with semaphore:
438
  return await func(*args, **kwargs)
 
439
  return wrapper
 
440
  return decorator
441
 
442
 
443
  # Utility functions for cache key generation
444
- def generate_card_cache_key(topic: str, subject: str, num_cards: int, difficulty: str, **kwargs) -> str:
 
 
445
  """Generate cache key for card generation"""
446
  key_data = {
447
  "topic": topic,
448
  "subject": subject,
449
  "num_cards": num_cards,
450
  "difficulty": difficulty,
451
- "context": kwargs.get("context", {})
452
  }
453
  key_str = json.dumps(key_data, sort_keys=True)
454
  return f"cards:{hashlib.md5(key_str.encode()).hexdigest()}"
455
 
456
 
457
- def generate_judgment_cache_key(cards: List[Card], judgment_type: str = "general") -> str:
 
 
458
  """Generate cache key for card judgment"""
459
  # Use card content to generate stable hash
460
  card_data = []
461
  for card in cards:
462
- card_data.append({
463
- "question": card.front.question,
464
- "answer": card.back.answer,
465
- "type": card.card_type
466
- })
467
-
468
- key_data = {
469
- "cards": card_data,
470
- "judgment_type": judgment_type
471
- }
472
  key_str = json.dumps(key_data, sort_keys=True)
473
  return f"judgment:{hashlib.md5(key_str.encode()).hexdigest()}"
474
-
475
-
476
- # Performance monitoring
477
- class PerformanceMonitor:
478
- """Monitor performance metrics"""
479
-
480
- def __init__(self):
481
- self._metrics: Dict[str, List[float]] = {}
482
- self._lock = asyncio.Lock()
483
-
484
- async def record_execution_time(self, operation: str, execution_time: float):
485
- """Record execution time for an operation"""
486
- async with self._lock:
487
- if operation not in self._metrics:
488
- self._metrics[operation] = []
489
-
490
- self._metrics[operation].append(execution_time)
491
-
492
- # Keep only recent metrics (last 1000)
493
- if len(self._metrics[operation]) > 1000:
494
- self._metrics[operation] = self._metrics[operation][-1000:]
495
-
496
- def get_performance_report(self) -> Dict[str, Dict[str, float]]:
497
- """Get performance report for all operations"""
498
- report = {}
499
-
500
- for operation, times in self._metrics.items():
501
- if times:
502
- report[operation] = {
503
- "count": len(times),
504
- "avg_time": sum(times) / len(times),
505
- "min_time": min(times),
506
- "max_time": max(times),
507
- "p95_time": sorted(times)[int(len(times) * 0.95)] if len(times) > 20 else max(times)
508
- }
509
-
510
- return report
511
-
512
-
513
- # Global performance monitor
514
- _global_monitor = PerformanceMonitor()
515
-
516
-
517
- def get_performance_monitor() -> PerformanceMonitor:
518
- """Get global performance monitor"""
519
- return _global_monitor
 
  import hashlib
  from typing import Dict, Any, List, Optional, Callable, TypeVar, Generic
  from dataclasses import dataclass, field
  from functools import wraps, lru_cache
  import json

  from ankigen_core.logging import logger
  from ankigen_core.models import Card


+ T = TypeVar("T")


  @dataclass
  class CacheConfig:
      """Configuration for agent response caching"""
+
      enable_caching: bool = True
      cache_ttl: int = 3600  # seconds
      max_cache_size: int = 1000
      cache_backend: str = "memory"  # "memory" or "file"
      cache_directory: Optional[str] = None
+
      def __post_init__(self):
          if self.cache_backend == "file" and not self.cache_directory:
              self.cache_directory = "cache/agents"

  @dataclass
  class PerformanceConfig:
      """Configuration for performance optimizations"""
+
      enable_batch_processing: bool = True
      max_batch_size: int = 10
      batch_timeout: float = 2.0  # seconds

  @dataclass
  class CacheEntry(Generic[T]):
      """Cache entry with metadata"""
+
      value: T
      created_at: float
      access_count: int = 0
      last_accessed: float = field(default_factory=time.time)
      cache_key: str = ""
+
      def is_expired(self, ttl: int) -> bool:
          """Check if cache entry is expired"""
          return time.time() - self.created_at > ttl
+
      def touch(self):
          """Update access metadata"""
          self.access_count += 1

  class MemoryCache(Generic[T]):
      """In-memory cache with LRU eviction"""
+
      def __init__(self, config: CacheConfig):
          self.config = config
          self._cache: Dict[str, CacheEntry[T]] = {}
          self._access_order: List[str] = []
          self._lock = asyncio.Lock()
+
      async def get(self, key: str) -> Optional[T]:
          """Get value from cache"""
          async with self._lock:
              entry = self._cache.get(key)
              if not entry:
                  return None
+
              if entry.is_expired(self.config.cache_ttl):
                  await self._remove(key)
                  return None
+
              entry.touch()
              self._update_access_order(key)
+
              logger.debug(f"Cache hit for key: {key[:20]}...")
              return entry.value
+
      async def set(self, key: str, value: T) -> None:
          """Set value in cache"""
          async with self._lock:
              # Check if we need to evict entries
              if len(self._cache) >= self.config.max_cache_size:
                  await self._evict_lru()
+
+             entry = CacheEntry(value=value, created_at=time.time(), cache_key=key)
+
              self._cache[key] = entry
              self._update_access_order(key)
+
              logger.debug(f"Cache set for key: {key[:20]}...")
+
      async def remove(self, key: str) -> bool:
          """Remove entry from cache"""
          async with self._lock:
              return await self._remove(key)
+
      async def clear(self) -> None:
          """Clear all cache entries"""
          async with self._lock:
              self._cache.clear()
              self._access_order.clear()
              logger.info("Cache cleared")
+
      async def _remove(self, key: str) -> bool:
          """Internal remove method"""
          if key in self._cache:

              self._access_order.remove(key)
              return True
          return False
+
      async def _evict_lru(self) -> None:
          """Evict least recently used entries"""
          if not self._access_order:
              return
+
          # Remove oldest entries
+         to_remove = self._access_order[: len(self._access_order) // 4]  # Remove 25%
          for key in to_remove:
              await self._remove(key)
+
          logger.debug(f"Evicted {len(to_remove)} cache entries")
+
      def _update_access_order(self, key: str) -> None:
          """Update access order for LRU tracking"""
          if key in self._access_order:
              self._access_order.remove(key)
          self._access_order.append(key)
+
      def get_stats(self) -> Dict[str, Any]:
          """Get cache statistics"""
          total_accesses = sum(entry.access_count for entry in self._cache.values())

              "entries": len(self._cache),
              "max_size": self.config.max_cache_size,
              "total_accesses": total_accesses,
+             "hit_rate": total_accesses / max(1, len(self._cache)),
          }


  class BatchProcessor:
      """Batch processor for agent requests"""
+
      def __init__(self, config: PerformanceConfig):
          self.config = config
          self._batches: Dict[str, List[Dict[str, Any]]] = {}
          self._batch_timers: Dict[str, asyncio.Task] = {}
          self._lock = asyncio.Lock()
+
      async def add_request(
+         self, batch_key: str, request_data: Dict[str, Any], processor_func: Callable
      ) -> Any:
          """Add request to batch for processing"""
+
          if not self.config.enable_batch_processing:
              # Process immediately if batching is disabled
              return await processor_func([request_data])
+
          async with self._lock:
              # Initialize batch if needed
              if batch_key not in self._batches:
                  self._batches[batch_key] = []
                  self._start_batch_timer(batch_key, processor_func)
+
              # Add request to batch
              self._batches[batch_key].append(request_data)
+
              # Process immediately if batch is full
              if len(self._batches[batch_key]) >= self.config.max_batch_size:
                  return await self._process_batch(batch_key, processor_func)
+
          # Wait for timer or batch completion
+         return await self._wait_for_batch_result(
+             batch_key, request_data, processor_func
+         )
+
      def _start_batch_timer(self, batch_key: str, processor_func: Callable) -> None:
          """Start timer for batch processing"""
+
          async def timer():
              await asyncio.sleep(self.config.batch_timeout)
              async with self._lock:
                  if batch_key in self._batches and self._batches[batch_key]:
                      await self._process_batch(batch_key, processor_func)
+
          self._batch_timers[batch_key] = asyncio.create_task(timer())
+
+     async def _process_batch(
+         self, batch_key: str, processor_func: Callable
+     ) -> List[Any]:
          """Process accumulated batch"""
          if batch_key not in self._batches:
              return []
+
          batch = self._batches.pop(batch_key)
+
          # Cancel timer
          if batch_key in self._batch_timers:
              self._batch_timers[batch_key].cancel()
              del self._batch_timers[batch_key]
+
          if not batch:
              return []
+
          logger.debug(f"Processing batch {batch_key} with {len(batch)} requests")
+
          try:
              # Process the batch
              results = await processor_func(batch)
              return results if isinstance(results, list) else [results]
+
          except Exception as e:
              logger.error(f"Batch processing failed for {batch_key}: {e}")
              raise
+
      async def _wait_for_batch_result(
+         self, batch_key: str, request_data: Dict[str, Any], processor_func: Callable
      ) -> Any:
          """Wait for batch processing to complete"""
          # This is a simplified implementation
          # In a real implementation, you'd use events/conditions to coordinate
          # between requests in the same batch
+
          while batch_key in self._batches:
              await asyncio.sleep(0.1)
+
          # For now, process individually as fallback
          return await processor_func([request_data])


  class RequestDeduplicator:
      """Deduplicates identical agent requests"""
+
      def __init__(self):
          self._pending_requests: Dict[str, asyncio.Future] = {}
          self._lock = asyncio.Lock()
+
      @lru_cache(maxsize=1000)
      def _generate_request_hash(self, request_data: str) -> str:
          """Generate hash for request deduplication"""
          return hashlib.md5(request_data.encode()).hexdigest()
+
      async def deduplicate_request(
+         self, request_data: Dict[str, Any], processor_func: Callable
      ) -> Any:
          """Deduplicate and process request"""
+
          # Generate hash for deduplication
          request_str = json.dumps(request_data, sort_keys=True)
          request_hash = self._generate_request_hash(request_str)
+
          async with self._lock:
              # Check if request is already pending
              if request_hash in self._pending_requests:
                  logger.debug(f"Deduplicating request: {request_hash[:16]}...")
                  return await self._pending_requests[request_hash]
+
              # Create future for this request
+             future = asyncio.create_task(
+                 self._process_unique_request(request_hash, request_data, processor_func)
+             )
+
              self._pending_requests[request_hash] = future
+
          try:
              result = await future
              return result

              # Clean up completed request
              async with self._lock:
                  self._pending_requests.pop(request_hash, None)
+
      async def _process_unique_request(
+         self, request_hash: str, request_data: Dict[str, Any], processor_func: Callable
      ) -> Any:
          """Process unique request"""
          logger.debug(f"Processing unique request: {request_hash[:16]}...")

  class PerformanceOptimizer:
      """Main performance optimization coordinator"""
+
      def __init__(self, config: PerformanceConfig):
          self.config = config
+         self.cache = (
+             MemoryCache(config.cache_config) if config.enable_response_caching else None
+         )
+         self.batch_processor = (
+             BatchProcessor(config) if config.enable_batch_processing else None
+         )
+         self.deduplicator = (
+             RequestDeduplicator() if config.enable_request_deduplication else None
+         )
          self._semaphore = asyncio.Semaphore(config.max_concurrent_requests)
+
      async def optimize_agent_call(
          self,
          agent_name: str,
          request_data: Dict[str, Any],
          processor_func: Callable,
+         cache_key_generator: Optional[Callable[[Dict[str, Any]], str]] = None,
      ) -> Any:
          """Optimize agent call with caching, batching, and deduplication"""
+
          # Generate cache key
          cache_key = None
          if self.cache and cache_key_generator:
              cache_key = cache_key_generator(request_data)
+
          # Check cache first
          cached_result = await self.cache.get(cache_key)
          if cached_result is not None:
              return cached_result
+
          # Apply rate limiting
          async with self._semaphore:
              # Apply deduplication
              if self.deduplicator and self.config.enable_request_deduplication:
                  result = await self.deduplicator.deduplicate_request(

                  )
              else:
                  result = await processor_func(request_data)
+
              # Cache result
              if self.cache and cache_key and result is not None:
                  await self.cache.set(cache_key, result)
+
              return result
+
      async def optimize_batch_processing(
+         self, batch_key: str, request_data: Dict[str, Any], processor_func: Callable
      ) -> Any:
          """Optimize using batch processing"""
          if self.batch_processor:

              )
          else:
              return await processor_func([request_data])
+
      def get_performance_stats(self) -> Dict[str, Any]:
          """Get performance optimization statistics"""
          stats = {

              "concurrency": {
                  "max_concurrent": self.config.max_concurrent_requests,
                  "current_available": self._semaphore._value,
+             },
          }
+
          if self.cache:
              stats["cache"] = self.cache.get_stats()
+
          return stats


  _global_optimizer: Optional[PerformanceOptimizer] = None


+ def get_performance_optimizer(
+     config: Optional[PerformanceConfig] = None,
+ ) -> PerformanceOptimizer:
      """Get global performance optimizer instance"""
      global _global_optimizer
      if _global_optimizer is None:

  # Decorators for performance optimization
  def cache_response(cache_key_func: Callable[[Any], str], ttl: int = 3600):
      """Decorator to cache function responses"""
+
      def decorator(func):
          @wraps(func)
          async def wrapper(*args, **kwargs):
              optimizer = get_performance_optimizer()
              if not optimizer.cache:
                  return await func(*args, **kwargs)
+
              # Generate cache key
              cache_key = cache_key_func(*args, **kwargs)
+
              # Check cache
              cached_result = await optimizer.cache.get(cache_key)
              if cached_result is not None:
                  return cached_result
+
              # Execute function
              result = await func(*args, **kwargs)
+
              # Cache result
              if result is not None:
                  await optimizer.cache.set(cache_key, result)
+
              return result
+
          return wrapper
+
      return decorator


  def rate_limit(max_concurrent: int = 5):
      """Decorator to apply rate limiting"""
      semaphore = asyncio.Semaphore(max_concurrent)
+
      def decorator(func):
          @wraps(func)
          async def wrapper(*args, **kwargs):
              async with semaphore:
                  return await func(*args, **kwargs)
+
          return wrapper
+
      return decorator


  # Utility functions for cache key generation
+ def generate_card_cache_key(
+     topic: str, subject: str, num_cards: int, difficulty: str, **kwargs
+ ) -> str:
      """Generate cache key for card generation"""
      key_data = {
          "topic": topic,
          "subject": subject,
          "num_cards": num_cards,
          "difficulty": difficulty,
+         "context": kwargs.get("context", {}),
      }
      key_str = json.dumps(key_data, sort_keys=True)
      return f"cards:{hashlib.md5(key_str.encode()).hexdigest()}"


+ def generate_judgment_cache_key(
+     cards: List[Card], judgment_type: str = "general"
+ ) -> str:
      """Generate cache key for card judgment"""
      # Use card content to generate stable hash
      card_data = []
      for card in cards:
+         card_data.append(
+             {
+                 "question": card.front.question,
+                 "answer": card.back.answer,
+                 "type": card.card_type,
+             }
+         )
+
+     key_data = {"cards": card_data, "judgment_type": judgment_type}
      key_str = json.dumps(key_data, sort_keys=True)
      return f"judgment:{hashlib.md5(key_str.encode()).hexdigest()}"
ankigen_core/agents/schemas.py ADDED
@@ -0,0 +1,136 @@
+ """
+ Pydantic schemas for structured outputs from agents.
+ These schemas ensure type safety and eliminate JSON parsing errors.
+ """
+
+ from typing import List, Dict, Any, Optional
+ from pydantic import BaseModel, Field
+ from enum import Enum
+
+
+ class DifficultyLevel(str, Enum):
+     """Difficulty levels for flashcards"""
+
+     BEGINNER = "beginner"
+     INTERMEDIATE = "intermediate"
+     ADVANCED = "advanced"
+
+
+ class CardType(str, Enum):
+     """Types of flashcards"""
+
+     BASIC = "basic"
+     CLOZE = "cloze"
+
+
+ class CardFrontSchema(BaseModel):
+     """Schema for the front of a flashcard"""
+
+     question: str = Field(..., description="The question or prompt for the flashcard")
+
+
+ class CardBackSchema(BaseModel):
+     """Schema for the back of a flashcard"""
+
+     answer: str = Field(..., description="The main answer to the question")
+     explanation: str = Field(..., description="Detailed explanation of the answer")
+     example: str = Field(..., description="A concrete example illustrating the concept")
+
+
+ class CardMetadataSchema(BaseModel):
+     """Schema for flashcard metadata"""
+
+     topic: str = Field(..., description="The main topic of the card")
+     subject: str = Field(..., description="The subject area (e.g., Biology, History)")
+     difficulty: DifficultyLevel = Field(..., description="The difficulty level")
+     tags: Optional[List[str]] = Field(
+         None, description="Relevant tags for categorization"
+     )
+     learning_outcomes: Optional[List[str]] = Field(
+         None, description="What the learner should achieve"
+     )
+     prerequisites: Optional[List[str]] = Field(
+         None, description="Required prior knowledge"
+     )
+     related_concepts: Optional[List[str]] = Field(
+         None, description="Related concepts to explore"
+     )
+     estimated_time: Optional[str] = Field(None, description="Estimated time to learn")
+     common_mistakes: Optional[List[str]] = Field(
+         None, description="Common mistakes to avoid"
+     )
+     memory_aids: Optional[List[str]] = Field(
+         None, description="Memory aids or mnemonics"
+     )
+     real_world_applications: Optional[List[str]] = Field(
+         None, description="Real-world applications"
+     )
+
+
+ class CardSchema(BaseModel):
+     """Complete schema for a flashcard"""
+
+     card_type: CardType = Field(..., description="The type of flashcard")
+     front: CardFrontSchema = Field(..., description="The front of the card")
+     back: CardBackSchema = Field(..., description="The back of the card")
+     metadata: CardMetadataSchema = Field(..., description="Metadata about the card")
+     enhancement_notes: Optional[str] = Field(
+         None, description="Notes about enhancements made"
+     )
+
+
+ class CardsGenerationSchema(BaseModel):
+     """Schema for multiple cards generation"""
+
+     cards: List[CardSchema] = Field(..., description="List of generated flashcards")
+
+
+ class JudgeDecisionSchema(BaseModel):
+     """Schema for judge decisions"""
+
+     approved: bool = Field(..., description="Whether the card is approved")
+     score: float = Field(
+         ..., ge=0.0, le=1.0, description="Quality score between 0 and 1"
+     )
+     feedback: str = Field(..., description="Detailed feedback about the card")
+     improvements: Optional[List[str]] = Field(
+         None, description="Suggested improvements"
+     )
+     reasoning: str = Field(..., description="Detailed reasoning for the decision")
+     confidence: float = Field(
+         ..., ge=0.0, le=1.0, description="Confidence in the decision"
+     )
+     metadata: Optional[Dict[str, Any]] = Field(None, description="Additional metadata")
+
+
+ class EnhancementSchema(BaseModel):
+     """Schema for card enhancements"""
+
+     enhanced_card: CardSchema = Field(..., description="The enhanced flashcard")
+     enhancement_summary: str = Field(..., description="Summary of what was enhanced")
+     enhancement_details: Optional[Dict[str, Any]] = Field(
+         None, description="Detailed enhancement information"
+     )
+
+
+ class GenerationRequestSchema(BaseModel):
+     """Schema for generation requests"""
+
+     topic: str = Field(..., description="The topic to generate cards for")
+     subject: str = Field(..., description="The subject area")
+     num_cards: int = Field(..., ge=1, le=20, description="Number of cards to generate")
+     difficulty: DifficultyLevel = Field(..., description="Target difficulty level")
+     context: Optional[Dict[str, Any]] = Field(None, description="Additional context")
+     preferences: Optional[Dict[str, Any]] = Field(None, description="User preferences")
+
+
+ class TokenUsageSchema(BaseModel):
+     """Schema for token usage tracking"""
+
+     prompt_tokens: int = Field(..., ge=0, description="Number of tokens in the prompt")
+     completion_tokens: int = Field(
+         ..., ge=0, description="Number of tokens in the completion"
+     )
+     total_tokens: int = Field(..., ge=0, description="Total tokens used")
+     estimated_cost: float = Field(..., ge=0.0, description="Estimated cost in USD")
+     model: str = Field(..., description="Model used for the request")
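
A quick sketch of how these schemas might be consumed; it assumes Pydantic v2 (model_validate_json) and an illustrative JSON payload, not output captured from the real agents.

# Hypothetical sketch: validating a model response against CardSchema.
from ankigen_core.agents.schemas import CardSchema  # assumed import path

raw = """
{
  "card_type": "basic",
  "front": {"question": "What light does chlorophyll absorb?"},
  "back": {
    "answer": "Mostly red and blue light",
    "explanation": "Chlorophyll reflects green wavelengths.",
    "example": "Leaves look green because green light is reflected."
  },
  "metadata": {"topic": "Photosynthesis", "subject": "Biology", "difficulty": "beginner"}
}
"""

# Raises pydantic.ValidationError if the agent returned malformed output,
# which is the "eliminate JSON parsing errors" point of these schemas.
card = CardSchema.model_validate_json(raw)
print(card.front.question, "->", card.back.answer)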
ankigen_core/agents/security.py CHANGED
@@ -5,7 +5,7 @@ import hashlib
  import re
  from typing import Dict, Any, Optional, List
  from dataclasses import dataclass, field
- from datetime import datetime, timedelta
  from collections import defaultdict
  import asyncio

@@ -15,6 +15,7 @@ from ankigen_core.logging import logger
  @dataclass
  class RateLimitConfig:
      """Configuration for rate limiting"""
      requests_per_minute: int = 60
      requests_per_hour: int = 1000
      burst_limit: int = 10
@@ -24,243 +25,260 @@ class RateLimitConfig:
  @dataclass
  class SecurityConfig:
      """Security configuration for agents"""
      enable_input_validation: bool = True
      enable_output_filtering: bool = True
      enable_rate_limiting: bool = True
      max_input_length: int = 10000
      max_output_length: int = 50000
      blocked_patterns: List[str] = field(default_factory=list)
-     allowed_file_extensions: List[str] = field(default_factory=lambda: ['.txt', '.md', '.json', '.yaml'])
-
      def __post_init__(self):
          if not self.blocked_patterns:
              self.blocked_patterns = [
-                 r'(?i)(api[_\-]?key|secret|password|token|credential)',
-                 r'(?i)(sk-[a-zA-Z0-9]{48,})',  # OpenAI API key pattern
-                 r'(?i)(access[_\-]?token)',
-                 r'(?i)(private[_\-]?key)',
-                 r'(?i)(<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>)',  # Script tags
-                 r'(?i)(javascript:|data:|vbscript:)',  # URL schemes
              ]


  class RateLimiter:
      """Rate limiter for API calls and agent executions"""
-
      def __init__(self, config: RateLimitConfig):
          self.config = config
          self._requests: Dict[str, List[float]] = defaultdict(list)
          self._locks: Dict[str, asyncio.Lock] = defaultdict(asyncio.Lock)
-
      async def check_rate_limit(self, identifier: str) -> bool:
          """Check if request is within rate limits"""
          async with self._locks[identifier]:
              now = time.time()
-
              # Clean old requests
              self._requests[identifier] = [
-                 req_time for req_time in self._requests[identifier]
                  if now - req_time < 3600  # Keep last hour
              ]
-
              recent_requests = self._requests[identifier]
-
              # Check burst limit (last minute)
              last_minute = [req for req in recent_requests if now - req < 60]
              if len(last_minute) >= self.config.burst_limit:
                  logger.warning(f"Burst limit exceeded for {identifier}")
                  return False
-
              # Check per-minute limit
              if len(last_minute) >= self.config.requests_per_minute:
                  logger.warning(f"Per-minute rate limit exceeded for {identifier}")
                  return False
-
              # Check per-hour limit
              if len(recent_requests) >= self.config.requests_per_hour:
                  logger.warning(f"Per-hour rate limit exceeded for {identifier}")
                  return False
-
              # Record this request
              self._requests[identifier].append(now)
              return True
-
      def get_reset_time(self, identifier: str) -> Optional[datetime]:
          """Get when rate limits will reset for identifier"""
          if identifier not in self._requests:
              return None
-
          now = time.time()
-         recent_requests = [
-             req for req in self._requests[identifier]
-             if now - req < 60
-         ]
-
          if len(recent_requests) >= self.config.requests_per_minute:
              oldest_request = min(recent_requests)
              return datetime.fromtimestamp(oldest_request + 60)
-
          return None


  class SecurityValidator:
      """Security validator for agent inputs and outputs"""
-
      def __init__(self, config: SecurityConfig):
          self.config = config
-         self._blocked_patterns = [re.compile(pattern) for pattern in config.blocked_patterns]
-
      def validate_input(self, input_text: str, source: str = "unknown") -> bool:
          """Validate input for security issues"""
          if not self.config.enable_input_validation:
              return True
-
          try:
              # Check input length
              if len(input_text) > self.config.max_input_length:
                  logger.warning(f"Input too long from {source}: {len(input_text)} chars")
                  return False
-
              # Check for blocked patterns
              for pattern in self._blocked_patterns:
                  if pattern.search(input_text):
                      logger.warning(f"Blocked pattern detected in input from {source}")
                      return False
-
              # Check for suspicious content
              if self._contains_suspicious_content(input_text):
                  logger.warning(f"Suspicious content detected in input from {source}")
                  return False
-
              return True
-
          except Exception as e:
              logger.error(f"Error validating input from {source}: {e}")
              return False
-
      def validate_output(self, output_text: str, agent_name: str = "unknown") -> bool:
          """Validate output for security issues"""
          if not self.config.enable_output_filtering:
              return True
-
          try:
              # Check output length
              if len(output_text) > self.config.max_output_length:
-                 logger.warning(f"Output too long from {agent_name}: {len(output_text)} chars")
                  return False
-
              # Check for leaked sensitive information
              for pattern in self._blocked_patterns:
                  if pattern.search(output_text):
-                     logger.warning(f"Potential data leak detected in output from {agent_name}")
                      return False
-
              return True
-
          except Exception as e:
              logger.error(f"Error validating output from {agent_name}: {e}")
              return False
-
      def sanitize_input(self, input_text: str) -> str:
          """Sanitize input by removing potentially dangerous content"""
          try:
              # Remove HTML/XML tags
-             sanitized = re.sub(r'<[^>]+>', '', input_text)
-
              # Remove suspicious URLs
-             sanitized = re.sub(r'(?i)(javascript:|data:|vbscript:)[^\s]*', '[URL_REMOVED]', sanitized)
-
              # Truncate if too long
              if len(sanitized) > self.config.max_input_length:
-                 sanitized = sanitized[:self.config.max_input_length] + "...[TRUNCATED]"
-
              return sanitized
-
          except Exception as e:
              logger.error(f"Error sanitizing input: {e}")
              return input_text[:1000]  # Return truncated original as fallback
-
      def sanitize_output(self, output_text: str) -> str:
          """Sanitize output by removing sensitive information"""
          try:
              sanitized = output_text
-
              # Replace potential API keys or secrets
              for pattern in self._blocked_patterns:
-                 sanitized = pattern.sub('[REDACTED]', sanitized)
-
              # Truncate if too long
              if len(sanitized) > self.config.max_output_length:
-                 sanitized = sanitized[:self.config.max_output_length] + "...[TRUNCATED]"
-
              return sanitized
-
          except Exception as e:
              logger.error(f"Error sanitizing output: {e}")
              return output_text[:5000]  # Return truncated original as fallback
-
      def _contains_suspicious_content(self, text: str) -> bool:
          """Check for suspicious content patterns"""
          suspicious_patterns = [
-             r'(?i)(\beval\s*\()',  # eval() calls
-             r'(?i)(\bexec\s*\()',  # exec() calls
-             r'(?i)(__import__)',  # Dynamic imports
-             r'(?i)(subprocess|os\.system)',  # System commands
-             r'(?i)(file://|ftp://)',  # File/FTP URLs
-             r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b',  # IP addresses
          ]
-
          for pattern in suspicious_patterns:
              if re.search(pattern, text):
                  return True
-
          return False


  class SecureAgentWrapper:
      """Secure wrapper for agent execution with rate limiting and validation"""
-
-     def __init__(self, base_agent, rate_limiter: RateLimiter, validator: SecurityValidator):
          self.base_agent = base_agent
          self.rate_limiter = rate_limiter
          self.validator = validator
          self._identifier = self._generate_identifier()
-
      def _generate_identifier(self) -> str:
          """Generate unique identifier for rate limiting"""
-         agent_name = getattr(self.base_agent, 'config', {}).get('name', 'unknown')
          # Include agent name and some randomness for fairness
-         return hashlib.md5(f"{agent_name}_{id(self.base_agent)}".encode()).hexdigest()[:16]
-
-     async def secure_execute(self, user_input: str, context: Dict[str, Any] = None) -> Any:
          """Execute agent with security checks and rate limiting"""
-
          # Rate limiting check
          if not await self.rate_limiter.check_rate_limit(self._identifier):
              reset_time = self.rate_limiter.get_reset_time(self._identifier)
              raise SecurityError(f"Rate limit exceeded. Reset at: {reset_time}")
-
          # Input validation
          if not self.validator.validate_input(user_input, self._identifier):
              raise SecurityError("Input validation failed")
-
          # Sanitize input
          sanitized_input = self.validator.sanitize_input(user_input)
-
          try:
              # Execute the base agent
              result = await self.base_agent.execute(sanitized_input, context)
-
              # Validate output
              if isinstance(result, str):
                  if not self.validator.validate_output(result, self._identifier):
                      raise SecurityError("Output validation failed")
-
                  # Sanitize output
                  result = self.validator.sanitize_output(result)
-
              return result
-
          except Exception as e:
              logger.error(f"Secure execution failed for {self._identifier}: {e}")
              raise
@@ -268,6 +286,7 @@ class SecureAgentWrapper:

  class SecurityError(Exception):
      """Custom exception for security-related errors"""
      pass

@@ -284,7 +303,9 @@ def get_rate_limiter(config: Optional[RateLimitConfig] = None) -> RateLimiter:
      return _global_rate_limiter

- def get_security_validator(config: Optional[SecurityConfig] = None) -> SecurityValidator:
      """Get global security validator instance"""
      global _global_validator
      if _global_validator is None:
@@ -292,8 +313,11 @@ def get_security_validator(config: Optional[SecurityConfig] = None) -> SecurityV
      return _global_validator

- def create_secure_agent(base_agent, rate_config: Optional[RateLimitConfig] = None,
-                         security_config: Optional[SecurityConfig] = None) -> SecureAgentWrapper:
      """Create a secure wrapper for an agent"""
      rate_limiter = get_rate_limiter(rate_config)
      validator = get_security_validator(security_config)
@@ -306,11 +330,11 @@ def set_secure_file_permissions(file_path: str):
      try:
          import os
          import stat
-
          # Set read/write for owner only (0o600)
          os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR)
          logger.info(f"Set secure permissions for {file_path}")
-
      except Exception as e:
          logger.warning(f"Could not set secure permissions for {file_path}: {e}")

@@ -319,19 +343,19 @@ def set_secure_file_permissions(file_path: str):
  def strip_html_tags(text: str) -> str:
      """Strip HTML tags from text (improved version)"""
      import html
-
      # Decode HTML entities first
      text = html.unescape(text)
-
      # Remove HTML/XML tags
-     text = re.sub(r'<[^>]+>', '', text)
-
      # Remove remaining HTML entities
-     text = re.sub(r'&[a-zA-Z0-9#]+;', '', text)
-
      # Clean up whitespace
-     text = re.sub(r'\s+', ' ', text).strip()
-
      return text

@@ -339,20 +363,20 @@ def validate_api_key_format(api_key: str) -> bool:
      """Validate OpenAI API key format without logging it"""
      if not api_key:
          return False
-
      # Check basic format (starts with sk- and has correct length)
-     if not api_key.startswith('sk-'):
          return False
-
      if len(api_key) < 20:  # Minimum reasonable length
          return False
-
      # Check for obvious fake keys
-     fake_patterns = ['test', 'fake', 'demo', 'example', 'placeholder']
      lower_key = api_key.lower()
      if any(pattern in lower_key for pattern in fake_patterns):
          return False
-
      return True

@@ -361,13 +385,13 @@ def sanitize_for_logging(text: str, max_length: int = 100) -> str:
      """Sanitize text for safe logging"""
      if not text:
          return "[EMPTY]"
-
      # Remove potential secrets
      validator = get_security_validator()
      sanitized = validator.sanitize_output(text)
-
      # Truncate for logging
      if len(sanitized) > max_length:
          sanitized = sanitized[:max_length] + "...[TRUNCATED]"
-
-     return sanitized
  import re
  from typing import Dict, Any, Optional, List
  from dataclasses import dataclass, field
+ from datetime import datetime
  from collections import defaultdict
  import asyncio

  @dataclass
  class RateLimitConfig:
      """Configuration for rate limiting"""
+
      requests_per_minute: int = 60
      requests_per_hour: int = 1000
      burst_limit: int = 10

  @dataclass
  class SecurityConfig:
      """Security configuration for agents"""
+
      enable_input_validation: bool = True
      enable_output_filtering: bool = True
      enable_rate_limiting: bool = True
      max_input_length: int = 10000
      max_output_length: int = 50000
      blocked_patterns: List[str] = field(default_factory=list)
+     allowed_file_extensions: List[str] = field(
+         default_factory=lambda: [".txt", ".md", ".json", ".yaml"]
+     )
+
      def __post_init__(self):
          if not self.blocked_patterns:
              self.blocked_patterns = [
+                 r"(?i)(api[_\-]?key|secret|password|token|credential)",
+                 r"(?i)(sk-[a-zA-Z0-9]{48,})",  # OpenAI API key pattern
+                 r"(?i)(access[_\-]?token)",
+                 r"(?i)(private[_\-]?key)",
+                 r"(?i)(<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>)",  # Script tags
+                 r"(?i)(javascript:|data:|vbscript:)",  # URL schemes
              ]


  class RateLimiter:
      """Rate limiter for API calls and agent executions"""
+
      def __init__(self, config: RateLimitConfig):
          self.config = config
          self._requests: Dict[str, List[float]] = defaultdict(list)
          self._locks: Dict[str, asyncio.Lock] = defaultdict(asyncio.Lock)
+
      async def check_rate_limit(self, identifier: str) -> bool:
          """Check if request is within rate limits"""
          async with self._locks[identifier]:
              now = time.time()
+
              # Clean old requests
              self._requests[identifier] = [
+                 req_time
+                 for req_time in self._requests[identifier]
                  if now - req_time < 3600  # Keep last hour
              ]
+
              recent_requests = self._requests[identifier]
+
              # Check burst limit (last minute)
              last_minute = [req for req in recent_requests if now - req < 60]
              if len(last_minute) >= self.config.burst_limit:
                  logger.warning(f"Burst limit exceeded for {identifier}")
                  return False
+
              # Check per-minute limit
              if len(last_minute) >= self.config.requests_per_minute:
                  logger.warning(f"Per-minute rate limit exceeded for {identifier}")
                  return False
+
              # Check per-hour limit
              if len(recent_requests) >= self.config.requests_per_hour:
                  logger.warning(f"Per-hour rate limit exceeded for {identifier}")
                  return False
+
              # Record this request
              self._requests[identifier].append(now)
              return True
+
      def get_reset_time(self, identifier: str) -> Optional[datetime]:
          """Get when rate limits will reset for identifier"""
          if identifier not in self._requests:
              return None
+
          now = time.time()
+         recent_requests = [req for req in self._requests[identifier] if now - req < 60]
+
          if len(recent_requests) >= self.config.requests_per_minute:
              oldest_request = min(recent_requests)
              return datetime.fromtimestamp(oldest_request + 60)
+
          return None


  class SecurityValidator:
      """Security validator for agent inputs and outputs"""
+
      def __init__(self, config: SecurityConfig):
          self.config = config
+         self._blocked_patterns = [
+             re.compile(pattern) for pattern in config.blocked_patterns
+         ]
+
      def validate_input(self, input_text: str, source: str = "unknown") -> bool:
          """Validate input for security issues"""
          if not self.config.enable_input_validation:
              return True
+
          try:
              # Check input length
              if len(input_text) > self.config.max_input_length:
                  logger.warning(f"Input too long from {source}: {len(input_text)} chars")
                  return False
+
              # Check for blocked patterns
              for pattern in self._blocked_patterns:
                  if pattern.search(input_text):
                      logger.warning(f"Blocked pattern detected in input from {source}")
                      return False
+
              # Check for suspicious content
              if self._contains_suspicious_content(input_text):
                  logger.warning(f"Suspicious content detected in input from {source}")
                  return False
+
              return True
+
          except Exception as e:
              logger.error(f"Error validating input from {source}: {e}")
              return False
+
      def validate_output(self, output_text: str, agent_name: str = "unknown") -> bool:
          """Validate output for security issues"""
          if not self.config.enable_output_filtering:
              return True
+
          try:
              # Check output length
              if len(output_text) > self.config.max_output_length:
+                 logger.warning(
+                     f"Output too long from {agent_name}: {len(output_text)} chars"
+                 )
                  return False
+
              # Check for leaked sensitive information
              for pattern in self._blocked_patterns:
                  if pattern.search(output_text):
+                     logger.warning(
+                         f"Potential data leak detected in output from {agent_name}"
+                     )
                      return False
+
              return True
+
          except Exception as e:
              logger.error(f"Error validating output from {agent_name}: {e}")
              return False
+
      def sanitize_input(self, input_text: str) -> str:
          """Sanitize input by removing potentially dangerous content"""
          try:
              # Remove HTML/XML tags
+             sanitized = re.sub(r"<[^>]+>", "", input_text)
+
              # Remove suspicious URLs
+             sanitized = re.sub(
+                 r"(?i)(javascript:|data:|vbscript:)[^\s]*", "[URL_REMOVED]", sanitized
+             )
+
              # Truncate if too long
              if len(sanitized) > self.config.max_input_length:
+                 sanitized = sanitized[: self.config.max_input_length] + "...[TRUNCATED]"
+
              return sanitized
+
          except Exception as e:
              logger.error(f"Error sanitizing input: {e}")
              return input_text[:1000]  # Return truncated original as fallback
+
      def sanitize_output(self, output_text: str) -> str:
          """Sanitize output by removing sensitive information"""
          try:
              sanitized = output_text
+
              # Replace potential API keys or secrets
              for pattern in self._blocked_patterns:
+                 sanitized = pattern.sub("[REDACTED]", sanitized)
+
              # Truncate if too long
              if len(sanitized) > self.config.max_output_length:
+                 sanitized = (
+                     sanitized[: self.config.max_output_length] + "...[TRUNCATED]"
+                 )
+
              return sanitized
+
          except Exception as e:
              logger.error(f"Error sanitizing output: {e}")
              return output_text[:5000]  # Return truncated original as fallback
+
      def _contains_suspicious_content(self, text: str) -> bool:
          """Check for suspicious content patterns"""
          suspicious_patterns = [
+             r"(?i)(\beval\s*\()",  # eval() calls
+             r"(?i)(\bexec\s*\()",  # exec() calls
+             r"(?i)(__import__)",  # Dynamic imports
+             r"(?i)(subprocess|os\.system)",  # System commands
+             r"(?i)(file://|ftp://)",  # File/FTP URLs
+             r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b",  # IP addresses
          ]
+
          for pattern in suspicious_patterns:
              if re.search(pattern, text):
                  return True
+
          return False


  class SecureAgentWrapper:
      """Secure wrapper for agent execution with rate limiting and validation"""
+
+     def __init__(
+         self, base_agent, rate_limiter: RateLimiter, validator: SecurityValidator
+     ):
          self.base_agent = base_agent
          self.rate_limiter = rate_limiter
          self.validator = validator
          self._identifier = self._generate_identifier()
+
      def _generate_identifier(self) -> str:
          """Generate unique identifier for rate limiting"""
+         agent_name = getattr(self.base_agent, "config", {}).get("name", "unknown")
          # Include agent name and some randomness for fairness
+         return hashlib.md5(f"{agent_name}_{id(self.base_agent)}".encode()).hexdigest()[
+             :16
+         ]
+
+     async def secure_execute(
+         self, user_input: str, context: Dict[str, Any] = None
+     ) -> Any:
          """Execute agent with security checks and rate limiting"""
+
          # Rate limiting check
          if not await self.rate_limiter.check_rate_limit(self._identifier):
              reset_time = self.rate_limiter.get_reset_time(self._identifier)
              raise SecurityError(f"Rate limit exceeded. Reset at: {reset_time}")
+
          # Input validation
          if not self.validator.validate_input(user_input, self._identifier):
              raise SecurityError("Input validation failed")
+
          # Sanitize input
          sanitized_input = self.validator.sanitize_input(user_input)
+
          try:
              # Execute the base agent
              result = await self.base_agent.execute(sanitized_input, context)
+
              # Validate output
              if isinstance(result, str):
                  if not self.validator.validate_output(result, self._identifier):
                      raise SecurityError("Output validation failed")
+
                  # Sanitize output
                  result = self.validator.sanitize_output(result)
+
              return result
+
          except Exception as e:
              logger.error(f"Secure execution failed for {self._identifier}: {e}")
              raise

  class SecurityError(Exception):
      """Custom exception for security-related errors"""
+
      pass

      return _global_rate_limiter

+ def get_security_validator(
+     config: Optional[SecurityConfig] = None,
+ ) -> SecurityValidator:
      """Get global security validator instance"""
      global _global_validator
      if _global_validator is None:

      return _global_validator

+ def create_secure_agent(
+     base_agent,
+     rate_config: Optional[RateLimitConfig] = None,
+     security_config: Optional[SecurityConfig] = None,
+ ) -> SecureAgentWrapper:
      """Create a secure wrapper for an agent"""
      rate_limiter = get_rate_limiter(rate_config)
      validator = get_security_validator(security_config)

      try:
          import os
          import stat
+
          # Set read/write for owner only (0o600)
          os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR)
          logger.info(f"Set secure permissions for {file_path}")
+
      except Exception as e:
          logger.warning(f"Could not set secure permissions for {file_path}: {e}")

  def strip_html_tags(text: str) -> str:
      """Strip HTML tags from text (improved version)"""
      import html
+
      # Decode HTML entities first
      text = html.unescape(text)
+
      # Remove HTML/XML tags
+     text = re.sub(r"<[^>]+>", "", text)
+
      # Remove remaining HTML entities
+     text = re.sub(r"&[a-zA-Z0-9#]+;", "", text)
+
      # Clean up whitespace
+     text = re.sub(r"\s+", " ", text).strip()
+
      return text

      """Validate OpenAI API key format without logging it"""
      if not api_key:
          return False
+
      # Check basic format (starts with sk- and has correct length)
+     if not api_key.startswith("sk-"):
          return False
+
      if len(api_key) < 20:  # Minimum reasonable length
          return False
+
      # Check for obvious fake keys
+     fake_patterns = ["test", "fake", "demo", "example", "placeholder"]
      lower_key = api_key.lower()
      if any(pattern in lower_key for pattern in fake_patterns):
          return False
+
      return True

      """Sanitize text for safe logging"""
      if not text:
          return "[EMPTY]"
+
      # Remove potential secrets
      validator = get_security_validator()
      sanitized = validator.sanitize_output(text)
+
      # Truncate for logging
      if len(sanitized) > max_length:
          sanitized = sanitized[:max_length] + "...[TRUNCATED]"
+
+     return sanitized
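
A minimal sketch of the intended wiring, assuming the module path ankigen_core.agents.security; EchoAgent is a hypothetical stand-in for a BaseAgentWrapper-style agent with an async execute method.

# Hypothetical sketch: wrapping an agent with the security layer above.
import asyncio

from ankigen_core.agents.security import (  # assumed module path
    RateLimitConfig,
    SecurityError,
    create_secure_agent,
)


class EchoAgent:
    """Hypothetical agent; SecureAgentWrapper only needs .execute()."""

    async def execute(self, user_input, context=None):
        return f"echo: {user_input}"


async def main():
    secure = create_secure_agent(
        EchoAgent(), rate_config=RateLimitConfig(burst_limit=2)
    )
    try:
        # Script tags match a blocked pattern, so validate_input rejects this.
        print(await secure.secure_execute("<script>alert(1)</script> hello"))
    except SecurityError as e:
        print("blocked:", e)


asyncio.run(main())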
ankigen_core/agents/templates/enhancers.j2 ADDED
@@ -0,0 +1,17 @@
+ {# Enhancement agent configuration template #}
+ {
+   "revision_agent": {
+     "name": "revision_agent",
+     "instructions": "You are a content revision specialist.\nYour role is to improve flashcards based on feedback from quality judges.\n\nFor each revision request:\n- Analyze specific feedback provided\n- Make targeted improvements to address issues\n- Maintain the card's educational intent\n- Preserve correct information while fixing problems\n- Improve clarity, accuracy, and pedagogical value\n\nFocus on iterative improvement rather than complete rewrites.",
+     "model": "{{ revision_agent_model }}",
+     "temperature": 0.6,
+     "timeout": 40.0
+   },
+   "enhancement_agent": {
+     "name": "enhancement_agent",
+     "instructions": "You are a content enhancement specialist.\nYour role is to add missing elements and enrich flashcard content.\n\nEnhancement tasks:\n- Add missing explanations or examples\n- Improve metadata and tagging\n- Generate additional context or background\n- Create connections to related concepts\n- Enhance visual or structural elements\n\nEnsure enhancements add value without overwhelming the learner.",
+     "model": "{{ enhancement_agent_model }}",
+     "temperature": 0.7,
+     "timeout": 35.0
+   }
+ }
ankigen_core/agents/templates/generators.j2 ADDED
@@ -0,0 +1,37 @@
+ {# Generator agent configuration template #}
+ {
+   "subject_expert": {
+     "name": "subject_expert",
+     "instructions": "You are a world-class expert in {{ subject | default('the subject area') }} with deep pedagogical knowledge. \nYour role is to generate high-quality flashcards that demonstrate mastery of {{ subject | default('the subject') }} concepts.\n\nKey responsibilities:\n- Ensure technical accuracy and depth appropriate for the target level\n- Use domain-specific terminology correctly\n- Include practical applications and real-world examples\n- Connect concepts to prerequisite knowledge\n- Avoid oversimplification while maintaining clarity\n\nGenerate cards that test understanding, not just memorization.",
+     "model": "{{ subject_expert_model }}",
+     "temperature": 0.7,
+     "timeout": 45.0,
+     "custom_prompts": {
+       "math": "Focus on problem-solving strategies and mathematical reasoning",
+       "science": "Emphasize experimental design and scientific method",
+       "history": "Connect events to broader historical patterns and causation",
+       "programming": "Include executable examples and best practices"
+     }
+   },
+   "pedagogical": {
+     "name": "pedagogical",
+     "instructions": "You are an educational specialist focused on learning theory and instructional design.\nYour role is to ensure all flashcards follow educational best practices.\n\nApply these frameworks:\n- Bloom's Taxonomy: Ensure questions target appropriate cognitive levels\n- Spaced Repetition: Design cards for optimal retention\n- Cognitive Load Theory: Avoid overwhelming learners\n- Active Learning: Encourage engagement and application\n\nReview cards for:\n- Clear learning objectives\n- Appropriate difficulty progression\n- Effective use of examples and analogies\n- Prerequisite knowledge alignment",
+     "model": "{{ pedagogical_agent_model }}",
+     "temperature": 0.6,
+     "timeout": 30.0
+   },
+   "content_structuring": {
+     "name": "content_structuring",
+     "instructions": "You are a content organization specialist focused on consistency and structure.\nYour role is to format and organize flashcard content for optimal learning.\n\nEnsure all cards have:\n- Consistent formatting and style\n- Proper metadata and tagging\n- Clear, unambiguous questions\n- Complete, well-structured answers\n- Appropriate examples and explanations\n- Relevant categorization and difficulty levels\n\nMaintain high standards for readability and accessibility.",
+     "model": "{{ content_structuring_model }}",
+     "temperature": 0.5,
+     "timeout": 25.0
+   },
+   "generation_coordinator": {
+     "name": "generation_coordinator",
+     "instructions": "You are the generation workflow coordinator. \nYour role is to orchestrate the card generation process and manage handoffs between specialized agents.\n\nResponsibilities:\n- Route requests to appropriate specialist agents\n- Coordinate parallel generation tasks\n- Manage workflow state and progress\n- Handle errors and fallback strategies\n- Optimize generation pipelines\n\nMake decisions based on content type, user preferences, and system load.",
+     "model": "{{ generation_coordinator_model }}",
+     "temperature": 0.3,
+     "timeout": 20.0
+   }
+ }
ankigen_core/agents/templates/judges.j2 ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+{# Judge agent configuration template #}
+{
+  "content_accuracy_judge": {
+    "name": "content_accuracy_judge",
+    "instructions": "You are a fact-checking and accuracy specialist.\nYour role is to verify the correctness and accuracy of flashcard content.\n\nEvaluate cards for:\n- Factual accuracy and up-to-date information\n- Proper use of terminology and definitions\n- Absence of misconceptions or errors\n- Appropriate level of detail for the target audience\n- Consistency with authoritative sources\n\nRate each card's accuracy and provide specific feedback on any issues found.",
+    "model": "{{ content_accuracy_judge_model }}",
+    "temperature": 0.3,
+    "timeout": 25.0
+  },
+  "pedagogical_judge": {
+    "name": "pedagogical_judge",
+    "instructions": "You are an educational assessment specialist.\nYour role is to evaluate flashcards for pedagogical effectiveness.\n\nAssess cards for:\n- Alignment with learning objectives\n- Appropriate difficulty level and cognitive load\n- Effective use of educational principles\n- Clear prerequisite knowledge requirements\n- Potential for promoting deep learning\n\nProvide detailed feedback on educational effectiveness and improvement suggestions.",
+    "model": "{{ pedagogical_judge_model }}",
+    "temperature": 0.4,
+    "timeout": 30.0
+  },
+  "clarity_judge": {
+    "name": "clarity_judge",
+    "instructions": "You are a communication and clarity specialist.\nYour role is to ensure flashcards are clear, unambiguous, and well-written.\n\nEvaluate cards for:\n- Question clarity and specificity\n- Answer completeness and coherence\n- Absence of ambiguity or confusion\n- Appropriate language level for target audience\n- Effective use of examples and explanations\n\nRate clarity and provide specific suggestions for improvement.",
+    "model": "{{ clarity_judge_model }}",
+    "temperature": 0.3,
+    "timeout": 20.0
+  },
+  "technical_judge": {
+    "name": "technical_judge",
+    "instructions": "You are a technical accuracy specialist for programming and technical content.\nYour role is to verify technical correctness and best practices.\n\nFor technical cards, check:\n- Code syntax and functionality\n- Best practices and conventions\n- Security considerations\n- Performance implications\n- Tool and framework accuracy\n\nProvide detailed technical feedback and corrections.",
+    "model": "{{ technical_judge_model }}",
+    "temperature": 0.2,
+    "timeout": 35.0
+  },
+  "completeness_judge": {
+    "name": "completeness_judge",
+    "instructions": "You are a completeness and quality assurance specialist.\nYour role is to ensure flashcards meet all requirements and quality standards.\n\nVerify cards have:\n- All required fields and metadata\n- Proper formatting and structure\n- Appropriate tags and categorization\n- Complete explanations and examples\n- Consistent quality across the set\n\nRate completeness and identify missing elements.",
+    "model": "{{ completeness_judge_model }}",
+    "temperature": 0.3,
+    "timeout": 20.0
+  },
+  "judge_coordinator": {
+    "name": "judge_coordinator",
+    "instructions": "You are the quality assurance coordinator.\nYour role is to orchestrate the judging process and synthesize feedback from specialist judges.\n\nResponsibilities:\n- Route cards to appropriate specialist judges\n- Coordinate parallel judging tasks\n- Synthesize feedback from multiple judges\n- Make final accept/reject/revise decisions\n- Manage judge workload and performance\n\nBalance speed with thoroughness in quality assessment.",
+    "model": "{{ judge_coordinator_model }}",
+    "temperature": 0.3,
+    "timeout": 15.0
+  }
+}
ankigen_core/agents/templates/prompts.j2 ADDED
@@ -0,0 +1,25 @@
+{# Prompt template configurations #}
+{
+  "subject_generation": {
+    "system_prompt": "You are an expert in {{ subject | default('general studies') }}. Generate {{ num_cards | default('5') }} flashcards covering key concepts for {{ difficulty | default('intermediate') }} level learners.",
+    "user_prompt_template": "Topic: {{ topic }}\nDifficulty: {{ difficulty | default('intermediate') }}\nPrerequisites: {{ prerequisites | default('none') }}\nFocus Areas: {{ focus_areas | default('core concepts') }}\n\nGenerate {{ num_cards | default('5') }} high-quality flashcards that help learners master this topic. Include examples, explanations, and common misconceptions where appropriate.",
+    "variables": {
+      "subject": "{{ subject | default('general') }}",
+      "num_cards": "{{ num_cards | default('5') }}",
+      "difficulty": "{{ difficulty | default('intermediate') }}",
+      "prerequisites": "{{ prerequisites | default('none') }}",
+      "topic": "{{ topic | default('general concepts') }}",
+      "focus_areas": "{{ focus_areas | default('core concepts') }}"
+    }
+  },
+  "quality_assessment": {
+    "system_prompt": "You are a quality assessment specialist for educational content. Evaluate flashcards for {{ assessment_type | default('overall quality') }}.",
+    "user_prompt_template": "Please evaluate the following flashcard for {{ assessment_type | default('quality') }}:\n\nQuestion: {{ question }}\nAnswer: {{ answer }}\n{% if explanation %}Explanation: {{ explanation }}{% endif %}\n\nProvide a rating (1-10) and specific feedback for improvement.",
+    "variables": {
+      "assessment_type": "{{ assessment_type | default('overall quality') }}",
+      "question": "{{ question }}",
+      "answer": "{{ answer }}",
+      "explanation": "{{ explanation | default('') }}"
+    }
+  }
+}
ankigen_core/agents/token_tracker.py ADDED
@@ -0,0 +1,210 @@
+"""
+Token usage tracking for OpenAI API calls using tiktoken.
+Provides accurate token counting and cost estimation.
+"""
+
+import tiktoken
+from typing import Dict, List, Any, Optional
+from dataclasses import dataclass, field
+from datetime import datetime
+
+from ankigen_core.logging import logger
+
+
+@dataclass
+class TokenUsage:
+    """Track token usage for a single request"""
+
+    prompt_tokens: int
+    completion_tokens: int
+    total_tokens: int
+    estimated_cost: Optional[float]
+    model: str
+    timestamp: datetime = field(default_factory=datetime.now)
+
+
+class TokenTracker:
+    """Track token usage across multiple requests"""
+
+    def __init__(self):
+        self.usage_history: List[TokenUsage] = []
+        self.total_cost = 0.0
+        self.total_tokens = 0
+
+    def count_tokens_for_messages(
+        self, messages: List[Dict[str, str]], model: str
+    ) -> int:
+        try:
+            encoding = tiktoken.encoding_for_model(model)
+        except KeyError:
+            encoding = tiktoken.get_encoding("o200k_base")
+
+        tokens_per_message = 3
+        tokens_per_name = 1
+
+        num_tokens = 0
+        for message in messages:
+            num_tokens += tokens_per_message
+            for key, value in message.items():
+                num_tokens += len(encoding.encode(str(value)))
+                if key == "name":
+                    num_tokens += tokens_per_name
+
+        num_tokens += 3
+        return num_tokens
+
+    def count_tokens_for_text(self, text: str, model: str) -> int:
+        try:
+            encoding = tiktoken.encoding_for_model(model)
+        except KeyError:
+            encoding = tiktoken.get_encoding("o200k_base")
+
+        return len(encoding.encode(text))
+
+    def estimate_cost(
+        self, prompt_tokens: int, completion_tokens: int, model: str
+    ) -> Optional[float]:
+        return None
+
+    def track_usage_from_response(
+        self, response_data, model: str
+    ) -> Optional[TokenUsage]:
+        try:
+            if hasattr(response_data, "usage"):
+                usage = response_data.usage
+                prompt_tokens = usage.prompt_tokens
+                completion_tokens = usage.completion_tokens
+
+                actual_cost = None
+                if hasattr(usage, "total_cost"):
+                    actual_cost = usage.total_cost
+                elif hasattr(usage, "cost"):
+                    actual_cost = usage.cost
+
+                return self.track_usage(
+                    prompt_tokens, completion_tokens, model, actual_cost
+                )
+            return None
+        except Exception as e:
+            logger.error(f"Failed to track usage from response: {e}")
+            return None
+
+    def track_usage(
+        self,
+        prompt_tokens: int,
+        completion_tokens: int,
+        model: str,
+        actual_cost: Optional[float] = None,
+    ) -> TokenUsage:
+        total_tokens = prompt_tokens + completion_tokens
+
+        if actual_cost is not None:
+            final_cost = actual_cost
+        else:
+            final_cost = self.estimate_cost(prompt_tokens, completion_tokens, model)
+
+        usage = TokenUsage(
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=total_tokens,
+            estimated_cost=final_cost,
+            model=model,
+        )
+
+        self.usage_history.append(usage)
+        if final_cost:
+            self.total_cost += final_cost
+        self.total_tokens += total_tokens
+
+        logger.info(
+            f"💰 Token usage - Model: {model}, Prompt: {prompt_tokens}, Completion: {completion_tokens}, Cost: ${final_cost:.4f}"
+            if final_cost
+            else f"💰 Token usage - Model: {model}, Prompt: {prompt_tokens}, Completion: {completion_tokens}"
+        )
+
+        return usage
+
+    def get_session_summary(self) -> Dict[str, Any]:
+        if not self.usage_history:
+            return {
+                "total_requests": 0,
+                "total_tokens": 0,
+                "total_cost": 0.0,
+                "by_model": {},
+            }
+
+        by_model = {}
+        for usage in self.usage_history:
+            if usage.model not in by_model:
+                by_model[usage.model] = {"requests": 0, "tokens": 0, "cost": 0.0}
+            by_model[usage.model]["requests"] += 1
+            by_model[usage.model]["tokens"] += usage.total_tokens
+            if usage.estimated_cost:
+                by_model[usage.model]["cost"] += usage.estimated_cost
+
+        return {
+            "total_requests": len(self.usage_history),
+            "total_tokens": self.total_tokens,
+            "total_cost": self.total_cost,
+            "by_model": by_model,
+        }
+
+    def get_session_usage(self) -> Dict[str, Any]:
+        return self.get_session_summary()
+
+    def reset_session(self):
+        self.usage_history.clear()
+        self.total_cost = 0.0
+        self.total_tokens = 0
+        logger.info("🔄 Token usage tracking reset")
+
+    def track_usage_from_agents_sdk(
+        self, usage_dict: Dict[str, Any], model: str
+    ) -> Optional[TokenUsage]:
+        """Track usage from OpenAI Agents SDK usage format"""
+        try:
+            if not usage_dict or usage_dict.get("total_tokens", 0) == 0:
+                return None
+
+            prompt_tokens = usage_dict.get("input_tokens", 0)
+            completion_tokens = usage_dict.get("output_tokens", 0)
+
+            return self.track_usage(prompt_tokens, completion_tokens, model)
+        except Exception as e:
+            logger.error(f"Failed to track usage from agents SDK: {e}")
+            return None
+
+
+# Global token tracker instance
+_global_tracker = TokenTracker()
+
+
+def get_token_tracker() -> TokenTracker:
+    return _global_tracker
+
+
+def track_agent_usage(
+    prompt_text: str,
+    completion_text: str,
+    model: str,
+    actual_cost: Optional[float] = None,
+) -> TokenUsage:
+    tracker = get_token_tracker()
+
+    prompt_tokens = tracker.count_tokens_for_text(prompt_text, model)
+    completion_tokens = tracker.count_tokens_for_text(completion_text, model)
+
+    return tracker.track_usage(prompt_tokens, completion_tokens, model, actual_cost)
+
+
+def track_usage_from_openai_response(response_data, model: str) -> Optional[TokenUsage]:
+    tracker = get_token_tracker()
+    return tracker.track_usage_from_response(response_data, model)
+
+
+def track_usage_from_agents_sdk(
+    usage_dict: Dict[str, Any], model: str
+) -> Optional[TokenUsage]:
+    """Track usage from OpenAI Agents SDK usage format"""
+    tracker = get_token_tracker()
+    return tracker.track_usage_from_agents_sdk(usage_dict, model)
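Because `_global_tracker` is created at import time, every caller that goes through `get_token_tracker()` shares one session. A usage sketch against the API defined above (the model name is illustrative; unknown models fall back to the `o200k_base` encoding):

from ankigen_core.agents.token_tracker import get_token_tracker, track_agent_usage

tracker = get_token_tracker()

# Local token counting via tiktoken - no API call is made.
n = tracker.count_tokens_for_text("What is a B-tree?", "gpt-4.1")

# Record one request; cost stays None because estimate_cost() is a stub.
track_agent_usage("prompt text", "completion text", "gpt-4.1")

summary = tracker.get_session_summary()
print(summary["total_requests"], summary["total_tokens"], summary["by_model"])

tracker.reset_session()  # start a fresh session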
ankigen_core/card_generator.py CHANGED
@@ -20,30 +20,29 @@ from ankigen_core.models import (
     CardBack,
 ) # Import necessary Pydantic models
 
+# Import agent system - required
+from ankigen_core.agents.integration import AgentOrchestrator
+from agents import set_tracing_disabled
+
 logger = get_logger()
 
-# Import agent system
-try:
-    from ankigen_core.agents.integration import AgentOrchestrator
-    from ankigen_core.agents.feature_flags import get_feature_flags
-    AGENTS_AVAILABLE = True
-    logger.info("Agent system loaded successfully")
-except ImportError:
-    # Graceful fallback if agent system not available
-    AGENTS_AVAILABLE = False
-    logger.info("Agent system not available, using legacy generation only")
+# Disable tracing to prevent metrics persistence issues
+set_tracing_disabled(True)
+
+AGENTS_AVAILABLE = True
+logger.info("Agent system loaded successfully")
 
 # --- Constants --- (Moved from app.py)
 AVAILABLE_MODELS = [
     {
         "value": "gpt-4.1",
-        "label": "gpt-4.1 (Best Quality)",
-        "description": "Highest quality, slower generation",
+        "label": "GPT-4.1 (Best Quality)",
+        "description": "Highest quality, large context window",
     },
     {
         "value": "gpt-4.1-nano",
-        "label": "gpt-4.1 Nano (Fast & Efficient)",
-        "description": "Optimized for speed and lower cost",
+        "label": "GPT-4.1 Nano (Ultra Fast)",
+        "description": "Ultra-fast and cost-effective",
     },
 ]
 
@@ -256,65 +255,118 @@ async def orchestrate_card_generation( # MODIFIED: Added async
 
     # --- AGENT SYSTEM INTEGRATION ---
     if AGENTS_AVAILABLE:
-        feature_flags = get_feature_flags()
-        if feature_flags.should_use_agents():
-            logger.info("🤖 Using agent system for card generation")
-            try:
-                # Initialize agent orchestrator
-                orchestrator = AgentOrchestrator(client_manager)
-                await orchestrator.initialize(api_key_input)
-
-                # Map generation mode to subject
-                agent_subject = "general"
-                if generation_mode == "subject":
-                    agent_subject = subject if subject else "general"
-                elif generation_mode == "path":
-                    agent_subject = "curriculum_design"
-                elif generation_mode == "text":
-                    agent_subject = "content_analysis"
-
-                # Calculate total cards needed
-                total_cards_needed = topic_number * cards_per_topic
-
-                # Prepare context for text mode
-                context = {}
-                if generation_mode == "text" and source_text:
-                    context["source_text"] = source_text
-
-                # Generate cards with agents
-                agent_cards, agent_metadata = await orchestrator.generate_cards_with_agents(
-                    topic=subject if subject else "Mixed Topics",
-                    subject=agent_subject,
-                    num_cards=total_cards_needed,
-                    difficulty="intermediate",  # Could be made configurable
-                    enable_quality_pipeline=True,
-                    context=context
-                )
-
-                # Convert agent cards to dataframe format
-                if agent_cards:
-                    formatted_cards = format_cards_for_dataframe(
-                        agent_cards,
-                        topic_name=f"Agent Generated - {subject}" if subject else "Agent Generated",
-                        start_index=1
-                    )
-
-                    output_df = pd.DataFrame(formatted_cards, columns=get_dataframe_columns())
-                    total_cards_message = f"<div><b>🤖 Agent Generated Cards:</b> <span id='total-cards-count'>{len(output_df)}</span></div>"
-
-                    logger.info(f"Agent system generated {len(output_df)} cards successfully")
-                    return output_df, total_cards_message
-                else:
-                    logger.warning("Agent system returned no cards, falling back to legacy")
-                    gr.Info("🔄 Agent system returned no cards, using legacy generation...")
-
-            except Exception as e:
-                logger.error(f"Agent system failed: {e}, falling back to legacy generation")
-                gr.Warning(f"🔄 Agent system error: {str(e)}, using legacy generation...")
-                # Continue to legacy generation below
-
-    # --- LEGACY SYSTEM INITIALIZATION AND VALIDATION ---
-    logger.info("Using legacy card generation system")
+        logger.info("🤖 Using agent system for card generation")
+        try:
+            # Initialize token tracker
+            from ankigen_core.agents.token_tracker import get_token_tracker
+
+            token_tracker = get_token_tracker()
+
+            # Initialize agent orchestrator with the actual model from UI
+            # Initialize orchestrator with model overrides
+            orchestrator = AgentOrchestrator(client_manager)
+
+            # Set model overrides for all agents
+            logger.info(f"Overriding all agent models to use: {model_name}")
+            model_overrides = {
+                "generation_coordinator": model_name,
+                "subject_expert": model_name,
+                "pedagogical_agent": model_name,
+                "content_structuring": model_name,
+                "enhancement_agent": model_name,
+                "revision_agent": model_name,
+                "content_accuracy_judge": model_name,
+                "pedagogical_judge": model_name,
+                "clarity_judge": model_name,
+                "technical_judge": model_name,
+                "completeness_judge": model_name,
+                "judge_coordinator": model_name,
+            }
+
+            # Initialize with model overrides
+            await orchestrator.initialize(api_key_input, model_overrides)
+
+            # Map generation mode to subject
+            agent_subject = "general"
+            if generation_mode == "subject":
+                agent_subject = subject if subject else "general"
+            elif generation_mode == "path":
+                agent_subject = "curriculum_design"
+            elif generation_mode == "text":
+                agent_subject = "content_analysis"
+
+            # Calculate total cards needed
+            total_cards_needed = topic_number * cards_per_topic
+
+            # Prepare context for text mode
+            context = {}
+            if generation_mode == "text" and source_text:
+                context["source_text"] = source_text
+
+            # Generate cards with agents using the actual model from UI
+            agent_cards, agent_metadata = await orchestrator.generate_cards_with_agents(
+                topic=subject if subject else "Mixed Topics",
+                subject=agent_subject,
+                num_cards=total_cards_needed,
+                difficulty="intermediate",  # Could be made configurable
+                enable_quality_pipeline=True,
+                context=context,
+            )
+
+            # Get token usage from session
+            try:
+                # Try both method names for compatibility
+                if hasattr(token_tracker, "get_session_summary"):
+                    token_usage = token_tracker.get_session_summary()
+                elif hasattr(token_tracker, "get_session_usage"):
+                    token_usage = token_tracker.get_session_usage()
+                else:
+                    raise AttributeError("TokenTracker has no session summary method")
+
+                token_usage_html = f"<div style='margin-top: 8px;'><b>Token Usage:</b> {token_usage['total_tokens']} tokens</div>"
+            except Exception as e:
+                logger.error(f"Token usage collection failed: {e}")
+                token_usage_html = "<div style='margin-top: 8px;'><b>Token Usage:</b> No usage data</div>"
+
+            # Convert agent cards to dataframe format
+            if agent_cards:
+                formatted_cards = format_cards_for_dataframe(
+                    agent_cards,
+                    topic_name=f"Agent Generated - {subject}"
+                    if subject
+                    else "Agent Generated",
+                    start_index=1,
+                )
+
+                output_df = pd.DataFrame(
+                    formatted_cards, columns=get_dataframe_columns()
+                )
+                total_cards_message = f"<div><b>🤖 Agent Generated Cards:</b> <span id='total-cards-count'>{len(output_df)}</span></div>"
+
+                logger.info(
+                    f"Agent system generated {len(output_df)} cards successfully"
+                )
+                return output_df, total_cards_message, token_usage_html
+            else:
+                logger.error("Agent system returned no cards")
+                gr.Error("🤖 Agent system returned no cards")
+                return (
+                    pd.DataFrame(columns=get_dataframe_columns()),
+                    "Agent system returned no cards.",
+                    "",
+                )
+
+        except Exception as e:
+            logger.error(f"Agent system failed: {e}")
+            gr.Error(f"🤖 Agent system error: {str(e)}")
+            return (
+                pd.DataFrame(columns=get_dataframe_columns()),
+                f"Agent system error: {str(e)}",
+                "",
+            )
+
+    # This should never be reached since agents are required
+    logger.error("Agent system not available but required")
     if not api_key_input:
         logger.warning("No API key provided to orchestrator")
         gr.Error("OpenAI API key is required")
@@ -379,7 +431,7 @@ async def orchestrate_card_generation( # MODIFIED: Added async
     )
 
     topics_for_generation = []
-    max(1, topic_number // len(individual_subjects))  # Distribute topic_number
+    # max(1, topic_number // len(individual_subjects))  # Distribute topic_number
 
     for ind_subject in individual_subjects:
         # For single/multiple subjects, we might generate sub-topics or just use the subject as a topic
@@ -1023,3 +1075,12 @@ def generate_cards_from_crawled_content(
     }
     data_for_dataframe.append(card_dict)
     return data_for_dataframe
+
+
+def generate_token_usage_html(token_usage=None):
+    """Generate HTML for token usage display"""
+    if token_usage and isinstance(token_usage, dict):
+        total_tokens = token_usage.get("total_tokens", 0)
+        return f"<div style='margin-top: 8px;'><b>Token Usage:</b> {total_tokens} tokens</div>"
+    else:
+        return "<div style='margin-top: 8px;'><b>Token Usage:</b> No usage data</div>"
ankigen_core/exporters.py CHANGED
@@ -80,35 +80,35 @@ BASIC_MODEL = genanki.Model(
         </div>
     </div>
     <hr>
-
+
     <div class=\"answer-section\">
         <h3>Answer</h3>
         <div class=\"answer\">{{Answer}}</div>
     </div>
-
+
     <div class=\"explanation-section\">
         <h3>Explanation</h3>
         <div class=\"explanation-text\">{{Explanation}}</div>
     </div>
-
+
     <div class=\"example-section\">
         <h3>Example</h3>
-        <div class=\"example-text\">{{Example}}</div>
+        <div class=\"example-text\">{{Example}}</div>
         <!-- Example field might contain pre/code or plain text -->
        <!-- Handled by how HTML is put into the Example field -->
     </div>
-
+
     <div class=\"metadata-section\">
         <div class=\"learning-outcomes\">
             <h3>Learning Outcomes</h3>
             <div>{{Learning_Outcomes}}</div>
         </div>
-
+
         <div class=\"misconceptions\">
             <h3>Common Misconceptions - Debunked</h3>
             <div>{{Common_Misconceptions}}</div>
         </div>
-
+
         <div class=\"difficulty\">
             <h3>Difficulty Level</h3>
             <div>{{Difficulty}}</div>
@@ -132,20 +132,20 @@ BASIC_MODEL = genanki.Model(
     padding: 20px;
     background: #ffffff;
 }
-
+
 @media (max-width: 768px) {
     .card {
         font-size: 14px;
         padding: 15px;
     }
 }
-
+
 /* Question side */
 .question-side {
     position: relative;
     min-height: 200px;
 }
-
+
 .difficulty-indicator {
     position: absolute;
     top: 10px;
@@ -154,30 +154,30 @@ BASIC_MODEL = genanki.Model(
     height: 10px;
     border-radius: 50%;
 }
-
+
 .difficulty-indicator.beginner { background: #4ade80; }
 .difficulty-indicator.intermediate { background: #fbbf24; }
 .difficulty-indicator.advanced { background: #ef4444; }
-
+
 .question {
     font-size: 1.3em;
     font-weight: 600;
     color: #2563eb;
     margin-bottom: 1.5em;
 }
-
+
 .prerequisites {
     margin-top: 1em;
     font-size: 0.9em;
     color: #666;
 }
-
+
 .prerequisites-toggle {
     color: #2563eb;
     cursor: pointer;
     text-decoration: underline;
 }
-
+
 .prerequisites-content {
     display: none;
     margin-top: 0.5em;
@@ -185,11 +185,11 @@ BASIC_MODEL = genanki.Model(
     background: #f8fafc;
     border-radius: 4px;
 }
-
+
 .prerequisites.show .prerequisites-content {
     display: block;
 }
-
+
 /* Answer side */
 .answer-section,
 .explanation-section,
@@ -199,17 +199,17 @@ BASIC_MODEL = genanki.Model(
     border-radius: 8px;
     box-shadow: 0 2px 4px rgba(0,0,0,0.05);
 }
-
+
 .answer-section {
     background: #f0f9ff;
     border-left: 4px solid #2563eb;
 }
-
+
 .explanation-section {
     background: #f0fdf4;
     border-left: 4px solid #4ade80;
 }
-
+
 .example-section {
     background: #fefce8; /* Light yellow */
     border-left: 4px solid #facc15; /* Yellow */
@@ -228,7 +228,7 @@ BASIC_MODEL = genanki.Model(
 .example-section code {
     font-family: 'Consolas', 'Monaco', 'Menlo', monospace;
 }
-
+
 .metadata-section {
     margin-top: 2em;
     padding-top: 1em;
@@ -236,13 +236,13 @@ BASIC_MODEL = genanki.Model(
     font-size: 0.9em;
     color: #4b5563; /* Cool gray */
 }
-
+
 .metadata-section h3 {
     font-size: 1em;
     color: #1f2937; /* Darker gray for headings */
     margin-bottom: 0.5em;
 }
-
+
 .metadata-section > div {
     margin-bottom: 0.8em;
 }
@@ -254,7 +254,7 @@ BASIC_MODEL = genanki.Model(
 .source-url a:hover {
     text-decoration: underline;
 }
-
+
 /* Styles for cloze deletion cards */
 .cloze {
     font-weight: bold;
@@ -263,14 +263,14 @@ BASIC_MODEL = genanki.Model(
 .nightMode .cloze {
     color: lightblue;
 }
-
+
 /* General utility */
 hr {
     border: none;
     border-top: 1px dashed #cbd5e1; /* Light dashed line */
     margin: 1.5em 0;
 }
-
+
 /* Rich text field styling (if Anki adds classes for these) */
 .field ul, .field ol {
     margin-left: 1.5em;
@@ -289,13 +289,13 @@ BASIC_MODEL = genanki.Model(
         margin: 1em 0;
     }
 }
-
+
 /* Animations */
 @keyframes fadeIn {
     from { opacity: 0; }
     to { opacity: 1; }
 }
-
+
 .card {
     animation: fadeIn 0.3s ease-in-out;
 }
@@ -350,35 +350,35 @@ CLOZE_MODEL = genanki.Model(
         </div>
     </div>
     <hr>
-
+
     {{#Back Extra}}
     <div class=\"back-extra-section\">
         <h3>Additional Information</h3>
         <div class=\"back-extra-text\">{{Back Extra}}</div>
     </div>
     {{/Back Extra}}
-
+
     <div class=\"explanation-section\">
         <h3>Explanation</h3>
         <div class=\"explanation-text\">{{Explanation}}</div>
     </div>
-
+
     <div class=\"example-section\">
         <h3>Example</h3>
         <div class=\"example-text\">{{Example}}</div>
     </div>
-
+
     <div class=\"metadata-section\">
         <div class=\"learning-outcomes\">
             <h3>Learning Outcomes</h3>
             <div>{{Learning_Outcomes}}</div>
         </div>
-
+
         <div class=\"misconceptions\">
             <h3>Common Misconceptions - Debunked</h3>
             <div>{{Common_Misconceptions}}</div>
         </div>
-
+
         <div class=\"difficulty\">
             <h3>Difficulty Level</h3>
             <div>{{Difficulty}}</div>
@@ -402,20 +402,20 @@ CLOZE_MODEL = genanki.Model(
     padding: 20px;
     background: #ffffff;
 }
-
+
 @media (max-width: 768px) {
     .card {
         font-size: 14px;
         padding: 15px;
     }
 }
-
+
 /* Question side */
 .question-side {
     position: relative;
     min-height: 200px;
 }
-
+
 .difficulty-indicator {
     position: absolute;
     top: 10px;
@@ -424,30 +424,30 @@ CLOZE_MODEL = genanki.Model(
     height: 10px;
     border-radius: 50%;
 }
-
+
 .difficulty-indicator.beginner { background: #4ade80; }
 .difficulty-indicator.intermediate { background: #fbbf24; }
 .difficulty-indicator.advanced { background: #ef4444; }
-
+
 .question {
     font-size: 1.3em;
     font-weight: 600;
     color: #2563eb;
     margin-bottom: 1.5em;
 }
-
+
 .prerequisites {
     margin-top: 1em;
     font-size: 0.9em;
     color: #666;
 }
-
+
 .prerequisites-toggle {
     color: #2563eb;
     cursor: pointer;
     text-decoration: underline;
 }
-
+
 .prerequisites-content {
     display: none;
     margin-top: 0.5em;
@@ -455,11 +455,11 @@ CLOZE_MODEL = genanki.Model(
     background: #f8fafc;
     border-radius: 4px;
 }
-
+
 .prerequisites.show .prerequisites-content {
     display: block;
 }
-
+
 /* Answer side */
 .answer-section,
 .explanation-section,
@@ -469,7 +469,7 @@ CLOZE_MODEL = genanki.Model(
     border-radius: 8px;
     box-shadow: 0 2px 4px rgba(0,0,0,0.05);
 }
-
+
 .answer-section { /* Shared with question for cloze, but can be general */
     background: #f0f9ff;
     border-left: 4px solid #2563eb;
@@ -483,7 +483,7 @@ CLOZE_MODEL = genanki.Model(
     border-radius: 8px;
     box-shadow: 0 2px 4px rgba(0,0,0,0.05);
 }
-
+
 .explanation-section {
     background: #f0fdf4;
     border-left: 4px solid #4ade80;
@@ -507,7 +507,7 @@ CLOZE_MODEL = genanki.Model(
 .example-section code {
     font-family: 'Consolas', 'Monaco', 'Menlo', monospace;
 }
-
+
 .metadata-section {
     margin-top: 2em;
     padding-top: 1em;
@@ -515,13 +515,13 @@ CLOZE_MODEL = genanki.Model(
     font-size: 0.9em;
     color: #4b5563; /* Cool gray */
 }
-
+
 .metadata-section h3 {
     font-size: 1em;
     color: #1f2937; /* Darker gray for headings */
     margin-bottom: 0.5em;
 }
-
+
 .metadata-section > div {
     margin-bottom: 0.8em;
 }
@@ -533,7 +533,7 @@ CLOZE_MODEL = genanki.Model(
 .source-url a:hover {
     text-decoration: underline;
 }
-
+
 /* Styles for cloze deletion cards */
 .cloze {
     font-weight: bold;
@@ -542,14 +542,14 @@ CLOZE_MODEL = genanki.Model(
 .nightMode .cloze {
     color: lightblue;
 }
-
+
 /* General utility */
 hr {
     border: none;
     border-top: 1px dashed #cbd5e1; /* Light dashed line */
     margin: 1.5em 0;
 }
-
+
 /* Rich text field styling (if Anki adds classes for these) */
 .field ul, .field ol {
     margin-left: 1.5em;
ankigen_core/ui_logic.py CHANGED
@@ -15,7 +15,6 @@ import asyncio
 from ankigen_core.crawler import WebCrawler
 from ankigen_core.llm_interface import (
     OpenAIClientManager,
-    process_crawled_pages,
 )
 from ankigen_core.card_generator import (
     generate_cards_from_crawled_content,
@@ -37,12 +36,10 @@ from ankigen_core.models import (
 )
 
 # Import agent system for web crawling
-try:
-    from ankigen_core.agents.integration import AgentOrchestrator
-    from ankigen_core.agents.feature_flags import get_feature_flags
-    AGENTS_AVAILABLE_UI = True
-except ImportError:
-    AGENTS_AVAILABLE_UI = False
+# Agent system is required for web crawling
+from ankigen_core.agents.integration import AgentOrchestrator
+
+AGENTS_AVAILABLE_UI = True
 # --- End moved imports ---
 
 # Get an instance of the logger for this module
@@ -311,6 +308,7 @@ def create_crawler_main_mode_elements() -> (
         label="AI Model for Content Processing",  # Clarified label
         value=default_model_value_crawler,
         elem_id="crawler_model_dropdown",
+        allow_custom_value=True,
     )
     ui_components.append(model_dropdown)
 
@@ -544,120 +542,59 @@ async def crawl_and_generate(
     )
 
     # --- AGENT SYSTEM INTEGRATION FOR WEB CRAWLING ---
-    if AGENTS_AVAILABLE_UI:
-        feature_flags = get_feature_flags()
-        if feature_flags.should_use_agents():
-            crawler_ui_logger.info("🤖 Using agent system for web crawling card generation")
-            try:
-                # Initialize agent orchestrator
-                orchestrator = AgentOrchestrator(client_manager)
-                await orchestrator.initialize("dummy-key")  # Key already in client_manager
-
-                # Combine all crawled content into a single context
-                combined_content = "\n\n--- PAGE BREAK ---\n\n".join([
-                    f"URL: {page.url}\nTitle: {page.title}\nContent: {page.text_content[:2000]}..."
-                    for page in crawled_pages[:10]  # Limit to first 10 pages to avoid token limits
-                ])
-
-                context = {
-                    "source_text": combined_content,
-                    "crawl_source": url,
-                    "pages_crawled": len(crawled_pages)
-                }
-
-                progress(0.6, desc="🤖 Processing with agent system...")
-
-                # Generate cards with agents
-                agent_cards, agent_metadata = await orchestrator.generate_cards_with_agents(
-                    topic=f"Content from {url}",
-                    subject="web_content",
-                    num_cards=min(len(crawled_pages) * 3, 50),  # 3 cards per page, max 50
-                    difficulty="intermediate",
-                    enable_quality_pipeline=True,
-                    context=context
-                )
-
-                if agent_cards:
-                    progress(0.9, desc=f"🤖 Agent system generated {len(agent_cards)} cards")
-
-                    cards_for_dataframe_export = generate_cards_from_crawled_content(agent_cards)
-
-                    final_message = f"🤖 Agent system processed content from {len(crawled_pages)} pages. Generated {len(agent_cards)} high-quality cards."
-                    progress(1.0, desc=final_message)
-
-                    return (
-                        final_message,
-                        cards_for_dataframe_export,
-                        agent_cards,
-                    )
-                else:
-                    crawler_ui_logger.warning("Agent system returned no cards for web content, falling back to legacy")
-                    progress(0.5, desc="🔄 Agent system returned no cards, using legacy processing...")
-
-            except Exception as e:
-                crawler_ui_logger.error(f"Agent system failed for web crawling: {e}, falling back to legacy")
-                progress(0.5, desc=f"🔄 Agent error: {str(e)}, using legacy processing...")
-
-    # --- LEGACY WEB PROCESSING ---
-    crawler_ui_logger.info("Using legacy LLM processing for web content")
-    openai_client = client_manager.get_client()
-    processed_llm_pages = 0
-
-    def llm_progress_callback(completed_count: int, total_count: int):
-        nonlocal processed_llm_pages
-        processed_llm_pages = completed_count
-        progress(
-            0.5 + (completed_count / total_count) * 0.4,
-            desc=f"Processing content: {completed_count}/{total_count} pages processed by LLM.",
-        )
-
-    crawler_ui_logger.info(
-        f"Starting LLM processing for {len(crawled_pages)} pages..."
-    )
-    progress(
-        0.55, desc=f"Processing {len(crawled_pages)} pages with LLM ({model})..."
-    )
-    all_cards = await process_crawled_pages(  # This now returns List[Card]
-        openai_client=openai_client,
-        pages=crawled_pages,
-        model=model,
-        max_prompt_content_tokens=6000,
-        max_concurrent_requests=5,
-        custom_system_prompt=custom_system_prompt
-        if custom_system_prompt and custom_system_prompt.strip()
-        else None,
-        custom_user_prompt_template=custom_user_prompt_template
-        if custom_user_prompt_template and custom_user_prompt_template.strip()
-        else None,
-        progress_callback=llm_progress_callback,
-    )
-    crawler_ui_logger.info(
-        f"LLM processing finished. Generated {len(all_cards)} Card objects."  # Changed AnkiCardData to Card
-    )
-    progress(
-        0.9,
-        desc=f"LLM processing finished. Generated {len(all_cards)} Anki cards.",
-    )
-
-    if not all_cards:
-        progress(
-            1.0, desc="LLM processing complete, but no Anki cards were generated."
-        )
-        return (
-            "LLM processing complete, but no Anki cards were generated.",
-            pd.DataFrame().to_dict(orient="records"),  # Empty DataFrame data
-            [],  # Empty list of raw cards
-        )
-
-    cards_for_dataframe_export = generate_cards_from_crawled_content(
-        all_cards
-    )  # Expects List[Card]
-    if not cards_for_dataframe_export:
-        progress(
-            1.0, desc="Card processing (formatting, etc.) resulted in no cards."
-        )
-        return (
-            "Card processing resulted in no cards.",
+    crawler_ui_logger.info("🤖 Using agent system for web crawling card generation")
+
+    # Initialize agent orchestrator
+    orchestrator = AgentOrchestrator(client_manager)
+    await orchestrator.initialize("dummy-key")  # Key already in client_manager
+
+    # Combine all crawled content into a single context
+    combined_content = "\n\n--- PAGE BREAK ---\n\n".join(
+        [
+            f"URL: {page.url}\nTitle: {page.title}\nContent: {page.text_content[:2000]}..."
+            for page in crawled_pages[
+                :10
+            ]  # Limit to first 10 pages to avoid token limits
+        ]
+    )
+
+    context = {
+        "source_text": combined_content,
+        "crawl_source": url,
+        "pages_crawled": len(crawled_pages),
+    }
+
+    progress(0.6, desc="🤖 Processing with agent system...")
+
+    # Generate cards with agents
+    agent_cards, agent_metadata = await orchestrator.generate_cards_with_agents(
+        topic=f"Content from {url}",
+        subject="web_content",
+        num_cards=min(len(crawled_pages) * 3, 50),  # 3 cards per page, max 50
+        difficulty="intermediate",
+        enable_quality_pipeline=True,
+        context=context,
+    )
+
+    if agent_cards:
+        progress(0.9, desc=f"🤖 Agent system generated {len(agent_cards)} cards")
+
+        cards_for_dataframe_export = generate_cards_from_crawled_content(
+            agent_cards
+        )
+
+        final_message = f"🤖 Agent system processed content from {len(crawled_pages)} pages. Generated {len(agent_cards)} high-quality cards."
+        progress(1.0, desc=final_message)
+
+        return (
+            final_message,
+            cards_for_dataframe_export,
+            agent_cards,
+        )
+    else:
+        progress(1.0, desc="🤖 Agent system returned no cards")
+        return (
+            "Agent system returned no cards",
             pd.DataFrame().to_dict(orient="records"),
             [],
         )
@@ -692,8 +629,8 @@ async def crawl_and_generate(
     return (
         final_message,
         cards_for_dataframe_export,
-        all_cards,
-    )  # all_cards is List[Card]
+        agent_cards,
+    )  # agent_cards is List[Card]
 
 
 # --- Card Preview and Editing Utilities (Task 13.3) ---
app.py CHANGED
@@ -1,50 +1,45 @@
 # Standard library imports
 import os
-from pathlib import Path  # Potentially for favicon_path
-from datetime import datetime
 import re
-import asyncio
 
 import gradio as gr
 import pandas as pd
 
-from ankigen_core.utils import (
-    get_logger,
-    ResponseCache,
-)  # fetch_webpage_text is used by card_generator
-
-from ankigen_core.llm_interface import (
-    OpenAIClientManager,
-)  # structured_output_completion is internal to core modules
 from ankigen_core.card_generator import (
-    orchestrate_card_generation,
     AVAILABLE_MODELS,
 )  # GENERATION_MODES is internal to card_generator
-from ankigen_core.learning_path import analyze_learning_path
 from ankigen_core.exporters import (
-    export_dataframe_to_csv,
     export_dataframe_to_apkg,
 )  # Anki models (BASIC_MODEL, CLOZE_MODEL) are internal to exporters
 from ankigen_core.ui_logic import (
     update_mode_visibility,
     use_selected_subjects,
-    create_crawler_main_mode_elements,
-    crawl_and_generate,
 )
 
 # --- Initialization ---
 logger = get_logger()
 response_cache = ResponseCache()  # Initialize cache
 client_manager = OpenAIClientManager()  # Initialize client manager
 
-# Check agent system availability
-try:
-    from ankigen_core.agents.feature_flags import get_feature_flags
-    AGENTS_AVAILABLE_APP = True
-    logger.info("Agent system is available")
-except ImportError:
-    AGENTS_AVAILABLE_APP = False
-    logger.info("Agent system not available, using legacy generation only")
 
 js_storage = """
 async () => {
@@ -61,13 +56,17 @@ async () => {
 }
 """
 
-custom_theme = gr.themes.Soft().set(
-    body_background_fill="*background_fill_secondary",
-    block_background_fill="*background_fill_primary",
-    block_border_width="0",
-    button_primary_background_fill="*primary_500",
-    button_primary_text_color="white",
-)
 
 # --- Example Data for Initialization ---
 example_data = pd.DataFrame(
@@ -129,15 +128,16 @@ def get_recent_logs(logger_name="ankigen") -> str:
         log_file = os.path.join(log_dir, f"{logger_name}_{timestamp}.log")
 
         if os.path.exists(log_file):
-            with open(log_file, "r") as f:
                 lines = f.readlines()
                 # Display last N lines, e.g., 100
                 return "\n".join(lines[-100:])  # Ensured this is standard newline
         return f"Log file for today ({log_file}) not found or is empty."
     except Exception as e:
-        # Use the main app logger to log this error, but don't let it crash the UI function
         logger.error(f"Error reading logs: {e}", exc_info=True)
-        return f"Error reading logs: {str(e)}"
 
 
 def create_ankigen_interface():
@@ -154,26 +154,26 @@ def create_ankigen_interface():
     .export-group > .gradio-group { margin-bottom: 0 !important; padding-bottom: 5px !important; }
 
     /* REMOVING CSS previously intended for DataFrame readability to ensure plain text */
-    /*
-    .explanation-text {
-        background: #f0fdf4;
-        border-left: 3px solid #4ade80;
        padding: 0.5em;
        margin-bottom: 0.5em;
        border-radius: 4px;
    }
-    .example-text-plain {
-        background: #fff7ed;
-        border-left: 3px solid #f97316;
        padding: 0.5em;
        margin-bottom: 0.5em;
        border-radius: 4px;
    }
-    pre code {
        display: block;
        padding: 0.8em;
-        background: #1e293b;
-        color: #e2e8f0;
        border-radius: 4px;
        overflow-x: auto;
        font-family: 'Fira Code', 'Consolas', monospace;
@@ -187,25 +187,6 @@ def create_ankigen_interface():
         with gr.Column(elem_classes="contain"):
             gr.Markdown("# 📚 AnkiGen - Advanced Anki Card Generator")
             gr.Markdown("#### Generate comprehensive Anki flashcards using AI.")
-
-            # Agent system status indicator
-            if AGENTS_AVAILABLE_APP:
-                try:
-                    feature_flags = get_feature_flags()
-                    if feature_flags.should_use_agents():
-                        agent_status_emoji = "🤖"
-                        agent_status_text = "**Agent System Active** - Enhanced quality with multi-agent pipeline"
-                    else:
-                        agent_status_emoji = "🔧"
-                        agent_status_text = "**Legacy Mode** - Set `ANKIGEN_AGENT_MODE=agent_only` to enable agents"
-                except:
-                    agent_status_emoji = "⚙️"
-                    agent_status_text = "**Agent System Available** - Configure environment variables to activate"
-            else:
-                agent_status_emoji = "💡"
-                agent_status_text = "**Legacy Mode** - Agent system not installed"
-
-            gr.Markdown(f"{agent_status_emoji} {agent_status_text}")
 
             with gr.Accordion("Configuration Settings", open=True):
                 with gr.Row():
@@ -233,7 +214,8 @@ def create_ankigen_interface():
                         lines=5,
                     )
                     analyze_button = gr.Button(
-                        "Analyze & Break Down", variant="secondary"
                     )
                 with gr.Group(visible=False) as text_mode:
                     source_text = gr.Textbox(
@@ -244,7 +226,7 @@ def create_ankigen_interface():
                 with gr.Group(visible=False) as web_mode:
                     # --- BEGIN INTEGRATED CRAWLER UI (Task 16) ---
                     logger.info(
-                        "Setting up integrated Web Crawler UI elements..."
                     )
                     (
                         crawler_input_ui_elements,  # List of inputs like URL, depth, model, patterns
@@ -296,9 +278,10 @@ def create_ankigen_interface():
                     value=default_model_value,
                     label="Model Selection",
                     info="Select AI model for generation",
                 )
                 _model_info = gr.Markdown(
-                    "**gpt-4.1**: Best quality | **gpt-4.1-nano**: Faster/Cheaper"
                 )
                 topic_number = gr.Slider(
                     label="Number of Topics",
@@ -328,6 +311,161 @@ def create_ankigen_interface():
                     value=False,
                 )
 
                generate_button = gr.Button("Generate Cards", variant="primary")
 
             with gr.Group(visible=False) as path_results:
@@ -349,7 +487,7 @@ def create_ankigen_interface():
             gr.Markdown("### Generated Cards")
             with gr.Accordion("Output Format", open=False):
                 gr.Markdown(
-                    "Cards: Index, Topic, Type, Q, A, Explanation, Example, Prerequisites, Outcomes, Misconceptions, Difficulty. Export: CSV, .apkg"
                 )
             with gr.Accordion("Example Card Format", open=False):
                 gr.Code(
@@ -407,6 +545,12 @@ def create_ankigen_interface():
                 visible=False,
             )
 
             # Export buttons
             with gr.Row(elem_classes="export-group"):
                 export_csv_button = gr.Button("Export to CSV")
@@ -465,11 +609,11 @@ def create_ankigen_interface():
             # to prevent a subsequent Gradio error about mismatched return values.
             gr.Error(str(e))  # This will be shown in the UI.
             empty_subjects_df = pd.DataFrame(
-                columns=["Subject", "Prerequisites", "Time Estimate"]
             )
             return (
                 gr.update(
-                    value=empty_subjects_df
                 ),  # For subjects_list (DataFrame)
                 gr.update(value=""),  # For learning_order (Markdown)
                 gr.update(value=""),  # For projects (Markdown)
@@ -523,8 +667,96 @@ def create_ankigen_interface():
             preference_prompt_val,
             generate_cloze_checkbox_val,
             llm_judge_checkbox_val,
             progress=gr.Progress(track_tqdm=True),  # Added progress tracker
         ):
             # Recreate the partial function call, but now it can be awaited
             # The actual orchestrate_card_generation is already partially applied with client_manager and response_cache
             # So, we need to get that specific partial object if it's stored, or redefine the partial logic here.
@@ -544,6 +776,7 @@ def create_ankigen_interface():
                 generate_cloze_checkbox_val,
                 llm_judge_checkbox_val,
             )
 
         generate_button.click(
             fn=handle_generate_click,  # MODIFIED: Use the new async handler
@@ -559,8 +792,22 @@ def create_ankigen_interface():
                 preference_prompt,
                 generate_cloze_checkbox,
                 llm_judge_checkbox,
             ],
-            outputs=[output, total_cards_html],
             show_progress="full",
         )
 
@@ -583,19 +830,19 @@ def create_ankigen_interface():
             if exported_path_relative:
                 exported_path_absolute = os.path.abspath(exported_path_relative)
                 gr.Info(
-                    f"CSV ready for download: {os.path.basename(exported_path_absolute)}"
                 )
                 return gr.update(value=exported_path_absolute, visible=True)
-            else:
-                # This case might happen if export_dataframe_to_csv itself had an internal issue
-                # and returned None, though it typically raises an error or returns path.
-                gr.Warning("CSV export failed or returned no path.")
-                return gr.update(value=None, visible=False)
         except Exception as e:
             logger.error(
-                f"Error exporting DataFrame to CSV: {e}", exc_info=True
             )
-            gr.Error(f"Failed to export to CSV: {str(e)}")
             return gr.update(value=None, visible=False)
 
         export_csv_button.click(
@@ -607,7 +854,8 @@ def create_ankigen_interface():
 
         # Define handler for APKG export from DataFrame (Item 5)
         async def handle_export_dataframe_to_apkg_click(
-            df: pd.DataFrame, subject_for_deck_name: str
        ):
             if df is None or df.empty:
                 gr.Warning("No cards generated to export.")
@@ -620,13 +868,17 @@ def create_ankigen_interface():
             )
             if subject_for_deck_name and subject_for_deck_name.strip():
                 clean_subject = re.sub(
-                    r"[^a-zA-Z0-9\s_.-]", "", subject_for_deck_name.strip()
                 )
                 deck_name_inside_anki = f"AnkiGen - {clean_subject}"
             elif not df.empty and "Topic" in df.columns and df["Topic"].iloc[0]:
                 first_topic = df["Topic"].iloc[0]
                 clean_first_topic = re.sub(
-                    r"[^a-zA-Z0-9\s_.-]", "", str(first_topic).strip()
                 )
                 deck_name_inside_anki = f"AnkiGen - {clean_first_topic}"
             else:
@@ -657,14 +909,15 @@ def create_ankigen_interface():
                 exported_path_absolute = os.path.abspath(exported_path_relative)
 
                 gr.Info(
-                    f"Successfully exported deck '{deck_name_inside_anki}' to {exported_path_absolute}"
                 )
                 return gr.update(value=exported_path_absolute, visible=True)
             except Exception as e:
                 logger.error(
-                    f"Error exporting DataFrame to APKG: {e}", exc_info=True
                 )
-                gr.Error(f"Failed to export to APKG: {str(e)}")
                 return gr.update(value=None, visible=False)
 
         # Wire button to handler (Item 6)
@@ -675,9 +928,6 @@ def create_ankigen_interface():
             api_name="export_main_to_apkg",
         )
 
-        # --- CRAWLER EVENT HANDLER (Task 16) ---
-        # This handler is for the new "Crawl Content & Prepare Cards" button within web_mode
-
         async def handle_web_crawl_click(
             api_key_val: str,
             url: str,
@@ -695,7 +945,7 @@ def create_ankigen_interface():
             progress(0, desc="Initializing web crawl...")
             yield {
                 web_crawl_status_textbox: gr.update(
-                    value="Initializing web crawl..."
                 ),
                 output: gr.update(value=None),  # Clear main output table
                 total_cards_html: gr.update(
@@ -708,7 +958,7 @@ def create_ankigen_interface():
                 logger.error("API Key is missing for web crawler operation.")
                 yield {
                     web_crawl_status_textbox: gr.update(
-                        value="Error: OpenAI API Key is required."
                     ),
                 }
                 return
@@ -721,7 +971,7 @@ def create_ankigen_interface():
                 )
                 yield {
                     web_crawl_status_textbox: gr.update(
-                        value=f"Error: Client init failed: {str(e)}"
                     ),
                 }
                 return
@@ -760,7 +1010,7 @@ def create_ankigen_interface():
                         col in preview_df_value.columns for col in expected_cols
                     ):
                         logger.warning(
-                            "Crawled card data columns mismatch main output, attempting to use available data."
                         )
                         # Potentially select only common columns or reindex if necessary
                         # For now, we'll pass it as is, Gradio might handle extra/missing cols gracefully or error.
@@ -772,7 +1022,8 @@ def create_ankigen_interface():
                         web_crawl_status_textbox: gr.update(value=message),
                         output: gr.update(value=preview_df_value),
                         total_cards_html: gr.update(
-                            visible=True, value=total_cards_update
                         ),
                     }
                 except Exception as e:
@@ -782,7 +1033,7 @@ def create_ankigen_interface():
                     )
                     yield {
                         web_crawl_status_textbox: gr.update(
-                            value=f"{message} (Error displaying cards: {str(e)})"
                         ),
                         output: gr.update(value=None),
                         total_cards_html: gr.update(visible=False),
@@ -790,36 +1041,12 @@ def create_ankigen_interface():
                 else:
                     yield {
                         web_crawl_status_textbox: gr.update(
-                            value=message
                         ),  # Message from crawl_and_generate (e.g. no cards)
                         output: gr.update(value=None),
                         total_cards_html: gr.update(visible=False),
                     }
 
-        # Wire the new crawl button
-        # Need to get the actual UI components from crawler_input_ui_elements by index or name
-        # Assuming create_crawler_main_mode_elements returns them in a predictable order in the list
-        # or returns them individually. The Tuple return is better.
-
-        # crawler_input_ui_elements[0] is url_input
-        # crawler_input_ui_elements[1] is max_depth_slider
-        # crawler_input_ui_elements[2] is crawler_req_per_sec_slider
-        # crawler_input_ui_elements[3] is model_dropdown
-        # crawler_input_ui_elements[4] is include_patterns_textbox
-        # crawler_input_ui_elements[5] is exclude_patterns_textbox
-
-        # The other components are returned individually:
-        # web_crawl_custom_system_prompt, web_crawl_custom_user_prompt_template,
-        # web_crawl_use_sitemap_checkbox, web_crawl_sitemap_url_textbox
-
-        # Already unpacked above:
-        # web_crawl_url_input = crawler_input_ui_elements[0]
-        # web_crawl_max_depth_slider = crawler_input_ui_elements[1]
-        # web_crawl_req_per_sec_slider = crawler_input_ui_elements[2]
-        # web_crawl_model_dropdown = crawler_input_ui_elements[3]  # model for LLM processing
820
- # web_crawl_include_patterns_textbox = crawler_input_ui_elements[4]
821
- # web_crawl_exclude_patterns_textbox = crawler_input_ui_elements[5]
822
-
823
  web_crawl_button.click(
824
  fn=handle_web_crawl_click,
825
  inputs=[
@@ -858,7 +1085,7 @@ if __name__ == "__main__":
858
  ankigen_interface.launch(share=False, favicon_path=str(favicon_path))
859
  else:
860
  logger.warning(
861
- f"Favicon not found at {favicon_path}, launching without it."
862
  )
863
  ankigen_interface.launch(share=False)
864
  except Exception as e:
 
1
  # Standard library imports
2
+ import asyncio
3
  import os
 
 
4
  import re
5
+ from datetime import datetime
6
+ from pathlib import Path # Potentially for favicon_path
7
 
8
  import gradio as gr
9
  import pandas as pd
10
 
 
11
  from ankigen_core.card_generator import (
 
12
  AVAILABLE_MODELS,
13
+ orchestrate_card_generation,
14
  ) # GENERATION_MODES is internal to card_generator
 
15
  from ankigen_core.exporters import (
 
16
  export_dataframe_to_apkg,
17
+ export_dataframe_to_csv,
18
  ) # Anki models (BASIC_MODEL, CLOZE_MODEL) are internal to exporters
19
+ from ankigen_core.learning_path import analyze_learning_path
20
+ from ankigen_core.llm_interface import (
21
+ OpenAIClientManager,
22
+ ) # structured_output_completion is internal to core modules
23
  from ankigen_core.ui_logic import (
24
+ crawl_and_generate,
25
+ create_crawler_main_mode_elements,
26
  update_mode_visibility,
27
  use_selected_subjects,
 
 
28
  )
29
+ from ankigen_core.utils import (
30
+ ResponseCache,
31
+ get_logger,
32
+ ) # fetch_webpage_text is used by card_generator
33
 
34
  # --- Initialization ---
35
  logger = get_logger()
36
  response_cache = ResponseCache() # Initialize cache
37
  client_manager = OpenAIClientManager() # Initialize client manager
38
 
39
+ # Agent system is required
40
+
41
+ AGENTS_AVAILABLE_APP = True
42
+ logger.info("Agent system is available")
 
 
 
 
43
 
44
  js_storage = """
45
  async () => {
 
56
  }
57
  """
58
 
59
+ try:
60
+ custom_theme = gr.themes.Soft().set( # type: ignore
61
+ body_background_fill="*background_fill_secondary",
62
+ block_background_fill="*background_fill_primary",
63
+ block_border_width="0",
64
+ button_primary_background_fill="*primary_500",
65
+ button_primary_text_color="white",
66
+ )
67
+ except (AttributeError, ImportError):
68
+ # Fallback for older gradio versions or when themes are not available
69
+ custom_theme = None
70
 
71
  # --- Example Data for Initialization ---
72
  example_data = pd.DataFrame(
 
128
  log_file = os.path.join(log_dir, f"{logger_name}_{timestamp}.log")
129
 
130
  if os.path.exists(log_file):
131
+ with open(log_file) as f:
132
  lines = f.readlines()
133
  # Display last N lines, e.g., 100
134
  return "\n".join(lines[-100:]) # Ensured this is standard newline
135
  return f"Log file for today ({log_file}) not found or is empty."
136
  except Exception as e:
137
+ # Use the main app logger to log this error, but don't let it crash the UI
138
+ # function
139
  logger.error(f"Error reading logs: {e}", exc_info=True)
140
+ return f"Error reading logs: {e!s}"
141
 
142
 
143
  def create_ankigen_interface():
 
154
  .export-group > .gradio-group { margin-bottom: 0 !important; padding-bottom: 5px !important; }
155
 
156
  /* REMOVING CSS previously intended for DataFrame readability to ensure plain text */
157
+ /*
158
+ .explanation-text {
159
+ background: #f0fdf4;
160
+ border-left: 3px solid #4ade80;
161
  padding: 0.5em;
162
  margin-bottom: 0.5em;
163
  border-radius: 4px;
164
  }
165
+ .example-text-plain {
166
+ background: #fff7ed;
167
+ border-left: 3px solid #f97316;
168
  padding: 0.5em;
169
  margin-bottom: 0.5em;
170
  border-radius: 4px;
171
  }
172
+ pre code {
173
  display: block;
174
  padding: 0.8em;
175
+ background: #1e293b;
176
+ color: #e2e8f0;
177
  border-radius: 4px;
178
  overflow-x: auto;
179
  font-family: 'Fira Code', 'Consolas', monospace;
 
187
  with gr.Column(elem_classes="contain"):
188
  gr.Markdown("# 📚 AnkiGen - Advanced Anki Card Generator")
189
  gr.Markdown("#### Generate comprehensive Anki flashcards using AI.")
 
190
 
191
  with gr.Accordion("Configuration Settings", open=True):
192
  with gr.Row():
 
214
  lines=5,
215
  )
216
  analyze_button = gr.Button(
217
+ "Analyze & Break Down",
218
+ variant="secondary",
219
  )
220
  with gr.Group(visible=False) as text_mode:
221
  source_text = gr.Textbox(
 
226
  with gr.Group(visible=False) as web_mode:
227
  # --- BEGIN INTEGRATED CRAWLER UI (Task 16) ---
228
  logger.info(
229
+ "Setting up integrated Web Crawler UI elements...",
230
  )
231
  (
232
  crawler_input_ui_elements, # List of inputs like URL, depth, model, patterns
 
278
  value=default_model_value,
279
  label="Model Selection",
280
  info="Select AI model for generation",
281
+ allow_custom_value=True,
282
  )
283
  _model_info = gr.Markdown(
284
+ "**gpt-4.1**: Best quality | **gpt-4.1-nano**: Faster/Cheaper",
285
  )
286
  topic_number = gr.Slider(
287
  label="Number of Topics",
 
311
  value=False,
312
  )
313
 
314
+ # Agent System Controls (simplified since we're agent-only)
315
+ if AGENTS_AVAILABLE_APP:
316
+ # Hidden dropdown for compatibility - always set to agent_only
317
+ agent_mode_dropdown = gr.Dropdown(
318
+ choices=[("Agent Only", "agent_only")],
319
+ value="agent_only",
320
+ label="Agent Mode",
321
+ visible=False,
322
+ )
323
+
324
+ with gr.Accordion("Agent Configuration", open=False):
325
+ gr.Markdown("**Core Generation Pipeline**")
326
+ enable_subject_expert = gr.Checkbox(
327
+ label="Subject Expert Agent",
328
+ value=True,
329
+ info="Domain-specific expertise",
330
+ )
331
+ enable_generation_coordinator = gr.Checkbox(
332
+ label="Generation Coordinator",
333
+ value=True,
334
+ info="Orchestrates multi-agent generation",
335
+ )
336
+
337
+ gr.Markdown("**Quality Assurance**")
338
+ enable_content_judge = gr.Checkbox(
339
+ label="Content Accuracy Judge",
340
+ value=True,
341
+ info="Factual correctness validation",
342
+ )
343
+ enable_clarity_judge = gr.Checkbox(
344
+ label="Clarity Judge",
345
+ value=True,
346
+ info="Language clarity and comprehension",
347
+ )
348
+
349
+ gr.Markdown("**Optional Enhancements**")
350
+ enable_pedagogical_agent = gr.Checkbox(
351
+ label="Pedagogical Agent",
352
+ value=False,
353
+ info="Educational effectiveness review",
354
+ )
355
+ enable_pedagogical_judge = gr.Checkbox(
356
+ label="Pedagogical Judge",
357
+ value=False,
358
+ info="Learning theory compliance",
359
+ )
360
+ enable_enhancement_agent = gr.Checkbox(
361
+ label="Enhancement Agent",
362
+ value=False,
363
+ info="Content enrichment and metadata",
364
+ )
365
+
366
+ with gr.Accordion(
367
+ "🛠️ Agent Model Selection", open=False
368
+ ):
369
+ gr.Markdown("**Individual Agent Models**")
370
+
371
+ # Generator models
372
+ subject_expert_model = gr.Dropdown(
373
+ choices=model_choices_ui,
374
+ value="gpt-4.1",
375
+ label="Subject Expert Model",
376
+ info="Model for domain expertise",
377
+ allow_custom_value=True,
378
+ )
379
+ generation_coordinator_model = gr.Dropdown(
380
+ choices=model_choices_ui,
381
+ value="gpt-4.1-nano",
382
+ label="Generation Coordinator Model",
383
+ info="Model for orchestration",
384
+ allow_custom_value=True,
385
+ )
386
+
387
+ # Judge models
388
+ content_judge_model = gr.Dropdown(
389
+ choices=model_choices_ui,
390
+ value="gpt-4.1",
391
+ label="Content Accuracy Judge Model",
392
+ info="Model for fact-checking",
393
+ allow_custom_value=True,
394
+ )
395
+ clarity_judge_model = gr.Dropdown(
396
+ choices=model_choices_ui,
397
+ value="gpt-4.1-nano",
398
+ label="Clarity Judge Model",
399
+ info="Model for language clarity",
400
+ allow_custom_value=True,
401
+ )
402
+
403
+ # Enhancement models
404
+ pedagogical_agent_model = gr.Dropdown(
405
+ choices=model_choices_ui,
406
+ value="gpt-4.1",
407
+ label="Pedagogical Agent Model",
408
+ info="Model for educational theory",
409
+ allow_custom_value=True,
410
+ )
411
+ enhancement_agent_model = gr.Dropdown(
412
+ choices=model_choices_ui,
413
+ value="gpt-4.1",
414
+ label="Enhancement Agent Model",
415
+ info="Model for content enrichment",
416
+ allow_custom_value=True,
417
+ )
418
+ else:
419
+ # Placeholder when agents not available
420
+ agent_mode_dropdown = gr.Dropdown(
421
+ choices=[("Legacy Only", "legacy")],
422
+ value="legacy",
423
+ label="Agent Mode",
424
+ info="Agent system not available",
425
+ interactive=False,
426
+ )
427
+ enable_subject_expert = gr.Checkbox(
428
+ value=False, visible=False
429
+ )
430
+ enable_generation_coordinator = gr.Checkbox(
431
+ value=False, visible=False
432
+ )
433
+ enable_pedagogical_agent = gr.Checkbox(
434
+ value=False, visible=False
435
+ )
436
+ enable_content_judge = gr.Checkbox(
437
+ value=False, visible=False
438
+ )
439
+ enable_clarity_judge = gr.Checkbox(
440
+ value=False, visible=False
441
+ )
442
+ enable_pedagogical_judge = gr.Checkbox(
443
+ value=False, visible=False
444
+ )
445
+ enable_enhancement_agent = gr.Checkbox(
446
+ value=False, visible=False
447
+ )
448
+
449
+ # Hidden model dropdowns for non-agent mode
450
+ subject_expert_model = gr.Dropdown(
451
+ value="gpt-4.1", visible=False
452
+ )
453
+ generation_coordinator_model = gr.Dropdown(
454
+ value="gpt-4.1-nano", visible=False
455
+ )
456
+ content_judge_model = gr.Dropdown(
457
+ value="gpt-4.1", visible=False
458
+ )
459
+ clarity_judge_model = gr.Dropdown(
460
+ value="gpt-4.1-nano", visible=False
461
+ )
462
+ pedagogical_agent_model = gr.Dropdown(
463
+ value="gpt-4.1", visible=False
464
+ )
465
+ enhancement_agent_model = gr.Dropdown(
466
+ value="gpt-4.1", visible=False
467
+ )
468
+
469
  generate_button = gr.Button("Generate Cards", variant="primary")
470
 
471
  with gr.Group(visible=False) as path_results:
 
487
  gr.Markdown("### Generated Cards")
488
  with gr.Accordion("Output Format", open=False):
489
  gr.Markdown(
490
+ "Cards: Index, Topic, Type, Q, A, Explanation, Example, Prerequisites, Outcomes, Misconceptions, Difficulty. Export: CSV, .apkg",
491
  )
492
  with gr.Accordion("Example Card Format", open=False):
493
  gr.Code(
 
545
  visible=False,
546
  )
547
 
548
+ # Token usage display
549
+ token_usage_html = gr.HTML(
550
+ value="<div style='margin-top: 8px;'><b>Token Usage:</b> <span id='token-usage-display'>No usage data</span></div>",
551
+ visible=True,
552
+ )
553
+
554
  # Export buttons
555
  with gr.Row(elem_classes="export-group"):
556
  export_csv_button = gr.Button("Export to CSV")
 
609
  # to prevent a subsequent Gradio error about mismatched return values.
610
  gr.Error(str(e)) # This will be shown in the UI.
611
  empty_subjects_df = pd.DataFrame(
612
+ columns=["Subject", "Prerequisites", "Time Estimate"],
613
  )
614
  return (
615
  gr.update(
616
+ value=empty_subjects_df,
617
  ), # For subjects_list (DataFrame)
618
  gr.update(value=""), # For learning_order (Markdown)
619
  gr.update(value=""), # For projects (Markdown)
 
667
  preference_prompt_val,
668
  generate_cloze_checkbox_val,
669
  llm_judge_checkbox_val,
670
+ agent_mode_val,
671
+ enable_subject_expert_val,
672
+ enable_generation_coordinator_val,
673
+ enable_pedagogical_agent_val,
674
+ enable_content_judge_val,
675
+ enable_clarity_judge_val,
676
+ enable_pedagogical_judge_val,
677
+ enable_enhancement_agent_val,
678
+ subject_expert_model_val,
679
+ generation_coordinator_model_val,
680
+ content_judge_model_val,
681
+ clarity_judge_model_val,
682
+ pedagogical_agent_model_val,
683
+ enhancement_agent_model_val,
684
  progress=gr.Progress(track_tqdm=True), # Added progress tracker
685
  ):
686
+ # Apply agent settings if agents are available
687
+ if AGENTS_AVAILABLE_APP:
688
+ import os
689
+
690
+ # Set agent mode
691
+ os.environ["ANKIGEN_AGENT_MODE"] = agent_mode_val
692
+
693
+ # Set individual agent flags (using correct environment variable names)
694
+ os.environ["ANKIGEN_ENABLE_SUBJECT_EXPERT"] = str(
695
+ enable_subject_expert_val
696
+ ).lower()
697
+ os.environ["ANKIGEN_ENABLE_GENERATION_COORDINATOR"] = str(
698
+ enable_generation_coordinator_val
699
+ ).lower()
700
+ os.environ["ANKIGEN_ENABLE_PEDAGOGICAL_AGENT"] = str(
701
+ enable_pedagogical_agent_val
702
+ ).lower()
703
+ os.environ["ANKIGEN_ENABLE_CONTENT_JUDGE"] = str(
704
+ enable_content_judge_val
705
+ ).lower()
706
+ os.environ["ANKIGEN_ENABLE_CLARITY_JUDGE"] = str(
707
+ enable_clarity_judge_val
708
+ ).lower()
709
+ os.environ["ANKIGEN_ENABLE_PEDAGOGICAL_JUDGE"] = str(
710
+ enable_pedagogical_judge_val
711
+ ).lower()
712
+ os.environ["ANKIGEN_ENABLE_ENHANCEMENT_AGENT"] = str(
713
+ enable_enhancement_agent_val
714
+ ).lower()
715
+
716
+ # Enable additional required flags for proper agent coordination
717
+ os.environ["ANKIGEN_ENABLE_JUDGE_COORDINATOR"] = (
718
+ "true" # Required for judge coordination
719
+ )
720
+ os.environ["ANKIGEN_ENABLE_PARALLEL_JUDGING"] = (
721
+ "true" # Enable parallel judging for performance
722
+ )
723
+
724
+ # Configure agent models from UI selections
725
+ model_overrides = {
726
+ "subject_expert": subject_expert_model_val,
727
+ "generation_coordinator": generation_coordinator_model_val,
728
+ "content_accuracy_judge": content_judge_model_val,
729
+ "clarity_judge": clarity_judge_model_val,
730
+ "pedagogical_agent": pedagogical_agent_model_val,
731
+ "enhancement_agent": enhancement_agent_model_val,
732
+ }
733
+
734
+ # Template variables for Jinja rendering
735
+ template_vars = {
736
+ "subject": subject_val or "general studies",
737
+ "difficulty": "intermediate", # Could be made configurable
738
+ "topic": subject_val or "general concepts",
739
+ }
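# A minimal sketch of how template variables like these are typically
# consumed with Jinja (the prompt string below is illustrative, not the
# repository's actual template):
#
#     from jinja2 import Template
#     prompt = Template(
#         "You are a {{ difficulty }}-level tutor for {{ subject }}."
#     ).render(**template_vars)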
740
+
741
+ # Initialize config manager with model overrides and template variables
742
+ from ankigen_core.agents.config import get_config_manager
743
+
744
+ get_config_manager(model_overrides, template_vars)
745
+
746
+ # Log the agent configuration
747
+ logger.info(f"Agent mode set to: {agent_mode_val}")
748
+ logger.info(f"Model overrides: {model_overrides}")
749
+ logger.info(
750
+ f"Active agents: Subject Expert={enable_subject_expert_val}, Generation Coordinator={enable_generation_coordinator_val}, Content Judge={enable_content_judge_val}, Clarity Judge={enable_clarity_judge_val}"
751
+ )
752
+
753
+ # Reload feature flags to pick up the new environment variables
754
+ try:
755
+ # Agent system is available
756
+ logger.info("Agent system enabled")
757
+ except Exception as e:
758
+ logger.warning(f"Failed to reload feature flags: {e}")
759
+
760
  # Recreate the partial function call, but now it can be awaited
761
  # The actual orchestrate_card_generation is already partially applied with client_manager and response_cache
762
  # So, we need to get that specific partial object if it's stored, or redefine the partial logic here.
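# A minimal sketch of the partial-application pattern described above, assuming
# orchestrate_card_generation's two leading parameters are client_manager and
# response_cache (the trailing argument list is illustrative):
#
#     from functools import partial
#     orchestrate_partial = partial(
#         orchestrate_card_generation, client_manager, response_cache
#     )
#     # A partial of an async def still returns a coroutine when called, so:
#     # cards_df, total_html, usage_html = await orchestrate_partial(*ui_values)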
 
776
  generate_cloze_checkbox_val,
777
  llm_judge_checkbox_val,
778
  )
779
+ # Expect 3-tuple return (dataframe, total_cards_html, token_usage_html)
780
 
781
  generate_button.click(
782
  fn=handle_generate_click, # MODIFIED: Use the new async handler
 
792
  preference_prompt,
793
  generate_cloze_checkbox,
794
  llm_judge_checkbox,
795
+ agent_mode_dropdown,
796
+ enable_subject_expert,
797
+ enable_generation_coordinator,
798
+ enable_pedagogical_agent,
799
+ enable_content_judge,
800
+ enable_clarity_judge,
801
+ enable_pedagogical_judge,
802
+ enable_enhancement_agent,
803
+ subject_expert_model,
804
+ generation_coordinator_model,
805
+ content_judge_model,
806
+ clarity_judge_model,
807
+ pedagogical_agent_model,
808
+ enhancement_agent_model,
809
  ],
810
+ outputs=[output, total_cards_html, token_usage_html],
811
  show_progress="full",
812
  )
813
 
 
830
  if exported_path_relative:
831
  exported_path_absolute = os.path.abspath(exported_path_relative)
832
  gr.Info(
833
+ f"CSV ready for download: {os.path.basename(exported_path_absolute)}",
834
  )
835
  return gr.update(value=exported_path_absolute, visible=True)
836
+ # This case might happen if export_dataframe_to_csv itself had an internal issue
837
+ # and returned None, though it typically raises an error or returns path.
838
+ gr.Warning("CSV export failed or returned no path.")
839
+ return gr.update(value=None, visible=False)
 
840
  except Exception as e:
841
  logger.error(
842
+ f"Error exporting DataFrame to CSV: {e}",
843
+ exc_info=True,
844
  )
845
+ gr.Error(f"Failed to export to CSV: {e!s}")
846
  return gr.update(value=None, visible=False)
847
 
848
  export_csv_button.click(
 
854
 
855
  # Define handler for APKG export from DataFrame (Item 5)
856
  async def handle_export_dataframe_to_apkg_click(
857
+ df: pd.DataFrame,
858
+ subject_for_deck_name: str,
859
  ):
860
  if df is None or df.empty:
861
  gr.Warning("No cards generated to export.")
 
868
  )
869
  if subject_for_deck_name and subject_for_deck_name.strip():
870
  clean_subject = re.sub(
871
+ r"[^a-zA-Z0-9\s_.-]",
872
+ "",
873
+ subject_for_deck_name.strip(),
874
  )
875
  deck_name_inside_anki = f"AnkiGen - {clean_subject}"
876
  elif not df.empty and "Topic" in df.columns and df["Topic"].iloc[0]:
877
  first_topic = df["Topic"].iloc[0]
878
  clean_first_topic = re.sub(
879
+ r"[^a-zA-Z0-9\s_.-]",
880
+ "",
881
+ str(first_topic).strip(),
882
  )
883
  deck_name_inside_anki = f"AnkiGen - {clean_first_topic}"
884
  else:
 
909
  exported_path_absolute = os.path.abspath(exported_path_relative)
910
 
911
  gr.Info(
912
+ f"Successfully exported deck '{deck_name_inside_anki}' to {exported_path_absolute}",
913
  )
914
  return gr.update(value=exported_path_absolute, visible=True)
915
  except Exception as e:
916
  logger.error(
917
+ f"Error exporting DataFrame to APKG: {e}",
918
+ exc_info=True,
919
  )
920
+ gr.Error(f"Failed to export to APKG: {e!s}")
921
  return gr.update(value=None, visible=False)
922
 
923
  # Wire button to handler (Item 6)
 
928
  api_name="export_main_to_apkg",
929
  )
930
 
 
 
 
931
  async def handle_web_crawl_click(
932
  api_key_val: str,
933
  url: str,
 
945
  progress(0, desc="Initializing web crawl...")
946
  yield {
947
  web_crawl_status_textbox: gr.update(
948
+ value="Initializing web crawl...",
949
  ),
950
  output: gr.update(value=None), # Clear main output table
951
  total_cards_html: gr.update(
 
958
  logger.error("API Key is missing for web crawler operation.")
959
  yield {
960
  web_crawl_status_textbox: gr.update(
961
+ value="Error: OpenAI API Key is required.",
962
  ),
963
  }
964
  return
 
971
  )
972
  yield {
973
  web_crawl_status_textbox: gr.update(
974
+ value=f"Error: Client init failed: {e!s}",
975
  ),
976
  }
977
  return
 
1010
  col in preview_df_value.columns for col in expected_cols
1011
  ):
1012
  logger.warning(
1013
+ "Crawled card data columns mismatch main output, attempting to use available data.",
1014
  )
1015
  # Potentially select only common columns or reindex if necessary
1016
  # For now, we'll pass it as is, Gradio might handle extra/missing cols gracefully or error.
 
1022
  web_crawl_status_textbox: gr.update(value=message),
1023
  output: gr.update(value=preview_df_value),
1024
  total_cards_html: gr.update(
1025
+ visible=True,
1026
+ value=total_cards_update,
1027
  ),
1028
  }
1029
  except Exception as e:
 
1033
  )
1034
  yield {
1035
  web_crawl_status_textbox: gr.update(
1036
+ value=f"{message} (Error displaying cards: {e!s})",
1037
  ),
1038
  output: gr.update(value=None),
1039
  total_cards_html: gr.update(visible=False),
 
1041
  else:
1042
  yield {
1043
  web_crawl_status_textbox: gr.update(
1044
+ value=message,
1045
  ), # Message from crawl_and_generate (e.g. no cards)
1046
  output: gr.update(value=None),
1047
  total_cards_html: gr.update(visible=False),
1048
  }
1049
 
1050
  web_crawl_button.click(
1051
  fn=handle_web_crawl_click,
1052
  inputs=[
 
1085
  ankigen_interface.launch(share=False, favicon_path=str(favicon_path))
1086
  else:
1087
  logger.warning(
1088
+ f"Favicon not found at {favicon_path}, launching without it.",
1089
  )
1090
  ankigen_interface.launch(share=False)
1091
  except Exception as e:
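The generate handler above communicates agent settings to the core purely through ANKIGEN_* environment variables. A minimal sketch of reading those flags back as booleans; parse_bool and load_agent_flags are illustrative names, not the repository's actual feature-flag API:

import os

def parse_bool(name: str, default: bool = False) -> bool:
    # handle_generate_click writes str(value).lower(), i.e. "true" / "false"
    return os.environ.get(name, str(default).lower()) == "true"

def load_agent_flags() -> dict[str, bool]:
    # Flag names as set in handle_generate_click
    names = [
        "ANKIGEN_ENABLE_SUBJECT_EXPERT",
        "ANKIGEN_ENABLE_GENERATION_COORDINATOR",
        "ANKIGEN_ENABLE_PEDAGOGICAL_AGENT",
        "ANKIGEN_ENABLE_CONTENT_JUDGE",
        "ANKIGEN_ENABLE_CLARITY_JUDGE",
        "ANKIGEN_ENABLE_PEDAGOGICAL_JUDGE",
        "ANKIGEN_ENABLE_ENHANCEMENT_AGENT",
        "ANKIGEN_ENABLE_JUDGE_COORDINATOR",
        "ANKIGEN_ENABLE_PARALLEL_JUDGING",
    ]
    return {name: parse_bool(name) for name in names}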
uv.lock CHANGED
@@ -25,6 +25,7 @@ dependencies = [
25
  { name = "gradio" },
26
  { name = "lxml" },
27
  { name = "openai" },
 
28
  { name = "pandas" },
29
  { name = "pydantic" },
30
  { name = "tenacity" },
@@ -50,6 +51,7 @@ requires-dist = [
50
  { name = "gradio", specifier = ">=5.34.2" },
51
  { name = "lxml", specifier = "==5.2.2" },
52
  { name = "openai", specifier = ">=1.91.0" },
 
53
  { name = "pandas", specifier = "==2.2.3" },
54
  { name = "pre-commit", marker = "extra == 'dev'", specifier = ">=4.2.0" },
55
  { name = "pydantic", specifier = "==2.10.6" },
@@ -86,6 +88,15 @@ wheels = [
86
  { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916, upload-time = "2025-03-17T00:02:52.713Z" },
87
  ]
88
 
89
  [[package]]
90
  name = "audioop-lts"
91
  version = "0.2.1"
@@ -438,6 +449,18 @@ wheels = [
438
  { url = "https://files.pythonhosted.org/packages/ea/72/1e76abc821f8efaaeb2e3bd727a6c97bf87c6a9a0ffacfed0647e587824a/gradio_client-1.10.3-py3-none-any.whl", hash = "sha256:941e7f8d9a160f88487e9780a3db2736a40ea2b8b69d53ffdb306e47ef658b76", size = 323599, upload-time = "2025-06-10T00:51:45.204Z" },
439
  ]
440
 
441
  [[package]]
442
  name = "groovy"
443
  version = "0.1.2"
@@ -499,6 +522,15 @@ wheels = [
499
  { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
500
  ]
501
 
502
  [[package]]
503
  name = "huggingface-hub"
504
  version = "0.33.1"
@@ -605,6 +637,33 @@ wheels = [
605
  { url = "https://files.pythonhosted.org/packages/b3/4a/4175a563579e884192ba6e81725fc0448b042024419be8d83aa8a80a3f44/jiter-0.10.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa96f2abba33dc77f79b4cf791840230375f9534e5fac927ccceb58c5e604a5", size = 354213, upload-time = "2025-05-18T19:04:41.894Z" },
606
  ]
607
 
608
  [[package]]
609
  name = "lxml"
610
  version = "5.2.2"
@@ -684,6 +743,27 @@ wheels = [
684
  { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" },
685
  ]
686
 
687
  [[package]]
688
  name = "mdurl"
689
  version = "0.1.2"
@@ -771,6 +851,24 @@ wheels = [
771
  { url = "https://files.pythonhosted.org/packages/7a/d2/f99bdd6fc737d6b3cf0df895508d621fc9a386b375a1230ee81d46c5436e/openai-1.91.0-py3-none-any.whl", hash = "sha256:207f87aa3bc49365e014fac2f7e291b99929f4fe126c4654143440e0ad446a5f", size = 735837, upload-time = "2025-06-23T18:27:08.913Z" },
772
  ]
773
 
774
  [[package]]
775
  name = "orjson"
776
  version = "3.10.18"
@@ -989,6 +1087,20 @@ wheels = [
989
  { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186, upload-time = "2024-12-18T11:29:37.649Z" },
990
  ]
991
 
992
  [[package]]
993
  name = "pydub"
994
  version = "0.25.1"
@@ -1074,6 +1186,15 @@ wheels = [
1074
  { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
1075
  ]
1076
 
1077
  [[package]]
1078
  name = "python-multipart"
1079
  version = "0.0.20"
@@ -1118,6 +1239,20 @@ wheels = [
1118
  { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" },
1119
  ]
1120
 
1121
  [[package]]
1122
  name = "regex"
1123
  version = "2024.11.6"
@@ -1184,6 +1319,82 @@ wheels = [
1184
  { url = "https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0", size = 243229, upload-time = "2025-03-30T14:15:12.283Z" },
1185
  ]
1186
 
1187
  [[package]]
1188
  name = "ruff"
1189
  version = "0.12.0"
@@ -1266,6 +1477,18 @@ wheels = [
1266
  { url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677, upload-time = "2025-04-20T18:50:07.196Z" },
1267
  ]
1268
 
1269
  [[package]]
1270
  name = "starlette"
1271
  version = "0.46.2"
@@ -1347,6 +1570,18 @@ wheels = [
1347
  { url = "https://files.pythonhosted.org/packages/76/42/3efaf858001d2c2913de7f354563e3a3a2f0decae3efe98427125a8f441e/typer-0.16.0-py3-none-any.whl", hash = "sha256:1f79bed11d4d02d4310e3c1b7ba594183bcedb0ac73b27a9e5f28f6fb5b98855", size = 46317, upload-time = "2025-05-26T14:30:30.523Z" },
1348
  ]
1349
 
1350
  [[package]]
1351
  name = "typing-extensions"
1352
  version = "4.14.0"
@@ -1356,6 +1591,18 @@ wheels = [
1356
  { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839, upload-time = "2025-06-02T14:52:10.026Z" },
1357
  ]
1358
 
1359
  [[package]]
1360
  name = "tzdata"
1361
  version = "2025.2"
 
25
  { name = "gradio" },
26
  { name = "lxml" },
27
  { name = "openai" },
28
+ { name = "openai-agents" },
29
  { name = "pandas" },
30
  { name = "pydantic" },
31
  { name = "tenacity" },
 
51
  { name = "gradio", specifier = ">=5.34.2" },
52
  { name = "lxml", specifier = "==5.2.2" },
53
  { name = "openai", specifier = ">=1.91.0" },
54
+ { name = "openai-agents", specifier = ">=0.1.0" },
55
  { name = "pandas", specifier = "==2.2.3" },
56
  { name = "pre-commit", marker = "extra == 'dev'", specifier = ">=4.2.0" },
57
  { name = "pydantic", specifier = "==2.10.6" },
 
88
  { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916, upload-time = "2025-03-17T00:02:52.713Z" },
89
  ]
90
 
91
+ [[package]]
92
+ name = "attrs"
93
+ version = "25.3.0"
94
+ source = { registry = "https://pypi.org/simple" }
95
+ sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" }
96
+ wheels = [
97
+ { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" },
98
+ ]
99
+
100
  [[package]]
101
  name = "audioop-lts"
102
  version = "0.2.1"
 
449
  { url = "https://files.pythonhosted.org/packages/ea/72/1e76abc821f8efaaeb2e3bd727a6c97bf87c6a9a0ffacfed0647e587824a/gradio_client-1.10.3-py3-none-any.whl", hash = "sha256:941e7f8d9a160f88487e9780a3db2736a40ea2b8b69d53ffdb306e47ef658b76", size = 323599, upload-time = "2025-06-10T00:51:45.204Z" },
450
  ]
451
 
452
+ [[package]]
453
+ name = "griffe"
454
+ version = "1.7.3"
455
+ source = { registry = "https://pypi.org/simple" }
456
+ dependencies = [
457
+ { name = "colorama" },
458
+ ]
459
+ sdist = { url = "https://files.pythonhosted.org/packages/a9/3e/5aa9a61f7c3c47b0b52a1d930302992229d191bf4bc76447b324b731510a/griffe-1.7.3.tar.gz", hash = "sha256:52ee893c6a3a968b639ace8015bec9d36594961e156e23315c8e8e51401fa50b", size = 395137, upload-time = "2025-04-23T11:29:09.147Z" }
460
+ wheels = [
461
+ { url = "https://files.pythonhosted.org/packages/58/c6/5c20af38c2a57c15d87f7f38bee77d63c1d2a3689f74fefaf35915dd12b2/griffe-1.7.3-py3-none-any.whl", hash = "sha256:c6b3ee30c2f0f17f30bcdef5068d6ab7a2a4f1b8bf1a3e74b56fffd21e1c5f75", size = 129303, upload-time = "2025-04-23T11:29:07.145Z" },
462
+ ]
463
+
464
  [[package]]
465
  name = "groovy"
466
  version = "0.1.2"
 
522
  { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
523
  ]
524
 
525
+ [[package]]
526
+ name = "httpx-sse"
527
+ version = "0.4.1"
528
+ source = { registry = "https://pypi.org/simple" }
529
+ sdist = { url = "https://files.pythonhosted.org/packages/6e/fa/66bd985dd0b7c109a3bcb89272ee0bfb7e2b4d06309ad7b38ff866734b2a/httpx_sse-0.4.1.tar.gz", hash = "sha256:8f44d34414bc7b21bf3602713005c5df4917884f76072479b21f68befa4ea26e", size = 12998, upload-time = "2025-06-24T13:21:05.71Z" }
530
+ wheels = [
531
+ { url = "https://files.pythonhosted.org/packages/25/0a/6269e3473b09aed2dab8aa1a600c70f31f00ae1349bee30658f7e358a159/httpx_sse-0.4.1-py3-none-any.whl", hash = "sha256:cba42174344c3a5b06f255ce65b350880f962d99ead85e776f23c6618a377a37", size = 8054, upload-time = "2025-06-24T13:21:04.772Z" },
532
+ ]
533
+
534
  [[package]]
535
  name = "huggingface-hub"
536
  version = "0.33.1"
 
637
  { url = "https://files.pythonhosted.org/packages/b3/4a/4175a563579e884192ba6e81725fc0448b042024419be8d83aa8a80a3f44/jiter-0.10.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa96f2abba33dc77f79b4cf791840230375f9534e5fac927ccceb58c5e604a5", size = 354213, upload-time = "2025-05-18T19:04:41.894Z" },
638
  ]
639
 
640
+ [[package]]
641
+ name = "jsonschema"
642
+ version = "4.24.0"
643
+ source = { registry = "https://pypi.org/simple" }
644
+ dependencies = [
645
+ { name = "attrs" },
646
+ { name = "jsonschema-specifications" },
647
+ { name = "referencing" },
648
+ { name = "rpds-py" },
649
+ ]
650
+ sdist = { url = "https://files.pythonhosted.org/packages/bf/d3/1cf5326b923a53515d8f3a2cd442e6d7e94fcc444716e879ea70a0ce3177/jsonschema-4.24.0.tar.gz", hash = "sha256:0b4e8069eb12aedfa881333004bccaec24ecef5a8a6a4b6df142b2cc9599d196", size = 353480, upload-time = "2025-05-26T18:48:10.459Z" }
651
+ wheels = [
652
+ { url = "https://files.pythonhosted.org/packages/a2/3d/023389198f69c722d039351050738d6755376c8fd343e91dc493ea485905/jsonschema-4.24.0-py3-none-any.whl", hash = "sha256:a462455f19f5faf404a7902952b6f0e3ce868f3ee09a359b05eca6673bd8412d", size = 88709, upload-time = "2025-05-26T18:48:08.417Z" },
653
+ ]
654
+
655
+ [[package]]
656
+ name = "jsonschema-specifications"
657
+ version = "2025.4.1"
658
+ source = { registry = "https://pypi.org/simple" }
659
+ dependencies = [
660
+ { name = "referencing" },
661
+ ]
662
+ sdist = { url = "https://files.pythonhosted.org/packages/bf/ce/46fbd9c8119cfc3581ee5643ea49464d168028cfb5caff5fc0596d0cf914/jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608", size = 15513, upload-time = "2025-04-23T12:34:07.418Z" }
663
+ wheels = [
664
+ { url = "https://files.pythonhosted.org/packages/01/0e/b27cdbaccf30b890c40ed1da9fd4a3593a5cf94dae54fb34f8a4b74fcd3f/jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af", size = 18437, upload-time = "2025-04-23T12:34:05.422Z" },
665
+ ]
666
+
667
  [[package]]
668
  name = "lxml"
669
  version = "5.2.2"
 
743
  { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" },
744
  ]
745
 
746
+ [[package]]
747
+ name = "mcp"
748
+ version = "1.10.1"
749
+ source = { registry = "https://pypi.org/simple" }
750
+ dependencies = [
751
+ { name = "anyio" },
752
+ { name = "httpx" },
753
+ { name = "httpx-sse" },
754
+ { name = "jsonschema" },
755
+ { name = "pydantic" },
756
+ { name = "pydantic-settings" },
757
+ { name = "python-multipart" },
758
+ { name = "sse-starlette" },
759
+ { name = "starlette" },
760
+ { name = "uvicorn", marker = "sys_platform != 'emscripten'" },
761
+ ]
762
+ sdist = { url = "https://files.pythonhosted.org/packages/7c/68/63045305f29ff680a9cd5be360c755270109e6b76f696ea6824547ddbc30/mcp-1.10.1.tar.gz", hash = "sha256:aaa0957d8307feeff180da2d9d359f2b801f35c0c67f1882136239055ef034c2", size = 392969, upload-time = "2025-06-27T12:03:08.982Z" }
763
+ wheels = [
764
+ { url = "https://files.pythonhosted.org/packages/d7/3f/435a5b3d10ae242a9d6c2b33175551173c3c61fe637dc893be05c4ed0aaf/mcp-1.10.1-py3-none-any.whl", hash = "sha256:4d08301aefe906dce0fa482289db55ce1db831e3e67212e65b5e23ad8454b3c5", size = 150878, upload-time = "2025-06-27T12:03:07.328Z" },
765
+ ]
766
+
767
  [[package]]
768
  name = "mdurl"
769
  version = "0.1.2"
 
851
  { url = "https://files.pythonhosted.org/packages/7a/d2/f99bdd6fc737d6b3cf0df895508d621fc9a386b375a1230ee81d46c5436e/openai-1.91.0-py3-none-any.whl", hash = "sha256:207f87aa3bc49365e014fac2f7e291b99929f4fe126c4654143440e0ad446a5f", size = 735837, upload-time = "2025-06-23T18:27:08.913Z" },
852
  ]
853
 
854
+ [[package]]
855
+ name = "openai-agents"
856
+ version = "0.1.0"
857
+ source = { registry = "https://pypi.org/simple" }
858
+ dependencies = [
859
+ { name = "griffe" },
860
+ { name = "mcp" },
861
+ { name = "openai" },
862
+ { name = "pydantic" },
863
+ { name = "requests" },
864
+ { name = "types-requests" },
865
+ { name = "typing-extensions" },
866
+ ]
867
+ sdist = { url = "https://files.pythonhosted.org/packages/99/f8/a292d8f506997355755d88db619966539ec838ce18f070c5a101e5a430ec/openai_agents-0.1.0.tar.gz", hash = "sha256:a697a4fdd881a7a16db8c0dcafba0f17d9e90b6236a4b79923bd043b6ae86d80", size = 1379588, upload-time = "2025-06-27T20:58:03.186Z" }
868
+ wheels = [
869
+ { url = "https://files.pythonhosted.org/packages/31/5b/326e6b1b661dbef718977a8379f9702a4eec1df772450517870beeb3af35/openai_agents-0.1.0-py3-none-any.whl", hash = "sha256:6a8ef71d3f20aecba0f01bca2e059590d1c23f5adc02d780cb5921ea8a7ca774", size = 130620, upload-time = "2025-06-27T20:58:01.461Z" },
870
+ ]
871
+
872
  [[package]]
873
  name = "orjson"
874
  version = "3.10.18"
 
1087
  { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186, upload-time = "2024-12-18T11:29:37.649Z" },
1088
  ]
1089
 
1090
+ [[package]]
1091
+ name = "pydantic-settings"
1092
+ version = "2.10.1"
1093
+ source = { registry = "https://pypi.org/simple" }
1094
+ dependencies = [
1095
+ { name = "pydantic" },
1096
+ { name = "python-dotenv" },
1097
+ { name = "typing-inspection" },
1098
+ ]
1099
+ sdist = { url = "https://files.pythonhosted.org/packages/68/85/1ea668bbab3c50071ca613c6ab30047fb36ab0da1b92fa8f17bbc38fd36c/pydantic_settings-2.10.1.tar.gz", hash = "sha256:06f0062169818d0f5524420a360d632d5857b83cffd4d42fe29597807a1614ee", size = 172583, upload-time = "2025-06-24T13:26:46.841Z" }
1100
+ wheels = [
1101
+ { url = "https://files.pythonhosted.org/packages/58/f0/427018098906416f580e3cf1366d3b1abfb408a0652e9f31600c24a1903c/pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796", size = 45235, upload-time = "2025-06-24T13:26:45.485Z" },
1102
+ ]
1103
+
1104
  [[package]]
1105
  name = "pydub"
1106
  version = "0.25.1"
 
1186
  { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
1187
  ]
1188
 
1189
+ [[package]]
1190
+ name = "python-dotenv"
1191
+ version = "1.1.1"
1192
+ source = { registry = "https://pypi.org/simple" }
1193
+ sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" }
1194
+ wheels = [
1195
+ { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" },
1196
+ ]
1197
+
1198
  [[package]]
1199
  name = "python-multipart"
1200
  version = "0.0.20"
 
1239
  { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" },
1240
  ]
1241
 
1242
+ [[package]]
1243
+ name = "referencing"
1244
+ version = "0.36.2"
1245
+ source = { registry = "https://pypi.org/simple" }
1246
+ dependencies = [
1247
+ { name = "attrs" },
1248
+ { name = "rpds-py" },
1249
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
1250
+ ]
1251
+ sdist = { url = "https://files.pythonhosted.org/packages/2f/db/98b5c277be99dd18bfd91dd04e1b759cad18d1a338188c936e92f921c7e2/referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa", size = 74744, upload-time = "2025-01-25T08:48:16.138Z" }
1252
+ wheels = [
1253
+ { url = "https://files.pythonhosted.org/packages/c1/b1/3baf80dc6d2b7bc27a95a67752d0208e410351e3feb4eb78de5f77454d8d/referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0", size = 26775, upload-time = "2025-01-25T08:48:14.241Z" },
1254
+ ]
1255
+
1256
  [[package]]
1257
  name = "regex"
1258
  version = "2024.11.6"
 
1319
  { url = "https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0", size = 243229, upload-time = "2025-03-30T14:15:12.283Z" },
1320
  ]
1321
 
1322
+ [[package]]
1323
+ name = "rpds-py"
1324
+ version = "0.26.0"
1325
+ source = { registry = "https://pypi.org/simple" }
1326
+ sdist = { url = "https://files.pythonhosted.org/packages/a5/aa/4456d84bbb54adc6a916fb10c9b374f78ac840337644e4a5eda229c81275/rpds_py-0.26.0.tar.gz", hash = "sha256:20dae58a859b0906f0685642e591056f1e787f3a8b39c8e8749a45dc7d26bdb0", size = 27385, upload-time = "2025-07-01T15:57:13.958Z" }
1327
+ wheels = [
1328
+ { url = "https://files.pythonhosted.org/packages/ea/86/90eb87c6f87085868bd077c7a9938006eb1ce19ed4d06944a90d3560fce2/rpds_py-0.26.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:894514d47e012e794f1350f076c427d2347ebf82f9b958d554d12819849a369d", size = 363933, upload-time = "2025-07-01T15:54:15.734Z" },
1329
+ { url = "https://files.pythonhosted.org/packages/63/78/4469f24d34636242c924626082b9586f064ada0b5dbb1e9d096ee7a8e0c6/rpds_py-0.26.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc921b96fa95a097add244da36a1d9e4f3039160d1d30f1b35837bf108c21136", size = 350447, upload-time = "2025-07-01T15:54:16.922Z" },
1330
+ { url = "https://files.pythonhosted.org/packages/ad/91/c448ed45efdfdade82348d5e7995e15612754826ea640afc20915119734f/rpds_py-0.26.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e1157659470aa42a75448b6e943c895be8c70531c43cb78b9ba990778955582", size = 384711, upload-time = "2025-07-01T15:54:18.101Z" },
1331
+ { url = "https://files.pythonhosted.org/packages/ec/43/e5c86fef4be7f49828bdd4ecc8931f0287b1152c0bb0163049b3218740e7/rpds_py-0.26.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:521ccf56f45bb3a791182dc6b88ae5f8fa079dd705ee42138c76deb1238e554e", size = 400865, upload-time = "2025-07-01T15:54:19.295Z" },
1332
+ { url = "https://files.pythonhosted.org/packages/55/34/e00f726a4d44f22d5c5fe2e5ddd3ac3d7fd3f74a175607781fbdd06fe375/rpds_py-0.26.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9def736773fd56b305c0eef698be5192c77bfa30d55a0e5885f80126c4831a15", size = 517763, upload-time = "2025-07-01T15:54:20.858Z" },
1333
+ { url = "https://files.pythonhosted.org/packages/52/1c/52dc20c31b147af724b16104500fba13e60123ea0334beba7b40e33354b4/rpds_py-0.26.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cdad4ea3b4513b475e027be79e5a0ceac8ee1c113a1a11e5edc3c30c29f964d8", size = 406651, upload-time = "2025-07-01T15:54:22.508Z" },
1334
+ { url = "https://files.pythonhosted.org/packages/2e/77/87d7bfabfc4e821caa35481a2ff6ae0b73e6a391bb6b343db2c91c2b9844/rpds_py-0.26.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82b165b07f416bdccf5c84546a484cc8f15137ca38325403864bfdf2b5b72f6a", size = 386079, upload-time = "2025-07-01T15:54:23.987Z" },
1335
+ { url = "https://files.pythonhosted.org/packages/e3/d4/7f2200c2d3ee145b65b3cddc4310d51f7da6a26634f3ac87125fd789152a/rpds_py-0.26.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d04cab0a54b9dba4d278fe955a1390da3cf71f57feb78ddc7cb67cbe0bd30323", size = 421379, upload-time = "2025-07-01T15:54:25.073Z" },
1336
+ { url = "https://files.pythonhosted.org/packages/ae/13/9fdd428b9c820869924ab62236b8688b122baa22d23efdd1c566938a39ba/rpds_py-0.26.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:79061ba1a11b6a12743a2b0f72a46aa2758613d454aa6ba4f5a265cc48850158", size = 562033, upload-time = "2025-07-01T15:54:26.225Z" },
1337
+ { url = "https://files.pythonhosted.org/packages/f3/e1/b69686c3bcbe775abac3a4c1c30a164a2076d28df7926041f6c0eb5e8d28/rpds_py-0.26.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f405c93675d8d4c5ac87364bb38d06c988e11028a64b52a47158a355079661f3", size = 591639, upload-time = "2025-07-01T15:54:27.424Z" },
1338
+ { url = "https://files.pythonhosted.org/packages/5c/c9/1e3d8c8863c84a90197ac577bbc3d796a92502124c27092413426f670990/rpds_py-0.26.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dafd4c44b74aa4bed4b250f1aed165b8ef5de743bcca3b88fc9619b6087093d2", size = 557105, upload-time = "2025-07-01T15:54:29.93Z" },
1339
+ { url = "https://files.pythonhosted.org/packages/9f/c5/90c569649057622959f6dcc40f7b516539608a414dfd54b8d77e3b201ac0/rpds_py-0.26.0-cp312-cp312-win32.whl", hash = "sha256:3da5852aad63fa0c6f836f3359647870e21ea96cf433eb393ffa45263a170d44", size = 223272, upload-time = "2025-07-01T15:54:31.128Z" },
1340
+ { url = "https://files.pythonhosted.org/packages/7d/16/19f5d9f2a556cfed454eebe4d354c38d51c20f3db69e7b4ce6cff904905d/rpds_py-0.26.0-cp312-cp312-win_amd64.whl", hash = "sha256:cf47cfdabc2194a669dcf7a8dbba62e37a04c5041d2125fae0233b720da6f05c", size = 234995, upload-time = "2025-07-01T15:54:32.195Z" },
1341
+ { url = "https://files.pythonhosted.org/packages/83/f0/7935e40b529c0e752dfaa7880224771b51175fce08b41ab4a92eb2fbdc7f/rpds_py-0.26.0-cp312-cp312-win_arm64.whl", hash = "sha256:20ab1ae4fa534f73647aad289003f1104092890849e0266271351922ed5574f8", size = 223198, upload-time = "2025-07-01T15:54:33.271Z" },
1342
+ { url = "https://files.pythonhosted.org/packages/6a/67/bb62d0109493b12b1c6ab00de7a5566aa84c0e44217c2d94bee1bd370da9/rpds_py-0.26.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:696764a5be111b036256c0b18cd29783fab22154690fc698062fc1b0084b511d", size = 363917, upload-time = "2025-07-01T15:54:34.755Z" },
1343
+ { url = "https://files.pythonhosted.org/packages/4b/f3/34e6ae1925a5706c0f002a8d2d7f172373b855768149796af87bd65dcdb9/rpds_py-0.26.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1e6c15d2080a63aaed876e228efe4f814bc7889c63b1e112ad46fdc8b368b9e1", size = 350073, upload-time = "2025-07-01T15:54:36.292Z" },
1344
+ { url = "https://files.pythonhosted.org/packages/75/83/1953a9d4f4e4de7fd0533733e041c28135f3c21485faaef56a8aadbd96b5/rpds_py-0.26.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390e3170babf42462739a93321e657444f0862c6d722a291accc46f9d21ed04e", size = 384214, upload-time = "2025-07-01T15:54:37.469Z" },
1345
+ { url = "https://files.pythonhosted.org/packages/48/0e/983ed1b792b3322ea1d065e67f4b230f3b96025f5ce3878cc40af09b7533/rpds_py-0.26.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7da84c2c74c0f5bc97d853d9e17bb83e2dcafcff0dc48286916001cc114379a1", size = 400113, upload-time = "2025-07-01T15:54:38.954Z" },
1346
+ { url = "https://files.pythonhosted.org/packages/69/7f/36c0925fff6f660a80be259c5b4f5e53a16851f946eb080351d057698528/rpds_py-0.26.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c5fe114a6dd480a510b6d3661d09d67d1622c4bf20660a474507aaee7eeeee9", size = 515189, upload-time = "2025-07-01T15:54:40.57Z" },
1347
+ { url = "https://files.pythonhosted.org/packages/13/45/cbf07fc03ba7a9b54662c9badb58294ecfb24f828b9732970bd1a431ed5c/rpds_py-0.26.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3100b3090269f3a7ea727b06a6080d4eb7439dca4c0e91a07c5d133bb1727ea7", size = 406998, upload-time = "2025-07-01T15:54:43.025Z" },
1348
+ { url = "https://files.pythonhosted.org/packages/6c/b0/8fa5e36e58657997873fd6a1cf621285ca822ca75b4b3434ead047daa307/rpds_py-0.26.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c03c9b0c64afd0320ae57de4c982801271c0c211aa2d37f3003ff5feb75bb04", size = 385903, upload-time = "2025-07-01T15:54:44.752Z" },
1349
+ { url = "https://files.pythonhosted.org/packages/4b/f7/b25437772f9f57d7a9fbd73ed86d0dcd76b4c7c6998348c070d90f23e315/rpds_py-0.26.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5963b72ccd199ade6ee493723d18a3f21ba7d5b957017607f815788cef50eaf1", size = 419785, upload-time = "2025-07-01T15:54:46.043Z" },
1350
+ { url = "https://files.pythonhosted.org/packages/a7/6b/63ffa55743dfcb4baf2e9e77a0b11f7f97ed96a54558fcb5717a4b2cd732/rpds_py-0.26.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9da4e873860ad5bab3291438525cae80169daecbfafe5657f7f5fb4d6b3f96b9", size = 561329, upload-time = "2025-07-01T15:54:47.64Z" },
1351
+ { url = "https://files.pythonhosted.org/packages/2f/07/1f4f5e2886c480a2346b1e6759c00278b8a69e697ae952d82ae2e6ee5db0/rpds_py-0.26.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5afaddaa8e8c7f1f7b4c5c725c0070b6eed0228f705b90a1732a48e84350f4e9", size = 590875, upload-time = "2025-07-01T15:54:48.9Z" },
1352
+ { url = "https://files.pythonhosted.org/packages/cc/bc/e6639f1b91c3a55f8c41b47d73e6307051b6e246254a827ede730624c0f8/rpds_py-0.26.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4916dc96489616a6f9667e7526af8fa693c0fdb4f3acb0e5d9f4400eb06a47ba", size = 556636, upload-time = "2025-07-01T15:54:50.619Z" },
1353
+ { url = "https://files.pythonhosted.org/packages/05/4c/b3917c45566f9f9a209d38d9b54a1833f2bb1032a3e04c66f75726f28876/rpds_py-0.26.0-cp313-cp313-win32.whl", hash = "sha256:2a343f91b17097c546b93f7999976fd6c9d5900617aa848c81d794e062ab302b", size = 222663, upload-time = "2025-07-01T15:54:52.023Z" },
1354
+ { url = "https://files.pythonhosted.org/packages/e0/0b/0851bdd6025775aaa2365bb8de0697ee2558184c800bfef8d7aef5ccde58/rpds_py-0.26.0-cp313-cp313-win_amd64.whl", hash = "sha256:0a0b60701f2300c81b2ac88a5fb893ccfa408e1c4a555a77f908a2596eb875a5", size = 234428, upload-time = "2025-07-01T15:54:53.692Z" },
1355
+ { url = "https://files.pythonhosted.org/packages/ed/e8/a47c64ed53149c75fb581e14a237b7b7cd18217e969c30d474d335105622/rpds_py-0.26.0-cp313-cp313-win_arm64.whl", hash = "sha256:257d011919f133a4746958257f2c75238e3ff54255acd5e3e11f3ff41fd14256", size = 222571, upload-time = "2025-07-01T15:54:54.822Z" },
1356
+ { url = "https://files.pythonhosted.org/packages/89/bf/3d970ba2e2bcd17d2912cb42874107390f72873e38e79267224110de5e61/rpds_py-0.26.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:529c8156d7506fba5740e05da8795688f87119cce330c244519cf706a4a3d618", size = 360475, upload-time = "2025-07-01T15:54:56.228Z" },
1357
+ { url = "https://files.pythonhosted.org/packages/82/9f/283e7e2979fc4ec2d8ecee506d5a3675fce5ed9b4b7cb387ea5d37c2f18d/rpds_py-0.26.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f53ec51f9d24e9638a40cabb95078ade8c99251945dad8d57bf4aabe86ecee35", size = 346692, upload-time = "2025-07-01T15:54:58.561Z" },
1358
+ { url = "https://files.pythonhosted.org/packages/e3/03/7e50423c04d78daf391da3cc4330bdb97042fc192a58b186f2d5deb7befd/rpds_py-0.26.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab504c4d654e4a29558eaa5bb8cea5fdc1703ea60a8099ffd9c758472cf913f", size = 379415, upload-time = "2025-07-01T15:54:59.751Z" },
1359
+ { url = "https://files.pythonhosted.org/packages/57/00/d11ee60d4d3b16808432417951c63df803afb0e0fc672b5e8d07e9edaaae/rpds_py-0.26.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fd0641abca296bc1a00183fe44f7fced8807ed49d501f188faa642d0e4975b83", size = 391783, upload-time = "2025-07-01T15:55:00.898Z" },
1360
+ { url = "https://files.pythonhosted.org/packages/08/b3/1069c394d9c0d6d23c5b522e1f6546b65793a22950f6e0210adcc6f97c3e/rpds_py-0.26.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69b312fecc1d017b5327afa81d4da1480f51c68810963a7336d92203dbb3d4f1", size = 512844, upload-time = "2025-07-01T15:55:02.201Z" },
1361
+ { url = "https://files.pythonhosted.org/packages/08/3b/c4fbf0926800ed70b2c245ceca99c49f066456755f5d6eb8863c2c51e6d0/rpds_py-0.26.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c741107203954f6fc34d3066d213d0a0c40f7bb5aafd698fb39888af277c70d8", size = 402105, upload-time = "2025-07-01T15:55:03.698Z" },
1362
+ { url = "https://files.pythonhosted.org/packages/1c/b0/db69b52ca07413e568dae9dc674627a22297abb144c4d6022c6d78f1e5cc/rpds_py-0.26.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3e55a7db08dc9a6ed5fb7103019d2c1a38a349ac41901f9f66d7f95750942f", size = 383440, upload-time = "2025-07-01T15:55:05.398Z" },
1363
+ { url = "https://files.pythonhosted.org/packages/4c/e1/c65255ad5b63903e56b3bb3ff9dcc3f4f5c3badde5d08c741ee03903e951/rpds_py-0.26.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e851920caab2dbcae311fd28f4313c6953993893eb5c1bb367ec69d9a39e7ed", size = 412759, upload-time = "2025-07-01T15:55:08.316Z" },
1364
+ { url = "https://files.pythonhosted.org/packages/e4/22/bb731077872377a93c6e93b8a9487d0406c70208985831034ccdeed39c8e/rpds_py-0.26.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:dfbf280da5f876d0b00c81f26bedce274e72a678c28845453885a9b3c22ae632", size = 556032, upload-time = "2025-07-01T15:55:09.52Z" },
1365
+ { url = "https://files.pythonhosted.org/packages/e0/8b/393322ce7bac5c4530fb96fc79cc9ea2f83e968ff5f6e873f905c493e1c4/rpds_py-0.26.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:1cc81d14ddfa53d7f3906694d35d54d9d3f850ef8e4e99ee68bc0d1e5fed9a9c", size = 585416, upload-time = "2025-07-01T15:55:11.216Z" },
1366
+ { url = "https://files.pythonhosted.org/packages/49/ae/769dc372211835bf759319a7aae70525c6eb523e3371842c65b7ef41c9c6/rpds_py-0.26.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dca83c498b4650a91efcf7b88d669b170256bf8017a5db6f3e06c2bf031f57e0", size = 554049, upload-time = "2025-07-01T15:55:13.004Z" },
1367
+ { url = "https://files.pythonhosted.org/packages/6b/f9/4c43f9cc203d6ba44ce3146246cdc38619d92c7bd7bad4946a3491bd5b70/rpds_py-0.26.0-cp313-cp313t-win32.whl", hash = "sha256:4d11382bcaf12f80b51d790dee295c56a159633a8e81e6323b16e55d81ae37e9", size = 218428, upload-time = "2025-07-01T15:55:14.486Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/8b/9286b7e822036a4a977f2f1e851c7345c20528dbd56b687bb67ed68a8ede/rpds_py-0.26.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ff110acded3c22c033e637dd8896e411c7d3a11289b2edf041f86663dbc791e9", size = 231524, upload-time = "2025-07-01T15:55:15.745Z" },
+ { url = "https://files.pythonhosted.org/packages/55/07/029b7c45db910c74e182de626dfdae0ad489a949d84a468465cd0ca36355/rpds_py-0.26.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:da619979df60a940cd434084355c514c25cf8eb4cf9a508510682f6c851a4f7a", size = 364292, upload-time = "2025-07-01T15:55:17.001Z" },
+ { url = "https://files.pythonhosted.org/packages/13/d1/9b3d3f986216b4d1f584878dca15ce4797aaf5d372d738974ba737bf68d6/rpds_py-0.26.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ea89a2458a1a75f87caabefe789c87539ea4e43b40f18cff526052e35bbb4fdf", size = 350334, upload-time = "2025-07-01T15:55:18.922Z" },
+ { url = "https://files.pythonhosted.org/packages/18/98/16d5e7bc9ec715fa9668731d0cf97f6b032724e61696e2db3d47aeb89214/rpds_py-0.26.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feac1045b3327a45944e7dcbeb57530339f6b17baff154df51ef8b0da34c8c12", size = 384875, upload-time = "2025-07-01T15:55:20.399Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/13/aa5e2b1ec5ab0e86a5c464d53514c0467bec6ba2507027d35fc81818358e/rpds_py-0.26.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b818a592bd69bfe437ee8368603d4a2d928c34cffcdf77c2e761a759ffd17d20", size = 399993, upload-time = "2025-07-01T15:55:21.729Z" },
+ { url = "https://files.pythonhosted.org/packages/17/03/8021810b0e97923abdbab6474c8b77c69bcb4b2c58330777df9ff69dc559/rpds_py-0.26.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a8b0dd8648709b62d9372fc00a57466f5fdeefed666afe3fea5a6c9539a0331", size = 516683, upload-time = "2025-07-01T15:55:22.918Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/b1/da8e61c87c2f3d836954239fdbbfb477bb7b54d74974d8f6fcb34342d166/rpds_py-0.26.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6d3498ad0df07d81112aa6ec6c95a7e7b1ae00929fb73e7ebee0f3faaeabad2f", size = 408825, upload-time = "2025-07-01T15:55:24.207Z" },
+ { url = "https://files.pythonhosted.org/packages/38/bc/1fc173edaaa0e52c94b02a655db20697cb5fa954ad5a8e15a2c784c5cbdd/rpds_py-0.26.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24a4146ccb15be237fdef10f331c568e1b0e505f8c8c9ed5d67759dac58ac246", size = 387292, upload-time = "2025-07-01T15:55:25.554Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/eb/3a9bb4bd90867d21916f253caf4f0d0be7098671b6715ad1cead9fe7bab9/rpds_py-0.26.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a9a63785467b2d73635957d32a4f6e73d5e4df497a16a6392fa066b753e87387", size = 420435, upload-time = "2025-07-01T15:55:27.798Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/16/e066dcdb56f5632713445271a3f8d3d0b426d51ae9c0cca387799df58b02/rpds_py-0.26.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:de4ed93a8c91debfd5a047be327b7cc8b0cc6afe32a716bbbc4aedca9e2a83af", size = 562410, upload-time = "2025-07-01T15:55:29.057Z" },
+ { url = "https://files.pythonhosted.org/packages/60/22/ddbdec7eb82a0dc2e455be44c97c71c232983e21349836ce9f272e8a3c29/rpds_py-0.26.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:caf51943715b12af827696ec395bfa68f090a4c1a1d2509eb4e2cb69abbbdb33", size = 590724, upload-time = "2025-07-01T15:55:30.719Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/b4/95744085e65b7187d83f2fcb0bef70716a1ea0a9e5d8f7f39a86e5d83424/rpds_py-0.26.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4a59e5bc386de021f56337f757301b337d7ab58baa40174fb150accd480bc953", size = 558285, upload-time = "2025-07-01T15:55:31.981Z" },
+ { url = "https://files.pythonhosted.org/packages/37/37/6309a75e464d1da2559446f9c811aa4d16343cebe3dbb73701e63f760caa/rpds_py-0.26.0-cp314-cp314-win32.whl", hash = "sha256:92c8db839367ef16a662478f0a2fe13e15f2227da3c1430a782ad0f6ee009ec9", size = 223459, upload-time = "2025-07-01T15:55:33.312Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/6f/8e9c11214c46098b1d1391b7e02b70bb689ab963db3b19540cba17315291/rpds_py-0.26.0-cp314-cp314-win_amd64.whl", hash = "sha256:b0afb8cdd034150d4d9f53926226ed27ad15b7f465e93d7468caaf5eafae0d37", size = 236083, upload-time = "2025-07-01T15:55:34.933Z" },
+ { url = "https://files.pythonhosted.org/packages/47/af/9c4638994dd623d51c39892edd9d08e8be8220a4b7e874fa02c2d6e91955/rpds_py-0.26.0-cp314-cp314-win_arm64.whl", hash = "sha256:ca3f059f4ba485d90c8dc75cb5ca897e15325e4e609812ce57f896607c1c0867", size = 223291, upload-time = "2025-07-01T15:55:36.202Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/db/669a241144460474aab03e254326b32c42def83eb23458a10d163cb9b5ce/rpds_py-0.26.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:5afea17ab3a126006dc2f293b14ffc7ef3c85336cf451564a0515ed7648033da", size = 361445, upload-time = "2025-07-01T15:55:37.483Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/2d/133f61cc5807c6c2fd086a46df0eb8f63a23f5df8306ff9f6d0fd168fecc/rpds_py-0.26.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:69f0c0a3df7fd3a7eec50a00396104bb9a843ea6d45fcc31c2d5243446ffd7a7", size = 347206, upload-time = "2025-07-01T15:55:38.828Z" },
+ { url = "https://files.pythonhosted.org/packages/05/bf/0e8fb4c05f70273469eecf82f6ccf37248558526a45321644826555db31b/rpds_py-0.26.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:801a71f70f9813e82d2513c9a96532551fce1e278ec0c64610992c49c04c2dad", size = 380330, upload-time = "2025-07-01T15:55:40.175Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/a8/060d24185d8b24d3923322f8d0ede16df4ade226a74e747b8c7c978e3dd3/rpds_py-0.26.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df52098cde6d5e02fa75c1f6244f07971773adb4a26625edd5c18fee906fa84d", size = 392254, upload-time = "2025-07-01T15:55:42.015Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/7b/7c2e8a9ee3e6bc0bae26bf29f5219955ca2fbb761dca996a83f5d2f773fe/rpds_py-0.26.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bc596b30f86dc6f0929499c9e574601679d0341a0108c25b9b358a042f51bca", size = 516094, upload-time = "2025-07-01T15:55:43.603Z" },
+ { url = "https://files.pythonhosted.org/packages/75/d6/f61cafbed8ba1499b9af9f1777a2a199cd888f74a96133d8833ce5eaa9c5/rpds_py-0.26.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dfbe56b299cf5875b68eb6f0ebaadc9cac520a1989cac0db0765abfb3709c19", size = 402889, upload-time = "2025-07-01T15:55:45.275Z" },
+ { url = "https://files.pythonhosted.org/packages/92/19/c8ac0a8a8df2dd30cdec27f69298a5c13e9029500d6d76718130f5e5be10/rpds_py-0.26.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac64f4b2bdb4ea622175c9ab7cf09444e412e22c0e02e906978b3b488af5fde8", size = 384301, upload-time = "2025-07-01T15:55:47.098Z" },
+ { url = "https://files.pythonhosted.org/packages/41/e1/6b1859898bc292a9ce5776016c7312b672da00e25cec74d7beced1027286/rpds_py-0.26.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:181ef9b6bbf9845a264f9aa45c31836e9f3c1f13be565d0d010e964c661d1e2b", size = 412891, upload-time = "2025-07-01T15:55:48.412Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/b9/ceb39af29913c07966a61367b3c08b4f71fad841e32c6b59a129d5974698/rpds_py-0.26.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:49028aa684c144ea502a8e847d23aed5e4c2ef7cadfa7d5eaafcb40864844b7a", size = 557044, upload-time = "2025-07-01T15:55:49.816Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/27/35637b98380731a521f8ec4f3fd94e477964f04f6b2f8f7af8a2d889a4af/rpds_py-0.26.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:e5d524d68a474a9688336045bbf76cb0def88549c1b2ad9dbfec1fb7cfbe9170", size = 585774, upload-time = "2025-07-01T15:55:51.192Z" },
+ { url = "https://files.pythonhosted.org/packages/52/d9/3f0f105420fecd18551b678c9a6ce60bd23986098b252a56d35781b3e7e9/rpds_py-0.26.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c1851f429b822831bd2edcbe0cfd12ee9ea77868f8d3daf267b189371671c80e", size = 554886, upload-time = "2025-07-01T15:55:52.541Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/c5/347c056a90dc8dd9bc240a08c527315008e1b5042e7a4cf4ac027be9d38a/rpds_py-0.26.0-cp314-cp314t-win32.whl", hash = "sha256:7bdb17009696214c3b66bb3590c6d62e14ac5935e53e929bcdbc5a495987a84f", size = 219027, upload-time = "2025-07-01T15:55:53.874Z" },
+ { url = "https://files.pythonhosted.org/packages/75/04/5302cea1aa26d886d34cadbf2dc77d90d7737e576c0065f357b96dc7a1a6/rpds_py-0.26.0-cp314-cp314t-win_amd64.whl", hash = "sha256:f14440b9573a6f76b4ee4770c13f0b5921f71dde3b6fcb8dabbefd13b7fe05d7", size = 232821, upload-time = "2025-07-01T15:55:55.167Z" },
+ ]
+
  [[package]]
  name = "ruff"
  version = "0.12.0"
 
  { url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677, upload-time = "2025-04-20T18:50:07.196Z" },
  ]
 
+ [[package]]
+ name = "sse-starlette"
+ version = "2.3.6"
+ source = { registry = "https://pypi.org/simple" }
+ dependencies = [
+ { name = "anyio" },
+ ]
+ sdist = { url = "https://files.pythonhosted.org/packages/8c/f4/989bc70cb8091eda43a9034ef969b25145291f3601703b82766e5172dfed/sse_starlette-2.3.6.tar.gz", hash = "sha256:0382336f7d4ec30160cf9ca0518962905e1b69b72d6c1c995131e0a703b436e3", size = 18284, upload-time = "2025-05-30T13:34:12.914Z" }
+ wheels = [
+ { url = "https://files.pythonhosted.org/packages/81/05/78850ac6e79af5b9508f8841b0f26aa9fd329a1ba00bf65453c2d312bcc8/sse_starlette-2.3.6-py3-none-any.whl", hash = "sha256:d49a8285b182f6e2228e2609c350398b2ca2c36216c2675d875f81e93548f760", size = 10606, upload-time = "2025-05-30T13:34:11.703Z" },
+ ]
+
  [[package]]
  name = "starlette"
  version = "0.46.2"
 
  { url = "https://files.pythonhosted.org/packages/76/42/3efaf858001d2c2913de7f354563e3a3a2f0decae3efe98427125a8f441e/typer-0.16.0-py3-none-any.whl", hash = "sha256:1f79bed11d4d02d4310e3c1b7ba594183bcedb0ac73b27a9e5f28f6fb5b98855", size = 46317, upload-time = "2025-05-26T14:30:30.523Z" },
  ]
 
+ [[package]]
+ name = "types-requests"
+ version = "2.32.4.20250611"
+ source = { registry = "https://pypi.org/simple" }
+ dependencies = [
+ { name = "urllib3" },
+ ]
+ sdist = { url = "https://files.pythonhosted.org/packages/6d/7f/73b3a04a53b0fd2a911d4ec517940ecd6600630b559e4505cc7b68beb5a0/types_requests-2.32.4.20250611.tar.gz", hash = "sha256:741c8777ed6425830bf51e54d6abe245f79b4dcb9019f1622b773463946bf826", size = 23118, upload-time = "2025-06-11T03:11:41.272Z" }
+ wheels = [
+ { url = "https://files.pythonhosted.org/packages/3d/ea/0be9258c5a4fa1ba2300111aa5a0767ee6d18eb3fd20e91616c12082284d/types_requests-2.32.4.20250611-py3-none-any.whl", hash = "sha256:ad2fe5d3b0cb3c2c902c8815a70e7fb2302c4b8c1f77bdcd738192cdb3878072", size = 20643, upload-time = "2025-06-11T03:11:40.186Z" },
+ ]
+
  [[package]]
  name = "typing-extensions"
  version = "4.14.0"
 
  { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839, upload-time = "2025-06-02T14:52:10.026Z" },
  ]
 
+ [[package]]
1595
+ name = "typing-inspection"
1596
+ version = "0.4.1"
1597
+ source = { registry = "https://pypi.org/simple" }
1598
+ dependencies = [
1599
+ { name = "typing-extensions" },
1600
+ ]
1601
+ sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" }
1602
+ wheels = [
1603
+ { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" },
1604
+ ]
1605
+
1606
  [[package]]
1607
  name = "tzdata"
1608
  version = "2025.2"