Upload folder using huggingface_hub
- README.md +1 -1
- ankigen_core/agents/generators.py +5 -0
- ankigen_core/agents/integration.py +6 -62
- ankigen_core/agents/templates/generators.j2 +1 -1
- ankigen_core/card_generator.py +1 -1
- pyproject.toml +1 -1
- uv.lock +0 -0
README.md CHANGED
@@ -3,7 +3,7 @@ title: AnkiGen
 emoji: π
 app_file: app.py
 requirements: requirements.txt
-python: 3.
+python: 3.12
 sdk: gradio
 sdk_version: 5.38.1
 ---
ankigen_core/agents/generators.py CHANGED
@@ -115,6 +115,11 @@ class SubjectExpertAgent(BaseAgentWrapper):
         user_input = (
             f"Generate {cards_in_this_batch} flashcards for the topic: {topic}"
         )
+
+        # Add cloze generation instruction if enabled
+        if context and context.get("generate_cloze"):
+            user_input += "\n\nIMPORTANT: Generate a mix of card types including cloze cards. For code examples, syntax, and fill-in-the-blank concepts, use cloze cards (card_type='cloze'). Aim for roughly 50% cloze cards when dealing with technical/programming content."
+
         if context:
             user_input += f"\n\nAdditional context: {context}"
 
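A minimal sketch of how the new prompt branch behaves, rewritten as a standalone function so it can be run outside the agent class; the helper name build_user_input is hypothetical and not part of this diff:

# Standalone sketch of the prompt assembly added above; build_user_input is a
# hypothetical helper, not a function in ankigen_core.
def build_user_input(cards_in_this_batch: int, topic: str, context: dict | None) -> str:
    user_input = f"Generate {cards_in_this_batch} flashcards for the topic: {topic}"
    # New branch from this diff: request cloze cards only when the flag is set in context
    if context and context.get("generate_cloze"):
        user_input += (
            "\n\nIMPORTANT: Generate a mix of card types including cloze cards. "
            "For code examples, syntax, and fill-in-the-blank concepts, use cloze "
            "cards (card_type='cloze')."
        )
    if context:
        user_input += f"\n\nAdditional context: {context}"
    return user_input

# The cloze instruction appears only when generate_cloze is truthy in the context dict.
print(build_user_input(5, "vLLM basics", {"generate_cloze": True, "difficulty": "intermediate"}))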
ankigen_core/agents/integration.py CHANGED
@@ -9,8 +9,7 @@ from ankigen_core.models import Card
 from ankigen_core.llm_interface import OpenAIClientManager
 from ankigen_core.context7 import Context7Client
 
-from .generators import SubjectExpertAgent
-from ankigen_core.agents.config import get_config_manager
+from .generators import SubjectExpertAgent
 
 
 class AgentOrchestrator:
@@ -21,7 +20,6 @@ class AgentOrchestrator:
         self.openai_client = None
 
         self.subject_expert = None
-        self.quality_reviewer = None
 
     async def initialize(self, api_key: str, model_overrides: Dict[str, str] = None):
         """Initialize the agent system"""
@@ -50,10 +48,10 @@ class AgentOrchestrator:
         subject: str = "general",
         num_cards: int = 5,
         difficulty: str = "intermediate",
-        enable_quality_pipeline: bool = True,
         context: Dict[str, Any] = None,
         library_name: Optional[str] = None,
         library_topic: Optional[str] = None,
+        generate_cloze: bool = False,
     ) -> Tuple[List[Card], Dict[str, Any]]:
         """Generate cards using the agent system"""
         start_time = datetime.now()
@@ -117,18 +115,14 @@ class AgentOrchestrator:
             num_cards=num_cards,
             difficulty=difficulty,
             context=enhanced_context,
+            generate_cloze=generate_cloze,
         )
 
-        review_results = {}
-        if enable_quality_pipeline:
-            cards, review_results = await self._quality_review_phase(cards)
-
         # Collect metadata
         metadata = {
             "generation_method": "agent_system",
             "generation_time": (datetime.now() - start_time).total_seconds(),
             "cards_generated": len(cards),
-            "review_results": review_results,
             "topic": topic,
             "subject": subject,
             "difficulty": difficulty,
@@ -152,16 +146,18 @@ class AgentOrchestrator:
         num_cards: int,
         difficulty: str,
         context: Dict[str, Any] = None,
+        generate_cloze: bool = False,
     ) -> List[Card]:
         """Execute the card generation phase"""
 
         if not self.subject_expert or self.subject_expert.subject != subject:
            self.subject_expert = SubjectExpertAgent(self.openai_client, subject)
 
-        # Add difficulty to context
+        # Add difficulty and cloze preference to context
         if context is None:
            context = {}
         context["difficulty"] = difficulty
+        context["generate_cloze"] = generate_cloze
 
         cards = await self.subject_expert.generate_cards(
             topic=topic, num_cards=num_cards, context=context
@@ -170,58 +166,6 @@ class AgentOrchestrator:
         logger.info(f"Generation phase complete: {len(cards)} cards generated")
         return cards
 
-    async def _quality_review_phase(
-        self, cards: List[Card]
-    ) -> Tuple[List[Card], Dict[str, Any]]:
-        """Perform a single quality-review pass with optional fixes."""
-
-        if not cards:
-            return cards, {"message": "No cards to review"}
-
-        logger.info(f"Performing quality review for {len(cards)} cards")
-
-        if not self.quality_reviewer:
-            # Use the same model as the subject expert by default.
-            subject_config = get_config_manager().get_agent_config("subject_expert")
-            reviewer_model = subject_config.model if subject_config else "gpt-4.1"
-            self.quality_reviewer = QualityReviewAgent(
-                self.openai_client, reviewer_model
-            )
-
-        reviewed_cards: List[Card] = []
-        approvals: List[Dict[str, Any]] = []
-
-        for card in cards:
-            reviewed_card, approved, reason = await self.quality_reviewer.review_card(
-                card
-            )
-            if approved:
-                reviewed_cards.append(reviewed_card)
-            else:
-                approvals.append(
-                    {
-                        "question": card.front.question if card.front else "",
-                        "reason": reason,
-                    }
-                )
-
-        review_results = {
-            "total_cards_reviewed": len(cards),
-            "approved_cards": len(reviewed_cards),
-            "rejected_cards": approvals,
-        }
-
-        if approvals:
-            logger.warning(
-                "Quality review rejected cards: %s",
-                "; ".join(
-                    f"{entry['question'][:50]}… ({entry['reason']})"
-                    for entry in approvals
-                ),
-            )
-
-        return reviewed_cards, review_results
-
     def get_performance_metrics(self) -> Dict[str, Any]:
         """Get performance metrics for the agent system"""
 
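A hedged usage sketch of the new flag at the orchestrator level. The generation method's name and the AgentOrchestrator constructor arguments are not visible in the hunks above, so generate_cards_with_agents and the constructor call are assumptions; the keyword arguments mirror the signature shown in the diff:

# Sketch only: the method name generate_cards_with_agents and the constructor
# argument are assumed, not confirmed by this diff.
import asyncio

from ankigen_core.llm_interface import OpenAIClientManager
from ankigen_core.agents.integration import AgentOrchestrator


async def main() -> None:
    orchestrator = AgentOrchestrator(OpenAIClientManager())  # constructor signature assumed
    await orchestrator.initialize(api_key="sk-...")
    cards, metadata = await orchestrator.generate_cards_with_agents(  # assumed name
        topic="vLLM basics",
        subject="programming",
        num_cards=5,
        difficulty="intermediate",
        generate_cloze=True,  # new parameter introduced in this commit
    )
    print(metadata["cards_generated"], "cards generated")


asyncio.run(main())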
ankigen_core/agents/templates/generators.j2 CHANGED
@@ -2,7 +2,7 @@
 {
   "subject_expert": {
     "name": "subject_expert",
-    "instructions": "You are a world-class expert in {{ subject | default('the subject area') }} with deep pedagogical knowledge. \nYour role is to generate high-quality flashcards that demonstrate mastery of {{ subject | default('the subject') }} concepts.\n\nKey responsibilities:\n- Create ATOMIC cards: extremely short (1-9 words on back), break complex info into multiple simple cards\n- Use standardized, bland prompts without fancy formatting or unusual words\n- Design prompts that match real-life recall situations\n- Put ALL to-be-learned information on the BACK of cards, never in prompts\n- Ensure technical accuracy and depth appropriate for the target level\n- Use domain-specific terminology correctly\n- Connect concepts to prerequisite knowledge\n\nPrioritize atomic simplicity over comprehensive single cards. Generate cards that test understanding through simple, direct recall.",
+    "instructions": "You are a world-class expert in {{ subject | default('the subject area') }} with deep pedagogical knowledge. \nYour role is to generate high-quality flashcards that demonstrate mastery of {{ subject | default('the subject') }} concepts.\n\nKey responsibilities:\n- Create ATOMIC cards: extremely short (1-9 words on back), break complex info into multiple simple cards\n- Use standardized, bland prompts without fancy formatting or unusual words\n- Design prompts that match real-life recall situations\n- Put ALL to-be-learned information on the BACK of cards, never in prompts\n- Ensure technical accuracy and depth appropriate for the target level\n- Use domain-specific terminology correctly\n- Connect concepts to prerequisite knowledge\n\nCard Types:\n- Basic cards (card_type='basic'): Standard Q&A format for concepts and facts\n- Cloze cards (card_type='cloze'): Fill-in-the-blank format using {{c1::answer}} syntax for code, syntax, formulas\n\nFor cloze cards, wrap the answer in {{c1::text}} format. Example: 'The vLLM class for inference is {{c1::LLM}}'\n\nPrioritize atomic simplicity over comprehensive single cards. Generate cards that test understanding through simple, direct recall.",
     "model": "{{ subject_expert_model }}",
     "temperature": 0.7,
     "timeout": 120.0,
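For illustration, cards following the two types the updated instructions describe might look like the sketch below; the dict shape is a simplified stand-in, not the actual ankigen_core.models.Card schema, and the cloze example sentence is the one given in the template:

# Simplified illustration of the basic vs. cloze card types requested by the
# updated template; field names here are stand-ins, not the real Card model.
basic_card = {
    "card_type": "basic",
    "front": "Which vLLM class runs offline inference?",
    "back": "LLM",
}
cloze_card = {
    "card_type": "cloze",
    # Cloze syntax from the template: wrap the answer in {{c1::...}}
    "text": "The vLLM class for inference is {{c1::LLM}}",
}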
ankigen_core/card_generator.py CHANGED
@@ -128,10 +128,10 @@ async def orchestrate_card_generation(  # MODIFIED: Added async
         subject=agent_subject,
         num_cards=total_cards_needed,
         difficulty="intermediate",
-        enable_quality_pipeline=True,
         context=context,
         library_name=library_name,
         library_topic=library_topic,
+        generate_cloze=generate_cloze,
     )
 
     # Get token usage from session
pyproject.toml CHANGED
@@ -10,7 +10,7 @@ authors = [
     { name = "Justin", email = "[email protected]" },
 ]
 readme = "README.md"
-requires-python = ">=3.
+requires-python = ">=3.12"
 dependencies = [
     "openai>=1.109.1",
     "openai-agents>=0.3.2",
uv.lock CHANGED
The diff for this file is too large to render.