Upload 35 files
- app.py +69 -0
- coursecrafter/__init__.py +21 -0
- coursecrafter/__pycache__/__init__.cpython-311.pyc +0 -0
- coursecrafter/__pycache__/types.cpython-311.pyc +0 -0
- coursecrafter/agents/__init__.py +13 -0
- coursecrafter/agents/__pycache__/__init__.cpython-311.pyc +0 -0
- coursecrafter/agents/__pycache__/llm_client.cpython-311.pyc +0 -0
- coursecrafter/agents/__pycache__/simple_course_agent.cpython-311.pyc +0 -0
- coursecrafter/agents/llm_client.py +270 -0
- coursecrafter/agents/simple_course_agent.py +844 -0
- coursecrafter/tools/__init__.py +19 -0
- coursecrafter/tools/__pycache__/__init__.cpython-311.pyc +0 -0
- coursecrafter/tools/__pycache__/image_generation.cpython-311.pyc +0 -0
- coursecrafter/tools/__pycache__/web_research.cpython-311.pyc +0 -0
- coursecrafter/tools/image_generation.py +254 -0
- coursecrafter/tools/web_research.py +257 -0
- coursecrafter/types.py +326 -0
- coursecrafter/ui/__init__.py +16 -0
- coursecrafter/ui/__pycache__/__init__.cpython-311.pyc +0 -0
- coursecrafter/ui/__pycache__/components.cpython-311.pyc +0 -0
- coursecrafter/ui/__pycache__/gradio_app.cpython-311.pyc +0 -0
- coursecrafter/ui/__pycache__/progress_tracker.cpython-311.pyc +0 -0
- coursecrafter/ui/__pycache__/styling.cpython-311.pyc +0 -0
- coursecrafter/ui/components.py +703 -0
- coursecrafter/ui/gradio_app.py +1483 -0
- coursecrafter/ui/progress_tracker.py +509 -0
- coursecrafter/ui/styling.py +451 -0
- coursecrafter/utils/__init__.py +37 -0
- coursecrafter/utils/__pycache__/__init__.cpython-311.pyc +0 -0
- coursecrafter/utils/__pycache__/config.cpython-311.pyc +0 -0
- coursecrafter/utils/__pycache__/export.cpython-311.pyc +0 -0
- coursecrafter/utils/__pycache__/helpers.cpython-311.pyc +0 -0
- coursecrafter/utils/config.py +261 -0
- coursecrafter/utils/helpers.py +475 -0
- requirements.txt +57 -0
app.py
ADDED
@@ -0,0 +1,69 @@
#!/usr/bin/env python3
"""
🎓 Course Creator AI - Main Application Entry Point

Launch the Gradio interface for course generation.

Made with ❤️ by Pink Pixel
"Dream it, Pixel it" ✨
"""

import os
import sys
import gradio as gr
from pathlib import Path

# Add the project root to Python path
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))

from coursecrafter.ui.gradio_app import create_coursecrafter_interface
from coursecrafter.utils.config import config


def setup_backend_credentials():
    """Set up backend credentials for image generation"""
    # Set up Pollinations credentials for image generation
    if not os.getenv("POLLINATIONS_API_REFERENCE"):
        os.environ["POLLINATIONS_API_REFERENCE"] = "course-creator-ai-hf"

    print("🎨 Backend image generation credentials configured")


def main():
    """Main application entry point"""
    print("🎓 Starting Course Creator AI...")

    # Set up backend credentials
    setup_backend_credentials()

    # Create and launch the Gradio interface
    try:
        interface = create_coursecrafter_interface()

        # Launch configuration
        launch_kwargs = {
            "server_name": "0.0.0.0",
            "server_port": 7860,
            "share": False,
            "show_error": True,
            "quiet": False
        }

        # Check if running in Hugging Face Spaces
        if os.getenv("SPACE_ID"):
            print("🚀 Running in Hugging Face Spaces")
            launch_kwargs["share"] = True
        else:
            print("🖥️ Running locally")

        print("🌐 Launching Course Creator AI interface...")
        interface.launch(**launch_kwargs)

    except Exception as e:
        print(f"❌ Failed to launch application: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
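Usage note: setup_backend_credentials() only sets POLLINATIONS_API_REFERENCE when it is not already present, so the value can be overridden from the environment before launch. A minimal sketch (an assumption, not part of the commit), run from the repository root with the dependencies from requirements.txt installed; the reference string shown is purely illustrative:

# Minimal sketch: override the Pollinations reference before launching the app.
# "my-own-reference" is a hypothetical value; app.main() starts Gradio on 0.0.0.0:7860.
import os

os.environ.setdefault("POLLINATIONS_API_REFERENCE", "my-own-reference")

import app

app.main()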
coursecrafter/__init__.py
ADDED
@@ -0,0 +1,21 @@
"""
🎓 Course Creator AI - Intelligent Course Generation Agent

Transform any topic into a complete, structured mini-course with interactive learning materials.

Made with ❤️ by Pink Pixel
"Dream it, Pixel it" ✨
"""

__version__ = "1.0.0"
__author__ = "Pink Pixel"
__email__ = "[email protected]"
__description__ = "Course Creator AI is an intelligent agent that generates structured mini-courses from any topic, complete with interactive learning materials."

from .agents.simple_course_agent import SimpleCourseAgent
from .ui.gradio_app import create_coursecrafter_interface

__all__ = [
    "SimpleCourseAgent",
    "create_coursecrafter_interface"
]
coursecrafter/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (918 Bytes)
coursecrafter/__pycache__/types.cpython-311.pyc
ADDED
Binary file (19 kB)
coursecrafter/agents/__init__.py
ADDED
@@ -0,0 +1,13 @@
"""
🧠 Agent Components

Core agent classes for course generation.
"""

from .simple_course_agent import SimpleCourseAgent
from .llm_client import LlmClient

__all__ = [
    "SimpleCourseAgent",
    "LlmClient"
]
coursecrafter/agents/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (467 Bytes)
coursecrafter/agents/__pycache__/llm_client.cpython-311.pyc
ADDED
Binary file (13.9 kB)
coursecrafter/agents/__pycache__/simple_course_agent.cpython-311.pyc
ADDED
Binary file (41 kB)
coursecrafter/agents/llm_client.py
ADDED
@@ -0,0 +1,270 @@
"""
🧠 LLM Client for CourseCrafter AI

Multi-provider LLM client with streaming support.
"""

import json
from typing import Dict, List, Any, Optional, AsyncGenerator
from dataclasses import dataclass
from abc import ABC, abstractmethod

import openai
import anthropic
import google.generativeai as genai

from ..types import LLMProvider, StreamChunk
from ..utils.config import config


@dataclass
class Message:
    """Standard message format"""
    role: str  # "system", "user", "assistant"
    content: str


class BaseLLMClient(ABC):
    """Abstract base class for LLM clients"""

    def __init__(self, provider: LLMProvider):
        self.provider = provider
        self.config = config.get_llm_config(provider)

    @abstractmethod
    async def generate_stream(self, messages: List[Message]) -> AsyncGenerator[StreamChunk, None]:
        """Generate streaming response"""
        pass


class OpenAIClient(BaseLLMClient):
    """OpenAI client with streaming support (works with OpenAI and compatible endpoints)"""

    def __init__(self, provider: LLMProvider = "openai"):
        super().__init__(provider)

        # Build client kwargs
        client_kwargs = {
            "api_key": self.config.api_key or "dummy",
            "timeout": self.config.timeout
        }

        # Add base_url for compatible endpoints
        if hasattr(self.config, 'base_url') and self.config.base_url:
            client_kwargs["base_url"] = self.config.base_url

        self.client = openai.AsyncOpenAI(**client_kwargs)

    def _format_messages(self, messages: List[Message]) -> List[Dict[str, Any]]:
        """Format messages for OpenAI"""
        return [{"role": msg.role, "content": msg.content} for msg in messages]

    async def generate_stream(self, messages: List[Message]) -> AsyncGenerator[StreamChunk, None]:
        """Generate streaming response from OpenAI"""

        formatted_messages = self._format_messages(messages)

        kwargs = {
            "model": self.config.model,
            "messages": formatted_messages,
            "temperature": self.config.temperature,
            "stream": True
        }

        if self.config.max_tokens:
            kwargs["max_tokens"] = self.config.max_tokens

        try:
            stream = await self.client.chat.completions.create(**kwargs)

            async for chunk in stream:
                if chunk.choices and chunk.choices[0].delta:
                    delta = chunk.choices[0].delta

                    if delta.content:
                        yield StreamChunk(
                            type="text",
                            content=delta.content
                        )

        except Exception as e:
            yield StreamChunk(
                type="error",
                content=f"OpenAI API error: {str(e)}"
            )


class AnthropicClient(BaseLLMClient):
    """Anthropic client with streaming support"""

    def __init__(self):
        super().__init__("anthropic")
        self.client = anthropic.AsyncAnthropic(
            api_key=self.config.api_key,
            timeout=self.config.timeout
        )

    def _format_messages(self, messages: List[Message]) -> tuple[List[Dict[str, Any]], Optional[str]]:
        """Format messages for Anthropic"""
        formatted = []
        system_message = None

        for msg in messages:
            if msg.role == "system":
                system_message = msg.content
            elif msg.role in ["user", "assistant"]:
                formatted.append({
                    "role": msg.role,
                    "content": msg.content
                })

        return formatted, system_message

    async def generate_stream(self, messages: List[Message]) -> AsyncGenerator[StreamChunk, None]:
        """Generate streaming response from Anthropic"""

        formatted_messages, system_message = self._format_messages(messages)

        kwargs = {
            "model": self.config.model,
            "messages": formatted_messages,
            "temperature": self.config.temperature,
            "stream": True
        }

        if system_message:
            kwargs["system"] = system_message

        if self.config.max_tokens:
            kwargs["max_tokens"] = self.config.max_tokens

        try:
            stream = await self.client.messages.create(**kwargs)

            async for chunk in stream:
                if chunk.type == "content_block_delta":
                    if hasattr(chunk.delta, 'text'):
                        yield StreamChunk(
                            type="text",
                            content=chunk.delta.text
                        )

        except Exception as e:
            yield StreamChunk(
                type="error",
                content=f"Anthropic API error: {str(e)}"
            )


class GoogleClient(BaseLLMClient):
    """Google Gemini client with streaming support"""

    def __init__(self):
        super().__init__("google")
        genai.configure(api_key=self.config.api_key)
        self.model = genai.GenerativeModel(self.config.model)

    def _format_messages(self, messages: List[Message]) -> List[Dict[str, Any]]:
        """Format messages for Google"""
        formatted = []

        for msg in messages:
            if msg.role == "system":
                # Google handles system messages differently
                formatted.append({
                    "role": "user",
                    "parts": [{"text": f"System: {msg.content}"}]
                })
            elif msg.role == "user":
                formatted.append({
                    "role": "user",
                    "parts": [{"text": msg.content}]
                })
            elif msg.role == "assistant":
                formatted.append({
                    "role": "model",
                    "parts": [{"text": msg.content}]
                })

        return formatted

    async def generate_stream(self, messages: List[Message]) -> AsyncGenerator[StreamChunk, None]:
        """Generate streaming response from Google"""

        formatted_messages = self._format_messages(messages)

        generation_config = {
            "temperature": self.config.temperature,
        }

        if self.config.max_tokens:
            generation_config["max_output_tokens"] = self.config.max_tokens

        try:
            response = await self.model.generate_content_async(
                formatted_messages,
                generation_config=generation_config,
                stream=True
            )

            async for chunk in response:
                if chunk.text:
                    yield StreamChunk(
                        type="text",
                        content=chunk.text
                    )

        except Exception as e:
            yield StreamChunk(
                type="error",
                content=f"Google API error: {str(e)}"
            )


class LlmClient:
    """
    Unified LLM client that manages multiple providers
    """

    def __init__(self):
        self.clients = {}
        self._initialize_clients()

    def _initialize_clients(self):
        """Initialize available LLM clients"""
        available_providers = config.get_available_llm_providers()

        for provider in available_providers:
            try:
                if provider in ["openai", "openai_compatible"]:
                    self.clients[provider] = OpenAIClient(provider)
                elif provider == "anthropic":
                    self.clients[provider] = AnthropicClient()
                elif provider == "google":
                    self.clients[provider] = GoogleClient()

                print(f"✅ Initialized {provider} client")
            except Exception as e:
                print(f"❌ Failed to initialize {provider} client: {e}")

    def get_available_providers(self) -> List[LLMProvider]:
        """Get list of available providers"""
        return list(self.clients.keys())

    def get_client(self, provider: LLMProvider) -> BaseLLMClient:
        """Get client for specific provider"""
        if provider not in self.clients:
            raise ValueError(f"Provider {provider} not available")
        return self.clients[provider]

    async def generate_stream(
        self,
        provider: LLMProvider,
        messages: List[Message]
    ) -> AsyncGenerator[StreamChunk, None]:
        """Generate streaming response using specified provider"""
        client = self.get_client(provider)
        async for chunk in client.generate_stream(messages):
            yield chunk
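Usage note: LlmClient.generate_stream yields StreamChunk objects whose type is either "text" or "error". A minimal consumer sketch (not part of the commit), assuming at least one provider is configured via the project config and using "openai" and the prompt text purely as illustrative assumptions:

# Minimal sketch of consuming LlmClient's streaming API (assumes the "openai" provider is configured).
import asyncio

from coursecrafter.agents.llm_client import LlmClient, Message


async def ask(prompt: str) -> str:
    client = LlmClient()
    messages = [
        Message(role="system", content="You are a concise assistant."),
        Message(role="user", content=prompt),
    ]
    collected = []
    # Chunks arrive incrementally; "error" chunks carry the provider's error message.
    async for chunk in client.generate_stream(provider="openai", messages=messages):
        if chunk.type == "text":
            collected.append(chunk.content)
        elif chunk.type == "error":
            raise RuntimeError(chunk.content)
    return "".join(collected)


if __name__ == "__main__":
    print(asyncio.run(ask("Summarize spaced repetition in one sentence.")))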
coursecrafter/agents/simple_course_agent.py
ADDED
@@ -0,0 +1,844 @@
"""
🎓 Simplified CourseCrafter Agent - Core Functionality Only

Focuses on web search, lesson creation, flashcards, and quizzes without complex MCP dependencies.
"""

import json
from typing import Dict, List, Any, Optional, AsyncGenerator, Callable
from datetime import datetime

from .llm_client import LlmClient, Message
from ..types import (
    GenerationOptions, ProgressUpdate, StreamChunk
)
from ..utils.config import config
from ..utils.helpers import smart_json_loads

from ..tools.web_research import research_topic
from ..tools.image_generation import (
    generate_educational_image,
    extract_image_placeholders,
    replace_image_placeholders
)

class SimpleCourseAgent:
    """
    Main agent for course generation:
    - Web search for research
    - Lesson generation
    - Flashcard creation
    - Quiz generation
    - Image generation
    """

    def __init__(self):
        self.llm_client = LlmClient()
        self.system_prompt = self._get_system_prompt()
        self.default_provider = config.get_default_llm_provider()
        print(f"🎓 SimpleCourseAgent initialized with default provider: {self.default_provider}")

    def _get_system_prompt(self) -> str:
        """Get the system prompt for course generation"""
        return """You are Course Creator AI, an expert educational content creator and course designer. Your goal is to create high-quality educational content that is comprehensive, well-structured, engaging, and tailored to the needs of learners. Create detailed lessons, generate flashcards, quizzes, and educational images.

## Your Capabilities:
- Research topics thoroughly using web search and content analysis
- Generate engaging, structured lesson content with clear explanations and objectives
- Generate interactive flashcards for key concepts for repetition learning
- Create multiple-choice quizzes to reinforce learning
- Generate educational images and visual aids
- Ensure content quality and educational effectiveness

## Quality Standards:
- Content must be accurate, well-researched, and up-to-date
- Lessons should build upon each other logically
- Include practical examples and real-world applications
- Maintain appropriate difficulty level for target audience
- Ensure content is engaging and interactive
- Provide clear learning objectives and outcomes

## RESPONSE FORMAT:
- Always respond with valid JSON only. No markdown, no explanations, just pure JSON.
- Follow the format below for each type of content:

For course planning, return:
{
  "title": "Course title",
  "description": "Brief description",
  "learning_objectives": ["objective1", "objective2"],
  "lesson_titles": ["Lesson 1 title", "Lesson 2 title"],
  "estimated_duration": 60
}

For lessons, return:
{
  "title": "Lesson title",
  "duration": 15,
  "objectives": ["Learn X", "Understand Y"],
  "content": "Detailed lesson content in markdown",
  "key_takeaways": ["Key point 1", "Key point 2"],
  "examples": ["Example 1", "Example 2"]
}

For flashcards, return:
[
  {
    "question": "What is X?",
    "answer": "X is...",
    "category": "Category name"
  }
]

For quizzes, return:
{
  "title": "Quiz title",
  "instructions": "Instructions text",
  "questions": [
    {
      "question": "Question text?",
      "options": ["A) Option 1", "B) Option 2", "C) Option 3", "D) Option 4"],
      "correct_answer": "A",
      "explanation": "Why A is correct"
    }
  ]
}

Always strive to create courses that are not just informative, but also easy to understand and engaging learning experiences."""

    async def generate_course(
        self,
        topic: str,
        options: GenerationOptions,
        provider: Optional[str] = None,
        progress_callback: Optional[Callable[[ProgressUpdate], None]] = None
    ) -> AsyncGenerator[StreamChunk, None]:
        """Generate a complete course on the given topic"""

        # Use provided provider or fall back to default
        if provider is None:
            provider = self.default_provider

        print(f"🚀 Starting course generation for: {topic}")
        print(f"📋 Options: {options.lesson_count} lessons, {options.difficulty.value} difficulty")
        print(f"🧠 Using LLM provider: {provider}")

        try:
            # Step 1: Research the topic
            if progress_callback:
                progress_callback(ProgressUpdate(
                    stage="research",
                    progress=0.1,
                    message="Researching topic..."
                ))

            print("🔍 Step 1: Researching topic...")
            research_data = await self._research_topic(topic, provider)
            # Store research data for use in lesson generation
            self._current_research = research_data
            yield StreamChunk(type="progress", content="✅ Research completed")

            # Step 2: Generate course structure
            if progress_callback:
                progress_callback(ProgressUpdate(
                    stage="planning",
                    progress=0.3,
                    message="Planning course structure..."
                ))

            print("📋 Step 2: Planning course structure...")
            course_plan = await self._plan_course(topic, options, provider)
            print(f"✅ Course plan created: {course_plan.get('title', 'Unknown')}")
            yield StreamChunk(type="progress", content="✅ Course structure planned")

            # Step 3: Generate lessons
            if progress_callback:
                progress_callback(ProgressUpdate(
                    stage="lessons",
                    progress=0.5,
                    message="Creating lessons..."
                ))

            print("📚 Step 3: Generating lessons...")
            lessons = await self._generate_lessons(course_plan, options, provider)
            print(f"✅ Generated {len(lessons)} lessons")
            yield StreamChunk(type="progress", content="✅ Lessons created")

            # Step 4: Generate flashcards
            if progress_callback and options.include_flashcards:
                progress_callback(ProgressUpdate(
                    stage="flashcards",
                    progress=0.7,
                    message="Creating flashcards..."
                ))

            flashcards = []
            if options.include_flashcards:
                print("🃏 Step 4: Generating flashcards...")
                flashcards = await self._generate_flashcards(lessons, provider, options.difficulty.value)
                print(f"✅ Generated {len(flashcards)} flashcards")
                yield StreamChunk(type="progress", content="✅ Flashcards created")

            # Step 5: Generate quiz
            if progress_callback and options.include_quizzes:
                progress_callback(ProgressUpdate(
                    stage="quiz",
                    progress=0.8,
                    message="Creating quiz..."
                ))

            quiz = None
            if options.include_quizzes:
                print("📝 Step 5: Generating quiz...")
                quiz = await self._generate_quiz(lessons, provider, options.difficulty.value)
                print(f"✅ Generated quiz with {len(quiz.get('questions', []))} questions")
                yield StreamChunk(type="progress", content="✅ Quiz created")

            # Step 6: Generate images (if requested)
            images = []
            if options.include_images:
                if progress_callback:
                    progress_callback(ProgressUpdate(
                        stage="images",
                        progress=0.9,
                        message="Generating images..."
                    ))

                print("🖼️ Step 6: Generating images...")
                try:
                    images = await self._generate_images(lessons)
                    print(f"✅ Generated {len(images)} images")
                    yield StreamChunk(type="progress", content="✅ Images generated")

                    # Attach images to lessons
                    for i, lesson in enumerate(lessons):
                        if i < len(images):
                            lesson["images"] = [images[i]]

                    print(f"📎 Attached images to {min(len(lessons), len(images))} lessons")

                except Exception as e:
                    print(f"⚠️ Image generation failed: {e}")
                    # Continue without images
                    images = []
                    yield StreamChunk(type="progress", content="⚠️ Images skipped (generation failed)")
            else:
                print("🖼️ Image generation skipped (not requested)")
                yield StreamChunk(type="progress", content="⏭️ Images skipped")

            # Final assembly
            if progress_callback:
                progress_callback(ProgressUpdate(
                    stage="completion",
                    progress=1.0,
                    message="Course generation complete!"
                ))

            print("🔄 Assembling final course data...")
            yield StreamChunk(type="progress", content="✅ Finalizing course")

            # Yield the complete course
            course_data = {
                "course_info": course_plan,
                "lessons": lessons,
                "flashcards": flashcards,
                "quiz": quiz,
                "images": images,
                "generated_at": datetime.now().isoformat()
            }

            print("🎉 Course generation completed successfully!")
            print(f"📊 Final course data: {len(lessons)} lessons, {len(flashcards)} flashcards, {len(quiz.get('questions', []) if quiz else [])} quiz questions")

            yield StreamChunk(type="course_complete", content=json.dumps(course_data, indent=2))

        except Exception as e:
            error_msg = f"Error generating course: {str(e)}"
            print(f"❌ {error_msg}")
            import traceback
            traceback.print_exc()
            yield StreamChunk(type="error", content=error_msg)

    async def _research_topic(self, topic: str, provider: str = None) -> Dict[str, Any]:
        """Research the topic using web search and content extraction"""
        print(f"🔍 Researching topic: {topic}")

        # Use the provided provider or fall back to default
        research_provider = provider or self.default_provider
        print(f"🧠 Using LLM provider for research: {research_provider}")

        try:
            # Use the new web research tools with the specified provider
            research_results = await research_topic(topic, llm_provider=research_provider)

            if research_results and research_results.get("success"):
                print(f"✅ Web research successful: {research_results.get('successful_sources', 0)} sources")
                return research_results
            else:
                print(f"⚠️ Web research failed or returned no results")

        except Exception as e:
            print(f"⚠️ Web research failed: {e}")

        # Fallback to enhanced simulated research
        print("🔄 Using fallback research data")
        return {
            "topic": topic,
            "key_concepts": [
                f"Fundamental concepts of {topic}",
                f"Practical applications of {topic}",
                f"Tools and resources for {topic}",
                f"Best practices in {topic}",
                f"Common challenges in {topic}"
            ],
            "sources": [
                f"Educational resources for {topic}",
                f"Documentation and tutorials for {topic}",
                f"Community forums and discussions about {topic}",
                f"Official guides and specifications for {topic}"
            ],
            "research_summary": f"Comprehensive research on {topic} covering fundamental concepts, practical applications, available tools and resources, best practices, and common challenges. This research would typically include web search results, documentation extraction, and content analysis from multiple authoritative sources.",
            "success": True,
            "fallback": True
        }

    async def _plan_course(
        self,
        topic: str,
        options: GenerationOptions,
        provider: str
    ) -> Dict[str, Any]:
        """Plan the overall course structure"""

        print(f"📋 Planning course for {topic} with {options.lesson_count} lessons")

        prompt = f"""Create a course plan for: "{topic}"

Requirements:
- {options.lesson_count} lessons
- {options.difficulty.value} difficulty level
- {options.max_lesson_duration} minutes per lesson

Return ONLY a JSON object with:
- title: Course title
- description: Brief description
- learning_objectives: Array of 3-5 objectives
- lesson_titles: Array of lesson titles
- estimated_duration: Total course duration

Focus on practical, engaging content. Return only valid JSON, no other text. Do not wrap the JSON in markdown code blocks or backticks."""

        messages = [
            Message(role="system", content=self.system_prompt),
            Message(role="user", content=prompt)
        ]

        try:
            response_text = await self._get_llm_response(provider, messages)
            print(f"📋 LLM response for course plan: {response_text[:200]}...")

            # Try to parse JSON with smart parser
            course_plan = smart_json_loads(response_text)
            if course_plan is not None:
                print(f"✅ Successfully parsed course plan JSON")
                return course_plan
            else:
                raise ValueError("Failed to extract valid JSON from response")

        except Exception as e:
            print(f"❌ JSON parsing failed for course plan: {e}")
            print(f"Raw response: {response_text}")
            # Fallback if JSON parsing fails
            return {
                "title": f"Course: {topic}",
                "description": f"A comprehensive introduction to {topic}",
                "learning_objectives": [
                    f"Understand the fundamentals of {topic}",
                    f"Apply key concepts of {topic}",
                    f"Analyze real-world applications of {topic}"
                ],
                "lesson_titles": [f"Lesson {i+1}: {topic} Fundamentals" for i in range(options.lesson_count)],
                "estimated_duration": options.lesson_count * options.max_lesson_duration
            }
        except Exception as e:
            print(f"❌ Error in course planning: {e}")
            # Return fallback
            return {
                "title": f"Course: {topic}",
                "description": f"A comprehensive introduction to {topic}",
                "learning_objectives": [
                    f"Understand the fundamentals of {topic}",
                    f"Apply key concepts of {topic}",
                    f"Analyze real-world applications of {topic}"
                ],
                "lesson_titles": [f"Lesson {i+1}: {topic} Basics" for i in range(options.lesson_count)],
                "estimated_duration": options.lesson_count * options.max_lesson_duration
            }

    async def _generate_lessons(
        self,
        course_plan: Dict[str, Any],
        options: GenerationOptions,
        provider: str
    ) -> List[Dict[str, Any]]:
        """Generate detailed lesson content"""

        lessons = []
        lesson_titles = course_plan.get("lesson_titles", [])
        print(f"📚 Generating {len(lesson_titles)} lessons")

        for i, title in enumerate(lesson_titles):
            print(f"📖 Generating lesson {i+1}: {title}")

            # Include research data in the prompt
            research_context = ""
            if hasattr(self, '_current_research') and self._current_research:
                research_context = f"""
Research Context:
{self._current_research.get('research_summary', '')}

Key Concepts: {', '.join(self._current_research.get('key_concepts', [])[:3])}
"""

            # Create difficulty-specific guidelines
            difficulty_guidelines = {
                "beginner": """
- Use simple, clear language and avoid technical jargon
- Explain every concept from the ground up with no assumed prior knowledge
- Include step-by-step instructions with detailed explanations for each step
- Use basic, relatable examples that anyone can understand
- Focus on fundamental concepts and practical applications
- Include plenty of context and background information
- Break down complex ideas into smaller, digestible parts""",
                "intermediate": """
- Use some technical terminology but explain it when first introduced
- Assume basic familiarity with the subject area
- Include moderately complex examples that build on fundamental knowledge
- Focus on practical applications and real-world scenarios
- Introduce some advanced concepts but explain them thoroughly
- Include best practices and common patterns
- Balance theory with hands-on practice""",
                "advanced": """
- Use technical language and industry-standard terminology
- Assume solid foundational knowledge in the subject area
- Include complex, real-world examples and edge cases
- Focus on advanced techniques, optimization, and expert-level practices
- Discuss trade-offs, limitations, and alternative approaches
- Include cutting-edge developments and research
- Emphasize problem-solving and critical thinking"""
            }

            prompt = f"""Create comprehensive, detailed educational content for: "{title}"

This is lesson {i+1} of {len(lesson_titles)} in a course about "{course_plan.get('title', '')}"

{research_context}

Requirements:
- Duration: {options.max_lesson_duration} minutes
- Difficulty Level: {options.difficulty.value.upper()}

DIFFICULTY-SPECIFIC GUIDELINES for {options.difficulty.value.upper()} level:
{difficulty_guidelines.get(options.difficulty.value, difficulty_guidelines["intermediate"])}

Content Requirements:
- Create EXTENSIVE, thorough content that truly teaches the topic (aim for 2000+ words)
- Include multiple practical examples with code/step-by-step instructions
- Provide detailed explanations that help students understand complex concepts
- Include real-world applications and use cases
- Add troubleshooting tips and common pitfalls
- Make content comprehensive enough to actually learn from
- Include image placeholders where visual aids would be helpful

IMPORTANT: When you want to include an educational image, use this format:
{{{{IMAGE_PLACEHOLDER:{title}:Description of the image needed}}}}

LIMIT: Use a MAXIMUM of 3 image placeholders per lesson. Choose the most important visual aids.

For example:
{{{{IMAGE_PLACEHOLDER:{title}:Diagram showing the main components}}}}
{{{{IMAGE_PLACEHOLDER:{title}:Screenshot of the user interface}}}}
{{{{IMAGE_PLACEHOLDER:{title}:Flowchart of the process}}}}

The content should be substantial and educational. Include sections like:
- Introduction with context and importance
- Core concepts with detailed explanations
- Multiple practical examples with code/instructions
- Step-by-step tutorials
- Best practices and tips
- Common mistakes to avoid
- Real-world applications
- Further resources and next steps

Return ONLY a JSON object with:
- title: Lesson title
- duration: Estimated duration
- objectives: Learning objectives for this lesson (3-5 specific objectives)
- content: EXTENSIVE lesson content in markdown format (2000+ words, with image placeholders)
- key_takeaways: Array of 5-7 key points
- examples: Array of 3-5 detailed practical examples with explanations

Return only valid JSON, no other text. Do not wrap the JSON in markdown code blocks or backticks."""

            messages = [
                Message(role="system", content=self.system_prompt),
                Message(role="user", content=prompt)
            ]

            try:
                response_text = await self._get_llm_response(provider, messages)
                print(f"📖 LLM response for lesson {i+1}: {response_text[:100]}...")

                lesson_data = smart_json_loads(response_text)
                if lesson_data is not None:
                    lessons.append(lesson_data)
                    print(f"✅ Successfully generated lesson {i+1}")
                else:
                    raise ValueError("Failed to extract valid JSON from response")

            except Exception as e:
                print(f"❌ JSON parsing failed for lesson {i+1}: {e}")
                # Fallback lesson structure
                lessons.append({
                    "title": title,
                    "duration": options.max_lesson_duration,
                    "objectives": [f"Learn about {title}"],
                    "content": f"# {title}\n\nThis lesson covers the fundamentals of {title}.\n\n## Key Concepts\n\n- Important concept 1\n- Important concept 2\n- Important concept 3\n\n## Examples\n\nHere are some practical examples related to {title}...",
                    "key_takeaways": [f"Key concept from {title}", f"Important principle of {title}"],
                    "examples": [f"Example 1 related to {title}", f"Example 2 related to {title}"]
                })
            except Exception as e:
                print(f"❌ Error generating lesson {i+1}: {e}")
                # Fallback lesson structure
                lessons.append({
                    "title": title,
                    "duration": options.max_lesson_duration,
                    "objectives": [f"Learn about {title}"],
                    "content": f"# {title}\n\nDetailed content about {title}...",
                    "key_takeaways": [f"Key concept from {title}"],
                    "examples": [f"Example related to {title}"]
                })

        return lessons

    async def _generate_flashcards(
        self,
        lessons: List[Dict[str, Any]],
        provider: str,
        difficulty: str = "intermediate"
    ) -> List[Dict[str, Any]]:
        """Generate flashcards from lesson content with difficulty-appropriate complexity"""

        print(f"🃏 Generating {difficulty} level flashcards from lesson content")

        # Combine all lesson content
        all_content = "\n\n".join([
            lesson.get("content", "") + "\n" +
            "\n".join(lesson.get("key_takeaways", []))
            for lesson in lessons
        ])

        # Create difficulty-specific flashcard guidelines
        flashcard_guidelines = {
            "beginner": """
- Focus on basic definitions and simple facts
- Use clear, simple language in both questions and answers
- Test fundamental concepts and terminology
- Include basic examples and straightforward explanations
- Avoid complex relationships or multi-step reasoning
- Keep answers concise and direct""",
            "intermediate": """
- Include concepts, relationships, and applications
- Test understanding of how concepts connect
- Use moderate complexity in questions and explanations
- Include practical examples and use cases
- Test both knowledge and basic application
- Balance definitions with conceptual understanding""",
            "advanced": """
- Focus on complex principles, applications, and analysis
- Test deep understanding and critical thinking
- Include challenging scenarios and edge cases
- Test ability to synthesize and evaluate information
- Include questions about trade-offs and best practices
- Emphasize expert-level insights and nuanced understanding"""
        }

        prompt = f"""Create flashcards based on this lesson content:

{all_content[:2000]}...

DIFFICULTY LEVEL: {difficulty.upper()}

FLASHCARD GUIDELINES for {difficulty.upper()} level:
{flashcard_guidelines.get(difficulty, flashcard_guidelines["intermediate"])}

Generate 10-15 flashcards covering the most important concepts at the {difficulty} level.

Return ONLY a JSON array where each flashcard has:
- question: The question/prompt
- answer: The answer/explanation
- category: Which lesson/topic this relates to

Ensure flashcards match the {difficulty} difficulty level. Return only valid JSON, no other text. Do not wrap the JSON in markdown code blocks or backticks."""

        messages = [
            Message(role="system", content=self.system_prompt),
            Message(role="user", content=prompt)
        ]

        try:
            response_text = await self._get_llm_response(provider, messages)
            print(f"🃏 LLM response for flashcards: {response_text[:100]}...")

            flashcards = smart_json_loads(response_text)
            if flashcards is not None:
                print(f"✅ Successfully generated {len(flashcards)} flashcards")
                return flashcards
            else:
                raise ValueError("Failed to extract valid JSON from response")

        except Exception as e:
            print(f"❌ JSON parsing failed for flashcards: {e}")
            # Fallback flashcards
            return [
                {
                    "question": f"What is the main concept in {lesson.get('title', 'this lesson')}?",
                    "answer": f"The main concept is related to {lesson.get('title', 'the lesson topic')}",
                    "category": lesson.get('title', 'General')
                }
                for lesson in lessons[:5]  # Limit to 5 fallback cards
            ]
        except Exception as e:
            print(f"❌ Error generating flashcards: {e}")
            return []

    async def _generate_quiz(
        self,
        lessons: List[Dict[str, Any]],
        provider: str,
        difficulty: str = "intermediate"
    ) -> Dict[str, Any]:
        """Generate a multiple-choice quiz with difficulty-appropriate questions"""

        print(f"📝 Generating {difficulty} level quiz from lesson content")

        # Combine lesson content
        all_content = "\n\n".join([
            lesson.get("content", "") + "\n" +
            "\n".join(lesson.get("key_takeaways", []))
            for lesson in lessons
        ])

        # Create difficulty-specific quiz guidelines
        quiz_guidelines = {
            "beginner": """
- Focus on basic recall and recognition questions
- Test fundamental concepts and definitions
- Use simple, clear language in questions
- Include straightforward examples
- Avoid trick questions or complex scenarios
- Test one concept per question""",
            "intermediate": """
- Include application and analysis questions
- Test understanding of relationships between concepts
- Use moderate complexity in scenarios
- Include some problem-solving questions
- Test ability to apply knowledge to new situations
- Mix recall with application questions""",
            "advanced": """
- Focus on analysis, synthesis, and evaluation questions
- Test complex problem-solving abilities
- Include multi-step reasoning questions
- Use challenging real-world scenarios
- Test ability to compare and contrast approaches
- Include questions about trade-offs and limitations"""
        }

        prompt = f"""Create a 10-question multiple-choice quiz based on this content:

{all_content[:2000]}...

DIFFICULTY LEVEL: {difficulty.upper()}

QUIZ GUIDELINES for {difficulty.upper()} level:
{quiz_guidelines.get(difficulty, quiz_guidelines["intermediate"])}

Return ONLY a JSON object with:
- title: Quiz title
- instructions: Brief instructions
- questions: Array of question objects

Each question should have:
- question: The question text
- options: Array of 4 multiple choice options (A, B, C, D)
- correct_answer: The letter of the correct answer
- explanation: Why this answer is correct

Ensure questions match the {difficulty} difficulty level. Return only valid JSON, no other text. Do not wrap the JSON in markdown code blocks or backticks."""

        messages = [
            Message(role="system", content=self.system_prompt),
            Message(role="user", content=prompt)
        ]

        try:
            response_text = await self._get_llm_response(provider, messages)
            print(f"📝 LLM response for quiz: {response_text[:100]}...")

            quiz = smart_json_loads(response_text)
            if quiz is not None:
                print(f"✅ Successfully generated quiz with {len(quiz.get('questions', []))} questions")
                return quiz
            else:
                raise ValueError("Failed to extract valid JSON from response")

        except Exception as e:
            print(f"❌ JSON parsing failed for quiz: {e}")
            # Fallback quiz
            return {
                "title": "Course Quiz",
                "instructions": "Choose the best answer for each question.",
                "questions": [
                    {
                        "question": f"What is a key concept from {lesson.get('title', 'this lesson')}?",
                        "options": ["A) Option A", "B) Option B", "C) Option C", "D) Option D"],
                        "correct_answer": "A",
                        "explanation": "This is the correct answer based on the lesson content."
                    }
                    for lesson in lessons[:3]  # Limit to 3 fallback questions
                ]
            }
        except Exception as e:
            print(f"❌ Error generating quiz: {e}")
            return {
                "title": "Course Quiz",
                "instructions": "Choose the best answer for each question.",
                "questions": []
            }

    async def _generate_images(self, lessons: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Generate educational images for lessons using Pollinations API"""

        print("🖼️ Generating actual images for lessons using Pollinations API")

        images = []

        # Process each lesson separately to avoid duplication
        for lesson in lessons:
            lesson_title = lesson.get("title", "")
            content = lesson.get("content", "")

            print(f"📚 Processing images for lesson: {lesson_title}")

            # Extract placeholders for this specific lesson
            placeholders = extract_image_placeholders(content)

            if not placeholders:
                print(f"📝 No image placeholders found for {lesson_title}, generating 1 default image")
                # Generate one default image for the lesson
                topic = lesson_title.split(":")[0] if ":" in lesson_title else lesson_title

                try:
                    image_data = await generate_educational_image(lesson_title, topic, "educational")
                    if image_data:
                        images.append(image_data)
                        print(f"✅ Generated default image for: {lesson_title}")
                    else:
                        print(f"⚠️ Failed to generate default image for: {lesson_title}")
                except Exception as e:
                    print(f"❌ Error generating default image for {lesson_title}: {e}")
            else:
                # Generate images for ALL placeholders to avoid unreplaced ones
                print(f"🎨 Found {len(placeholders)} placeholders for {lesson_title}, generating images for ALL of them")

                for i, placeholder in enumerate(placeholders):
                    placeholder_lesson_title = placeholder["lesson_title"]
                    description = placeholder["description"]

                    try:
                        # Create educational prompt from placeholder description
                        topic = description
                        image_data = await generate_educational_image(placeholder_lesson_title, topic, "educational")

                        if image_data:
                            # Store the specific placeholder description for matching
                            image_data["placeholder_description"] = description
                            image_data["placeholder_full"] = placeholder["placeholder"]
                            images.append(image_data)
                            print(f"✅ Generated image {i+1}/{len(placeholders)} for {lesson_title}: {description[:50]}...")
                        else:
                            print(f"⚠️ Failed to generate image {i+1} for {lesson_title}: {description[:50]}...")

                    except Exception as e:
                        print(f"❌ Error generating image {i+1} for {lesson_title}: {e}")

        # Replace placeholders in lesson content with actual images
        for lesson in lessons:
            if images:
                lesson["content"] = replace_image_placeholders(lesson["content"], images)

        print(f"✅ Generated {len(images)} total images and updated lesson content")
        return images

    async def _get_llm_response(self, provider: str, messages: List[Message]) -> str:
        """Get a complete response from the LLM by collecting all streaming chunks"""

        print(f"🧠 Getting LLM response from {provider}")

        response_text = ""

        try:
            async for chunk in self.llm_client.generate_stream(
                provider=provider,
                messages=messages
            ):
                if chunk.type == "text":
                    response_text += chunk.content
                elif chunk.type == "error":
                    raise Exception(f"LLM error: {chunk.content}")

            print(f"✅ Got LLM response ({len(response_text)} characters)")
            return response_text.strip()

        except Exception as e:
            print(f"❌ Error getting LLM response: {e}")
            raise

    async def refine_course(
        self,
        course_data: Dict[str, Any],
        user_request: str,
        provider: Optional[str] = None
    ) -> Dict[str, Any]:
        """Refine or add to existing course based on user feedback"""

        # Use provided provider or fall back to default
        if provider is None:
            provider = self.default_provider

        prompt = f"""The user wants to modify this course:

Current course: {json.dumps(course_data, indent=2)[:1000]}...

User request: "{user_request}"

Please modify the course accordingly. If they want more information about a specific topic, research it and add detailed content. Return the updated course data in the same JSON format."""

        messages = [
            Message(role="system", content=self.system_prompt),
            Message(role="user", content=prompt)
        ]

        try:
            response_text = await self._get_llm_response(provider, messages)
            refined_course = smart_json_loads(response_text)
            if refined_course is not None:
                return refined_course
            else:
                return course_data  # Return original if parsing fails
        except:
            return course_data  # Return original if parsing fails

    def get_available_providers(self) -> List[str]:
        """Get list of available LLM providers"""
        return self.llm_client.get_available_providers()
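Usage note: generate_course is an async generator, so a caller drives it with an event loop and waits for the "course_complete" chunk carrying the assembled JSON. A minimal sketch (not part of the commit), assuming `options` is an already-constructed GenerationOptions instance from coursecrafter.types and that at least one LLM provider is configured:

# Minimal sketch of driving SimpleCourseAgent and collecting the final course payload.
import asyncio
import json

from coursecrafter.agents.simple_course_agent import SimpleCourseAgent


async def build_course(topic: str, options) -> dict:
    agent = SimpleCourseAgent()
    course = None
    async for chunk in agent.generate_course(topic, options):
        if chunk.type == "progress":
            print(chunk.content)                # human-readable status updates
        elif chunk.type == "course_complete":
            course = json.loads(chunk.content)  # final assembled course data
        elif chunk.type == "error":
            raise RuntimeError(chunk.content)
    return course


# Hypothetical call: course = asyncio.run(build_course("Intro to Python", options))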
coursecrafter/tools/__init__.py
ADDED
@@ -0,0 +1,19 @@
from .image_generation import (
    ImageGenerator,
    generate_educational_image,
    extract_image_placeholders,
    replace_image_placeholders,
    create_image_placeholder
)
from .web_research import WebResearcher, research_topic

__all__ = [
    "ImageGenerator",
    "generate_educational_image",
    "extract_image_placeholders",
    "replace_image_placeholders",
    "create_image_placeholder",
    "WebResearcher",
    "research_topic"
]
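For reference, a short sketch of how these re-exports might be consumed elsewhere in the project; the calling code is illustrative, only the imported names come from this package.

from coursecrafter.tools import research_topic, generate_educational_image

# Both helpers are async; callers are expected to await them inside an event loop, e.g.:
# research = await research_topic("vector databases", llm_provider="openai")
# image = await generate_educational_image("Embeddings Basics", "vector databases")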
coursecrafter/tools/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (630 Bytes)
coursecrafter/tools/__pycache__/image_generation.cpython-311.pyc
ADDED
Binary file (13.1 kB)
coursecrafter/tools/__pycache__/web_research.cpython-311.pyc
ADDED
Binary file (13.2 kB)
coursecrafter/tools/image_generation.py
ADDED
@@ -0,0 +1,254 @@
"""
🎨 Image Generation Tools
High-quality image generation using Pollinations API with gptimage model
"""

import urllib.parse
import base64
from typing import Dict, Any, Optional, List
import logging

logger = logging.getLogger(__name__)

# Import config for Pollinations settings
from ..utils.config import config


class ImageGenerator:
    """High-quality image generation using Pollinations API"""

    def __init__(self):
        self.base_url = "https://image.pollinations.ai/prompt"
        self.image_config = config.get_image_generation_config()

        # Set up default parameters from config
        self.default_params = {
            "width": self.image_config.get("default_width", 1280),
            "height": self.image_config.get("default_height", 720),
            "model": self.image_config.get("default_model", "gptimage"),
            "nologo": "true",
            "enhance": "true",
            "private": "true"
        }

        # Add API credentials if available
        if self.image_config.get("pollinations_api_token"):
            self.default_params["token"] = self.image_config["pollinations_api_token"]

        if self.image_config.get("pollinations_api_reference"):
            self.default_params["referrer"] = self.image_config["pollinations_api_reference"]

    async def generate_image_url(self, prompt: str, **kwargs) -> Optional[Dict[str, Any]]:
        """Generate an image and return its data (base64 payload plus request metadata)"""
        try:
            print(f"🎨 Generating image: {prompt[:50]}...")

            # Merge custom params with defaults
            params = {**self.default_params, **kwargs}

            # Encode the prompt for the URL
            encoded_prompt = urllib.parse.quote(prompt)
            url = f"{self.base_url}/{encoded_prompt}"

            print(f"🌐 Request URL: {url}")
            print(f"📋 Parameters: {params}")

            # Use requests instead of aiohttp for better compatibility
            import requests

            try:
                response = requests.get(url, params=params, timeout=300)
                response.raise_for_status()  # Raise an exception for bad status codes
            except requests.exceptions.RequestException as e:
                print(f"❌ Request failed: {e}")
                if hasattr(e, 'response') and e.response is not None:
                    print(f"📄 Error response: {e.response.text[:200]}...")
                return None

            if response.status_code == 200:
                image_data = response.content

                # Convert to base64
                base64_data = base64.b64encode(image_data).decode('utf-8')

                print(f"✅ Image data generated successfully ({len(image_data)} bytes)")

                return {
                    "prompt": prompt,
                    "base64_data": base64_data,
                    "data_url": f"data:image/jpeg;base64,{base64_data}",
                    "size_bytes": len(image_data),
                    "params": params
                }
            else:
                print(f"❌ Image generation failed: HTTP {response.status_code}")
                print(f"📄 Response text: {response.text[:200]}...")
                return None

        except Exception as e:
            logger.error(f"Image data generation failed: {e}")
            print(f"❌ Image data generation error: {e}")
            return None

    async def generate_educational_image(self, lesson_title: str, topic: str, style: str = "educational") -> Optional[Dict[str, Any]]:
        """Generate educational image for a lesson"""
        try:
            # Create varied educational prompts with different styles and approaches
            import random

            # Add variety with different visual styles and descriptors
            visual_styles = [
                "modern minimalist", "vibrant colorful", "clean professional", "sleek contemporary",
                "bright engaging", "polished elegant", "dynamic visual", "crisp detailed"
            ]

            art_styles = [
                "digital illustration", "infographic design", "vector art", "educational diagram",
                "technical illustration", "conceptual artwork", "instructional graphic", "learning visual"
            ]

            descriptors = [
                "high-quality", "detailed", "clear and informative", "visually appealing",
                "educational focused", "learning oriented", "instructionally designed", "pedagogically sound"
            ]

            # Randomly select style elements for variety
            visual_style = random.choice(visual_styles)
            art_style = random.choice(art_styles)
            descriptor = random.choice(descriptors)

            if style == "educational":
                prompt = f"{descriptor.title()} {art_style} about {topic}. {visual_style} style with engaging visual elements."
            elif style == "diagram":
                prompt = f"Technical {art_style} showing {topic} concepts. {visual_style} design with clear visual hierarchy and {descriptor} presentation."
            elif style == "concept":
                prompt = f"Conceptual {art_style} of {topic}. {visual_style} visual representation with {descriptor} design elements."
            else:
                prompt = f"Professional {art_style} about {topic}. {visual_style}, {descriptor} educational style."

            print(f"🎓 Generating educational image for: {lesson_title}")

            # Use default parameters from config (already includes API credentials)
            result = await self.generate_image_url(prompt)

            if result:
                result.update({
                    "lesson_title": lesson_title,
                    "topic": topic,
                    "style": style,
                    "educational": True
                })
                print(f"✅ Educational image generated for: {lesson_title}")

            return result

        except Exception as e:
            logger.error(f"Educational image generation failed: {e}")
            print(f"❌ Educational image generation error: {e}")
            return None

    @staticmethod
    def create_image_placeholder(lesson_title: str, description: str) -> str:
        """Create an image placeholder marker for lessons"""
        return f"{{{{IMAGE_PLACEHOLDER:{lesson_title}:{description}}}}}"


def extract_image_placeholders(content: str) -> List[Dict[str, str]]:
    """Extract image placeholder markers from content"""
    import re

    pattern = r'\{\{IMAGE_PLACEHOLDER:([^:]+):([^}]+)\}\}'
    matches = re.findall(pattern, content)

    return [
        {
            "lesson_title": match[0],
            "description": match[1],
            "placeholder": f"{{{{IMAGE_PLACEHOLDER:{match[0]}:{match[1]}}}}}"
        }
        for match in matches
    ]


def replace_image_placeholders(content: str, images: List[Dict[str, Any]]) -> str:
    """Replace image placeholders with actual image HTML"""
    print(f"🔄 Replacing image placeholders in content ({len(images)} images available)")

    if not images:
        print("⚠️ No images provided for placeholder replacement")
        return content

    import re

    # Track which images have been used to avoid duplication
    used_images = set()

    # Find all placeholders in content
    placeholder_pattern = r'\{\{IMAGE_PLACEHOLDER:([^:]+):([^}]+)\}\}'
    placeholders = re.findall(placeholder_pattern, content)
    print(f"🔍 Found {len(placeholders)} total placeholders in content")

    for placeholder_lesson, placeholder_desc in placeholders:
        print(f"📝 Looking for image for placeholder: {placeholder_lesson} - {placeholder_desc[:50]}...")

        # Find the best matching image that hasn't been used
        best_match = None
        for i, image in enumerate(images):
            if i in used_images:
                continue  # Skip already used images

            if not image or "data_url" not in image:
                continue

            image_lesson = image.get("lesson_title", "")
            image_desc = image.get("placeholder_description", "")

            # Check if this image matches the placeholder
            if (image_lesson == placeholder_lesson and
                (image_desc == placeholder_desc or
                 placeholder_desc in image_desc or
                 image_desc in placeholder_desc)):
                best_match = (i, image)
                break

        if best_match:
            image_index, image = best_match
            used_images.add(image_index)

            # Replace this specific placeholder
            specific_placeholder = f"{{{{IMAGE_PLACEHOLDER:{placeholder_lesson}:{placeholder_desc}}}}}"
            description = image.get("prompt", image.get("placeholder_description", "Educational image"))

            img_html = f'<img src="{image["data_url"]}" alt="{description}" style="max-width: 100%; border-radius: 8px; box-shadow: 0 4px 8px rgba(0,0,0,0.3); border: 2px solid #4a4a7a; margin: 1rem 0;">'

            content = content.replace(specific_placeholder, img_html)
            print(f"✅ Replaced placeholder with image {image_index+1}: {placeholder_desc[:50]}...")
        else:
            print(f"⚠️ No matching image found for: {placeholder_lesson} - {placeholder_desc[:50]}...")

    # Check if any placeholders remain
    remaining_placeholders = re.findall(r'\{\{IMAGE_PLACEHOLDER:[^}]+\}\}', content)
    if remaining_placeholders:
        print(f"⚠️ {len(remaining_placeholders)} placeholders still remain unreplaced")
        for placeholder in remaining_placeholders[:3]:  # Show first 3
            print(f"  - {placeholder}")
    else:
        print("✅ All image placeholders have been replaced")

    print(f"📊 Used {len(used_images)} out of {len(images)} available images")

    return content


def create_image_placeholder(lesson_title: str, description: str) -> str:
    """Convenience function for creating image placeholders"""
    return ImageGenerator.create_image_placeholder(lesson_title, description)


# Global instance for convenience
_image_generator = ImageGenerator()


async def generate_educational_image(lesson_title: str, topic: str, style: str = "educational") -> Optional[Dict[str, Any]]:
    """Convenience function for generating educational images"""
    return await _image_generator.generate_educational_image(lesson_title, topic, style)
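A small end-to-end sketch of the placeholder workflow defined above. The lesson text and the image dict are made up for illustration; in the real pipeline the image dict comes back from generate_educational_image. Note that lesson titles containing a colon would break the `([^:]+)` part of the marker pattern, so the example uses a colon-free title.

from coursecrafter.tools.image_generation import (
    create_image_placeholder,
    extract_image_placeholders,
    replace_image_placeholders,
)

lesson_text = (
    "## What is an embedding?\n\n"
    + create_image_placeholder("Embeddings Basics", "diagram of words mapped to vectors")
)

# Recovers the lesson title and description from the {{IMAGE_PLACEHOLDER:...}} marker.
print(extract_image_placeholders(lesson_text))

# A fake image record standing in for the dict returned by generate_educational_image.
fake_image = {
    "lesson_title": "Embeddings Basics",
    "placeholder_description": "diagram of words mapped to vectors",
    "data_url": "data:image/jpeg;base64,...",
    "prompt": "Educational diagram of words mapped to vectors",
}

# The marker is swapped for an <img> tag because the lesson title and description match.
print(replace_image_placeholders(lesson_text, [fake_image]))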
coursecrafter/tools/web_research.py
ADDED
@@ -0,0 +1,257 @@
"""
🔍 Web Research Tools
Advanced web research using DuckDuckGo search and Crawl4AI content extraction
"""

import os
from typing import List, Dict, Any, Optional
from duckduckgo_search import DDGS
from crawl4ai import (
    AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LLMConfig,
    LLMContentFilter, DefaultMarkdownGenerator
)
import logging

logger = logging.getLogger(__name__)


class WebResearcher:
    """Advanced web research using DuckDuckGo and Crawl4AI"""

    def __init__(self, max_results: int = 10, max_crawl_pages: int = 7, llm_provider: Optional[str] = None):
        self.max_results = max_results
        self.max_crawl_pages = max_crawl_pages
        self.llm_provider = llm_provider or "openai"  # Default fallback
        self.browser_config = BrowserConfig(
            headless=True,
            viewport_width=1280,
            viewport_height=720
        )

    async def search_topic(self, topic: str, region: str = "us-en") -> List[Dict[str, Any]]:
        """Search for a topic using DuckDuckGo"""
        try:
            print(f"🔍 Searching DuckDuckGo for: {topic}")

            with DDGS() as ddgs:
                results = []
                search_results = ddgs.text(
                    keywords=topic,
                    region=region,
                    safesearch="moderate",
                    max_results=self.max_results
                )

                for result in search_results:
                    results.append({
                        "title": result.get("title", ""),
                        "url": result.get("href", ""),
                        "snippet": result.get("body", ""),
                        "source": "duckduckgo"
                    })

            print(f"✅ Found {len(results)} search results")
            return results

        except Exception as e:
            logger.error(f"Search failed: {e}")
            print(f"❌ Search failed: {e}")
            return []

    async def extract_content(self, urls: List[str], topic: str) -> List[Dict[str, Any]]:
        """Extract content from URLs using Crawl4AI with LLM filtering"""
        try:
            print(f"📄 Extracting content from {len(urls)} URLs...")

            # Try to configure LLM content filter for educational content
            try:
                # Use the provider passed to the class, or fall back to environment/default
                crawl4ai_provider_simple = self.llm_provider

                # Map simple provider names to full provider/model format
                provider_mapping = {
                    "openai": "openai/gpt-4o-mini",
                    "google": "gemini/gemini-2.0-flash-exp",
                    "gemini": "gemini/gemini-2.0-flash-exp",
                    "anthropic": "gemini/gemini-2.0-flash-exp"  # Fallback since Crawl4AI doesn't support Anthropic directly
                }

                crawl4ai_provider = provider_mapping.get(crawl4ai_provider_simple, "openai/gpt-4o-mini")

                if crawl4ai_provider.startswith("gemini"):
                    # Check if Google API key is available
                    if not os.getenv("GOOGLE_API_KEY"):
                        print("⚠️ GOOGLE_API_KEY not found, falling back to OpenAI")
                        llm_config = LLMConfig(
                            provider="openai/gpt-4o-mini",
                            api_token="env:OPENAI_API_KEY"
                        )
                        print("🧠 Using OpenAI for content filtering: gpt-4o-mini (fallback)")
                    else:
                        llm_config = LLMConfig(
                            provider=crawl4ai_provider,
                            api_token="env:GOOGLE_API_KEY"
                        )
                        print(f"🧠 Using Gemini for content filtering: {crawl4ai_provider}")
                else:
                    # Default to OpenAI
                    llm_config = LLMConfig(
                        provider="openai/gpt-4o-mini",
                        api_token="env:OPENAI_API_KEY"
                    )
                    print("🧠 Using OpenAI for content filtering: gpt-4o-mini")

                content_filter = LLMContentFilter(
                    llm_config=llm_config,
                    instruction=f"""
                    Extract educational content related to "{topic}".
                    Focus on:
                    - Key concepts and explanations
                    - Practical examples and tutorials
                    - Technical details and specifications
                    - Best practices and guidelines
                    - Code examples and implementations

                    Exclude:
                    - Navigation menus and sidebars
                    - Advertisements and promotional content
                    - Footer content and legal text
                    - Unrelated content

                    Format as clean markdown with proper headers and code blocks.
                    """,
                    chunk_token_threshold=1000,
                    verbose=False
                )

                markdown_generator = DefaultMarkdownGenerator(
                    content_filter=content_filter,
                    options={"ignore_links": False}
                )
            except Exception as e:
                print(f"⚠️ Could not configure LLM content filter: {e}")
                # Fallback to basic markdown generator
                markdown_generator = DefaultMarkdownGenerator(
                    options={"ignore_links": False}
                )

            run_config = CrawlerRunConfig(
                markdown_generator=markdown_generator,
                cache_mode=CacheMode.BYPASS,
                wait_for_images=False,
                process_iframes=False,
                remove_overlay_elements=True
            )

            extracted_content = []

            async with AsyncWebCrawler(config=self.browser_config) as crawler:
                for i, url in enumerate(urls[:self.max_crawl_pages]):
                    try:
                        print(f"📖 Crawling {i+1}/{min(len(urls), self.max_crawl_pages)}: {url}")

                        result = await crawler.arun(url=url, config=run_config)

                        if result.success and result.markdown:
                            extracted_content.append({
                                "url": url,
                                "title": result.metadata.get("title", ""),
                                "content": result.markdown,
                                "word_count": len(result.markdown.split()),
                                "extraction_success": True
                            })
                            print(f"✅ Extracted {len(result.markdown.split())} words from {url}")
                        else:
                            print(f"⚠️ Failed to extract content from {url}: {result.error_message}")
                            extracted_content.append({
                                "url": url,
                                "title": "",
                                "content": "",
                                "word_count": 0,
                                "extraction_success": False,
                                "error": result.error_message
                            })

                    except Exception as e:
                        logger.error(f"Error crawling {url}: {e}")
                        print(f"❌ Error crawling {url}: {e}")
                        extracted_content.append({
                            "url": url,
                            "title": "",
                            "content": "",
                            "word_count": 0,
                            "extraction_success": False,
                            "error": str(e)
                        })

            successful_extractions = [c for c in extracted_content if c["extraction_success"]]
            print(f"✅ Successfully extracted content from {len(successful_extractions)}/{len(urls)} URLs")

            return extracted_content

        except Exception as e:
            logger.error(f"Content extraction failed: {e}")
            print(f"❌ Content extraction failed: {e}")
            return []

    async def research_topic(self, topic: str) -> Dict[str, Any]:
        """Complete research workflow: search + extract + summarize"""
        try:
            print(f"🚀 Starting comprehensive research for: {topic}")

            # Step 1: Search for relevant URLs
            search_results = await self.search_topic(topic)

            if not search_results:
                return {
                    "topic": topic,
                    "search_results": [],
                    "extracted_content": [],
                    "summary": f"No search results found for {topic}",
                    "success": False
                }

            # Step 2: Extract content from top URLs
            urls = [result["url"] for result in search_results]
            extracted_content = await self.extract_content(urls, topic)

            # Step 3: Compile research summary
            successful_content = [c for c in extracted_content if c["extraction_success"]]
            total_words = sum(c["word_count"] for c in successful_content)

            summary = f"""
            Research completed for "{topic}":
            - Found {len(search_results)} search results
            - Successfully extracted content from {len(successful_content)} sources
            - Total content: {total_words} words
            - Sources include educational articles, documentation, and tutorials
            """

            print(f"🎉 Research completed: {len(successful_content)} sources, {total_words} words")

            return {
                "topic": topic,
                "search_results": search_results,
                "extracted_content": extracted_content,
                "summary": summary.strip(),
                "total_words": total_words,
                "successful_sources": len(successful_content),
                "success": True
            }

        except Exception as e:
            logger.error(f"Research failed: {e}")
            print(f"❌ Research failed: {e}")
            return {
                "topic": topic,
                "search_results": [],
                "extracted_content": [],
                "summary": f"Research failed for {topic}: {str(e)}",
                "success": False
            }


async def research_topic(topic: str, llm_provider: str = "openai") -> Dict[str, Any]:
    """Convenience function for topic research with a specific LLM provider"""
    web_researcher = WebResearcher(llm_provider=llm_provider)
    return await web_researcher.research_topic(topic)
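A minimal sketch of how the convenience wrapper above could be driven from a script. The topic string is arbitrary, and an API key for the chosen provider (OPENAI_API_KEY here, or GOOGLE_API_KEY when llm_provider="google") is assumed to be set in the environment for the content-filtering step.

import asyncio

from coursecrafter.tools.web_research import research_topic


async def main():
    result = await research_topic("retrieval-augmented generation", llm_provider="openai")

    if result["success"]:
        print(result["summary"])
        print(f"Usable sources: {result['successful_sources']}")
    else:
        # On failure the dict still carries the topic and a summary of what went wrong.
        print(f"Research failed: {result['summary']}")


# asyncio.run(main())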
coursecrafter/types.py
ADDED
@@ -0,0 +1,326 @@
"""
📊 Type Definitions for Course Creator AI

Comprehensive data structures and type hints for the course generation system.
"""

from dataclasses import dataclass, field
from typing import List, Dict, Optional, Any, Union, Literal
from datetime import datetime
from enum import Enum


class DifficultyLevel(str, Enum):
    """Course difficulty levels"""
    BEGINNER = "beginner"
    INTERMEDIATE = "intermediate"
    ADVANCED = "advanced"


class LearningStyle(str, Enum):
    """Learning style preferences"""
    VISUAL = "visual"
    AUDITORY = "auditory"
    KINESTHETIC = "kinesthetic"
    READING = "reading"


class ExportFormat(str, Enum):
    """Export format options"""
    PDF = "pdf"
    JSON = "json"
    MARKDOWN = "markdown"
    HTML = "html"
    ANKI = "anki"


@dataclass
class Source:
    """Information about a content source"""
    url: str
    title: str
    author: Optional[str] = None
    date: Optional[datetime] = None
    credibility_score: float = 0.0
    content_type: str = "web"


@dataclass
class LearningObjective:
    """A specific learning objective"""
    description: str
    level: DifficultyLevel
    estimated_time: int  # minutes
    skills: List[str] = field(default_factory=list)


@dataclass
class Exercise:
    """A practice exercise"""
    title: str
    description: str
    instructions: str
    solution: Optional[str] = None
    difficulty: DifficultyLevel = DifficultyLevel.BEGINNER
    estimated_time: int = 10  # minutes


@dataclass
class CodeExample:
    """A code example with explanation"""
    title: str
    code: str
    language: str = "python"
    explanation: str = ""
    tags: List[str] = field(default_factory=list)
    difficulty: str = "intermediate"
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class Flashcard:
    """A flashcard for spaced repetition learning"""
    front: str
    back: str
    category: str
    difficulty: int = 1  # 1-5 scale
    tags: List[str] = field(default_factory=list)
    image_url: Optional[str] = None


@dataclass
class QuizQuestion:
    """A single quiz question"""
    id: str
    question: str
    question_type: str  # multiple_choice, true_false, fill_blank, short_answer
    options: List[str] = field(default_factory=list)
    correct_answer: str = ""
    explanation: str = ""
    points: int = 1
    difficulty: str = "medium"  # easy, medium, hard
    tags: List[str] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class Quiz:
    """A quiz with multiple questions"""
    title: str
    questions: List[QuizQuestion] = field(default_factory=list)
    passing_score: int = 70
    time_limit: Optional[int] = None  # minutes
    difficulty: DifficultyLevel = DifficultyLevel.BEGINNER


@dataclass
class Image:
    """Generated or sourced image"""
    url: str
    caption: str
    alt_text: str
    width: Optional[int] = None
    height: Optional[int] = None
    format: str = "png"


@dataclass
class ImageAsset:
    """Represents an image asset for a course"""
    id: str
    title: str
    description: str
    image_url: str
    local_path: Optional[str] = None
    image_type: str = "illustration"  # cover, illustration, concept, diagram, infographic
    lesson_id: Optional[int] = None
    tags: List[str] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class ContentSource:
    """Represents a content source for research"""
    title: str
    url: str
    content: str
    source_type: str  # web, research, documentation, model, dataset, concept
    relevance_score: float = 0.5
    credibility_score: float = 0.5
    tags: List[str] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class ResearchResult:
    """Results from research conducted for a topic"""
    topic: str
    sources: List[ContentSource] = field(default_factory=list)
    key_concepts: List[str] = field(default_factory=list)
    learning_objectives: List[str] = field(default_factory=list)
    difficulty_level: str = "intermediate"
    target_audience: str = "general"
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class CourseStructure:
    """High-level course structure and planning"""
    title: str
    description: str
    difficulty_level: str = "intermediate"
    estimated_duration: float = 2.0  # hours
    prerequisites: List[str] = field(default_factory=list)
    learning_objectives: List[str] = field(default_factory=list)
    lessons: List['Lesson'] = field(default_factory=list)  # Forward reference
    tags: List[str] = field(default_factory=list)
    target_audience: str = "general"
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class Lesson:
    """A single lesson in the course"""
    title: str
    content: str
    objectives: List[LearningObjective]
    duration: int  # minutes
    exercises: List[Exercise] = field(default_factory=list)
    images: List[Image] = field(default_factory=list)
    flashcards: List[Flashcard] = field(default_factory=list)
    quiz: Optional[Quiz] = None
    prerequisites: List[str] = field(default_factory=list)
    key_concepts: List[str] = field(default_factory=list)


@dataclass
class CourseMetadata:
    """Metadata about the course"""
    created_at: datetime
    topic: str
    difficulty: DifficultyLevel
    estimated_duration: int  # total minutes
    sources: List[Source]
    word_count: int
    lesson_count: int
    flashcard_count: int
    image_count: int
    target_audience: str = "general"
    prerequisites: List[str] = field(default_factory=list)
    learning_outcomes: List[str] = field(default_factory=list)


@dataclass
class Course:
    """Complete course structure"""
    title: str
    description: str
    lessons: List[Lesson]
    metadata: CourseMetadata
    flashcards: List[Flashcard] = field(default_factory=list)
    images: List[Image] = field(default_factory=list)
    summary: str = ""
    glossary: Dict[str, str] = field(default_factory=dict)
    resources: List[Source] = field(default_factory=list)


@dataclass
class ResearchData:
    """Research data collected for course generation"""
    topic: str
    sources: List[Source]
    key_concepts: List[str]
    content_outline: Dict[str, Any]
    related_topics: List[str] = field(default_factory=list)
    expert_quotes: List[str] = field(default_factory=list)
    statistics: List[str] = field(default_factory=list)


@dataclass
class CourseOutline:
    """High-level course structure"""
    title: str
    description: str
    difficulty: DifficultyLevel
    lesson_titles: List[str]
    learning_objectives: List[LearningObjective]
    estimated_duration: int
    prerequisites: List[str] = field(default_factory=list)


@dataclass
class GenerationOptions:
    """Options for course generation"""
    difficulty: DifficultyLevel = DifficultyLevel.BEGINNER
    lesson_count: int = 5
    include_audio: bool = False
    include_images: bool = True
    include_flashcards: bool = True
    include_quizzes: bool = True
    learning_style: LearningStyle = LearningStyle.VISUAL
    max_lesson_duration: int = 30  # minutes
    export_formats: List[ExportFormat] = field(default_factory=list)


@dataclass
class ExportResult:
    """Result of an export operation"""
    format: ExportFormat
    success: bool
    file_path: Optional[str] = None
    download_url: Optional[str] = None
    error_message: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class ProgressUpdate:
    """Progress update during course generation"""
    stage: str
    progress: float  # 0.0 to 1.0
    message: str
    details: Optional[Dict[str, Any]] = None
    timestamp: datetime = field(default_factory=datetime.now)


# LLM Provider Types
LLMProvider = Literal["openai", "anthropic", "google", "openai_compatible"]


@dataclass
class LLMConfig:
    """Configuration for LLM providers"""
    provider: LLMProvider
    model: str
    api_key: str
    temperature: float = 0.7
    max_tokens: Optional[int] = None
    timeout: int = 60


# API Response Types
@dataclass
class APIResponse:
    """Standard API response structure"""
    success: bool
    data: Optional[Any] = None
    error: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)


# Validation Types
@dataclass
class ValidationResult:
    """Result of content validation"""
    is_valid: bool
    score: float  # 0.0 to 1.0
    issues: List[str] = field(default_factory=list)
    suggestions: List[str] = field(default_factory=list)


# Stream Types for real-time updates
@dataclass
class StreamChunk:
    """Chunk of streaming data"""
    type: Literal["text", "tool_call", "progress", "error"]
    content: str
    metadata: Dict[str, Any] = field(default_factory=dict)
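A brief sketch showing how the core dataclasses above compose into a Course; every field value here is invented purely for illustration.

from datetime import datetime

from coursecrafter.types import (
    Course, CourseMetadata, DifficultyLevel, Flashcard, Lesson, LearningObjective,
)

lesson = Lesson(
    title="Getting Started",
    content="## Welcome\nIntroductory material goes here.",
    objectives=[LearningObjective(
        description="Explain the course topic in one sentence",
        level=DifficultyLevel.BEGINNER,
        estimated_time=10,
    )],
    duration=30,
    flashcards=[Flashcard(front="What is X?", back="A short definition.", category="basics")],
)

course = Course(
    title="Demo Course",
    description="A tiny example built from the dataclasses in coursecrafter/types.py.",
    lessons=[lesson],
    metadata=CourseMetadata(
        created_at=datetime.now(),
        topic="demo",
        difficulty=DifficultyLevel.BEGINNER,
        estimated_duration=30,
        sources=[],
        word_count=50,
        lesson_count=1,
        flashcard_count=1,
        image_count=0,
    ),
)

print(course.title, len(course.lessons))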
coursecrafter/ui/__init__.py
ADDED
@@ -0,0 +1,16 @@
"""
🖥️ User Interface Components

Gradio interface and custom UI components.
"""

from .gradio_app import create_coursecrafter_interface
from .components import CoursePreview, FlashcardViewer, ProgressTracker
from .styling import get_custom_css, get_theme_colors, get_component_styles

__all__ = [
    "create_coursecrafter_interface",
    "CoursePreview",
    "FlashcardViewer",
    "ProgressTracker"
]
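For context, a hypothetical sketch of how the styling helpers re-exported here might be applied to a standalone Blocks demo; this wiring is illustrative only, since the packaged interface builds its own layout in gradio_app.py.

import gradio as gr

from coursecrafter.ui import CoursePreview
from coursecrafter.ui.styling import get_custom_css

with gr.Blocks(css=get_custom_css()) as demo:
    # Reuse the tabbed preview component on its own, without the full app shell.
    preview = CoursePreview()
    preview_tabs, preview_components = preview.create_preview_tabs()

# demo.launch()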
coursecrafter/ui/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (694 Bytes)
coursecrafter/ui/__pycache__/components.cpython-311.pyc
ADDED
Binary file (30.2 kB)
coursecrafter/ui/__pycache__/gradio_app.cpython-311.pyc
ADDED
Binary file (60.5 kB)
coursecrafter/ui/__pycache__/progress_tracker.cpython-311.pyc
ADDED
Binary file (26.8 kB)
coursecrafter/ui/__pycache__/styling.cpython-311.pyc
ADDED
Binary file (11.9 kB)
coursecrafter/ui/components.py
ADDED
@@ -0,0 +1,703 @@
"""
🎨 UI Components for Course Creator AI

Beautiful, modern Gradio components with custom styling and interactions.
"""

import gradio as gr
import json
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import asdict
import logging

from ..types import Course, Lesson, Flashcard, Quiz, ImageAsset

logger = logging.getLogger(__name__)


class CourseGenerationForm:
    """Main course generation form component"""

    def __init__(self):
        self.current_course = None
        self.generation_progress = 0

    def create_input_form(self) -> gr.Group:
        """Create the main input form for course generation"""

        with gr.Group() as form:
            gr.HTML("""
                <div class="header-section">
                    <h1>🚀 Course Creator AI</h1>
                    <p class="tagline">Transform any topic into an engaging course with AI</p>
                </div>
            """)

            with gr.Row():
                with gr.Column(scale=2):
                    # Main topic input
                    topic_input = gr.Textbox(
                        label="📚 Course Topic",
                        placeholder="e.g., Introduction to Machine Learning, Python for Beginners, Digital Marketing Basics",
                        lines=2,
                        elem_id="topic-input"
                    )

                    # Course configuration
                    with gr.Row():
                        difficulty_level = gr.Dropdown(
                            choices=["Beginner", "Intermediate", "Advanced"],
                            value="Intermediate",
                            label="🎯 Difficulty Level",
                            elem_id="difficulty-select"
                        )

                        duration = gr.Slider(
                            minimum=0.5,
                            maximum=8.0,
                            value=2.0,
                            step=0.5,
                            label="⏱️ Duration (hours)",
                            elem_id="duration-slider"
                        )

                    with gr.Row():
                        num_lessons = gr.Slider(
                            minimum=3,
                            maximum=12,
                            value=6,
                            step=1,
                            label="📖 Number of Lessons",
                            elem_id="lessons-slider"
                        )

                        target_audience = gr.Dropdown(
                            choices=["Students", "Professionals", "Hobbyists", "General Public"],
                            value="General Public",
                            label="👥 Target Audience",
                            elem_id="audience-select"
                        )

                with gr.Column(scale=1):
                    # Advanced options
                    gr.HTML("<h3>🔧 Advanced Options</h3>")

                    include_images = gr.Checkbox(
                        value=True,
                        label="🎨 Generate Images",
                        elem_id="images-checkbox"
                    )

                    include_quizzes = gr.Checkbox(
                        value=True,
                        label="🎯 Include Quizzes",
                        elem_id="quizzes-checkbox"
                    )

                    include_flashcards = gr.Checkbox(
                        value=True,
                        label="🃏 Create Flashcards",
                        elem_id="flashcards-checkbox"
                    )

                    content_style = gr.Dropdown(
                        choices=["Conversational", "Technical", "Academic", "Casual"],
                        value="Conversational",
                        label="✍️ Content Style",
                        elem_id="style-select"
                    )

            # Generation button
            with gr.Row():
                generate_btn = gr.Button(
                    "🚀 Generate Course",
                    variant="primary",
                    size="lg",
                    elem_id="generate-button"
                )

                clear_btn = gr.Button(
                    "🗑️ Clear",
                    variant="secondary",
                    elem_id="clear-button"
                )

        return form, {
            "topic_input": topic_input,
            "difficulty_level": difficulty_level,
            "duration": duration,
            "num_lessons": num_lessons,
            "target_audience": target_audience,
            "include_images": include_images,
            "include_quizzes": include_quizzes,
            "include_flashcards": include_flashcards,
            "content_style": content_style,
            "generate_btn": generate_btn,
            "clear_btn": clear_btn
        }


class ProgressTracker:
    """Real-time progress tracking component"""

    def __init__(self):
        self.current_step = 0
        self.total_steps = 6
        self.step_names = [
            "🔍 Researching Topic",
            "📋 Planning Course Structure",
            "✍️ Generating Content",
            "🎯 Creating Assessments",
            "🎨 Generating Images",
            "📦 Finalizing Course"
        ]

    def create_progress_display(self) -> gr.Group:
        """Create progress tracking display"""

        with gr.Group() as progress_group:
            gr.HTML("<h3>📊 Generation Progress</h3>")

            # Progress bar
            progress_bar = gr.Progress()

            # Current step indicator
            current_step_display = gr.HTML(
                "<div class='step-indicator'>Ready to generate course</div>",
                elem_id="step-indicator"
            )

            # Detailed progress log
            progress_log = gr.Textbox(
                label="📝 Progress Log",
                lines=8,
                max_lines=15,
                interactive=False,
                elem_id="progress-log"
            )

            # Status indicators
            with gr.Row():
                research_status = gr.HTML("⏳ Research", elem_id="research-status")
                planning_status = gr.HTML("⏳ Planning", elem_id="planning-status")
                content_status = gr.HTML("⏳ Content", elem_id="content-status")
                assessment_status = gr.HTML("⏳ Assessment", elem_id="assessment-status")
                images_status = gr.HTML("⏳ Images", elem_id="images-status")
                finalize_status = gr.HTML("⏳ Finalize", elem_id="finalize-status")

        return progress_group, {
            "progress_bar": progress_bar,
            "current_step_display": current_step_display,
            "progress_log": progress_log,
            "status_indicators": {
                "research": research_status,
                "planning": planning_status,
                "content": content_status,
                "assessment": assessment_status,
                "images": images_status,
                "finalize": finalize_status
            }
        }

    def update_progress(self, step: int, message: str, log_entry: str = "") -> Tuple[str, str]:
        """Update progress display"""

        self.current_step = step
        progress_percent = (step / self.total_steps) * 100

        # Update step indicator
        if step < len(self.step_names):
            step_html = f"""
            <div class='step-indicator active'>
                <div class='step-icon'>{self.step_names[step].split()[0]}</div>
                <div class='step-text'>{self.step_names[step]}</div>
                <div class='step-message'>{message}</div>
            </div>
            """
        else:
            step_html = "<div class='step-indicator complete'>✅ Course Generation Complete!</div>"

        return step_html, log_entry


class CoursePreview:
    """Interactive course preview component"""

    def __init__(self):
        self.current_course = None

    def create_preview_tabs(self) -> gr.Tabs:
        """Create tabbed course preview interface"""

        with gr.Tabs() as preview_tabs:
            # Course Overview Tab
            with gr.Tab("📚 Course Overview", elem_id="overview-tab"):
                course_overview = self._create_overview_section()

            # Lessons Tab
            with gr.Tab("📖 Lessons", elem_id="lessons-tab"):
                lessons_section = self._create_lessons_section()

            # Flashcards Tab
            with gr.Tab("🃏 Flashcards", elem_id="flashcards-tab"):
                flashcards_section = self._create_flashcards_section()

            # Quizzes Tab
            with gr.Tab("🎯 Quizzes", elem_id="quizzes-tab"):
                quizzes_section = self._create_quizzes_section()

            # Images Tab
            with gr.Tab("🎨 Images", elem_id="images-tab"):
                images_section = self._create_images_section()

            # Export Tab
            with gr.Tab("📤 Export", elem_id="export-tab"):
                export_section = self._create_export_section()

        return preview_tabs, {
            "course_overview": course_overview,
            "lessons_section": lessons_section,
            "flashcards_section": flashcards_section,
            "quizzes_section": quizzes_section,
            "images_section": images_section,
            "export_section": export_section
        }

    def _create_overview_section(self) -> Dict[str, Any]:
        """Create course overview section"""

        with gr.Group():
            # Course header
            course_title = gr.HTML(
                "<h2>Course will appear here after generation</h2>",
                elem_id="course-title"
            )

            course_metadata = gr.HTML(
                "<div class='course-metadata'>Generate a course to see details</div>",
                elem_id="course-metadata"
            )

            # Course description
            course_description = gr.Markdown(
                "Course description will appear here...",
                elem_id="course-description"
            )

            # Learning objectives
            learning_objectives = gr.HTML(
                "<div class='learning-objectives'>Learning objectives will appear here</div>",
                elem_id="learning-objectives"
            )

            # Course structure
            course_structure = gr.HTML(
                "<div class='course-structure'>Course structure will appear here</div>",
                elem_id="course-structure"
            )

        return {
            "course_title": course_title,
            "course_metadata": course_metadata,
            "course_description": course_description,
            "learning_objectives": learning_objectives,
            "course_structure": course_structure
        }

    def _create_lessons_section(self) -> Dict[str, Any]:
        """Create lessons preview section"""

        with gr.Group():
            # Lesson selector
            lesson_selector = gr.Dropdown(
                choices=[],
                label="📖 Select Lesson",
                elem_id="lesson-selector"
            )

            # Lesson content display
            lesson_title = gr.HTML(
                "<h3>Select a lesson to view content</h3>",
                elem_id="lesson-title"
            )

            lesson_content = gr.Markdown(
                "Lesson content will appear here...",
                elem_id="lesson-content"
            )

            # Lesson navigation
            with gr.Row():
                prev_lesson_btn = gr.Button(
                    "⬅️ Previous",
                    elem_id="prev-lesson-btn"
                )
                next_lesson_btn = gr.Button(
                    "➡️ Next",
                    elem_id="next-lesson-btn"
                )

        return {
            "lesson_selector": lesson_selector,
            "lesson_title": lesson_title,
            "lesson_content": lesson_content,
            "prev_lesson_btn": prev_lesson_btn,
            "next_lesson_btn": next_lesson_btn
        }

    def _create_flashcards_section(self) -> Dict[str, Any]:
        """Create flashcards preview section"""

        with gr.Group():
            # Flashcard display
            flashcard_display = gr.HTML(
                "<div class='flashcard-container'>Flashcards will appear here</div>",
                elem_id="flashcard-display"
            )

            # Flashcard controls
            with gr.Row():
                flip_card_btn = gr.Button(
                    "🔄 Flip Card",
                    elem_id="flip-card-btn"
                )
                prev_card_btn = gr.Button(
                    "⬅️ Previous",
                    elem_id="prev-card-btn"
                )
                next_card_btn = gr.Button(
                    "➡️ Next",
                    elem_id="next-card-btn"
                )

            # Flashcard progress
            flashcard_progress = gr.HTML(
                "<div class='flashcard-progress'>Card 1 of 0</div>",
                elem_id="flashcard-progress"
            )

        return {
            "flashcard_display": flashcard_display,
            "flip_card_btn": flip_card_btn,
            "prev_card_btn": prev_card_btn,
            "next_card_btn": next_card_btn,
            "flashcard_progress": flashcard_progress
        }

    def _create_quizzes_section(self) -> Dict[str, Any]:
        """Create quizzes preview section"""

        with gr.Group():
            # Quiz selector
            quiz_selector = gr.Dropdown(
                choices=[],
                label="🎯 Select Quiz",
                elem_id="quiz-selector"
            )

            # Quiz display
            quiz_content = gr.HTML(
                "<div class='quiz-container'>Select a quiz to begin</div>",
                elem_id="quiz-content"
            )

            # Quiz controls
            with gr.Row():
                start_quiz_btn = gr.Button(
                    "▶️ Start Quiz",
                    variant="primary",
                    elem_id="start-quiz-btn"
                )
                reset_quiz_btn = gr.Button(
                    "🔄 Reset",
                    elem_id="reset-quiz-btn"
                )

        return {
            "quiz_selector": quiz_selector,
            "quiz_content": quiz_content,
            "start_quiz_btn": start_quiz_btn,
            "reset_quiz_btn": reset_quiz_btn
        }

    def _create_images_section(self) -> Dict[str, Any]:
        """Create images gallery section"""

        with gr.Group():
            # Image gallery
            image_gallery = gr.Gallery(
                label="🎨 Generated Images",
                show_label=True,
                elem_id="image-gallery",
                columns=3,
                rows=2,
                height="auto"
            )

            # Image details
            image_details = gr.HTML(
                "<div class='image-details'>Select an image to view details</div>",
                elem_id="image-details"
            )

        return {
            "image_gallery": image_gallery,
            "image_details": image_details
        }

    def _create_export_section(self) -> Dict[str, Any]:
        """Create export options section"""

        with gr.Group():
            gr.HTML("<h3>📤 Export Your Course</h3>")

            # Export format selection
            with gr.Row():
                export_pdf = gr.Checkbox(
                    value=True,
                    label="📄 PDF Course Book"
                )
                export_json = gr.Checkbox(
                    value=True,
                    label="📋 JSON Data"
                )
                export_anki = gr.Checkbox(
                    value=False,
                    label="🃏 Anki Deck"
                )

            with gr.Row():
                export_notion = gr.Checkbox(
                    value=False,
                    label="📝 Notion Pages"
                )
                export_github = gr.Checkbox(
                    value=False,
                    label="🐙 GitHub Repository"
                )
                export_drive = gr.Checkbox(
                    value=False,
                    label="☁️ Google Drive"
                )

            # Export button
            export_btn = gr.Button(
                "📦 Export Course",
                variant="primary",
                size="lg",
                elem_id="export-btn"
            )

            # Download links
            download_links = gr.HTML(
                "<div class='download-links'>Export files will appear here</div>",
                elem_id="download-links"
            )

        return {
            "export_options": {
                "pdf": export_pdf,
                "json": export_json,
                "anki": export_anki,
                "notion": export_notion,
                "github": export_github,
                "drive": export_drive
            },
            "export_btn": export_btn,
            "download_links": download_links
        }


class FlashcardViewer:
    """Interactive flashcard viewer component"""

    def __init__(self):
        self.current_card_index = 0
        self.show_back = False
        self.flashcards = []

    def create_flashcard_interface(self, flashcards: List[Flashcard]) -> gr.Group:
        """Create interactive flashcard viewer"""

        self.flashcards = flashcards

        with gr.Group() as flashcard_group:
            gr.HTML("<h3>🃏 Interactive Flashcards</h3>")

            if not flashcards:
                gr.HTML("<p>No flashcards available</p>")
                return flashcard_group, {}

            # Card counter
            card_counter = gr.HTML(
                f"<div class='card-counter'>Card 1 of {len(flashcards)}</div>",
                elem_id="card-counter"
            )

            # Flashcard display
            with gr.Row():
                with gr.Column(scale=1):
                    # Card content
                    card_display = gr.HTML(
                        self._format_flashcard_html(flashcards[0], show_back=False),
                        elem_id="flashcard-display"
                    )

                    # Flip button
                    flip_btn = gr.Button(
                        "🔄 Flip Card",
                        variant="secondary",
                        elem_id="flip-button"
                    )

            # Navigation buttons
            with gr.Row():
                prev_btn = gr.Button(
                    "⬅️ Previous",
                    variant="secondary",
                    interactive=False,
                    elem_id="prev-button"
                )

                next_btn = gr.Button(
                    "➡️ Next",
                    variant="secondary",
                    interactive=len(flashcards) > 1,
                    elem_id="next-button"
                )

        return flashcard_group, {
            "card_counter": card_counter,
            "card_display": card_display,
            "flip_btn": flip_btn,
            "prev_btn": prev_btn,
            "next_btn": next_btn
        }

    def _format_flashcard_html(self, flashcard: Flashcard, show_back: bool = False) -> str:
        """Format flashcard as HTML"""

        if show_back:
            content = f"""
            <div class="flashcard flashcard-back">
                <div class="flashcard-header">Answer</div>
                <div class="flashcard-content">{flashcard.back}</div>
                <div class="flashcard-footer">
                    <span class="difficulty">{flashcard.difficulty}</span>
                    <span class="tags">{', '.join(flashcard.tags) if flashcard.tags else ''}</span>
                </div>
            </div>
            """
        else:
            content = f"""
            <div class="flashcard flashcard-front">
                <div class="flashcard-header">Question</div>
                <div class="flashcard-content">{flashcard.front}</div>
                <div class="flashcard-footer">
                    <span class="difficulty">{flashcard.difficulty}</span>
                </div>
            </div>
            """

        return content


class UIHelpers:
    """Helper functions for UI components"""

    @staticmethod
    def format_course_metadata(course: Course) -> str:
        """Format course metadata for display"""

        metadata_html = f"""
        <div class="course-metadata">
            <div class="metadata-item">
                <span class="label">🎯 Difficulty:</span>
                <span class="value">{course.difficulty_level}</span>
            </div>
            <div class="metadata-item">
                <span class="label">⏱️ Duration:</span>
                <span class="value">{course.estimated_duration} hours</span>
            </div>
            <div class="metadata-item">
                <span class="label">📖 Lessons:</span>
                <span class="value">{len(course.lessons)}</span>
            </div>
            <div class="metadata-item">
                <span class="label">👥 Audience:</span>
                <span class="value">{course.target_audience}</span>
            </div>
            <div class="metadata-item">
                <span class="label">🏷️ Tags:</span>
                <span class="value">{', '.join(course.tags)}</span>
            </div>
        </div>
        """

        return metadata_html

    @staticmethod
    def format_learning_objectives(objectives: List[str]) -> str:
        """Format learning objectives for display"""

        objectives_html = """
        <div class="learning-objectives">
            <h4>🎯 Learning Objectives</h4>
            <ul>
        """

        for objective in objectives:
            objectives_html += f"<li>{objective}</li>"

        objectives_html += """
            </ul>
        </div>
        """

        return objectives_html

    @staticmethod
    def format_flashcard(flashcard: Flashcard, show_back: bool = False) -> str:
        """Format flashcard for display"""

        card_class = "flashcard flipped" if show_back else "flashcard"
        content = flashcard.back if show_back else flashcard.front

        flashcard_html = f"""
        <div class="{card_class}">
|
668 |
+
<div class="flashcard-content">
|
669 |
+
<div class="flashcard-category">{flashcard.category}</div>
|
670 |
+
<div class="flashcard-text">{content}</div>
|
671 |
+
<div class="flashcard-difficulty">Difficulty: {flashcard.difficulty}/5</div>
|
672 |
+
</div>
|
673 |
+
</div>
|
674 |
+
"""
|
675 |
+
|
676 |
+
return flashcard_html
|
677 |
+
|
678 |
+
@staticmethod
|
679 |
+
def create_error_display(error_message: str) -> str:
|
680 |
+
"""Create error display HTML"""
|
681 |
+
|
682 |
+
error_html = f"""
|
683 |
+
<div class="error-display">
|
684 |
+
<div class="error-icon">❌</div>
|
685 |
+
<div class="error-message">{error_message}</div>
|
686 |
+
<div class="error-suggestion">Please try again or contact support if the issue persists.</div>
|
687 |
+
</div>
|
688 |
+
"""
|
689 |
+
|
690 |
+
return error_html
|
691 |
+
|
692 |
+
@staticmethod
|
693 |
+
def create_success_display(success_message: str) -> str:
|
694 |
+
"""Create success display HTML"""
|
695 |
+
|
696 |
+
success_html = f"""
|
697 |
+
<div class="success-display">
|
698 |
+
<div class="success-icon">✅</div>
|
699 |
+
<div class="success-message">{success_message}</div>
|
700 |
+
</div>
|
701 |
+
"""
|
702 |
+
|
703 |
+
return success_html
|
coursecrafter/ui/gradio_app.py
ADDED
@@ -0,0 +1,1483 @@
"""
🎨 Gradio Application for Course Creator AI

Main Gradio interface for course generation.
"""

import gradio as gr
from typing import Dict, Any, Optional, Tuple
import asyncio
import json
import markdown
import re

from ..agents.simple_course_agent import SimpleCourseAgent
from ..types import DifficultyLevel, GenerationOptions, LearningStyle
from .components import CoursePreview
from .styling import get_custom_css


def format_lessons(lessons: list) -> str:
    """Format lessons from JSON data into HTML with dark theme and markdown support"""
    if not lessons:
        return "<div class='info'>📚 No lessons generated yet.</div>"

    # Add CSS for lesson styling
    css = """
    <style>
    /* Force dark theme for all lesson elements */
    .lessons-container * {
        background: transparent !important;
        color: inherit !important;
    }

    .lessons-container {
        padding: 1rem !important;
        background: #1a1a2e !important;
        border-radius: 12px !important;
        margin: 1rem 0 !important;
        max-height: none !important;
        overflow: visible !important;
    }

    .lesson-card {
        background: #2d2d54 !important;
        border: 1px solid #4a4a7a !important;
        border-radius: 12px !important;
        padding: 2rem !important;
        margin: 1.5rem 0 !important;
        box-shadow: 0 4px 8px rgba(0,0,0,0.3) !important;
        color: #e0e7ff !important;
    }

    .lesson-card h3 {
        color: #667eea !important;
        margin-bottom: 1rem !important;
        font-size: 1.5rem !important;
        border-bottom: 2px solid #667eea !important;
        padding-bottom: 0.5rem !important;
        background: transparent !important;
    }

    .lesson-card h4 {
        color: #8b9dc3 !important;
        margin: 1.5rem 0 0.75rem 0 !important;
        font-size: 1.2rem !important;
        background: transparent !important;
    }

    .lesson-card p {
        color: #b8c5d6 !important;
        line-height: 1.6 !important;
        margin: 0.75rem 0 !important;
        font-size: 1rem !important;
        background: transparent !important;
    }

    .lesson-card ul {
        color: #e0e7ff !important;
        margin: 0.75rem 0 !important;
        padding-left: 1.5rem !important;
        background: transparent !important;
    }

    .lesson-card li {
        color: #e0e7ff !important;
        margin: 0.5rem 0 !important;
        line-height: 1.5 !important;
        background: transparent !important;
    }

    .lesson-content {
        background: #3a3a6b !important;
        border-radius: 8px !important;
        padding: 1.5rem !important;
        margin: 1rem 0 !important;
        border-left: 4px solid #667eea !important;
    }

    .lesson-content h1, .lesson-content h2, .lesson-content h3,
    .lesson-content h4, .lesson-content h5, .lesson-content h6 {
        color: #667eea !important;
        margin: 1rem 0 0.5rem 0 !important;
        background: transparent !important;
    }

    .lesson-content p {
        color: #e0e7ff !important;
        margin: 0.75rem 0 !important;
        line-height: 1.6 !important;
        background: transparent !important;
    }

    .lesson-content ul, .lesson-content ol {
        color: #e0e7ff !important;
        margin: 0.75rem 0 !important;
        padding-left: 1.5rem !important;
        background: transparent !important;
    }

    .lesson-content li {
        color: #e0e7ff !important;
        margin: 0.5rem 0 !important;
        background: transparent !important;
    }

    .lesson-content strong {
        color: #8b9dc3 !important;
    }

    .lesson-content em {
        color: #b8c5d6 !important;
    }

    .lesson-content code {
        background: #4a4a7a !important;
        color: #e0e7ff !important;
        padding: 0.2rem 0.4rem;
        border-radius: 4px;
        font-family: monospace;
    }

    .lesson-content pre {
        background: #4a4a7a !important;
        color: #e0e7ff !important;
        padding: 1rem;
        border-radius: 8px;
        overflow-x: auto;
        margin: 1rem 0;
    }

    .lesson-card ul {
        color: #e0e7ff !important;
        margin: 0.75rem 0;
        padding-left: 1.5rem;
    }

    .lesson-card li {
        color: #e0e7ff !important;
        margin: 0.5rem 0;
        line-height: 1.5;
    }

    .lesson-image {
        margin: 1.5rem 0;
        text-align: center;
    }

    .image-placeholder {
        background: #4a4a7a;
        border: 2px dashed #667eea;
        border-radius: 8px;
        padding: 2rem;
        text-align: center;
        color: #b8c5d6;
    }

    .image-icon {
        font-size: 3rem;
        margin-bottom: 1rem;
    }

    .image-description {
        font-size: 1.1rem;
        margin-bottom: 0.5rem;
        color: #e0e7ff;
    }

    .image-note {
        font-size: 0.9rem;
        font-style: italic;
        opacity: 0.7;
    }

    .duration-info {
        background: #4a4a7a !important;
        color: #e0e7ff !important;
        padding: 0.5rem 1rem !important;
        border-radius: 20px !important;
        display: inline-block !important;
        margin-bottom: 1rem !important;
        font-size: 0.9rem !important;
    }

    /* Ultimate override for any stubborn white backgrounds */
    .lessons-container .lesson-card,
    .lessons-container .lesson-card *,
    .lessons-container .lesson-content,
    .lessons-container .lesson-content * {
        background-color: transparent !important;
    }

    .lessons-container .lesson-card {
        background: #2d2d54 !important;
    }

    .lessons-container .lesson-content {
        background: #3a3a6b !important;
    }
    </style>
    """

    html = css + "<div class='lessons-container'>"
    for i, lesson in enumerate(lessons, 1):
        title = lesson.get("title", f"Lesson {i}")
        content = lesson.get("content", "")
        duration = lesson.get("duration", "")
        objectives = lesson.get("objectives", [])
        key_takeaways = lesson.get("key_takeaways", [])
        image_description = lesson.get("image_description", "")

        # Convert markdown content to HTML
        if content:
            try:
                # Create markdown instance with extensions
                import markdown
                md = markdown.Markdown(extensions=['extra', 'codehilite'])
                content_html = md.convert(content)
            except ImportError:
                # Fallback if markdown is not available
                content_html = content.replace('\n\n', '</p><p>').replace('\n', '<br>')
                if content_html and not content_html.startswith('<p>'):
                    content_html = f'<p>{content_html}</p>'
        else:
            content_html = "<p>No content available.</p>"

        # Generate image placeholder or actual image
        image_html = ""
        if image_description:
            # Check if we have actual image data
            images = lesson.get("images", [])
            if images and len(images) > 0:
                # Display actual generated images
                image_html = "<div class='lesson-images'>"
                for img in images:
                    if isinstance(img, dict) and img.get("url"):
                        img_url = img.get("url", "")
                        img_caption = img.get("description", image_description)
                        image_html += f"""
                        <div class='lesson-image'>
                            <img src='{img_url}' alt='{img_caption}' loading='lazy' style='max-width: 100%; border-radius: 8px; box-shadow: 0 4px 8px rgba(0,0,0,0.3); border: 2px solid #4a4a7a;'>
                            <p class='image-caption'>{img_caption}</p>
                        </div>
                        """
                image_html += "</div>"
            else:
                # Fallback to placeholder
                image_html = f"""
                <div class='lesson-image'>
                    <div class='image-placeholder'>
                        <div class='image-icon'>🖼️</div>
                        <div class='image-description'>{image_description}</div>
                        <div class='image-note'>(Image generation in progress...)</div>
                    </div>
                </div>
                """

        html += f"""
        <div class='lesson-card'>
            <h3>📖 {title}</h3>
            {f"<div class='duration-info'>⏱️ Duration: {duration} minutes</div>" if duration else ""}

            {f"<h4>🎯 Learning Objectives:</h4><ul>{''.join([f'<li>{obj}</li>' for obj in objectives])}</ul>" if objectives else ""}

            {image_html}

            <div class='lesson-content'>
                {content_html}
            </div>

            {f"<h4>💡 Key Takeaways:</h4><ul>{''.join([f'<li>{takeaway}</li>' for takeaway in key_takeaways])}</ul>" if key_takeaways else ""}
        </div>
        """
    html += "</div>"
    return html


def format_flashcards(flashcards: list) -> str:
    """Format flashcards from JSON data into interactive HTML with CSS-only flip"""
    if not flashcards:
        return "<div class='info'>🃏 No flashcards generated yet.</div>"

    # Add the CSS for flashcard flip functionality
    css = """
    <style>
    .flashcards-container {
        padding: 1rem;
        background: #1a1a2e;
        border-radius: 12px;
        margin: 1rem 0;
        max-height: none !important;
        overflow: visible !important;
    }

    .flashcard-wrapper {
        perspective: 1000px;
        margin: 1rem 0;
        height: 200px;
    }

    .flip-checkbox {
        display: none;
    }

    .flashcard {
        position: relative;
        width: 100%;
        height: 100%;
        cursor: pointer;
        transform-style: preserve-3d;
        transition: transform 0.6s;
        display: block;
    }

    .flip-checkbox:checked + .flashcard {
        transform: rotateY(180deg);
    }

    .flashcard-inner {
        position: relative;
        width: 100%;
        height: 100%;
        transform-style: preserve-3d;
    }

    .flashcard-front, .flashcard-back {
        position: absolute;
        width: 100%;
        height: 100%;
        backface-visibility: hidden;
        border-radius: 12px;
        padding: 1.5rem;
        display: flex;
        flex-direction: column;
        justify-content: center;
        align-items: center;
        text-align: center;
        box-shadow: 0 4px 8px rgba(0,0,0,0.3);
    }

    .flashcard-front {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
    }

    .flashcard-back {
        background: linear-gradient(135deg, #764ba2 0%, #667eea 100%);
        color: white;
        transform: rotateY(180deg);
    }

    .flashcard-category {
        position: absolute;
        top: 10px;
        right: 15px;
        background: rgba(255,255,255,0.2);
        padding: 0.25rem 0.5rem;
        border-radius: 12px;
        font-size: 0.8rem;
        font-weight: bold;
    }

    .flashcard-content {
        font-size: 1.2rem;
        font-weight: 500;
        line-height: 1.4;
        margin: 1rem 0;
        color: white;
    }

    .flashcard-hint {
        position: absolute;
        bottom: 10px;
        left: 50%;
        transform: translateX(-50%);
        font-size: 0.8rem;
        opacity: 0.8;
        font-style: italic;
    }

    .flashcard:hover {
        box-shadow: 0 6px 12px rgba(0,0,0,0.4);
    }
    </style>
    """

    html = css + "<div class='flashcards-container'>"
    html += "<p style='color: #e0e7ff; text-align: center; margin-bottom: 1rem;'><strong>🃏 Click on any flashcard to flip it and see the answer!</strong></p>"

    for i, card in enumerate(flashcards):
        question = card.get("question", "")
        answer = card.get("answer", "")
        category = card.get("category", "General")

        # Use CSS-only flip with checkbox hack
        html += f"""
        <div class='flashcard-wrapper'>
            <input type='checkbox' id='flip-{i}' class='flip-checkbox'>
            <label for='flip-{i}' class='flashcard'>
                <div class='flashcard-inner'>
                    <div class='flashcard-front'>
                        <div class='flashcard-category'>{category}</div>
                        <div class='flashcard-content'>{question}</div>
                        <div class='flashcard-hint'>Click to flip</div>
                    </div>
                    <div class='flashcard-back'>
                        <div class='flashcard-category'>{category}</div>
                        <div class='flashcard-content'>{answer}</div>
                        <div class='flashcard-hint'>Click to flip back</div>
                    </div>
                </div>
            </label>
        </div>
        """

    html += "</div>"
    return html


def format_quiz(quiz: dict) -> str:
    """Format quiz from JSON data into interactive HTML with working JavaScript."""
    if not quiz or not quiz.get("questions"):
        return "<div class='info'>📝 No quiz generated yet.</div>"

    title = quiz.get("title", "Course Quiz")
    instructions = quiz.get("instructions", "Choose the best answer for each question.")
    questions = quiz.get("questions", [])

    if not questions:
        return "<div class='info'>📝 No quiz questions available.</div>"

    # Generate unique quiz ID
    quiz_id = f"quiz_{abs(hash(str(questions)))%10000}"

    # CSS and JavaScript for quiz functionality
    quiz_html = f"""
    <style>
    .quiz-container {{
        background: #1a1a2e;
        border-radius: 12px;
        padding: 2rem;
        color: #e0e7ff;
        max-height: none !important;
        overflow: visible !important;
    }}
    .quiz-container h3 {{
        color: #667eea;
        text-align: center;
        margin-bottom: 1rem;
    }}
    .quiz-question {{
        background: #2d2d54;
        border-radius: 8px;
        padding: 1.5rem;
        margin: 1.5rem 0;
        border-left: 4px solid #667eea;
    }}
    .quiz-question h4 {{
        color: #e0e7ff;
        margin-bottom: 1rem;
        font-size: 1.1rem;
    }}
    .quiz-options {{
        margin: 1rem 0;
    }}
    .quiz-option-label {{
        display: flex;
        align-items: center;
        padding: 0.75rem 1rem;
        background: #3a3a6b;
        border: 2px solid #4a4a7a;
        border-radius: 8px;
        margin: 0.5rem 0;
        cursor: pointer;
        color: #e0e7ff;
        transition: all 0.2s;
    }}
    .quiz-option-label:hover {{
        background: #4a4a7a;
        border-color: #667eea;
    }}
    .quiz-radio {{
        display: none;
    }}
    .quiz-radio:checked + .quiz-option-label {{
        background: #667eea;
        color: white;
        border-color: #667eea;
    }}
    .radio-custom {{
        width: 20px;
        height: 20px;
        border: 2px solid #667eea;
        border-radius: 50%;
        margin-right: 0.75rem;
        position: relative;
    }}
    .quiz-radio:checked + .quiz-option-label .radio-custom::after {{
        content: '';
        width: 10px;
        height: 10px;
        border-radius: 50%;
        background: white;
        position: absolute;
        top: 50%;
        left: 50%;
        transform: translate(-50%, -50%);
    }}
    .quiz-feedback {{
        margin-top: 1rem;
        padding: 1rem;
        border-radius: 6px;
        font-weight: 500;
        display: none;
    }}
    .feedback-correct {{
        background: #d4edda;
        color: #155724;
        border: 1px solid #c3e6cb;
    }}
    .feedback-incorrect {{
        background: #f8d7da;
        color: #721c24;
        border: 1px solid #f5c6cb;
    }}
    .feedback-unanswered {{
        background: #fff3cd;
        color: #856404;
        border: 1px solid #ffeaa7;
    }}
    .quiz-results {{
        margin-top: 2rem;
        padding: 1.5rem;
        background: linear-gradient(135deg, #667eea, #764ba2);
        color: white;
        border-radius: 8px;
        text-align: center;
        font-size: 1.1rem;
        display: none;
    }}
    .quiz-score {{
        font-size: 1.5rem;
        font-weight: bold;
        margin-bottom: 0.5rem;
    }}
    </style>

    <div class="quiz-container" id="{quiz_id}">
        <h3>📝 {title}</h3>
        <p style="text-align:center; color:#b8c5d6; margin-bottom: 2rem;"><em>{instructions}</em></p>
        <form id="quiz-form-{quiz_id}">
    """

    # Display each question
    for idx, q in enumerate(questions):
        question_text = q.get("question", "")
        options = q.get("options", [])
        correct_answer = q.get("correct_answer", "A")
        explanation = q.get("explanation", "")

        quiz_html += f"""
        <div class="quiz-question" data-correct="{correct_answer}" data-explanation="{explanation}">
            <h4>Q{idx+1}: {question_text}</h4>
            <div class="quiz-options">
        """

        # Display options
        for j, option in enumerate(options):
            option_letter = option[0] if option and len(option) > 0 else chr(65 + j)
            option_text = option[3:] if option.startswith(f"{option_letter}. ") else option

            quiz_html += f"""
            <div>
                <input type="radio" id="q{idx}_o{j}_{quiz_id}" name="q{idx}" value="{option_letter}" class="quiz-radio">
                <label for="q{idx}_o{j}_{quiz_id}" class="quiz-option-label">
                    <span class="radio-custom"></span>
                    <strong>{option_letter}.</strong> {option_text}
                </label>
            </div>
            """

        quiz_html += f"""
            </div>
            <div class="quiz-feedback" id="feedback-{idx}-{quiz_id}"></div>
        </div>
        """

    # Close form and add results container
    quiz_html += f"""
        </form>
    </div>


    """

    return quiz_html


def create_coursecrafter_interface() -> gr.Blocks:
    """Create the main Course Creator Gradio interface"""

    with gr.Blocks(
        title="Course Creator AI - Intelligent Course Generator",
        css=get_custom_css(),
        theme=gr.themes.Soft()
    ) as interface:

        # Header
        gr.HTML("""
        <div class="header-container">
            <h1>🎓 Course Creator AI</h1>
            <p>Generate comprehensive mini-courses with AI-powered content, flashcards, and quizzes</p>
        </div>
        """)

        # LLM Provider Configuration
        with gr.Row():
            with gr.Column():
                gr.HTML("<h3>🤖 LLM Provider Configuration</h3>")
                with gr.Row():
                    llm_provider = gr.Dropdown(
                        label="LLM Provider",
                        choices=["openai", "anthropic", "google", "openai_compatible"],
                        value="google",
                        info="Choose your preferred LLM provider"
                    )
                    api_key_input = gr.Textbox(
                        label="API Key",
                        placeholder="Enter your API key here...",
                        type="password",
                        info="Your API key for the selected provider (optional for OpenAI-compatible)"
                    )

                # OpenAI-Compatible endpoint configuration (initially hidden)
                with gr.Row(visible=False) as openai_compatible_row:
                    endpoint_url_input = gr.Textbox(
                        label="Endpoint URL",
                        placeholder="https://your-endpoint.com/v1",
                        info="Base URL for OpenAI-compatible API"
                    )
                    model_name_input = gr.Textbox(
                        label="Model Name",
                        placeholder="your-model-name",
                        info="Model name to use with the endpoint"
                    )

        # Main interface
        with gr.Row():
            with gr.Column(scale=1):
                # Course generation form
                topic_input = gr.Textbox(
                    label="Course Topic",
                    placeholder="e.g., Introduction to Python Programming",
                    lines=1
                )

                difficulty_input = gr.Dropdown(
                    label="Difficulty Level",
                    choices=["beginner", "intermediate", "advanced"],
                    value="beginner"
                )

                lesson_count = gr.Slider(
                    label="Number of Lessons",
                    minimum=1,
                    maximum=10,
                    value=5,
                    step=1
                )

                generate_btn = gr.Button(
                    "🚀 Generate Course",
                    variant="primary",
                    size="lg"
                )

                # Chat interface for course refinement
                gr.HTML("<hr><h3>💬 Course Assistant</h3>")

                # Chat window with proper styling
                with gr.Column():
                    chat_display = gr.HTML(
                        value="""
                        <div class='chat-window'>
                            <div class='chat-messages' id='chat-messages'>
                                <div class='chat-message assistant-message'>
                                    <div class='message-avatar'>🤖</div>
                                    <div class='message-content'>
                                        <div class='message-text'>Hi! I'm your Course Assistant. Generate a course first, then ask me questions about the lessons, concepts, or content!</div>
                                    </div>
                                </div>
                            </div>
                        </div>
                        """,
                        elem_id="chat-display"
                    )

                    with gr.Row():
                        chat_input = gr.Textbox(
                            placeholder="Ask me to modify the course...",
                            lines=1,
                            scale=4,
                            container=False
                        )
                        chat_btn = gr.Button("Send", variant="secondary", scale=1)

            with gr.Column(scale=2):
                # Course preview tabs with enhanced components
                course_preview = CoursePreview()

                with gr.Tabs():
                    with gr.Tab("📖 Lessons"):
                        lessons_output = gr.HTML(
                            value="""
                            <div class='lessons-container' style='padding: 2rem; text-align: center; background: #1a1a2e; border-radius: 12px; color: #e0e7ff;'>
                                <h3 style='color: #667eea; margin-bottom: 1rem;'>🎓 Ready to Generate Your Course!</h3>
                                <p style='color: #b8c5d6; font-size: 1.1rem; margin-bottom: 1.5rem;'>Enter a topic and click "Generate Course" to create comprehensive lessons with AI-powered content.</p>
                                <div style='background: #2d2d54; padding: 1.5rem; border-radius: 8px; border-left: 4px solid #667eea;'>
                                    <p style='color: #e0e7ff; margin: 0;'>💡 <strong>Tip:</strong> Try topics like "Introduction to Python Programming", "Digital Marketing Basics", or "Climate Change Science"</p>
                                </div>
                            </div>
                            """
                        )

                    with gr.Tab("🃏 Flashcards"):
                        flashcards_output = gr.HTML(
                            value="""
                            <div class='flashcards-container' style='padding: 2rem; text-align: center; background: #1a1a2e; border-radius: 12px; color: #e0e7ff;'>
                                <h3 style='color: #667eea; margin-bottom: 1rem;'>🃏 Interactive Flashcards</h3>
                                <p style='color: #b8c5d6;'>Flashcards will appear here after course generation. They'll help reinforce key concepts with spaced repetition learning!</p>
                            </div>
                            """
                        )

                    with gr.Tab("📝 Quizzes"):
                        # Quiz functionality with HTML content and state management
                        quiz_state = gr.State({})  # Store quiz data
                        quizzes_output = gr.HTML(
                            value="""
                            <div class='quiz-container' style='padding: 2rem; text-align: center; background: #1a1a2e; border-radius: 12px; color: #e0e7ff;'>
                                <h3 style='color: #667eea; margin-bottom: 1rem;'>📝 Knowledge Assessment</h3>
                                <p style='color: #b8c5d6;'>Interactive quizzes will appear here to test your understanding of the course material!</p>
                            </div>
                            """
                        )
                        quiz_results = gr.HTML(visible=False)
                        quiz_submit_btn = gr.Button("Submit Quiz", variant="primary", visible=False)

                    with gr.Tab("🎨 Images"):
                        images_section = course_preview._create_images_section()
                        image_gallery = images_section["image_gallery"]
                        image_details = images_section["image_details"]

        # Store generated course content for chat context
        course_context = {"content": "", "topic": "", "agent": None}

        # Provider change handler to show/hide OpenAI-compatible fields
        def on_provider_change(provider):
            if provider == "openai_compatible":
                return gr.update(visible=True)
            else:
                return gr.update(visible=False)

        # Event handlers
        async def generate_course_wrapper(topic: str, difficulty: str, lessons: int, provider: str, api_key: str, endpoint_url: str, model_name: str, progress=gr.Progress()):
            """Wrapper for course generation with progress tracking"""
            if not topic.strip():
                return (
                    "<div class='error'>❌ Please enter a topic for your course.</div>",
                    "", "",
                    gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
                )

            if not api_key.strip() and provider != "openai_compatible":
                return (
                    "<div class='error'>❌ Please enter your API key for the selected LLM provider.</div>",
                    "", "",
                    gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
                )

            if provider == "openai_compatible" and not endpoint_url.strip():
                return (
                    "<div class='error'>❌ Please enter the endpoint URL for OpenAI-compatible provider.</div>",
                    "", "",
                    gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
                )

            if provider == "openai_compatible" and not model_name.strip():
                return (
                    "<div class='error'>❌ Please enter the model name for OpenAI-compatible provider.</div>",
                    "", "",
                    gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
                )

            try:
                # Initialize progress
                progress(0, desc="🚀 Initializing Course Generator...")

                # Set the API key and configuration for the selected provider
                import os
                if provider == "openai":
                    os.environ["OPENAI_API_KEY"] = api_key
                elif provider == "anthropic":
                    os.environ["ANTHROPIC_API_KEY"] = api_key
                elif provider == "google":
                    os.environ["GOOGLE_API_KEY"] = api_key
                elif provider == "openai_compatible":
                    if api_key.strip():
                        os.environ["OPENAI_COMPATIBLE_API_KEY"] = api_key
                    os.environ["OPENAI_COMPATIBLE_BASE_URL"] = endpoint_url
                    os.environ["OPENAI_COMPATIBLE_MODEL"] = model_name

                # Initialize the simplified agent
                agent = SimpleCourseAgent()
                # Override the default provider with user selection
                agent.default_provider = provider
                course_context["agent"] = agent
                course_context["topic"] = topic

                progress(0.1, desc="⚙️ Setting up generation options...")

                # Create generation options
                options = GenerationOptions(
                    difficulty=DifficultyLevel(difficulty),
                    lesson_count=lessons,
                    include_images=True,
                    include_flashcards=True,
                    include_quizzes=True
                )

                progress(0.15, desc="🔍 Checking available providers...")

                # Get available providers
                available_providers = agent.get_available_providers()
                if not available_providers:
                    return (
                        "<div class='error'>❌ No LLM providers available. Please check your API keys.</div>",
                        "", "",
                        gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
                    )

                progress(0.2, desc="🎓 Starting course generation...")

                # Use the default provider from config (no need to override)
                # The agent will automatically use the configured default provider

                # Start course generation
                lessons_html = ""
                flashcards_html = ""
                quizzes_html = ""

                # Stream the generation process
                course_data = None
                current_progress = 0.2

                # Add a simple counter for fallback progress
                chunk_count = 0
                max_expected_chunks = 10  # Rough estimate

                async for chunk in agent.generate_course(topic, options):
                    chunk_count += 1
                    print(f"📊 Progress Debug: Received chunk type='{chunk.type}', content='{chunk.content}'")

                    # Update progress based on chunk content
                    if chunk.type == "progress":
                        # Check if the progress message matches our known steps (handle emojis)
                        step_found = False
                        progress_message = chunk.content.lower()
                        print(f"🔍 Checking progress message: '{progress_message}'")

                        if "research completed" in progress_message:
                            current_progress = 0.3
                            step_found = True
                            print(f"✅ Matched: Research completed -> {current_progress}")
                            progress(current_progress, desc="📚 Research completed, planning course structure...")
                        elif "course structure planned" in progress_message:
                            current_progress = 0.4
                            step_found = True
                            print(f"✅ Matched: Course structure planned -> {current_progress}")
                            progress(current_progress, desc="📝 Course structure planned, generating content...")
                        elif "lessons created" in progress_message:
                            current_progress = 0.6
                            step_found = True
                            print(f"✅ Matched: Lessons created -> {current_progress}")
                            progress(current_progress, desc="✍️ Lessons created, generating flashcards...")
                        elif "flashcards created" in progress_message:
                            current_progress = 0.75
                            step_found = True
                            print(f"✅ Matched: Flashcards created -> {current_progress}")
                            progress(current_progress, desc="🃏 Flashcards created, creating quiz...")
                        elif "quiz created" in progress_message:
                            current_progress = 0.8
                            step_found = True
                            print(f"✅ Matched: Quiz created -> {current_progress}")
                            progress(current_progress, desc="❓ Quiz created, generating images...")
                        elif "images generated" in progress_message:
                            current_progress = 0.9
                            step_found = True
                            print(f"✅ Matched: Images generated -> {current_progress}")
                            progress(current_progress, desc="🎨 Images generated, finalizing course...")
                        elif "finalizing course" in progress_message:
                            current_progress = 0.95
                            step_found = True
                            print(f"✅ Matched: Finalizing course -> {current_progress}")
                            progress(current_progress, desc="📦 Assembling final course data...")

                        if not step_found:
                            # Fallback: increment progress based on chunk count
                            fallback_progress = min(0.2 + (chunk_count / max_expected_chunks) * 0.6, 0.85)
                            current_progress = max(current_progress, fallback_progress)
                            print(f"⚠️ No match found, using fallback: {fallback_progress}")
                            progress(current_progress, desc=f"{chunk.content}")

                    elif chunk.type == "course_complete":
                        current_progress = 0.95
                        progress(current_progress, desc="📦 Finalizing course data...")
                        # Parse the complete course data
                        try:
                            course_data = json.loads(chunk.content)
                        except:
                            course_data = None

                progress(0.97, desc="🎨 Processing course content...")

                # If we got course data, format it nicely
                if course_data:
                    course_context["content"] = course_data

                    # Format lessons
                    lessons_html = format_lessons(course_data.get("lessons", []))

                    # Format flashcards
                    flashcards_html = format_flashcards(course_data.get("flashcards", []))

                    # Format quiz
                    quiz_data = course_data.get("quiz", {})
                    quizzes_html = format_quiz(quiz_data)

                    # Show quiz button if quiz exists
                    quiz_btn_visible = bool(quiz_data and quiz_data.get("questions"))

                    progress(0.98, desc="🖼️ Processing images for gallery...")

                    # Prepare image gallery data - fix the format for Gradio Gallery
                    images = []
                    image_details_list = []

                    # Process images from lessons
                    for lesson in course_data.get("lessons", []):
                        lesson_images = lesson.get("images", [])
                        for i, img in enumerate(lesson_images):
                            try:
                                if isinstance(img, dict):
                                    # Handle different image data formats
                                    image_url = img.get("url") or img.get("data_url")
                                    if image_url:
                                        alt_text = img.get("caption", img.get("description", "Educational image"))

                                        # Handle base64 data URLs by converting to temp files
                                        if image_url.startswith('data:image/'):
                                            import base64
                                            import tempfile
                                            import os

                                            # Extract base64 data
                                            header, data = image_url.split(',', 1)
                                            image_data = base64.b64decode(data)

                                            # Determine file extension from header
                                            if 'jpeg' in header or 'jpg' in header:
                                                ext = '.jpg'
                                            elif 'png' in header:
                                                ext = '.png'
                                            elif 'gif' in header:
                                                ext = '.gif'
                                            elif 'webp' in header:
                                                ext = '.webp'
                                            else:
                                                ext = '.jpg'  # Default

                                            # Create temp file
                                            temp_fd, temp_path = tempfile.mkstemp(suffix=ext, prefix=f'course_img_{i}_')
                                            try:
                                                with os.fdopen(temp_fd, 'wb') as f:
                                                    f.write(image_data)
                                                images.append(temp_path)
                                                image_details_list.append({
                                                    "url": temp_path,
                                                    "caption": alt_text,
                                                    "lesson": lesson.get("title", "Unknown lesson")
                                                })
                                            except Exception as e:
                                                print(f"⚠️ Failed to save temp image {i}: {e}")
                                                os.close(temp_fd)  # Close if write failed
                                                continue

                                        elif image_url.startswith('http'):
                                            # Regular URL - Gradio can handle these directly
                                            images.append(image_url)
                                            image_details_list.append({
                                                "url": image_url,
                                                "caption": alt_text,
                                                "lesson": lesson.get("title", "Unknown lesson")
                                            })
                                        else:
                                            # Assume it's a file path
                                            if len(image_url) <= 260:  # Windows path limit
                                                images.append(image_url)
                                                image_details_list.append({
                                                    "url": image_url,
                                                    "caption": alt_text,
                                                    "lesson": lesson.get("title", "Unknown lesson")
                                                })
                                            else:
                                                print(f"⚠️ Skipping image {i}: path too long ({len(image_url)} chars)")
                                elif isinstance(img, str):
                                    # Handle case where image is just a URL string
                                    if img.startswith('data:image/'):
                                        # Handle base64 data URLs
                                        import base64
                                        import tempfile
                                        import os

                                        try:
                                            header, data = img.split(',', 1)
                                            image_data = base64.b64decode(data)

                                            # Determine file extension from header
                                            if 'jpeg' in header or 'jpg' in header:
                                                ext = '.jpg'
                                            elif 'png' in header:
                                                ext = '.png'
                                            elif 'gif' in header:
                                                ext = '.gif'
                                            elif 'webp' in header:
                                                ext = '.webp'
                                            else:
                                                ext = '.jpg'  # Default

                                            # Create temp file
                                            temp_fd, temp_path = tempfile.mkstemp(suffix=ext, prefix=f'course_img_{i}_')
                                            try:
                                                with os.fdopen(temp_fd, 'wb') as f:
                                                    f.write(image_data)
                                                images.append(temp_path)
                                                image_details_list.append({
                                                    "url": temp_path,
                                                    "caption": "Educational image",
                                                    "lesson": lesson.get("title", "Unknown lesson")
                                                })
                                            except Exception as e:
                                                print(f"⚠️ Failed to save temp image {i}: {e}")
                                                os.close(temp_fd)  # Close if write failed
                                                continue
                                        except Exception as e:
                                            print(f"⚠️ Error processing base64 image {i}: {e}")
                                            continue
                                    else:
                                        # Regular URL or file path
                                        images.append(img)
                                        image_details_list.append({
                                            "url": img,
                                            "caption": "Educational image",
                                            "lesson": lesson.get("title", "Unknown lesson")
                                        })
                            except Exception as e:
                                print(f"⚠️ Error processing image {i}: {e}")
                                continue

                    # Also check for standalone images in course data
                    standalone_images = course_data.get("images", [])
                    for i, img in enumerate(standalone_images):
                        try:
                            if isinstance(img, dict):
                                image_url = img.get("url") or img.get("data_url")
                                if image_url:
                                    alt_text = img.get("caption", img.get("description", "Course image"))

                                    # Handle base64 data URLs
                                    if image_url.startswith('data:image/'):
                                        import base64
                                        import tempfile
                                        import os

                                        try:
                                            header, data = image_url.split(',', 1)
                                            image_data = base64.b64decode(data)

                                            # Determine file extension from header
                                            if 'jpeg' in header or 'jpg' in header:
                                                ext = '.jpg'
                                            elif 'png' in header:
                                                ext = '.png'
                                            elif 'gif' in header:
                                                ext = '.gif'
                                            elif 'webp' in header:
                                                ext = '.webp'
                                            else:
                                                ext = '.jpg'  # Default

                                            # Create temp file
                                            temp_fd, temp_path = tempfile.mkstemp(suffix=ext, prefix=f'standalone_img_{i}_')
                                            try:
                                                with os.fdopen(temp_fd, 'wb') as f:
                                                    f.write(image_data)
                                                images.append(temp_path)
                                                image_details_list.append({
                                                    "url": temp_path,
                                                    "caption": alt_text,
                                                    "lesson": "Course Overview"
                                                })
                                            except Exception as e:
                                                print(f"⚠️ Failed to save temp standalone image {i}: {e}")
                                                os.close(temp_fd)  # Close if write failed
                                                continue
                                        except Exception as e:
                                            print(f"⚠️ Error processing base64 standalone image {i}: {e}")
                                            continue
                                    else:
                                        images.append(image_url)
                                        image_details_list.append({
                                            "url": image_url,
                                            "caption": alt_text,
                                            "lesson": "Course Overview"
                                        })
                            elif isinstance(img, str):
                                if img.startswith('data:image/'):
                                    # Handle base64 data URLs
                                    import base64
                                    import tempfile
                                    import os

                                    try:
                                        header, data = img.split(',', 1)
                                        image_data = base64.b64decode(data)

                                        # Determine file extension from header
                                        if 'jpeg' in header or 'jpg' in header:
                                            ext = '.jpg'
                                        elif 'png' in header:
                                            ext = '.png'
                                        elif 'gif' in header:
                                            ext = '.gif'
                                        elif 'webp' in header:
                                            ext = '.webp'
                                        else:
                                            ext = '.jpg'  # Default

                                        # Create temp file
                                        temp_fd, temp_path = tempfile.mkstemp(suffix=ext, prefix=f'standalone_img_{i}_')
                                        try:
                                            with os.fdopen(temp_fd, 'wb') as f:
                                                f.write(image_data)
                                            images.append(temp_path)
                                            image_details_list.append({
                                                "url": temp_path,
                                                "caption": "Course image",
                                                "lesson": "Course Overview"
                                            })
                                        except Exception as e:
                                            print(f"⚠️ Failed to save temp standalone image {i}: {e}")
                                            os.close(temp_fd)  # Close if write failed
                                            continue
                                    except Exception as e:
                                        print(f"⚠️ Error processing base64 standalone image {i}: {e}")
                                        continue
                                else:
                                    images.append(img)
                                    image_details_list.append({
                                        "url": img,
                                        "caption": "Course image",
                                        "lesson": "Course Overview"
                                    })
                        except Exception as e:
                            print(f"⚠️ Error processing standalone image {i}: {e}")
                            continue

                    print(f"📸 Prepared {len(images)} images for gallery display")

                    # Create image details HTML for display
                    if image_details_list:
                        image_details_html = "<div class='image-details-container'>"
                        image_details_html += "<h4>🖼️ Image Gallery</h4>"
                        image_details_html += f"<p>Total images: {len(image_details_list)}</p>"
                        image_details_html += "<ul>"
                        for i, img_detail in enumerate(image_details_list, 1):
                            image_details_html += f"<li><strong>Image {i}:</strong> {img_detail['caption']} (from {img_detail['lesson']})</li>"
                        image_details_html += "</ul></div>"
                    else:
                        image_details_html = "<div class='image-details'>No images available</div>"

                    progress(1.0, desc="✅ Course generation complete!")

                    return (
                        lessons_html, flashcards_html, quizzes_html,
                        gr.update(visible=quiz_btn_visible), images, image_details_html
                    )
                else:
                    quiz_btn_visible = False
                    progress(1.0, desc="⚠️ Course generation completed with issues")

                    return (
                        "", "", "",
                        gr.update(visible=quiz_btn_visible), [], "<div class='image-details'>No images available</div>"
                    )

            except Exception as e:
                import traceback
                error_details = traceback.format_exc()
                print(f"Error in course generation: {error_details}")
                return (
                    "", "", "",
                    gr.update(visible=False), [], "<div class='image-details'>Error loading images</div>"
                )

        def handle_quiz_submit():
            """Handle quiz submission using client-side processing"""
            # This function will be replaced by client-side JavaScript
            return gr.update()

        async def handle_chat(message: str, current_chat: str):
            """Handle chat messages for answering questions about the course content"""
            if not message.strip():
                return current_chat, ""

            if not course_context["content"] or not course_context["agent"]:
                assistant_response = "Please generate a course first before asking questions about it."
            else:
                try:
                    # Get the agent and course content
                    agent = course_context["agent"]
                    course_data = course_context["content"]
                    topic = course_context["topic"]

                    # Create context from the course content
                    course_context_text = f"Course Topic: {topic}\n\n"

                    # Add lessons content
                    lessons = course_data.get("lessons", [])
                    for i, lesson in enumerate(lessons, 1):
                        course_context_text += f"Lesson {i}: {lesson.get('title', '')}\n"
                        course_context_text += f"Content: {lesson.get('content', '')[:1000]}...\n"
                        if lesson.get('key_takeaways'):
                            course_context_text += f"Key Takeaways: {', '.join(lesson.get('key_takeaways', []))}\n"
                        course_context_text += "\n"

                    # Add flashcards context
                    flashcards = course_data.get("flashcards", [])
                    if flashcards:
                        course_context_text += "Flashcards:\n"
                        for card in flashcards[:5]:  # Limit to first 5
                            course_context_text += f"Q: {card.get('question', '')} A: {card.get('answer', '')}\n"
                        course_context_text += "\n"

                    # Create a focused prompt for answering questions
                    prompt = f"""You are a helpful course assistant. Answer the user's question about the course content below.

Course Content:
{course_context_text}

User Question: {message}

Instructions:
- Answer based ONLY on the course content provided above
- Be helpful, clear, and educational
- If the question is about something not covered in the course, say so politely
- Keep responses concise but informative
- Use a friendly, teaching tone

Answer:"""

                    # Use the default provider (same as course generation)
                    provider = agent.default_provider
                    available_providers = agent.get_available_providers()
                    if provider not in available_providers:
                        # Fallback to first available if default isn't available
                        provider = available_providers[0] if available_providers else None

                    if provider:
                        # Use the agent's LLM to get a response
                        from ..agents.simple_course_agent import Message
                        messages = [
                            Message(role="system", content="You are a helpful course assistant that answers questions about course content."),
                            Message(role="user", content=prompt)
                        ]

                        print(f"🤖 Chat using LLM provider: {provider}")
                        assistant_response = await agent._get_llm_response(provider, messages)

                        # Clean up the response
                        assistant_response = assistant_response.strip()
                        if assistant_response.startswith("Answer:"):
                            assistant_response = assistant_response[7:].strip()

                    else:
                        assistant_response = "Sorry, no LLM providers are available to answer your question."

                except Exception as e:
                    print(f"Error in chat: {e}")
                    assistant_response = "Sorry, I encountered an error while trying to answer your question. Please try again."

            # Extract existing messages from current chat HTML
            existing_messages = ""
            if current_chat and "chat-message" in current_chat:
                # Keep existing messages
                start = current_chat.find('<div class="chat-messages"')
                if start != -1:
                    end = current_chat.find('</div>', start)
                    if end != -1:
                        existing_content = current_chat[start:end]
                        # Extract just the message divs
                        import re
                        messages_match = re.findall(r'<div class="chat-message.*?</div>\s*</div>', existing_content, re.DOTALL)
                        existing_messages = ''.join(messages_match)

            # Create new chat HTML with existing messages plus new ones
            new_chat = f"""
            <div class='chat-window'>
                <div class='chat-messages' id='chat-messages'>
                    {existing_messages}
                    <div class='chat-message user-message'>
                        <div class='message-avatar'>👤</div>
                        <div class='message-content'>
                            <div class='message-text'>{message}</div>
                        </div>
                    </div>
                    <div class='chat-message assistant-message'>
                        <div class='message-avatar'>🤖</div>
                        <div class='message-content'>
                            <div class='message-text'>{assistant_response}</div>
                        </div>
                    </div>
                </div>
            </div>
            """

            return new_chat, ""


        # Connect provider change event
        llm_provider.change(
            fn=on_provider_change,
            inputs=[llm_provider],
|
1364 |
+
outputs=[openai_compatible_row]
|
1365 |
+
)
|
1366 |
+
|
1367 |
+
generate_btn.click(
|
1368 |
+
fn=generate_course_wrapper,
|
1369 |
+
inputs=[topic_input, difficulty_input, lesson_count, llm_provider, api_key_input, endpoint_url_input, model_name_input],
|
1370 |
+
outputs=[
|
1371 |
+
lessons_output, flashcards_output, quizzes_output, quiz_submit_btn, image_gallery, image_details
|
1372 |
+
]
|
1373 |
+
)
|
1374 |
+
|
1375 |
+
chat_btn.click(
|
1376 |
+
fn=handle_chat,
|
1377 |
+
inputs=[chat_input, chat_display],
|
1378 |
+
outputs=[chat_display, chat_input]
|
1379 |
+
)
|
1380 |
+
|
1381 |
+
# Use a much simpler approach with direct JavaScript execution
|
1382 |
+
quiz_submit_btn.click(
|
1383 |
+
fn=None, # No Python function needed
|
1384 |
+
js="""
|
1385 |
+
function() {
|
1386 |
+
// Find all quiz questions and process them
|
1387 |
+
const questions = document.querySelectorAll('.quiz-question');
|
1388 |
+
if (questions.length === 0) {
|
1389 |
+
alert('No quiz questions found!');
|
1390 |
+
return;
|
1391 |
+
}
|
1392 |
+
|
1393 |
+
let score = 0;
|
1394 |
+
let total = questions.length;
|
1395 |
+
let hasAnswers = false;
|
1396 |
+
|
1397 |
+
questions.forEach((question, idx) => {
|
1398 |
+
const radios = question.querySelectorAll('input[type="radio"]');
|
1399 |
+
const correctAnswer = question.dataset.correct;
|
1400 |
+
const explanation = question.dataset.explanation || '';
|
1401 |
+
|
1402 |
+
let selectedRadio = null;
|
1403 |
+
radios.forEach(radio => {
|
1404 |
+
if (radio.checked) {
|
1405 |
+
selectedRadio = radio;
|
1406 |
+
hasAnswers = true;
|
1407 |
+
}
|
1408 |
+
});
|
1409 |
+
|
1410 |
+
// Create or find feedback element
|
1411 |
+
let feedback = question.querySelector('.quiz-feedback');
|
1412 |
+
if (!feedback) {
|
1413 |
+
feedback = document.createElement('div');
|
1414 |
+
feedback.className = 'quiz-feedback';
|
1415 |
+
question.appendChild(feedback);
|
1416 |
+
}
|
1417 |
+
|
1418 |
+
if (selectedRadio) {
|
1419 |
+
const userAnswer = selectedRadio.value;
|
1420 |
+
if (userAnswer === correctAnswer) {
|
1421 |
+
score++;
|
1422 |
+
feedback.innerHTML = `<div style="background: #d4edda; color: #155724; padding: 1rem; border-radius: 6px; margin-top: 1rem;">✅ <strong>Correct!</strong> ${explanation}</div>`;
|
1423 |
+
} else {
|
1424 |
+
feedback.innerHTML = `<div style="background: #f8d7da; color: #721c24; padding: 1rem; border-radius: 6px; margin-top: 1rem;">❌ <strong>Incorrect.</strong> The correct answer is <strong>${correctAnswer}</strong>. ${explanation}</div>`;
|
1425 |
+
}
|
1426 |
+
} else {
|
1427 |
+
feedback.innerHTML = `<div style="background: #fff3cd; color: #856404; padding: 1rem; border-radius: 6px; margin-top: 1rem;">⚠️ <strong>No answer selected.</strong> The correct answer is <strong>${correctAnswer}</strong>. ${explanation}</div>`;
|
1428 |
+
}
|
1429 |
+
|
1430 |
+
feedback.style.display = 'block';
|
1431 |
+
});
|
1432 |
+
|
1433 |
+
if (hasAnswers) {
|
1434 |
+
const percentage = Math.round((score / total) * 100);
|
1435 |
+
|
1436 |
+
// Create or find results container
|
1437 |
+
let resultsContainer = document.querySelector('.quiz-results');
|
1438 |
+
if (!resultsContainer) {
|
1439 |
+
resultsContainer = document.createElement('div');
|
1440 |
+
resultsContainer.className = 'quiz-results';
|
1441 |
+
resultsContainer.style.cssText = 'margin-top: 2rem; padding: 1.5rem; background: linear-gradient(135deg, #667eea, #764ba2); color: white; border-radius: 8px; text-align: center; font-size: 1.1rem;';
|
1442 |
+
document.querySelector('.quiz-container').appendChild(resultsContainer);
|
1443 |
+
}
|
1444 |
+
|
1445 |
+
let message = '';
|
1446 |
+
if (percentage >= 80) {
|
1447 |
+
message = '🎉 Excellent work!';
|
1448 |
+
} else if (percentage >= 60) {
|
1449 |
+
message = '👍 Good job!';
|
1450 |
+
} else {
|
1451 |
+
message = '📚 Keep studying!';
|
1452 |
+
}
|
1453 |
+
|
1454 |
+
resultsContainer.innerHTML = `
|
1455 |
+
<div style="font-size: 1.5rem; font-weight: bold; margin-bottom: 0.5rem;">📊 Final Score: ${score}/${total} (${percentage}%)</div>
|
1456 |
+
<p>${message}</p>
|
1457 |
+
`;
|
1458 |
+
|
1459 |
+
resultsContainer.style.display = 'block';
|
1460 |
+
resultsContainer.scrollIntoView({ behavior: 'smooth', block: 'center' });
|
1461 |
+
} else {
|
1462 |
+
alert('Please answer at least one question before submitting!');
|
1463 |
+
}
|
1464 |
+
}
|
1465 |
+
"""
|
1466 |
+
)
|
1467 |
+
|
1468 |
+
return interface
|
1469 |
+
|
1470 |
+
|
1471 |
+
def launch_app(share: bool = False, debug: bool = False) -> None:
|
1472 |
+
"""Launch the Course Creator application"""
|
1473 |
+
interface = create_coursecrafter_interface()
|
1474 |
+
interface.launch(
|
1475 |
+
share=share,
|
1476 |
+
debug=debug,
|
1477 |
+
server_name="0.0.0.0",
|
1478 |
+
server_port=7862
|
1479 |
+
)
|
1480 |
+
|
1481 |
+
|
1482 |
+
if __name__ == "__main__":
|
1483 |
+
launch_app(debug=True)
|
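Note on the client-side quiz grading wired to quiz_submit_btn above: it runs purely against the rendered DOM, so the quiz HTML has to expose the answer key as data attributes. Below is a minimal, hypothetical sketch of that contract (the helper name render_question_sketch and the sample question are illustrative, not the actual format_quiz code used by this file):

# Hypothetical sketch of the HTML structure the quiz JavaScript expects:
# a .quiz-container wrapping .quiz-question blocks that carry data-correct /
# data-explanation attributes and radio inputs whose value is the answer key.
import html

def render_question_sketch(idx, question, options, correct, explanation):
    radios = "".join(
        f"<label><input type='radio' name='q{idx}' value='{key}'> {key}. {html.escape(text)}</label><br>"
        for key, text in options.items()
    )
    return (
        f"<div class='quiz-question' data-correct='{correct}' "
        f"data-explanation='{html.escape(explanation)}'>"
        f"<p><strong>Question {idx}:</strong> {html.escape(question)}</p>{radios}</div>"
    )

quiz_html = "<div class='quiz-container'>" + render_question_sketch(
    1, "What does CSS stand for?",
    {"A": "Cascading Style Sheets", "B": "Creative Style System"},
    "A", "CSS describes how HTML elements are displayed.",
) + "</div>"

Given markup of this shape, the grader above writes its ✅/❌ feedback into a .quiz-feedback div inside each question and appends the score summary to .quiz-container.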
coursecrafter/ui/progress_tracker.py
ADDED
@@ -0,0 +1,509 @@
|
|
|
|
1 |
+
"""
|
2 |
+
📊 Real-time Progress Tracker for CourseCrafter AI
|
3 |
+
|
4 |
+
Advanced progress tracking with visual feedback and status updates.
|
5 |
+
"""
|
6 |
+
|
7 |
+
import gradio as gr
|
8 |
+
import asyncio
|
9 |
+
import time
|
10 |
+
from typing import Dict, List, Any, Optional, Callable, Generator
|
11 |
+
from dataclasses import dataclass
|
12 |
+
from enum import Enum
|
13 |
+
import logging
|
14 |
+
import threading
|
15 |
+
from datetime import datetime
|
16 |
+
|
17 |
+
logger = logging.getLogger(__name__)
|
18 |
+
|
19 |
+
|
20 |
+
class ProgressStatus(Enum):
|
21 |
+
"""Progress status enumeration"""
|
22 |
+
PENDING = "pending"
|
23 |
+
ACTIVE = "active"
|
24 |
+
COMPLETE = "complete"
|
25 |
+
ERROR = "error"
|
26 |
+
SKIPPED = "skipped"
|
27 |
+
|
28 |
+
|
29 |
+
@dataclass
|
30 |
+
class ProgressStep:
|
31 |
+
"""Individual progress step"""
|
32 |
+
id: str
|
33 |
+
name: str
|
34 |
+
description: str
|
35 |
+
emoji: str
|
36 |
+
status: ProgressStatus = ProgressStatus.PENDING
|
37 |
+
progress: float = 0.0
|
38 |
+
start_time: Optional[datetime] = None
|
39 |
+
end_time: Optional[datetime] = None
|
40 |
+
error_message: Optional[str] = None
|
41 |
+
substeps: List['ProgressStep'] = None
|
42 |
+
|
43 |
+
def __post_init__(self):
|
44 |
+
if self.substeps is None:
|
45 |
+
self.substeps = []
|
46 |
+
|
47 |
+
|
48 |
+
class RealTimeProgressTracker:
|
49 |
+
"""Real-time progress tracking with visual updates"""
|
50 |
+
|
51 |
+
def __init__(self):
|
52 |
+
self.steps: List[ProgressStep] = []
|
53 |
+
self.current_step_index = 0
|
54 |
+
self.overall_progress = 0.0
|
55 |
+
self.is_running = False
|
56 |
+
self.start_time: Optional[datetime] = None
|
57 |
+
self.end_time: Optional[datetime] = None
|
58 |
+
self.log_entries: List[str] = []
|
59 |
+
self.update_callbacks: List[Callable] = []
|
60 |
+
|
61 |
+
# Initialize default steps
|
62 |
+
self._initialize_default_steps()
|
63 |
+
|
64 |
+
def _initialize_default_steps(self):
|
65 |
+
"""Initialize the default course generation steps"""
|
66 |
+
|
67 |
+
self.steps = [
|
68 |
+
ProgressStep(
|
69 |
+
id="research",
|
70 |
+
name="Research & Analysis",
|
71 |
+
description="Gathering information and analyzing the topic",
|
72 |
+
emoji="🔍",
|
73 |
+
substeps=[
|
74 |
+
ProgressStep("research_web", "Web Search", "Searching for relevant information", "🌐"),
|
75 |
+
ProgressStep("research_academic", "Academic Sources", "Finding scholarly content", "📚"),
|
76 |
+
ProgressStep("research_analysis", "Content Analysis", "Analyzing gathered information", "🧠"),
|
77 |
+
]
|
78 |
+
),
|
79 |
+
ProgressStep(
|
80 |
+
id="planning",
|
81 |
+
name="Course Planning",
|
82 |
+
description="Creating the course structure and outline",
|
83 |
+
emoji="📋",
|
84 |
+
substeps=[
|
85 |
+
ProgressStep("plan_structure", "Course Structure", "Designing lesson flow", "🏗️"),
|
86 |
+
ProgressStep("plan_objectives", "Learning Objectives", "Defining learning goals", "🎯"),
|
87 |
+
ProgressStep("plan_assessment", "Assessment Strategy", "Planning quizzes and activities", "📝"),
|
88 |
+
]
|
89 |
+
),
|
90 |
+
ProgressStep(
|
91 |
+
id="content",
|
92 |
+
name="Content Generation",
|
93 |
+
description="Creating engaging lesson content",
|
94 |
+
emoji="✍️",
|
95 |
+
substeps=[
|
96 |
+
ProgressStep("content_lessons", "Lesson Content", "Writing lesson materials", "📖"),
|
97 |
+
ProgressStep("content_examples", "Examples & Exercises", "Creating practical examples", "💡"),
|
98 |
+
ProgressStep("content_review", "Content Review", "Reviewing and refining content", "🔍"),
|
99 |
+
]
|
100 |
+
),
|
101 |
+
ProgressStep(
|
102 |
+
id="assessments",
|
103 |
+
name="Assessment Creation",
|
104 |
+
description="Generating quizzes and flashcards",
|
105 |
+
emoji="🎯",
|
106 |
+
substeps=[
|
107 |
+
ProgressStep("assess_flashcards", "Flashcards", "Creating study flashcards", "🃏"),
|
108 |
+
ProgressStep("assess_quizzes", "Quizzes", "Generating quiz questions", "❓"),
|
109 |
+
ProgressStep("assess_validation", "Validation", "Validating assessment quality", "✅"),
|
110 |
+
]
|
111 |
+
),
|
112 |
+
ProgressStep(
|
113 |
+
id="images",
|
114 |
+
name="Visual Content",
|
115 |
+
description="Generating images and diagrams",
|
116 |
+
emoji="🎨",
|
117 |
+
substeps=[
|
118 |
+
ProgressStep("images_cover", "Course Cover", "Creating course cover image", "🖼️"),
|
119 |
+
ProgressStep("images_lessons", "Lesson Images", "Generating lesson illustrations", "🎭"),
|
120 |
+
ProgressStep("images_diagrams", "Diagrams", "Creating concept diagrams", "📊"),
|
121 |
+
]
|
122 |
+
),
|
123 |
+
ProgressStep(
|
124 |
+
id="finalize",
|
125 |
+
name="Finalization",
|
126 |
+
description="Packaging and finalizing the course",
|
127 |
+
emoji="📦",
|
128 |
+
substeps=[
|
129 |
+
ProgressStep("final_package", "Course Package", "Assembling course materials", "📁"),
|
130 |
+
ProgressStep("final_metadata", "Metadata", "Adding course metadata", "🏷️"),
|
131 |
+
ProgressStep("final_validation", "Final Validation", "Final quality check", "🔍"),
|
132 |
+
]
|
133 |
+
)
|
134 |
+
]
|
135 |
+
|
136 |
+
def start_generation(self) -> None:
|
137 |
+
"""Start the course generation process"""
|
138 |
+
|
139 |
+
self.is_running = True
|
140 |
+
self.start_time = datetime.now()
|
141 |
+
self.current_step_index = 0
|
142 |
+
self.overall_progress = 0.0
|
143 |
+
self.log_entries = []
|
144 |
+
|
145 |
+
self.add_log_entry("🚀 Starting course generation...")
|
146 |
+
self.add_log_entry(f"⏰ Started at {self.start_time.strftime('%H:%M:%S')}")
|
147 |
+
|
148 |
+
def update_step_progress(self, step_id: str, progress: float, message: str = "") -> None:
|
149 |
+
"""Update progress for a specific step"""
|
150 |
+
|
151 |
+
step = self._find_step_by_id(step_id)
|
152 |
+
if not step:
|
153 |
+
logger.warning(f"Step {step_id} not found")
|
154 |
+
return
|
155 |
+
|
156 |
+
step.progress = min(100.0, max(0.0, progress))
|
157 |
+
|
158 |
+
if step.status == ProgressStatus.PENDING and progress > 0:
|
159 |
+
step.status = ProgressStatus.ACTIVE
|
160 |
+
step.start_time = datetime.now()
|
161 |
+
self.add_log_entry(f"{step.emoji} Started: {step.name}")
|
162 |
+
|
163 |
+
if progress >= 100.0:
|
164 |
+
step.status = ProgressStatus.COMPLETE
|
165 |
+
step.end_time = datetime.now()
|
166 |
+
duration = (step.end_time - step.start_time).total_seconds() if step.start_time else 0
|
167 |
+
self.add_log_entry(f"✅ Completed: {step.name} ({duration:.1f}s)")
|
168 |
+
|
169 |
+
if message:
|
170 |
+
self.add_log_entry(f" {message}")
|
171 |
+
|
172 |
+
self._update_overall_progress()
|
173 |
+
self._notify_callbacks()
|
174 |
+
|
175 |
+
def update_substep_progress(self, step_id: str, substep_id: str, progress: float, message: str = "") -> None:
|
176 |
+
"""Update progress for a substep"""
|
177 |
+
|
178 |
+
step = self._find_step_by_id(step_id)
|
179 |
+
if not step:
|
180 |
+
return
|
181 |
+
|
182 |
+
substep = self._find_substep_by_id(step, substep_id)
|
183 |
+
if not substep:
|
184 |
+
return
|
185 |
+
|
186 |
+
substep.progress = min(100.0, max(0.0, progress))
|
187 |
+
|
188 |
+
if substep.status == ProgressStatus.PENDING and progress > 0:
|
189 |
+
substep.status = ProgressStatus.ACTIVE
|
190 |
+
substep.start_time = datetime.now()
|
191 |
+
self.add_log_entry(f" {substep.emoji} {substep.name}...")
|
192 |
+
|
193 |
+
if progress >= 100.0:
|
194 |
+
substep.status = ProgressStatus.COMPLETE
|
195 |
+
substep.end_time = datetime.now()
|
196 |
+
|
197 |
+
if message:
|
198 |
+
self.add_log_entry(f" {message}")
|
199 |
+
|
200 |
+
# Update parent step progress based on substeps
|
201 |
+
self._update_step_from_substeps(step)
|
202 |
+
self._notify_callbacks()
|
203 |
+
|
204 |
+
def mark_step_error(self, step_id: str, error_message: str) -> None:
|
205 |
+
"""Mark a step as having an error"""
|
206 |
+
|
207 |
+
step = self._find_step_by_id(step_id)
|
208 |
+
if not step:
|
209 |
+
return
|
210 |
+
|
211 |
+
step.status = ProgressStatus.ERROR
|
212 |
+
step.error_message = error_message
|
213 |
+
step.end_time = datetime.now()
|
214 |
+
|
215 |
+
self.add_log_entry(f"❌ Error in {step.name}: {error_message}")
|
216 |
+
self._notify_callbacks()
|
217 |
+
|
218 |
+
def skip_step(self, step_id: str, reason: str = "") -> None:
|
219 |
+
"""Skip a step"""
|
220 |
+
|
221 |
+
step = self._find_step_by_id(step_id)
|
222 |
+
if not step:
|
223 |
+
return
|
224 |
+
|
225 |
+
step.status = ProgressStatus.SKIPPED
|
226 |
+
step.progress = 100.0
|
227 |
+
step.end_time = datetime.now()
|
228 |
+
|
229 |
+
skip_msg = f"⏭️ Skipped: {step.name}"
|
230 |
+
if reason:
|
231 |
+
skip_msg += f" ({reason})"
|
232 |
+
|
233 |
+
self.add_log_entry(skip_msg)
|
234 |
+
self._update_overall_progress()
|
235 |
+
self._notify_callbacks()
|
236 |
+
|
237 |
+
def complete_generation(self, success: bool = True) -> None:
|
238 |
+
"""Complete the course generation process"""
|
239 |
+
|
240 |
+
self.is_running = False
|
241 |
+
self.end_time = datetime.now()
|
242 |
+
|
243 |
+
if success:
|
244 |
+
self.overall_progress = 100.0
|
245 |
+
total_time = (self.end_time - self.start_time).total_seconds() if self.start_time else 0
|
246 |
+
self.add_log_entry(f"🎉 Course generation completed successfully!")
|
247 |
+
self.add_log_entry(f"⏱️ Total time: {total_time:.1f} seconds")
|
248 |
+
else:
|
249 |
+
self.add_log_entry("❌ Course generation failed")
|
250 |
+
|
251 |
+
self._notify_callbacks()
|
252 |
+
|
253 |
+
def add_log_entry(self, message: str) -> None:
|
254 |
+
"""Add an entry to the progress log"""
|
255 |
+
|
256 |
+
timestamp = datetime.now().strftime("%H:%M:%S")
|
257 |
+
log_entry = f"[{timestamp}] {message}"
|
258 |
+
self.log_entries.append(log_entry)
|
259 |
+
|
260 |
+
# Keep only last 100 entries
|
261 |
+
if len(self.log_entries) > 100:
|
262 |
+
self.log_entries = self.log_entries[-100:]
|
263 |
+
|
264 |
+
def get_progress_display(self) -> Dict[str, Any]:
|
265 |
+
"""Get current progress display data"""
|
266 |
+
|
267 |
+
return {
|
268 |
+
"overall_progress": self.overall_progress,
|
269 |
+
"current_step": self.get_current_step_display(),
|
270 |
+
"step_indicators": self.get_step_indicators(),
|
271 |
+
"progress_log": "\n".join(self.log_entries[-20:]), # Last 20 entries
|
272 |
+
"is_running": self.is_running,
|
273 |
+
"elapsed_time": self.get_elapsed_time()
|
274 |
+
}
|
275 |
+
|
276 |
+
def get_current_step_display(self) -> str:
|
277 |
+
"""Get current step display HTML"""
|
278 |
+
|
279 |
+
if not self.is_running:
|
280 |
+
if self.overall_progress >= 100:
|
281 |
+
return """
|
282 |
+
<div class='step-indicator complete animate-bounce'>
|
283 |
+
<div class='step-icon'>🎉</div>
|
284 |
+
<div class='step-text'>Course Generation Complete!</div>
|
285 |
+
<div class='step-message'>Your course is ready to explore</div>
|
286 |
+
</div>
|
287 |
+
"""
|
288 |
+
else:
|
289 |
+
return """
|
290 |
+
<div class='step-indicator pending'>
|
291 |
+
<div class='step-icon'>🚀</div>
|
292 |
+
<div class='step-text'>Ready to Generate Course</div>
|
293 |
+
<div class='step-message'>Click the generate button to begin</div>
|
294 |
+
</div>
|
295 |
+
"""
|
296 |
+
|
297 |
+
current_step = self._get_current_active_step()
|
298 |
+
if not current_step:
|
299 |
+
return "<div class='step-indicator'>Processing...</div>"
|
300 |
+
|
301 |
+
status_class = current_step.status.value
|
302 |
+
|
303 |
+
return f"""
|
304 |
+
<div class='step-indicator {status_class} animate-pulse'>
|
305 |
+
<div class='step-icon'>{current_step.emoji}</div>
|
306 |
+
<div class='step-text'>{current_step.name}</div>
|
307 |
+
<div class='step-message'>{current_step.description}</div>
|
308 |
+
<div class='step-progress'>
|
309 |
+
<div class='progress-bar'>
|
310 |
+
<div class='progress-fill' style='width: {current_step.progress}%'></div>
|
311 |
+
</div>
|
312 |
+
<div class='progress-text'>{current_step.progress:.0f}%</div>
|
313 |
+
</div>
|
314 |
+
</div>
|
315 |
+
"""
|
316 |
+
|
317 |
+
def get_step_indicators(self) -> str:
|
318 |
+
"""Get step indicators HTML"""
|
319 |
+
|
320 |
+
indicators_html = "<div class='status-grid'>"
|
321 |
+
|
322 |
+
for step in self.steps:
|
323 |
+
status_class = step.status.value
|
324 |
+
|
325 |
+
indicators_html += f"""
|
326 |
+
<div class='status-item {status_class}' title='{step.description}'>
|
327 |
+
<div class='status-icon'>{step.emoji}</div>
|
328 |
+
<div class='status-name'>{step.name}</div>
|
329 |
+
<div class='status-progress'>{step.progress:.0f}%</div>
|
330 |
+
</div>
|
331 |
+
"""
|
332 |
+
|
333 |
+
indicators_html += "</div>"
|
334 |
+
return indicators_html
|
335 |
+
|
336 |
+
def get_elapsed_time(self) -> str:
|
337 |
+
"""Get elapsed time string"""
|
338 |
+
|
339 |
+
if not self.start_time:
|
340 |
+
return "00:00"
|
341 |
+
|
342 |
+
end_time = self.end_time or datetime.now()
|
343 |
+
elapsed = (end_time - self.start_time).total_seconds()
|
344 |
+
|
345 |
+
minutes = int(elapsed // 60)
|
346 |
+
seconds = int(elapsed % 60)
|
347 |
+
|
348 |
+
return f"{minutes:02d}:{seconds:02d}"
|
349 |
+
|
350 |
+
def add_update_callback(self, callback: Callable) -> None:
|
351 |
+
"""Add a callback for progress updates"""
|
352 |
+
self.update_callbacks.append(callback)
|
353 |
+
|
354 |
+
def _find_step_by_id(self, step_id: str) -> Optional[ProgressStep]:
|
355 |
+
"""Find a step by ID"""
|
356 |
+
for step in self.steps:
|
357 |
+
if step.id == step_id:
|
358 |
+
return step
|
359 |
+
return None
|
360 |
+
|
361 |
+
def _find_substep_by_id(self, step: ProgressStep, substep_id: str) -> Optional[ProgressStep]:
|
362 |
+
"""Find a substep by ID"""
|
363 |
+
for substep in step.substeps:
|
364 |
+
if substep.id == substep_id:
|
365 |
+
return substep
|
366 |
+
return None
|
367 |
+
|
368 |
+
def _update_step_from_substeps(self, step: ProgressStep) -> None:
|
369 |
+
"""Update step progress based on substeps"""
|
370 |
+
|
371 |
+
if not step.substeps:
|
372 |
+
return
|
373 |
+
|
374 |
+
total_progress = sum(substep.progress for substep in step.substeps)
|
375 |
+
step.progress = total_progress / len(step.substeps)
|
376 |
+
|
377 |
+
# Update step status based on substeps
|
378 |
+
if all(substep.status == ProgressStatus.COMPLETE for substep in step.substeps):
|
379 |
+
step.status = ProgressStatus.COMPLETE
|
380 |
+
if not step.end_time:
|
381 |
+
step.end_time = datetime.now()
|
382 |
+
elif any(substep.status == ProgressStatus.ACTIVE for substep in step.substeps):
|
383 |
+
if step.status == ProgressStatus.PENDING:
|
384 |
+
step.status = ProgressStatus.ACTIVE
|
385 |
+
step.start_time = datetime.now()
|
386 |
+
elif any(substep.status == ProgressStatus.ERROR for substep in step.substeps):
|
387 |
+
step.status = ProgressStatus.ERROR
|
388 |
+
|
389 |
+
def _update_overall_progress(self) -> None:
|
390 |
+
"""Update overall progress based on all steps"""
|
391 |
+
|
392 |
+
total_progress = sum(step.progress for step in self.steps)
|
393 |
+
self.overall_progress = total_progress / len(self.steps) if self.steps else 0.0
|
394 |
+
|
395 |
+
def _get_current_active_step(self) -> Optional[ProgressStep]:
|
396 |
+
"""Get the currently active step"""
|
397 |
+
|
398 |
+
for step in self.steps:
|
399 |
+
if step.status == ProgressStatus.ACTIVE:
|
400 |
+
return step
|
401 |
+
|
402 |
+
# If no active step, return the first pending step
|
403 |
+
for step in self.steps:
|
404 |
+
if step.status == ProgressStatus.PENDING:
|
405 |
+
return step
|
406 |
+
|
407 |
+
return None
|
408 |
+
|
409 |
+
def _notify_callbacks(self) -> None:
|
410 |
+
"""Notify all registered callbacks"""
|
411 |
+
|
412 |
+
for callback in self.update_callbacks:
|
413 |
+
try:
|
414 |
+
callback()
|
415 |
+
except Exception as e:
|
416 |
+
logger.error(f"Error in progress callback: {e}")
|
417 |
+
|
418 |
+
|
419 |
+
class ProgressSimulator:
|
420 |
+
"""Simulates realistic progress for demonstration"""
|
421 |
+
|
422 |
+
def __init__(self, tracker: RealTimeProgressTracker):
|
423 |
+
self.tracker = tracker
|
424 |
+
self.is_running = False
|
425 |
+
|
426 |
+
async def simulate_course_generation(self) -> None:
|
427 |
+
"""Simulate a realistic course generation process"""
|
428 |
+
|
429 |
+
self.is_running = True
|
430 |
+
self.tracker.start_generation()
|
431 |
+
|
432 |
+
try:
|
433 |
+
# Research phase
|
434 |
+
await self._simulate_step("research", [
|
435 |
+
("research_web", "Searching web for relevant content...", 3.0),
|
436 |
+
("research_academic", "Finding academic sources...", 2.0),
|
437 |
+
("research_analysis", "Analyzing gathered information...", 2.5),
|
438 |
+
])
|
439 |
+
|
440 |
+
# Planning phase
|
441 |
+
await self._simulate_step("planning", [
|
442 |
+
("plan_structure", "Designing course structure...", 2.0),
|
443 |
+
("plan_objectives", "Defining learning objectives...", 1.5),
|
444 |
+
("plan_assessment", "Planning assessments...", 1.0),
|
445 |
+
])
|
446 |
+
|
447 |
+
# Content generation phase
|
448 |
+
await self._simulate_step("content", [
|
449 |
+
("content_lessons", "Generating lesson content...", 4.0),
|
450 |
+
("content_examples", "Creating examples and exercises...", 3.0),
|
451 |
+
("content_review", "Reviewing content quality...", 1.5),
|
452 |
+
])
|
453 |
+
|
454 |
+
# Assessment creation phase
|
455 |
+
await self._simulate_step("assessments", [
|
456 |
+
("assess_flashcards", "Creating flashcards...", 2.0),
|
457 |
+
("assess_quizzes", "Generating quiz questions...", 2.5),
|
458 |
+
("assess_validation", "Validating assessments...", 1.0),
|
459 |
+
])
|
460 |
+
|
461 |
+
# Image generation phase
|
462 |
+
await self._simulate_step("images", [
|
463 |
+
("images_cover", "Creating course cover image...", 3.0),
|
464 |
+
("images_lessons", "Generating lesson illustrations...", 4.0),
|
465 |
+
("images_diagrams", "Creating concept diagrams...", 2.0),
|
466 |
+
])
|
467 |
+
|
468 |
+
# Finalization phase
|
469 |
+
await self._simulate_step("finalize", [
|
470 |
+
("final_package", "Packaging course materials...", 1.5),
|
471 |
+
("final_metadata", "Adding metadata...", 0.5),
|
472 |
+
("final_validation", "Final quality check...", 1.0),
|
473 |
+
])
|
474 |
+
|
475 |
+
self.tracker.complete_generation(success=True)
|
476 |
+
|
477 |
+
except Exception as e:
|
478 |
+
self.tracker.add_log_entry(f"❌ Simulation error: {str(e)}")
|
479 |
+
self.tracker.complete_generation(success=False)
|
480 |
+
|
481 |
+
finally:
|
482 |
+
self.is_running = False
|
483 |
+
|
484 |
+
async def _simulate_step(self, step_id: str, substeps: List[tuple]) -> None:
|
485 |
+
"""Simulate a step with substeps"""
|
486 |
+
|
487 |
+
for substep_id, message, duration in substeps:
|
488 |
+
if not self.is_running:
|
489 |
+
break
|
490 |
+
|
491 |
+
self.tracker.add_log_entry(f"Starting {message}")
|
492 |
+
|
493 |
+
# Simulate gradual progress
|
494 |
+
steps = 20
|
495 |
+
for i in range(steps + 1):
|
496 |
+
if not self.is_running:
|
497 |
+
break
|
498 |
+
|
499 |
+
progress = (i / steps) * 100
|
500 |
+
self.tracker.update_substep_progress(step_id, substep_id, progress)
|
501 |
+
|
502 |
+
await asyncio.sleep(duration / steps)
|
503 |
+
|
504 |
+
# Add some realistic variation
|
505 |
+
await asyncio.sleep(0.2)
|
506 |
+
|
507 |
+
def stop_simulation(self) -> None:
|
508 |
+
"""Stop the simulation"""
|
509 |
+
self.is_running = False
|
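A minimal usage sketch for the tracker and simulator defined above, run standalone outside Gradio (the callback and print statements are illustrative assumptions; the real app renders get_progress_display() output into its HTML components):

# Hypothetical standalone driver: register a callback on the tracker, then let
# ProgressSimulator walk through the six default steps and print each update.
import asyncio
from coursecrafter.ui.progress_tracker import RealTimeProgressTracker, ProgressSimulator

def print_status(tracker: RealTimeProgressTracker) -> None:
    display = tracker.get_progress_display()
    print(f"[{display['elapsed_time']}] overall {display['overall_progress']:.0f}%")

async def main() -> None:
    tracker = RealTimeProgressTracker()
    tracker.add_update_callback(lambda: print_status(tracker))  # fired by _notify_callbacks()
    await ProgressSimulator(tracker).simulate_course_generation()

if __name__ == "__main__":
    asyncio.run(main())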
coursecrafter/ui/styling.py
ADDED
@@ -0,0 +1,451 @@
|
|
|
|
1 |
+
"""
|
2 |
+
🎨 Styling and CSS for CourseCrafter UI
|
3 |
+
|
4 |
+
Custom styling for Gradio components.
|
5 |
+
"""
|
6 |
+
|
7 |
+
def get_custom_css() -> str:
|
8 |
+
"""Get custom CSS for the CourseCrafter interface"""
|
9 |
+
|
10 |
+
return """
|
11 |
+
/* CourseCrafter AI Custom Styling - Dark Theme */
|
12 |
+
|
13 |
+
/* Global dark theme overrides */
|
14 |
+
.gradio-container {
|
15 |
+
background: #1a1a2e !important;
|
16 |
+
color: #e0e7ff !important;
|
17 |
+
}
|
18 |
+
|
19 |
+
.header-container {
|
20 |
+
text-align: center;
|
21 |
+
padding: 2rem;
|
22 |
+
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
23 |
+
color: white;
|
24 |
+
border-radius: 10px;
|
25 |
+
margin-bottom: 2rem;
|
26 |
+
box-shadow: 0 4px 8px rgba(0,0,0,0.3);
|
27 |
+
}
|
28 |
+
|
29 |
+
.header-container h1 {
|
30 |
+
font-size: 2.5rem;
|
31 |
+
margin-bottom: 0.5rem;
|
32 |
+
font-weight: bold;
|
33 |
+
text-shadow: 0 2px 4px rgba(0,0,0,0.3);
|
34 |
+
}
|
35 |
+
|
36 |
+
.header-container p {
|
37 |
+
font-size: 1.1rem;
|
38 |
+
opacity: 0.9;
|
39 |
+
}
|
40 |
+
|
41 |
+
.progress-container {
|
42 |
+
padding: 1rem;
|
43 |
+
background: #2d2d54;
|
44 |
+
border-radius: 8px;
|
45 |
+
border-left: 4px solid #667eea;
|
46 |
+
margin: 1rem 0;
|
47 |
+
color: #e0e7ff;
|
48 |
+
}
|
49 |
+
|
50 |
+
.success {
|
51 |
+
color: #28a745 !important;
|
52 |
+
font-weight: bold;
|
53 |
+
padding: 0.5rem;
|
54 |
+
background: #1e4d2b;
|
55 |
+
border-radius: 4px;
|
56 |
+
border: 1px solid #28a745;
|
57 |
+
}
|
58 |
+
|
59 |
+
.error {
|
60 |
+
color: #dc3545 !important;
|
61 |
+
font-weight: bold;
|
62 |
+
padding: 0.5rem;
|
63 |
+
background: #4d1e1e;
|
64 |
+
border-radius: 4px;
|
65 |
+
border: 1px solid #dc3545;
|
66 |
+
}
|
67 |
+
|
68 |
+
.warning {
|
69 |
+
color: #ffc107 !important;
|
70 |
+
font-weight: bold;
|
71 |
+
padding: 0.5rem;
|
72 |
+
background: #4d3d1e;
|
73 |
+
border-radius: 4px;
|
74 |
+
border: 1px solid #ffc107;
|
75 |
+
}
|
76 |
+
|
77 |
+
.progress {
|
78 |
+
color: #17a2b8 !important;
|
79 |
+
font-weight: bold;
|
80 |
+
padding: 0.5rem;
|
81 |
+
background: #1e3d4d;
|
82 |
+
border-radius: 4px;
|
83 |
+
border: 1px solid #17a2b8;
|
84 |
+
}
|
85 |
+
|
86 |
+
.flashcard {
|
87 |
+
background: #fff;
|
88 |
+
border: 1px solid #dee2e6;
|
89 |
+
border-radius: 8px;
|
90 |
+
padding: 1rem;
|
91 |
+
margin: 0.5rem 0;
|
92 |
+
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
|
93 |
+
}
|
94 |
+
|
95 |
+
.quiz {
|
96 |
+
background: #e3f2fd;
|
97 |
+
border: 1px solid #bbdefb;
|
98 |
+
border-radius: 8px;
|
99 |
+
padding: 1rem;
|
100 |
+
margin: 0.5rem 0;
|
101 |
+
}
|
102 |
+
|
103 |
+
/* Dark theme form inputs */
|
104 |
+
.gradio-textbox, .gradio-dropdown, .gradio-slider {
|
105 |
+
background: #2d2d54 !important;
|
106 |
+
border: 2px solid #4a4a7a !important;
|
107 |
+
border-radius: 8px !important;
|
108 |
+
color: #e0e7ff !important;
|
109 |
+
transition: border-color 0.3s ease !important;
|
110 |
+
}
|
111 |
+
|
112 |
+
.gradio-textbox:focus, .gradio-dropdown:focus {
|
113 |
+
border-color: #667eea !important;
|
114 |
+
box-shadow: 0 0 0 0.2rem rgba(102, 126, 234, 0.25) !important;
|
115 |
+
background: #3a3a6b !important;
|
116 |
+
}
|
117 |
+
|
118 |
+
.gradio-textbox::placeholder {
|
119 |
+
color: #8b9dc3 !important;
|
120 |
+
}
|
121 |
+
|
122 |
+
/* Button styling */
|
123 |
+
.gradio-button {
|
124 |
+
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
|
125 |
+
border: none !important;
|
126 |
+
border-radius: 8px !important;
|
127 |
+
color: white !important;
|
128 |
+
font-weight: bold !important;
|
129 |
+
transition: all 0.3s ease !important;
|
130 |
+
box-shadow: 0 4px 8px rgba(0,0,0,0.3) !important;
|
131 |
+
}
|
132 |
+
|
133 |
+
.gradio-button:hover {
|
134 |
+
transform: translateY(-2px) !important;
|
135 |
+
box-shadow: 0 6px 12px rgba(0,0,0,0.4) !important;
|
136 |
+
}
|
137 |
+
|
138 |
+
/* Ensure all buttons use the same blue theme */
|
139 |
+
button {
|
140 |
+
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
|
141 |
+
border: none !important;
|
142 |
+
border-radius: 8px !important;
|
143 |
+
color: white !important;
|
144 |
+
font-weight: bold !important;
|
145 |
+
transition: all 0.3s ease !important;
|
146 |
+
box-shadow: 0 4px 8px rgba(0,0,0,0.3) !important;
|
147 |
+
}
|
148 |
+
|
149 |
+
button:hover {
|
150 |
+
transform: translateY(-2px) !important;
|
151 |
+
box-shadow: 0 6px 12px rgba(0,0,0,0.4) !important;
|
152 |
+
}
|
153 |
+
|
154 |
+
/* Tab styling */
|
155 |
+
.gradio-tabs {
|
156 |
+
border-radius: 8px !important;
|
157 |
+
overflow: hidden !important;
|
158 |
+
background: #2d2d54 !important;
|
159 |
+
}
|
160 |
+
|
161 |
+
.gradio-tab {
|
162 |
+
background: #3a3a6b !important;
|
163 |
+
border: 1px solid #4a4a7a !important;
|
164 |
+
padding: 0.75rem 1.5rem !important;
|
165 |
+
font-weight: 500 !important;
|
166 |
+
color: #b8c5d6 !important;
|
167 |
+
transition: all 0.3s ease !important;
|
168 |
+
}
|
169 |
+
|
170 |
+
.gradio-tab.selected {
|
171 |
+
background: #667eea !important;
|
172 |
+
color: white !important;
|
173 |
+
border-color: #667eea !important;
|
174 |
+
}
|
175 |
+
|
176 |
+
.gradio-tab:hover:not(.selected) {
|
177 |
+
background: #4a4a7a !important;
|
178 |
+
color: #e0e7ff !important;
|
179 |
+
}
|
180 |
+
|
181 |
+
/* Labels and text */
|
182 |
+
label {
|
183 |
+
color: #e0e7ff !important;
|
184 |
+
font-weight: 500 !important;
|
185 |
+
}
|
186 |
+
|
187 |
+
/* Slider styling */
|
188 |
+
.gradio-slider input[type="range"] {
|
189 |
+
background: #4a4a7a !important;
|
190 |
+
}
|
191 |
+
|
192 |
+
.gradio-slider input[type="range"]::-webkit-slider-thumb {
|
193 |
+
background: #667eea !important;
|
194 |
+
}
|
195 |
+
|
196 |
+
.gradio-slider input[type="range"]::-moz-range-thumb {
|
197 |
+
background: #667eea !important;
|
198 |
+
}
|
199 |
+
|
200 |
+
/* Course content styling */
|
201 |
+
.lessons-container {
|
202 |
+
max-height: 600px;
|
203 |
+
overflow-y: auto;
|
204 |
+
padding: 1rem;
|
205 |
+
}
|
206 |
+
|
207 |
+
.lesson-card {
|
208 |
+
background: white;
|
209 |
+
border: 1px solid #dee2e6;
|
210 |
+
border-radius: 8px;
|
211 |
+
padding: 1.5rem;
|
212 |
+
margin: 1rem 0;
|
213 |
+
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
|
214 |
+
}
|
215 |
+
|
216 |
+
.lesson-card h3 {
|
217 |
+
color: #667eea;
|
218 |
+
margin-bottom: 1rem;
|
219 |
+
}
|
220 |
+
|
221 |
+
.flashcards-container {
|
222 |
+
max-height: none !important;
|
223 |
+
overflow: visible !important;
|
224 |
+
padding: 1rem;
|
225 |
+
}
|
226 |
+
|
227 |
+
.quiz-container {
|
228 |
+
max-height: none !important;
|
229 |
+
overflow: visible !important;
|
230 |
+
padding: 1rem;
|
231 |
+
}
|
232 |
+
|
233 |
+
/* Chat interface styling - Dark Theme */
|
234 |
+
.chat-window {
|
235 |
+
background: #2d2d54 !important;
|
236 |
+
border-radius: 12px !important;
|
237 |
+
padding: 1rem !important;
|
238 |
+
margin: 1rem 0 !important;
|
239 |
+
border: 1px solid #4a4a7a !important;
|
240 |
+
}
|
241 |
+
|
242 |
+
.chat-messages {
|
243 |
+
max-height: 400px !important;
|
244 |
+
overflow-y: auto !important;
|
245 |
+
padding: 0.5rem !important;
|
246 |
+
}
|
247 |
+
|
248 |
+
.chat-message {
|
249 |
+
margin: 1rem 0 !important;
|
250 |
+
display: flex !important;
|
251 |
+
align-items: flex-start !important;
|
252 |
+
gap: 0.75rem !important;
|
253 |
+
}
|
254 |
+
|
255 |
+
.chat-message.user-message {
|
256 |
+
flex-direction: row-reverse !important;
|
257 |
+
}
|
258 |
+
|
259 |
+
.chat-message.assistant-message {
|
260 |
+
flex-direction: row !important;
|
261 |
+
}
|
262 |
+
|
263 |
+
.message-avatar {
|
264 |
+
width: 40px !important;
|
265 |
+
height: 40px !important;
|
266 |
+
border-radius: 50% !important;
|
267 |
+
background: #667eea !important;
|
268 |
+
display: flex !important;
|
269 |
+
align-items: center !important;
|
270 |
+
justify-content: center !important;
|
271 |
+
font-size: 1.2rem !important;
|
272 |
+
flex-shrink: 0 !important;
|
273 |
+
}
|
274 |
+
|
275 |
+
.user-message .message-avatar {
|
276 |
+
background: #764ba2 !important;
|
277 |
+
}
|
278 |
+
|
279 |
+
.message-content {
|
280 |
+
flex: 1 !important;
|
281 |
+
max-width: 70% !important;
|
282 |
+
}
|
283 |
+
|
284 |
+
.message-text {
|
285 |
+
background: #3a3a6b !important;
|
286 |
+
color: #e0e7ff !important;
|
287 |
+
padding: 0.75rem 1rem !important;
|
288 |
+
border-radius: 12px !important;
|
289 |
+
line-height: 1.5 !important;
|
290 |
+
word-wrap: break-word !important;
|
291 |
+
}
|
292 |
+
|
293 |
+
.user-message .message-text {
|
294 |
+
background: #667eea !important;
|
295 |
+
color: white !important;
|
296 |
+
}
|
297 |
+
|
298 |
+
.assistant-message .message-text {
|
299 |
+
background: #4a4a7a !important;
|
300 |
+
color: #e0e7ff !important;
|
301 |
+
}
|
302 |
+
|
303 |
+
.info {
|
304 |
+
background: #2d3748 !important;
|
305 |
+
border: 1px solid #4a5568 !important;
|
306 |
+
border-radius: 8px !important;
|
307 |
+
padding: 1rem !important;
|
308 |
+
color: #a0aec0 !important;
|
309 |
+
text-align: center !important;
|
310 |
+
font-style: italic !important;
|
311 |
+
}
|
312 |
+
|
313 |
+
/* Note: Flashcard styling is now handled inline in the format_flashcards function */
|
314 |
+
|
315 |
+
/* Note: Quiz styling is now handled inline in the format_quiz function */
|
316 |
+
|
317 |
+
/* Note: Lesson styling is now handled inline in the format_lessons function */
|
318 |
+
|
319 |
+
/* Enhanced Course Header Styling */
|
320 |
+
.course-header {
|
321 |
+
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
|
322 |
+
color: white !important;
|
323 |
+
padding: 2rem !important;
|
324 |
+
border-radius: 12px !important;
|
325 |
+
margin: 1rem 0 !important;
|
326 |
+
box-shadow: 0 4px 8px rgba(0,0,0,0.3) !important;
|
327 |
+
text-align: center !important;
|
328 |
+
}
|
329 |
+
|
330 |
+
.course-header h1 {
|
331 |
+
font-size: 2.2rem !important;
|
332 |
+
margin-bottom: 1rem !important;
|
333 |
+
font-weight: bold !important;
|
334 |
+
text-shadow: 0 2px 4px rgba(0,0,0,0.3) !important;
|
335 |
+
color: white !important;
|
336 |
+
}
|
337 |
+
|
338 |
+
.course-stats {
|
339 |
+
display: flex !important;
|
340 |
+
justify-content: center !important;
|
341 |
+
gap: 1.5rem !important;
|
342 |
+
flex-wrap: wrap !important;
|
343 |
+
margin-top: 1rem !important;
|
344 |
+
}
|
345 |
+
|
346 |
+
.course-stats .stat {
|
347 |
+
background: rgba(255,255,255,0.2) !important;
|
348 |
+
padding: 0.5rem 1rem !important;
|
349 |
+
border-radius: 20px !important;
|
350 |
+
font-size: 0.9rem !important;
|
351 |
+
font-weight: 500 !important;
|
352 |
+
color: white !important;
|
353 |
+
backdrop-filter: blur(10px) !important;
|
354 |
+
}
|
355 |
+
|
356 |
+
/* Progress Tracker Styling */
|
357 |
+
.step-indicator {
|
358 |
+
background: #2d2d54 !important;
|
359 |
+
border: 2px solid #4a4a7a !important;
|
360 |
+
border-radius: 12px !important;
|
361 |
+
padding: 1.5rem !important;
|
362 |
+
margin: 1rem 0 !important;
|
363 |
+
color: #e0e7ff !important;
|
364 |
+
text-align: center !important;
|
365 |
+
}
|
366 |
+
|
367 |
+
.step-indicator.active {
|
368 |
+
border-color: #667eea !important;
|
369 |
+
background: #3a3a6b !important;
|
370 |
+
}
|
371 |
+
|
372 |
+
.step-indicator.complete {
|
373 |
+
border-color: #28a745 !important;
|
374 |
+
background: #1e4d2b !important;
|
375 |
+
color: #28a745 !important;
|
376 |
+
}
|
377 |
+
|
378 |
+
.step-icon {
|
379 |
+
font-size: 2rem !important;
|
380 |
+
margin-bottom: 0.5rem !important;
|
381 |
+
}
|
382 |
+
|
383 |
+
.step-text {
|
384 |
+
font-size: 1.2rem !important;
|
385 |
+
font-weight: bold !important;
|
386 |
+
margin-bottom: 0.5rem !important;
|
387 |
+
}
|
388 |
+
|
389 |
+
.step-message {
|
390 |
+
font-size: 0.9rem !important;
|
391 |
+
opacity: 0.8 !important;
|
392 |
+
}
|
393 |
+
|
394 |
+
/* Note: All component-specific styling is now handled inline for better dark theme support */
|
395 |
+
|
396 |
+
/* Responsive design */
|
397 |
+
@media (max-width: 768px) {
|
398 |
+
.header-container h1 {
|
399 |
+
font-size: 2rem;
|
400 |
+
}
|
401 |
+
|
402 |
+
.header-container p {
|
403 |
+
font-size: 1rem;
|
404 |
+
}
|
405 |
+
|
406 |
+
.lesson-card {
|
407 |
+
padding: 1rem;
|
408 |
+
}
|
409 |
+
}
|
410 |
+
"""
|
411 |
+
|
412 |
+
|
413 |
+
def get_theme_colors() -> dict:
|
414 |
+
"""Get theme color palette"""
|
415 |
+
|
416 |
+
return {
|
417 |
+
"primary": "#d068a5",
|
418 |
+
"secondary": "#764ba2",
|
419 |
+
"success": "#37cb5a",
|
420 |
+
"error": "#dc3545",
|
421 |
+
"warning": "#eec54c",
|
422 |
+
"info": "#09a6be",
|
423 |
+
"light": "#f5f7e6",
|
424 |
+
"dark": "#343a40"
|
425 |
+
}
|
426 |
+
|
427 |
+
|
428 |
+
def get_component_styles() -> dict:
|
429 |
+
"""Get component-specific styles"""
|
430 |
+
|
431 |
+
return {
|
432 |
+
"button_primary": {
|
433 |
+
"background": "linear-gradient(135deg, #667eea 0%, #764ba2 100%)",
|
434 |
+
"color": "white",
|
435 |
+
"border": "none",
|
436 |
+
"border_radius": "8px",
|
437 |
+
"font_weight": "bold"
|
438 |
+
},
|
439 |
+
"card": {
|
440 |
+
"background": "white",
|
441 |
+
"border": "1px solid #dee2e6",
|
442 |
+
"border_radius": "8px",
|
443 |
+
"padding": "1rem",
|
444 |
+
"box_shadow": "0 2px 4px rgba(0,0,0,0.1)"
|
445 |
+
},
|
446 |
+
"progress_bar": {
|
447 |
+
"background": "#d1f0e7",
|
448 |
+
"border_radius": "4px",
|
449 |
+
"height": "8px"
|
450 |
+
}
|
451 |
+
}
|
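A minimal sketch of how this stylesheet can be attached to a Gradio app (the Blocks layout below is an illustrative assumption; the actual interface construction lives in gradio_app.py):

# Hypothetical demo: gr.Blocks accepts a css string, so the custom dark-theme
# styles above can be injected directly; get_theme_colors() supplies the palette
# for any inline-styled HTML.
import gradio as gr
from coursecrafter.ui.styling import get_custom_css, get_theme_colors

colors = get_theme_colors()

with gr.Blocks(css=get_custom_css()) as demo:
    gr.HTML(
        "<div class='header-container'><h1>🎓 Course Creator AI</h1>"
        f"<p style='color:{colors['light']}'>Styling demo</p></div>"
    )

if __name__ == "__main__":
    demo.launch()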
coursecrafter/utils/__init__.py
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
1 |
+
"""
|
2 |
+
🛠️ Utilities Package for Course Creator AI
|
3 |
+
"""
|
4 |
+
|
5 |
+
from .config import Config
|
6 |
+
|
7 |
+
from .helpers import (
|
8 |
+
generate_id, clean_text, truncate_text, extract_keywords,
|
9 |
+
format_duration, estimate_reading_time, safe_json_loads,
|
10 |
+
safe_json_dumps, merge_dicts, flatten_list, chunk_list,
|
11 |
+
deduplicate_list, validate_email, sanitize_filename,
|
12 |
+
calculate_similarity, format_file_size, Timer, RateLimiter
|
13 |
+
|
14 |
+
)
|
15 |
+
|
16 |
+
__all__ = [
|
17 |
+
"Config",
|
18 |
+
# Helper functions
|
19 |
+
"generate_id",
|
20 |
+
"clean_text",
|
21 |
+
"truncate_text",
|
22 |
+
"extract_keywords",
|
23 |
+
"format_duration",
|
24 |
+
"estimate_reading_time",
|
25 |
+
"safe_json_loads",
|
26 |
+
"safe_json_dumps",
|
27 |
+
"merge_dicts",
|
28 |
+
"flatten_list",
|
29 |
+
"chunk_list",
|
30 |
+
"deduplicate_list",
|
31 |
+
"validate_email",
|
32 |
+
"sanitize_filename",
|
33 |
+
"calculate_similarity",
|
34 |
+
"format_file_size",
|
35 |
+
"Timer",
|
36 |
+
"RateLimiter"
|
37 |
+
]
|
coursecrafter/utils/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (1.13 kB)
coursecrafter/utils/__pycache__/config.cpython-311.pyc
ADDED
Binary file (13.2 kB)
coursecrafter/utils/__pycache__/export.cpython-311.pyc
ADDED
Binary file (26.6 kB)
coursecrafter/utils/__pycache__/helpers.cpython-311.pyc
ADDED
Binary file (23.9 kB)
coursecrafter/utils/config.py
ADDED
@@ -0,0 +1,261 @@
|
|
|
|
1 |
+
"""
|
2 |
+
⚙️ Configuration Management for CourseCrafter AI
|
3 |
+
|
4 |
+
Centralized configuration system with environment variable support and validation.
|
5 |
+
"""
|
6 |
+
|
7 |
+
import os
|
8 |
+
import json
|
9 |
+
from typing import Dict, Any, Optional, List
|
10 |
+
from dataclasses import dataclass, field
|
11 |
+
from pathlib import Path
|
12 |
+
from dotenv import load_dotenv
|
13 |
+
|
14 |
+
from ..types import LLMProvider
|
15 |
+
|
16 |
+
|
17 |
+
@dataclass
|
18 |
+
class LLMProviderConfig:
|
19 |
+
"""Configuration for a specific LLM provider"""
|
20 |
+
api_key: str
|
21 |
+
model: str
|
22 |
+
temperature: float = 0.7
|
23 |
+
max_tokens: Optional[int] = None
|
24 |
+
timeout: int = 60
|
25 |
+
base_url: Optional[str] = None
|
26 |
+
|
27 |
+
class Config:
|
28 |
+
"""
|
29 |
+
Centralized configuration management for Course Creator AI
|
30 |
+
|
31 |
+
Handles environment variables, API keys, and URL configurations.
|
32 |
+
"""
|
33 |
+
|
34 |
+
def __init__(self):
|
35 |
+
# Load environment variables
|
36 |
+
load_dotenv()
|
37 |
+
|
38 |
+
# Initialize configuration
|
39 |
+
self._config = self._load_default_config()
|
40 |
+
self._validate_config()
|
41 |
+
|
42 |
+
def _load_default_config(self) -> Dict[str, Any]:
|
43 |
+
"""Load default configuration with environment variable overrides"""
|
44 |
+
# Get default model from env or fallback
|
45 |
+
default_model = os.getenv("DEFAULT_MODEL", "gpt-4.1-nano")
|
46 |
+
|
47 |
+
# Get default LLM provider from env or fallback to first available
|
48 |
+
default_llm_provider = os.getenv("DEFAULT_LLM_PROVIDER", "openai")
|
49 |
+
|
50 |
+
return {
|
51 |
+
# LLM Provider Configurations
|
52 |
+
"llm_providers": {
|
53 |
+
"openai": {
|
54 |
+
"api_key": os.getenv("OPENAI_API_KEY", ""),
|
55 |
+
"model": os.getenv("OPENAI_MODEL", default_model),
|
56 |
+
"temperature": float(os.getenv("OPENAI_TEMPERATURE", "0.7")),
|
57 |
+
"max_tokens": int(os.getenv("OPENAI_MAX_TOKENS", "20000")) if os.getenv("OPENAI_MAX_TOKENS") else None,
|
58 |
+
"timeout": int(os.getenv("OPENAI_TIMEOUT", "60"))
|
59 |
+
},
|
60 |
+
"anthropic": {
|
61 |
+
"api_key": os.getenv("ANTHROPIC_API_KEY", ""),
|
62 |
+
"model": os.getenv("ANTHROPIC_MODEL", "claude-3-5-sonnet-20241022"),
|
63 |
+
"temperature": float(os.getenv("ANTHROPIC_TEMPERATURE", "0.7")),
|
64 |
+
"max_tokens": int(os.getenv("ANTHROPIC_MAX_TOKENS", "20000")) if os.getenv("ANTHROPIC_MAX_TOKENS") else None,
|
65 |
+
"timeout": int(os.getenv("ANTHROPIC_TIMEOUT", "60"))
|
66 |
+
},
|
67 |
+
"google": {
|
68 |
+
"api_key": os.getenv("GOOGLE_API_KEY", ""),
|
69 |
+
"model": os.getenv("GOOGLE_MODEL", "gemini-2.0-flash"),
|
70 |
+
"temperature": float(os.getenv("GOOGLE_TEMPERATURE", "0.7")),
|
71 |
+
"max_tokens": int(os.getenv("GOOGLE_MAX_TOKENS", "20000")) if os.getenv("GOOGLE_MAX_TOKENS") else None,
|
72 |
+
"timeout": int(os.getenv("GOOGLE_TIMEOUT", "60"))
|
73 |
+
},
|
74 |
+
"openai_compatible": {
|
75 |
+
"api_key": os.getenv("OPENAI_COMPATIBLE_API_KEY", "dummy"),
|
76 |
+
"base_url": os.getenv("OPENAI_COMPATIBLE_BASE_URL", ""),
|
77 |
+
"model": os.getenv("OPENAI_COMPATIBLE_MODEL", ""),
|
78 |
+
"temperature": float(os.getenv("OPENAI_COMPATIBLE_TEMPERATURE", "0.7")),
|
79 |
+
"max_tokens": int(os.getenv("OPENAI_COMPATIBLE_MAX_TOKENS", "20000")) if os.getenv("OPENAI_COMPATIBLE_MAX_TOKENS") else None,
|
80 |
+
"timeout": int(os.getenv("OPENAI_COMPATIBLE_TIMEOUT", "60"))
|
81 |
+
}
|
82 |
+
},
|
83 |
+
|
84 |
+
# Course Generation Settings
|
85 |
+
"course_generation": {
|
86 |
+
"default_difficulty": "beginner",
|
87 |
+
"default_lesson_count": 5,
|
88 |
+
"max_lesson_duration": 30,
|
89 |
+
"include_images": True,
|
90 |
+
"include_flashcards": True,
|
91 |
+
"include_quizzes": True,
|
92 |
+
"research_depth": "comprehensive"
|
93 |
+
},
|
94 |
+
|
95 |
+
# Image Generation Settings
|
96 |
+
"image_generation": {
|
97 |
+
"pollinations_api_token": os.getenv("POLLINATIONS_API_TOKEN", ""),
|
98 |
+
"pollinations_api_reference": os.getenv("POLLINATIONS_API_REFERENCE", ""),
|
99 |
+
"default_width": 1280,
|
100 |
+
"default_height": 720,
|
101 |
+
"default_model": "gptimage",
|
102 |
+
"enhance_prompts": True,
|
103 |
+
"no_logo": True
|
104 |
+
},
|
105 |
+
|
106 |
+
# Export Settings
|
107 |
+
"export": {
|
108 |
+
"default_formats": ["pdf", "markdown"],
|
109 |
+
"output_directory": os.getenv("COURSECRAFTER_OUTPUT_DIR", "./output"),
|
110 |
+
"max_file_size": 50 * 1024 * 1024, # 50MB
|
111 |
+
"compression": True
|
112 |
+
},
|
113 |
+
|
114 |
+
# UI Settings
|
115 |
+
"ui": {
|
116 |
+
"theme": "soft",
|
117 |
+
"show_progress": True,
|
118 |
+
"auto_scroll": True,
|
119 |
+
"max_concurrent_generations": 3
|
120 |
+
},
|
121 |
+
|
122 |
+
# System Settings
|
123 |
+
"system": {
|
124 |
+
"default_llm_provider": default_llm_provider,
|
125 |
+
"max_turns": 25,
|
126 |
+
"timeout": 300, # 5 minutes
|
127 |
+
"retry_attempts": 3,
|
128 |
+
"log_level": os.getenv("LOG_LEVEL", "INFO"),
|
129 |
+
"debug_mode": os.getenv("DEBUG", "false").lower() == "true"
|
130 |
+
}
|
131 |
+
}
|
132 |
+
|
133 |
+
def _validate_config(self):
|
134 |
+
"""Validate configuration and warn about missing required settings"""
|
135 |
+
warnings = []
|
136 |
+
|
137 |
+
# Check LLM provider API keys
|
138 |
+
        for provider, config in self._config["llm_providers"].items():
            if provider == "openai_compatible":
                # For openai_compatible, check for base_url instead of api_key
                if not config.get("base_url"):
                    warnings.append(f"Missing base_url for {provider}")
            else:
                # For other providers, check for api_key
                if not config["api_key"]:
                    warnings.append(f"Missing API key for {provider}")

        # Check if at least one LLM provider is configured
        has_provider = False
        for provider, config in self._config["llm_providers"].items():
            if provider == "openai_compatible":
                if config.get("base_url"):
                    has_provider = True
                    break
            else:
                if config["api_key"]:
                    has_provider = True
                    break

        if not has_provider:
            # Only warn instead of raising error - allows app to start for UI configuration
            print("⚠️ Warning: No LLM providers configured. Please configure at least one provider in the UI.")

    def get_llm_config(self, provider: LLMProvider) -> LLMProviderConfig:
        """Get configuration for a specific LLM provider"""
        if provider not in self._config["llm_providers"]:
            raise ValueError(f"Unknown LLM provider: {provider}")

        config = self._config["llm_providers"][provider]
        return LLMProviderConfig(**config)

    def get_available_llm_providers(self) -> List[LLMProvider]:
        """Get list of available LLM providers with API keys"""
        available = []
        for provider, config in self._config["llm_providers"].items():
            if provider == "openai_compatible":
                # For openai_compatible, require base_url instead of api_key
                if config.get("base_url"):
                    available.append(provider)
            else:
                # For other providers, require api_key
                if config["api_key"]:
                    available.append(provider)
        return available

    def get_default_llm_provider(self) -> LLMProvider:
        """Get the default LLM provider, falling back to first available if not configured"""
        default_provider = self._config["system"]["default_llm_provider"]
        available_providers = self.get_available_llm_providers()

        # If the default provider is available, use it
        if default_provider in available_providers:
            return default_provider

        # Otherwise, use the first available provider
        if available_providers:
            print(f"⚠️ Default provider '{default_provider}' not configured, using '{available_providers[0]}'")
            return available_providers[0]

        # If no providers are available, raise an error
        raise ValueError("No LLM providers are configured. Please set up at least one provider.")

    def get_image_generation_config(self) -> Dict[str, Any]:
        """Get image generation configuration"""
        return self._config["image_generation"]

    def get(self, key: str, default: Any = None) -> Any:
        """Get a configuration value using dot notation"""
        keys = key.split(".")
        value = self._config

        try:
            for k in keys:
                value = value[k]
            return value
        except (KeyError, TypeError):
            return default

    def set(self, key: str, value: Any):
        """Set a configuration value using dot notation"""
        keys = key.split(".")
        config = self._config

        for k in keys[:-1]:
            if k not in config:
                config[k] = {}
            config = config[k]

        config[keys[-1]] = value

    def update_llm_provider(self, provider: LLMProvider, **kwargs):
        """Update LLM provider configuration"""
        if provider not in self._config["llm_providers"]:
            raise ValueError(f"Unknown LLM provider: {provider}")

        self._config["llm_providers"][provider].update(kwargs)

    def to_dict(self) -> Dict[str, Any]:
        """Convert configuration to dictionary"""
        return self._config.copy()

    def save_to_file(self, filepath: str):
        """Save configuration to JSON file"""
        with open(filepath, 'w') as f:
            json.dump(self._config, f, indent=2)

    @classmethod
    def load_from_file(cls, filepath: str) -> 'Config':
        """Load configuration from JSON file"""
        instance = cls()

        if os.path.exists(filepath):
            with open(filepath, 'r') as f:
                file_config = json.load(f)
                instance._config.update(file_config)

        instance._validate_config()
        return instance


# Global configuration instance
config = Config()
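For orientation, a minimal usage sketch of the Config API added above. It is illustrative only: the import path simply mirrors the file location, and the base_url value, fallback provider name, and filenames are made-up examples, not values shipped with this upload.

    from coursecrafter.utils.config import Config, config

    # Dot-notation reads with a fallback default (fallback value is hypothetical)
    default_provider = config.get("system.default_llm_provider", "openai")

    # Point the openai_compatible provider at a hypothetical local endpoint
    config.update_llm_provider("openai_compatible", base_url="http://localhost:11434/v1")
    print(config.get_available_llm_providers())

    # Persist and reload the configuration
    config.save_to_file("coursecrafter_config.json")
    restored = Config.load_from_file("coursecrafter_config.json")
    print(restored.get_available_llm_providers())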
coursecrafter/utils/helpers.py
ADDED
@@ -0,0 +1,475 @@
"""
🛠️ Helper Utilities for CourseCrafter AI

Common utility functions and helpers used throughout the application.
"""

import re
import json
import hashlib
import asyncio
from typing import Any, Dict, List, Optional, Union, Callable
from datetime import datetime, timedelta
import logging


def generate_id(prefix: str = "", length: int = 8) -> str:
    """Generate a unique ID with optional prefix"""
    timestamp = str(int(datetime.now().timestamp() * 1000))
    hash_obj = hashlib.md5(timestamp.encode())
    unique_id = hash_obj.hexdigest()[:length]

    if prefix:
        return f"{prefix}-{unique_id}"
    return unique_id


def clean_text(text: str) -> str:
    """Clean and normalize text content"""
    if not text:
        return ""

    # Remove extra whitespace
    text = re.sub(r'\s+', ' ', text.strip())

    # Remove control characters
    text = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)

    # Normalize quotes
    text = text.replace('“', '"').replace('”', '"')
    text = text.replace('’', "'").replace('‘', "'")

    return text


def truncate_text(text: str, max_length: int = 100, suffix: str = "...") -> str:
    """Truncate text to specified length with suffix"""
    if not text or len(text) <= max_length:
        return text

    # Try to break at word boundary
    truncated = text[:max_length - len(suffix)]
    last_space = truncated.rfind(' ')

    if last_space > max_length * 0.7:  # If we can break at a reasonable word boundary
        truncated = truncated[:last_space]

    return truncated + suffix


def extract_keywords(text: str, max_keywords: int = 10) -> List[str]:
    """Extract keywords from text using simple frequency analysis"""
    if not text:
        return []

    # Clean text and convert to lowercase
    clean = re.sub(r'[^\w\s]', ' ', text.lower())
    words = clean.split()

    # Filter out common stop words
    stop_words = {
        'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
        'of', 'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been', 'have',
        'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', 'should',
        'this', 'that', 'these', 'those', 'i', 'you', 'he', 'she', 'it', 'we',
        'they', 'me', 'him', 'her', 'us', 'them', 'my', 'your', 'his', 'its',
        'our', 'their', 'can', 'may', 'might', 'must', 'shall', 'from', 'up',
        'out', 'down', 'off', 'over', 'under', 'again', 'further', 'then',
        'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
        'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no',
        'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very'
    }

    # Filter words and count frequency
    filtered_words = [word for word in words if len(word) > 2 and word not in stop_words]
    word_freq = {}

    for word in filtered_words:
        word_freq[word] = word_freq.get(word, 0) + 1

    # Sort by frequency and return top keywords
    sorted_words = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)
    return [word for word, freq in sorted_words[:max_keywords]]


def format_duration(seconds: int) -> str:
    """Format duration in seconds to human-readable string"""
    if seconds < 60:
        return f"{seconds} seconds"
    elif seconds < 3600:
        minutes = seconds // 60
        remaining_seconds = seconds % 60
        if remaining_seconds == 0:
            return f"{minutes} minutes"
        return f"{minutes} minutes {remaining_seconds} seconds"
    else:
        hours = seconds // 3600
        remaining_minutes = (seconds % 3600) // 60
        if remaining_minutes == 0:
            return f"{hours} hours"
        return f"{hours} hours {remaining_minutes} minutes"


def estimate_reading_time(text: str, words_per_minute: int = 200) -> int:
    """Estimate reading time in minutes for given text"""
    if not text:
        return 0

    word_count = len(text.split())
    minutes = max(1, round(word_count / words_per_minute))
    return minutes


def safe_json_loads(json_str: str, default: Any = None) -> Any:
    """Safely parse JSON string with fallback"""
    try:
        return json.loads(json_str)
    except (json.JSONDecodeError, TypeError):
        return default


def extract_json_from_response(response_text: str) -> str:
    """
    Extract JSON from LLM response that might be wrapped in markdown code blocks.

    Handles cases like:
    - Plain JSON: {"key": "value"}
    - Markdown wrapped: ```json\n{"key": "value"}\n```
    - Mixed content: Some text\n```json\n{"key": "value"}\n```\nMore text
    """
    if not response_text:
        return ""

    # First, try to find JSON in markdown code blocks
    import re

    # Pattern to match ```json ... ``` or ``` ... ``` blocks
    json_block_patterns = [
        r'```json\s*\n(.*?)\n```',  # ```json ... ```
        r'```\s*\n(.*?)\n```',      # ``` ... ```
        r'`(.*?)`',                 # Single backticks (less common)
    ]

    for pattern in json_block_patterns:
        matches = re.findall(pattern, response_text, re.DOTALL | re.IGNORECASE)
        for match in matches:
            # Try to parse each match as JSON
            try:
                json.loads(match.strip())
                return match.strip()  # Return the first valid JSON found
            except json.JSONDecodeError:
                continue

    # If no markdown blocks found, try to extract JSON from the response
    # Look for content between first { and last }
    first_brace = response_text.find('{')
    last_brace = response_text.rfind('}')

    if first_brace != -1 and last_brace != -1 and last_brace > first_brace:
        potential_json = response_text[first_brace:last_brace + 1]
        try:
            json.loads(potential_json)
            return potential_json
        except json.JSONDecodeError:
            pass

    # Look for content between first [ and last ] (for arrays)
    first_bracket = response_text.find('[')
    last_bracket = response_text.rfind(']')

    if first_bracket != -1 and last_bracket != -1 and last_bracket > first_bracket:
        potential_json = response_text[first_bracket:last_bracket + 1]
        try:
            json.loads(potential_json)
            return potential_json
        except json.JSONDecodeError:
            pass

    # If all else fails, return the original response
    return response_text.strip()


def smart_json_loads(response_text: str, default: Any = None) -> Any:
    """
    Smart JSON parser that handles markdown-wrapped JSON and other common LLM response formats.

    This function:
    1. Extracts JSON from markdown code blocks
    2. Handles mixed content responses
    3. Provides fallback for malformed JSON
    4. Logs parsing attempts for debugging
    """
    if not response_text:
        return default

    # Extract potential JSON from the response
    json_text = extract_json_from_response(response_text)

    # Try to parse the extracted JSON
    try:
        result = json.loads(json_text)
        return result
    except json.JSONDecodeError as e:
        # Log the parsing failure for debugging
        print(f"🔍 JSON parsing failed: {e}")
        print(f"📝 Original response length: {len(response_text)} chars")
        print(f"📝 Extracted JSON length: {len(json_text)} chars")
        print(f"📝 First 200 chars of original: {response_text[:200]}...")
        print(f"📝 First 200 chars of extracted: {json_text[:200]}...")
        return default
    except Exception as e:
        print(f"❌ Unexpected error in JSON parsing: {e}")
        return default


def safe_json_dumps(obj: Any, default: Any = None) -> str:
    """Safely serialize object to JSON string"""
    try:
        return json.dumps(obj, default=str, ensure_ascii=False, indent=2)
    except (TypeError, ValueError):
        return json.dumps(default or {})


def merge_dicts(*dicts: Dict[str, Any]) -> Dict[str, Any]:
    """Merge multiple dictionaries, with later ones taking precedence"""
    result = {}
    for d in dicts:
        if isinstance(d, dict):
            result.update(d)
    return result


def flatten_list(nested_list: List[Any]) -> List[Any]:
    """Flatten a nested list structure"""
    result = []
    for item in nested_list:
        if isinstance(item, list):
            result.extend(flatten_list(item))
        else:
            result.append(item)
    return result


def chunk_list(lst: List[Any], chunk_size: int) -> List[List[Any]]:
    """Split a list into chunks of specified size"""
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]


def deduplicate_list(lst: List[Any], key_func: Optional[Callable] = None) -> List[Any]:
    """Remove duplicates from list while preserving order"""
    if key_func is None:
        seen = set()
        result = []
        for item in lst:
            if item not in seen:
                seen.add(item)
                result.append(item)
        return result
    else:
        seen = set()
        result = []
        for item in lst:
            key = key_func(item)
            if key not in seen:
                seen.add(key)
                result.append(item)
        return result


def retry_async(max_attempts: int = 3, delay: float = 1.0, backoff: float = 2.0):
    """Decorator for retrying async functions with exponential backoff"""
    def decorator(func: Callable) -> Callable:
        async def wrapper(*args, **kwargs):
            last_exception = None
            current_delay = delay

            for attempt in range(max_attempts):
                try:
                    return await func(*args, **kwargs)
                except Exception as e:
                    last_exception = e
                    if attempt < max_attempts - 1:
                        logging.warning(f"Attempt {attempt + 1} failed: {str(e)}. Retrying in {current_delay}s...")
                        await asyncio.sleep(current_delay)
                        current_delay *= backoff
                    else:
                        logging.error(f"All {max_attempts} attempts failed. Last error: {str(e)}")

            raise last_exception

        return wrapper
    return decorator


def validate_email(email: str) -> bool:
    """Validate email address format"""
    if not email or not isinstance(email, str):
        return False

    pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
    return bool(re.match(pattern, email))


def sanitize_filename(filename: str, max_length: int = 100) -> str:
    """Sanitize filename for cross-platform compatibility"""
    if not filename:
        return "untitled"

    # Remove or replace invalid characters
    filename = re.sub(r'[<>:"/\\|?*]', '_', filename)

    # Remove leading/trailing dots and spaces
    filename = filename.strip('. ')

    # Limit length
    if len(filename) > max_length:
        name, ext = filename.rsplit('.', 1) if '.' in filename else (filename, '')
        max_name_length = max_length - len(ext) - 1 if ext else max_length
        filename = name[:max_name_length] + ('.' + ext if ext else '')

    # Ensure it's not empty
    if not filename:
        filename = "untitled"

    return filename


def calculate_similarity(text1: str, text2: str) -> float:
    """Calculate simple text similarity using Jaccard similarity"""
    if not text1 or not text2:
        return 0.0

    # Convert to sets of words
    words1 = set(text1.lower().split())
    words2 = set(text2.lower().split())

    # Calculate Jaccard similarity
    intersection = len(words1.intersection(words2))
    union = len(words1.union(words2))

    return intersection / union if union > 0 else 0.0


def format_file_size(size_bytes: int) -> str:
    """Format file size in bytes to human-readable string"""
    if size_bytes == 0:
        return "0 B"

    size_names = ["B", "KB", "MB", "GB", "TB"]
    i = 0
    size = float(size_bytes)

    while size >= 1024.0 and i < len(size_names) - 1:
        size /= 1024.0
        i += 1

    return f"{size:.1f} {size_names[i]}"


def create_progress_callback(total_steps: int, callback_func: Optional[Callable] = None):
    """Create a progress tracking callback function"""
    current_step = 0

    def update_progress(step_name: str = "", increment: int = 1):
        nonlocal current_step
        current_step += increment
        progress = min(current_step / total_steps, 1.0)

        if callback_func:
            callback_func(progress, step_name, current_step, total_steps)

        return progress

    return update_progress


def debounce(wait_time: float):
    """Decorator to debounce function calls"""
    def decorator(func: Callable) -> Callable:
        last_called = [0.0]

        async def wrapper(*args, **kwargs):
            now = asyncio.get_event_loop().time()
            if now - last_called[0] >= wait_time:
                last_called[0] = now
                return await func(*args, **kwargs)

        return wrapper
    return decorator


class Timer:
    """Simple timer context manager"""

    def __init__(self, name: str = "Operation"):
        self.name = name
        self.start_time = None
        self.end_time = None

    def __enter__(self):
        self.start_time = datetime.now()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end_time = datetime.now()
        duration = self.end_time - self.start_time
        print(f"{self.name} completed in {duration.total_seconds():.2f} seconds")

    @property
    def duration(self) -> Optional[timedelta]:
        if self.start_time and self.end_time:
            return self.end_time - self.start_time
        return None


class RateLimiter:
    """Simple rate limiter for API calls"""

    def __init__(self, max_calls: int, time_window: float):
        self.max_calls = max_calls
        self.time_window = time_window
        self.calls = []

    async def acquire(self):
        """Wait if necessary to respect rate limits"""
        now = datetime.now()

        # Remove old calls outside the time window
        self.calls = [call_time for call_time in self.calls
                      if (now - call_time).total_seconds() < self.time_window]

        # If we're at the limit, wait
        if len(self.calls) >= self.max_calls:
            oldest_call = min(self.calls)
            wait_time = self.time_window - (now - oldest_call).total_seconds()
            if wait_time > 0:
                await asyncio.sleep(wait_time)

        # Record this call
        self.calls.append(now)


def get_nested_value(data: Dict[str, Any], key_path: str, default: Any = None) -> Any:
    """Get nested dictionary value using dot notation"""
    keys = key_path.split('.')
    current = data

    try:
        for key in keys:
            current = current[key]
        return current
    except (KeyError, TypeError):
        return default


def set_nested_value(data: Dict[str, Any], key_path: str, value: Any) -> None:
    """Set nested dictionary value using dot notation"""
    keys = key_path.split('.')
    current = data

    for key in keys[:-1]:
        if key not in current or not isinstance(current[key], dict):
            current[key] = {}
        current = current[key]

    current[keys[-1]] = value
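Likewise, a short sketch of how a few of these helpers compose. The response string, the rate-limit values, and the fetch_outline coroutine are made-up examples; only the helper names and signatures come from the file above.

    import asyncio
    from coursecrafter.utils.helpers import smart_json_loads, retry_async, RateLimiter, Timer

    # smart_json_loads tolerates LLM replies that mix prose with JSON
    raw = 'Sure! Here is the outline: {"title": "Intro to Python", "modules": 4}'
    outline = smart_json_loads(raw, default={})  # -> {'title': 'Intro to Python', 'modules': 4}

    # Retry a flaky async call with exponential backoff while respecting a rate limit
    limiter = RateLimiter(max_calls=5, time_window=60.0)

    @retry_async(max_attempts=3, delay=1.0, backoff=2.0)
    async def fetch_outline():
        await limiter.acquire()
        return outline

    with Timer("outline fetch"):
        print(asyncio.run(fetch_outline()))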
requirements.txt
ADDED
@@ -0,0 +1,57 @@
# 🎓 CourseCrafter AI Dependencies
# Core framework and UI
gradio>=4.0.0
streamlit>=1.28.0

# LLM Providers
openai>=1.0.0
anthropic>=0.20.0
google-generativeai>=0.3.0

# MCP Integration
mcp>=1.0.0

# Content Processing
markdown>=3.5.0
beautifulsoup4>=4.12.0
requests>=2.31.0
aiohttp>=3.8.0
duckduckgo-search>=6.0.0
crawl4ai>=0.3.0

# PDF Generation
reportlab>=4.0.0
# weasyprint also needs system libraries (e.g. Pango) that pip does not install
weasyprint>=60.0

# Image Processing
pillow>=10.0.0
matplotlib>=3.7.0

# Data Handling
pandas>=2.0.0
numpy>=1.24.0
pydantic>=2.0.0

# Async Support
# asyncio and uuid ship with the Python standard library; the PyPI packages of the
# same names are obsolete, so they are not pinned here
asyncio-throttle>=1.0.2

# Environment & Config
python-dotenv>=1.0.0
pyyaml>=6.0

# Utilities
python-dateutil>=2.8.0
tqdm>=4.65.0

# Development & Testing
pytest>=7.4.0
pytest-asyncio>=0.21.0
black>=23.0.0
flake8>=6.0.0

# Cloud Integration (optional)
google-auth>=2.22.0
google-auth-oauthlib>=1.0.0
google-auth-httplib2>=0.1.0
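As a closing sanity check, a small sketch (not part of the upload) that confirms a handful of the pinned packages resolve in the current environment after pip install -r requirements.txt. The package subset chosen here is arbitrary; importlib.metadata is standard library.

    from importlib.metadata import version, PackageNotFoundError

    # Spot-check a few of the dependencies pinned in requirements.txt
    for pkg in ("gradio", "openai", "anthropic", "pydantic", "reportlab", "weasyprint"):
        try:
            print(f"{pkg}: {version(pkg)}")
        except PackageNotFoundError:
            print(f"{pkg}: missing - install with 'pip install -r requirements.txt'")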