"""
🎨 Gradio Application for Course Creator AI
Main Gradio interface for course generation.
"""
import gradio as gr
from typing import Dict, Any, Optional, Tuple
import asyncio
import json
import markdown
import re
from ..agents.simple_course_agent import SimpleCourseAgent
from ..types import DifficultyLevel, GenerationOptions, LearningStyle
from .components import CoursePreview
from .styling import get_custom_css
def format_lessons(lessons: list) -> str:
    """Format lessons from JSON data into HTML with markdown support.

    Args:
        lessons: List of lesson dicts. Recognized keys per lesson:
            "title", "content" (markdown), "duration" (minutes),
            "objectives" (list[str]), "key_takeaways" (list[str]),
            "image_description" (str), and optionally "images"
            (list of dicts with "url" and "description").

    Returns:
        An HTML string rendering every lesson, or a placeholder
        message when no lessons are available.
    """
    if not lessons:
        return "<div class='empty-state'>📚 No lessons generated yet.</div>"

    # NOTE(review): the original prepended an inline <style> block whose
    # content was lost in extraction; styling is expected to come from the
    # app-level CSS (see get_custom_css) instead.
    html = "<div class='lessons-container'>"
    for i, lesson in enumerate(lessons, 1):
        title = lesson.get("title", f"Lesson {i}")
        content = lesson.get("content", "")
        duration = lesson.get("duration", "")
        objectives = lesson.get("objectives", [])
        key_takeaways = lesson.get("key_takeaways", [])
        image_description = lesson.get("image_description", "")

        # Convert markdown content to HTML; fall back to naive paragraph
        # splitting if the markdown package is not installed.
        if content:
            try:
                import markdown
                md = markdown.Markdown(extensions=['extra', 'codehilite'])
                content_html = md.convert(content)
            except ImportError:
                content_html = content.replace('\n\n', '</p><p>').replace('\n', ' ')
                if content_html and not content_html.startswith('<p>'):
                    content_html = f'<p>{content_html}</p>'
        else:
            content_html = "<p>No content available.</p>"

        # Render generated images when present; otherwise show a placeholder
        # so the user knows image generation is still pending.
        image_html = ""
        if image_description:
            images = lesson.get("images", [])
            if images and len(images) > 0:
                image_html = "<div class='lesson-images'>"
                for img in images:
                    if isinstance(img, dict) and img.get("url"):
                        img_url = img.get("url", "")
                        img_caption = img.get("description", image_description)
                        image_html += (
                            f"<figure><img src='{img_url}' alt='{img_caption}'/>"
                            f"<figcaption>{img_caption}</figcaption></figure>"
                        )
                image_html += "</div>"
            else:
                image_html = (
                    f"<div class='image-placeholder'>🖼️ {image_description}"
                    "<br/><em>(Image generation in progress...)</em></div>"
                )

        duration_html = (
            f"<div class='lesson-duration'>⏱️ Duration: {duration} minutes</div>"
            if duration else ""
        )

        objectives_html = ""
        if objectives:
            items = ''.join(f'<li>{obj}</li>' for obj in objectives)
            objectives_html = (
                "<div class='lesson-objectives'><strong>🎯 Learning Objectives:</strong>"
                f"<ul>{items}</ul></div>"
            )

        takeaways_html = ""
        if key_takeaways:
            items = ''.join(f'<li>{takeaway}</li>' for takeaway in key_takeaways)
            takeaways_html = (
                "<div class='lesson-takeaways'><strong>💡 Key Takeaways:</strong>"
                f"<ul>{items}</ul></div>"
            )

        html += (
            f"<section class='lesson'><h2>📖 {title}</h2>"
            f"{duration_html}{objectives_html}{image_html}"
            f"{content_html}{takeaways_html}</section>"
        )

    html += "</div>"
    return html
def format_flashcards(flashcards: list) -> str:
    """Format flashcards from JSON data into interactive HTML.

    Uses a CSS-only flip (hidden-checkbox "hack") so cards can be flipped
    without JavaScript; the actual flip styling is expected to live in the
    app-level CSS.

    Args:
        flashcards: List of card dicts with "question", "answer", and an
            optional "category".

    Returns:
        An HTML string of flip cards, or a placeholder message when no
        flashcards are available.
    """
    if not flashcards:
        return "<div class='empty-state'>🃏 No flashcards generated yet.</div>"

    # NOTE(review): the original prepended an inline <style> block whose
    # content was lost in extraction; flip styling assumed to be in the
    # app-level CSS instead.
    html = "<div class='flashcards-container'>"
    html += (
        "<p class='flashcards-hint'>🃏 Click on any flashcard to flip it "
        "and see the answer!</p>"
    )
    for i, card in enumerate(flashcards):
        question = card.get("question", "")
        answer = card.get("answer", "")
        category = card.get("category", "General")
        # CSS-only flip via a hidden checkbox toggled by clicking the label.
        html += (
            f"<label class='flashcard' for='flashcard_{i}'>"
            f"<input type='checkbox' id='flashcard_{i}' class='flashcard-toggle'/>"
            f"<span class='flashcard-category'>{category}</span>"
            f"<span class='flashcard-front'>{question}</span>"
            f"<span class='flashcard-back'>{answer}</span>"
            "</label>"
        )
    html += "</div>"
    return html
def format_quiz(quiz: dict) -> str:
    """Format quiz from JSON data into HTML gradeable by the submit-button JS.

    Each question is rendered as a `.quiz-question` element carrying
    `data-correct` and `data-explanation` attributes with radio inputs whose
    values are option letters (A, B, ...), matching what the client-side
    grading script on the quiz submit button expects.

    Args:
        quiz: Dict with "title", "instructions", and "questions". Each
            question is assumed to have "question", "options" (list[str]),
            "correct_answer" (letter), and "explanation".
            NOTE(review): question-key names assumed — confirm against the
            course agent's quiz schema.

    Returns:
        An HTML string for the quiz, or a placeholder message when no quiz
        is available.
    """
    if not quiz or not quiz.get("questions"):
        return "<div class='empty-state'>📝 No quiz generated yet.</div>"

    title = quiz.get("title", "Course Quiz")
    instructions = quiz.get("instructions", "Choose the best answer for each question.")
    questions = quiz.get("questions", [])
    # Defensive re-check; unreachable given the guard above, kept for safety.
    if not questions:
        return "<div class='empty-state'>📝 No quiz questions available.</div>"

    # Stable-ish unique id so radio groups from multiple quizzes don't clash.
    quiz_id = f"quiz_{abs(hash(str(questions)))%10000}"

    quiz_html = (
        f"<div class='quiz-container' id='{quiz_id}'>"
        f"<h2>📝 {title}</h2>"
        f"<p class='quiz-instructions'>{instructions}</p>"
    )
    for q_idx, question in enumerate(questions):
        q_text = question.get("question", "")
        options = question.get("options", [])
        correct = question.get("correct_answer", "")
        explanation = question.get("explanation", "")

        options_html = ""
        for opt_idx, option in enumerate(options):
            letter = chr(ord('A') + opt_idx)
            options_html += (
                f"<label class='quiz-option'>"
                f"<input type='radio' name='{quiz_id}_q{q_idx}' value='{letter}'/> "
                f"{letter}. {option}</label>"
            )

        quiz_html += (
            f"<div class='quiz-question' data-correct='{correct}' "
            f"data-explanation='{explanation}'>"
            f"<p><strong>{q_idx + 1}. {q_text}</strong></p>"
            f"{options_html}</div>"
        )
    quiz_html += "</div>"
    return quiz_html
def create_coursecrafter_interface() -> gr.Blocks:
"""Create the main Course Creator Gradio interface"""
with gr.Blocks(
title="Course Creator AI - Intelligent Course Generator",
css=get_custom_css(),
theme=gr.themes.Soft()
) as interface:
# Header
gr.HTML("""
🎓 Course Creator AI
Generate comprehensive mini-courses with AI-powered content, flashcards, and quizzes
""")
# LLM Provider Configuration
with gr.Row():
with gr.Column():
gr.HTML("
🤖 LLM Provider Configuration
")
with gr.Row():
llm_provider = gr.Dropdown(
label="LLM Provider",
choices=["openai", "anthropic", "google", "openai_compatible"],
value="google",
info="Choose your preferred LLM provider"
)
api_key_input = gr.Textbox(
label="API Key",
placeholder="Enter your API key here...",
type="password",
info="Your API key for the selected provider (optional for OpenAI-compatible)"
)
# OpenAI-Compatible endpoint configuration (initially hidden)
with gr.Row(visible=False) as openai_compatible_row:
endpoint_url_input = gr.Textbox(
label="Endpoint URL",
placeholder="https://your-endpoint.com/v1",
info="Base URL for OpenAI-compatible API"
)
model_name_input = gr.Textbox(
label="Model Name",
placeholder="your-model-name",
info="Model name to use with the endpoint"
)
# Main interface
with gr.Row():
with gr.Column(scale=1):
# Course generation form
topic_input = gr.Textbox(
label="Course Topic",
placeholder="e.g., Introduction to Python Programming",
lines=1
)
difficulty_input = gr.Dropdown(
label="Difficulty Level",
choices=["beginner", "intermediate", "advanced"],
value="beginner"
)
lesson_count = gr.Slider(
label="Number of Lessons",
minimum=1,
maximum=10,
value=5,
step=1
)
generate_btn = gr.Button(
"🚀 Generate Course",
variant="primary",
size="lg"
)
# Chat interface for course refinement
gr.HTML("
💬 Course Assistant
")
# Chat window with proper styling
with gr.Column():
chat_display = gr.HTML(
value="""
🤖
Hi! I'm your Course Assistant. Generate a course first, then ask me questions about the lessons, concepts, or content!
""",
elem_id="chat-display"
)
with gr.Row():
chat_input = gr.Textbox(
placeholder="Ask me to modify the course...",
lines=1,
scale=4,
container=False
)
chat_btn = gr.Button("Send", variant="secondary", scale=1)
with gr.Column(scale=2):
# Course preview tabs with enhanced components
course_preview = CoursePreview()
with gr.Tabs():
with gr.Tab("📖 Lessons"):
lessons_output = gr.HTML(
value="""
🎓 Ready to Generate Your Course!
Enter a topic and click "Generate Course" to create comprehensive lessons with AI-powered content.
💡 Tip: Try topics like "Introduction to Python Programming", "Digital Marketing Basics", or "Climate Change Science"
"""
)
with gr.Tab("🃏 Flashcards"):
flashcards_output = gr.HTML(
value="""
🃏 Interactive Flashcards
Flashcards will appear here after course generation. They'll help reinforce key concepts with spaced repetition learning!
"""
)
with gr.Tab("📝 Quizzes"):
# Quiz functionality with HTML content and state management
quiz_state = gr.State({}) # Store quiz data
quizzes_output = gr.HTML(
value="""
📝 Knowledge Assessment
Interactive quizzes will appear here to test your understanding of the course material!
"
)
if not api_key.strip() and provider != "openai_compatible":
return (
"
❌ Please enter your API key for the selected LLM provider.
",
"", "",
gr.update(visible=False), [], "
Error loading images
"
)
if provider == "openai_compatible" and not endpoint_url.strip():
return (
"
❌ Please enter the endpoint URL for OpenAI-compatible provider.
",
"", "",
gr.update(visible=False), [], "
Error loading images
"
)
if provider == "openai_compatible" and not model_name.strip():
return (
"
❌ Please enter the model name for OpenAI-compatible provider.
",
"", "",
gr.update(visible=False), [], "
Error loading images
"
)
try:
# Initialize progress
progress(0, desc="🚀 Initializing Course Generator...")
# Set the API key and configuration for the selected provider
import os
if provider == "openai":
os.environ["OPENAI_API_KEY"] = api_key
elif provider == "anthropic":
os.environ["ANTHROPIC_API_KEY"] = api_key
elif provider == "google":
os.environ["GOOGLE_API_KEY"] = api_key
elif provider == "openai_compatible":
if api_key.strip():
os.environ["OPENAI_COMPATIBLE_API_KEY"] = api_key
os.environ["OPENAI_COMPATIBLE_BASE_URL"] = endpoint_url
os.environ["OPENAI_COMPATIBLE_MODEL"] = model_name
# IMPORTANT: Create a fresh agent instance to pick up the new environment variables
# This ensures the LlmClient reinitializes with the updated API keys
agent = SimpleCourseAgent()
# Use the new dynamic configuration method to update provider settings
config_kwargs = {}
if provider == "openai_compatible":
config_kwargs["base_url"] = endpoint_url
config_kwargs["model"] = model_name
# Update provider configuration dynamically
config_success = agent.update_provider_config(provider, api_key, **config_kwargs)
if not config_success:
return (
f"
❌ Failed to configure provider '{provider}'. Please check your API key and settings.
",
"", "",
gr.update(visible=False), [], "
Error loading images
"
)
course_context["agent"] = agent
course_context["topic"] = topic
# Verify the provider is available with the new configuration
available_providers = agent.get_available_providers()
if provider not in available_providers:
return (
f"
❌ Provider '{provider}' is not available after configuration. Please check your API key and configuration.
",
"", "",
gr.update(visible=False), [], "
Error loading images
"
)
progress(0.1, desc="⚙️ Setting up generation options...")
# Create generation options
options = GenerationOptions(
difficulty=DifficultyLevel(difficulty),
lesson_count=lessons,
include_images=True,
include_flashcards=True,
include_quizzes=True
)
progress(0.15, desc="🔍 Checking available providers...")
# Get available providers
available_providers = agent.get_available_providers()
if not available_providers:
return (
"
❌ No LLM providers available. Please check your API keys.
",
"", "",
gr.update(visible=False), [], "
Error loading images
"
)
progress(0.2, desc="🎓 Starting course generation...")
# Use the default provider from config (no need to override)
# The agent will automatically use the configured default provider
# Start course generation
lessons_html = ""
flashcards_html = ""
quizzes_html = ""
# Stream the generation process
course_data = None
current_progress = 0.2
# Add a simple counter for fallback progress
chunk_count = 0
max_expected_chunks = 10 # Rough estimate
async for chunk in agent.generate_course(topic, options):
chunk_count += 1
print(f"📊 Progress Debug: Received chunk type='{chunk.type}', content='{chunk.content}'")
# Update progress based on chunk content
if chunk.type == "progress":
# Check if the progress message matches our known steps (handle emojis)
step_found = False
progress_message = chunk.content.lower()
print(f"🔍 Checking progress message: '{progress_message}'")
if "research completed" in progress_message:
current_progress = 0.3
step_found = True
print(f"✅ Matched: Research completed -> {current_progress}")
progress(current_progress, desc="📚 Research completed, planning course structure...")
elif "course structure planned" in progress_message:
current_progress = 0.4
step_found = True
print(f"✅ Matched: Course structure planned -> {current_progress}")
progress(current_progress, desc="📝 Course structure planned, generating content...")
elif "lessons created" in progress_message:
current_progress = 0.6
step_found = True
print(f"✅ Matched: Lessons created -> {current_progress}")
progress(current_progress, desc="✍️ Lessons created, generating flashcards...")
elif "flashcards created" in progress_message:
current_progress = 0.75
step_found = True
print(f"✅ Matched: Flashcards created -> {current_progress}")
progress(current_progress, desc="🃏 Flashcards created, creating quiz...")
elif "quiz created" in progress_message:
current_progress = 0.8
step_found = True
print(f"✅ Matched: Quiz created -> {current_progress}")
progress(current_progress, desc="❓ Quiz created, generating images...")
elif "images generated" in progress_message:
current_progress = 0.9
step_found = True
print(f"✅ Matched: Images generated -> {current_progress}")
progress(current_progress, desc="🎨 Images generated, finalizing course...")
elif "finalizing course" in progress_message:
current_progress = 0.95
step_found = True
print(f"✅ Matched: Finalizing course -> {current_progress}")
progress(current_progress, desc="📦 Assembling final course data...")
if not step_found:
# Fallback: increment progress based on chunk count
fallback_progress = min(0.2 + (chunk_count / max_expected_chunks) * 0.6, 0.85)
current_progress = max(current_progress, fallback_progress)
print(f"⚠️ No match found, using fallback: {fallback_progress}")
progress(current_progress, desc=f"�� {chunk.content}")
elif chunk.type == "course_complete":
current_progress = 0.95
progress(current_progress, desc="📦 Finalizing course data...")
# Parse the complete course data
try:
course_data = json.loads(chunk.content)
except:
course_data = None
progress(0.97, desc="🎨 Processing course content...")
# If we got course data, format it nicely
if course_data:
course_context["content"] = course_data
# Format lessons
lessons_html = format_lessons(course_data.get("lessons", []))
# Format flashcards
flashcards_html = format_flashcards(course_data.get("flashcards", []))
# Format quiz
quiz_data = course_data.get("quiz", {})
quizzes_html = format_quiz(quiz_data)
# Show quiz button if quiz exists - be more permissive to ensure it shows
quiz_btn_visible = bool(quiz_data and (quiz_data.get("questions") or len(str(quiz_data)) > 50))
print(f"🎯 Quiz button visibility: {quiz_btn_visible} (quiz_data: {bool(quiz_data)}, questions: {bool(quiz_data.get('questions') if quiz_data else False)})")
# Force quiz button to be visible if we have any quiz content
if quiz_data and not quiz_btn_visible:
print("⚠️ Forcing quiz button to be visible due to quiz data presence")
quiz_btn_visible = True
progress(0.98, desc="🖼️ Processing images for gallery...")
# Prepare image gallery data - fix the format for Gradio Gallery
images = []
image_details_list = []
# Process images from lessons
for lesson in course_data.get("lessons", []):
lesson_images = lesson.get("images", [])
for i, img in enumerate(lesson_images):
try:
if isinstance(img, dict):
# Handle different image data formats
image_url = img.get("url") or img.get("data_url")
if image_url:
alt_text = img.get("caption", img.get("description", "Educational image"))
# Handle base64 data URLs by converting to temp files
if image_url.startswith('data:image/'):
import base64
import tempfile
import os
# Extract base64 data
header, data = image_url.split(',', 1)
image_data = base64.b64decode(data)
# Determine file extension from header
if 'jpeg' in header or 'jpg' in header:
ext = '.jpg'
elif 'png' in header:
ext = '.png'
elif 'gif' in header:
ext = '.gif'
elif 'webp' in header:
ext = '.webp'
else:
ext = '.jpg' # Default
# Create temp file
temp_fd, temp_path = tempfile.mkstemp(suffix=ext, prefix=f'course_img_{i}_')
try:
with os.fdopen(temp_fd, 'wb') as f:
f.write(image_data)
images.append(temp_path)
image_details_list.append({
"url": temp_path,
"caption": alt_text,
"lesson": lesson.get("title", "Unknown lesson")
})
except Exception as e:
print(f"⚠️ Failed to save temp image {i}: {e}")
os.close(temp_fd) # Close if write failed
continue
elif image_url.startswith('http'):
# Regular URL - Gradio can handle these directly
images.append(image_url)
image_details_list.append({
"url": image_url,
"caption": alt_text,
"lesson": lesson.get("title", "Unknown lesson")
})
else:
# Assume it's a file path
if len(image_url) <= 260: # Windows path limit
images.append(image_url)
image_details_list.append({
"url": image_url,
"caption": alt_text,
"lesson": lesson.get("title", "Unknown lesson")
})
else:
print(f"⚠️ Skipping image {i}: path too long ({len(image_url)} chars)")
elif isinstance(img, str):
# Handle case where image is just a URL string
if img.startswith('data:image/'):
# Handle base64 data URLs
import base64
import tempfile
import os
try:
header, data = img.split(',', 1)
image_data = base64.b64decode(data)
# Determine file extension from header
if 'jpeg' in header or 'jpg' in header:
ext = '.jpg'
elif 'png' in header:
ext = '.png'
elif 'gif' in header:
ext = '.gif'
elif 'webp' in header:
ext = '.webp'
else:
ext = '.jpg' # Default
# Create temp file
temp_fd, temp_path = tempfile.mkstemp(suffix=ext, prefix=f'course_img_{i}_')
try:
with os.fdopen(temp_fd, 'wb') as f:
f.write(image_data)
images.append(temp_path)
image_details_list.append({
"url": temp_path,
"caption": "Educational image",
"lesson": lesson.get("title", "Unknown lesson")
})
except Exception as e:
print(f"⚠️ Failed to save temp image {i}: {e}")
os.close(temp_fd) # Close if write failed
continue
except Exception as e:
print(f"⚠️ Error processing base64 image {i}: {e}")
continue
else:
# Regular URL or file path
images.append(img)
image_details_list.append({
"url": img,
"caption": "Educational image",
"lesson": lesson.get("title", "Unknown lesson")
})
except Exception as e:
print(f"⚠️ Error processing image {i}: {e}")
continue
# Also check for standalone images in course data
standalone_images = course_data.get("images", [])
for i, img in enumerate(standalone_images):
try:
if isinstance(img, dict):
image_url = img.get("url") or img.get("data_url")
if image_url:
alt_text = img.get("caption", img.get("description", "Course image"))
# Handle base64 data URLs
if image_url.startswith('data:image/'):
import base64
import tempfile
import os
try:
header, data = image_url.split(',', 1)
image_data = base64.b64decode(data)
# Determine file extension from header
if 'jpeg' in header or 'jpg' in header:
ext = '.jpg'
elif 'png' in header:
ext = '.png'
elif 'gif' in header:
ext = '.gif'
elif 'webp' in header:
ext = '.webp'
else:
ext = '.jpg' # Default
# Create temp file
temp_fd, temp_path = tempfile.mkstemp(suffix=ext, prefix=f'standalone_img_{i}_')
try:
with os.fdopen(temp_fd, 'wb') as f:
f.write(image_data)
images.append(temp_path)
image_details_list.append({
"url": temp_path,
"caption": alt_text,
"lesson": "Course Overview"
})
except Exception as e:
print(f"⚠️ Failed to save temp standalone image {i}: {e}")
os.close(temp_fd) # Close if write failed
continue
except Exception as e:
print(f"⚠️ Error processing base64 standalone image {i}: {e}")
continue
else:
images.append(image_url)
image_details_list.append({
"url": image_url,
"caption": alt_text,
"lesson": "Course Overview"
})
elif isinstance(img, str):
if img.startswith('data:image/'):
# Handle base64 data URLs
import base64
import tempfile
import os
try:
header, data = img.split(',', 1)
image_data = base64.b64decode(data)
# Determine file extension from header
if 'jpeg' in header or 'jpg' in header:
ext = '.jpg'
elif 'png' in header:
ext = '.png'
elif 'gif' in header:
ext = '.gif'
elif 'webp' in header:
ext = '.webp'
else:
ext = '.jpg' # Default
# Create temp file
temp_fd, temp_path = tempfile.mkstemp(suffix=ext, prefix=f'standalone_img_{i}_')
try:
with os.fdopen(temp_fd, 'wb') as f:
f.write(image_data)
images.append(temp_path)
image_details_list.append({
"url": temp_path,
"caption": "Course image",
"lesson": "Course Overview"
})
except Exception as e:
print(f"⚠️ Failed to save temp standalone image {i}: {e}")
os.close(temp_fd) # Close if write failed
continue
except Exception as e:
print(f"⚠️ Error processing base64 standalone image {i}: {e}")
continue
else:
images.append(img)
image_details_list.append({
"url": img,
"caption": "Course image",
"lesson": "Course Overview"
})
except Exception as e:
print(f"⚠️ Error processing standalone image {i}: {e}")
continue
print(f"📸 Prepared {len(images)} images for gallery display")
# Create image details HTML for display
if image_details_list:
image_details_html = "
"
image_details_html += "
🖼️ Image Gallery
"
image_details_html += f"
Total images: {len(image_details_list)}
"
image_details_html += "
"
for i, img_detail in enumerate(image_details_list, 1):
image_details_html += f"
"
)
def handle_quiz_submit():
    """Handle quiz submission using client-side processing.

    Placeholder server-side callback: the actual grading runs in the
    client-side JavaScript attached to the quiz submit button, so this
    simply returns a no-op Gradio update.
    """
    # This function will be replaced by client-side JavaScript
    return gr.update()
async def handle_chat(message: str, current_chat: str):
    """Answer user questions about the generated course content.

    Builds a text context from the generated course (lessons plus a few
    flashcards), asks the configured LLM provider, and appends the
    user/assistant exchange to the chat HTML.

    Args:
        message: The user's chat input.
        current_chat: Current chat transcript as an HTML string.

    Returns:
        Tuple of (updated chat HTML, empty string to clear the input box).
    """
    if not message.strip():
        return current_chat, ""

    if not course_context["content"] or not course_context["agent"]:
        assistant_response = "Please generate a course first before asking questions about it."
    else:
        try:
            # Get the agent and course content captured at generation time.
            agent = course_context["agent"]
            course_data = course_context["content"]
            topic = course_context["topic"]

            # Create context from the course content (truncate lesson
            # bodies to keep the prompt small).
            course_context_text = f"Course Topic: {topic}\n\n"
            lessons = course_data.get("lessons", [])
            for i, lesson in enumerate(lessons, 1):
                course_context_text += f"Lesson {i}: {lesson.get('title', '')}\n"
                course_context_text += f"Content: {lesson.get('content', '')[:1000]}...\n"
                if lesson.get('key_takeaways'):
                    course_context_text += f"Key Takeaways: {', '.join(lesson.get('key_takeaways', []))}\n"
                course_context_text += "\n"

            # Add flashcards context (first 5 only).
            flashcards = course_data.get("flashcards", [])
            if flashcards:
                course_context_text += "Flashcards:\n"
                for card in flashcards[:5]:
                    course_context_text += f"Q: {card.get('question', '')} A: {card.get('answer', '')}\n"
                course_context_text += "\n"

            # Create a focused prompt for answering questions.
            prompt = f"""You are a helpful course assistant. Answer the user's question about the course content below.

Course Content:
{course_context_text}

User Question: {message}

Instructions:
- Answer based ONLY on the course content provided above
- Be helpful, clear, and educational
- If the question is about something not covered in the course, say so politely
- Keep responses concise but informative
- Use a friendly, teaching tone

Answer:"""

            # Use the default provider (same as course generation), falling
            # back to the first available provider if needed.
            provider = agent.default_provider
            available_providers = agent.get_available_providers()
            if provider not in available_providers:
                provider = available_providers[0] if available_providers else None

            if provider:
                from ..agents.simple_course_agent import Message
                messages = [
                    Message(role="system", content="You are a helpful course assistant that answers questions about course content."),
                    Message(role="user", content=prompt)
                ]
                print(f"🤖 Chat using LLM provider: {provider}")
                assistant_response = await agent._get_llm_response(provider, messages)
                # Clean up the response: strip a leading "Answer:" echo.
                assistant_response = assistant_response.strip()
                if assistant_response.startswith("Answer:"):
                    assistant_response = assistant_response[7:].strip()
            else:
                assistant_response = "Sorry, no LLM providers are available to answer your question."
        except Exception as e:
            print(f"Error in chat: {e}")
            assistant_response = "Sorry, I encountered an error while trying to answer your question. Please try again."

    # Preserve any prior messages already rendered in the chat HTML.
    # NOTE(review): the original message-extraction markup was lost in
    # extraction; this assumes messages are rendered as
    # <div class="chat-message ...">...</div> blocks — confirm markup.
    existing_messages = ""
    if current_chat and "chat-message" in current_chat:
        import re
        existing_messages = "".join(
            re.findall(r'<div class="chat-message[^"]*">.*?</div>', current_chat, re.DOTALL)
        )

    new_chat = f"""<div class="chat-window">
{existing_messages}
<div class="chat-message user">👤 {message}</div>
<div class="chat-message assistant">🤖 {assistant_response}</div>
</div>"""
    return new_chat, ""
# Connect provider change event
llm_provider.change(
fn=on_provider_change,
inputs=[llm_provider],
outputs=[openai_compatible_row]
)
generate_btn.click(
fn=generate_course_wrapper,
inputs=[topic_input, difficulty_input, lesson_count, llm_provider, api_key_input, endpoint_url_input, model_name_input],
outputs=[
lessons_output, flashcards_output, quizzes_output, quiz_submit_btn, image_gallery, image_details
]
)
chat_btn.click(
fn=handle_chat,
inputs=[chat_input, chat_display],
outputs=[chat_display, chat_input]
)
# Use a much simpler approach with direct JavaScript execution
quiz_submit_btn.click(
fn=None, # No Python function needed
js="""
function() {
// Find all quiz questions and process them
const questions = document.querySelectorAll('.quiz-question');
if (questions.length === 0) {
alert('No quiz questions found!');
return;
}
let score = 0;
let total = questions.length;
let hasAnswers = false;
questions.forEach((question, idx) => {
const radios = question.querySelectorAll('input[type="radio"]');
const correctAnswer = question.dataset.correct;
const explanation = question.dataset.explanation || '';
let selectedRadio = null;
radios.forEach(radio => {
if (radio.checked) {
selectedRadio = radio;
hasAnswers = true;
}
});
// Create or find feedback element
let feedback = question.querySelector('.quiz-feedback');
if (!feedback) {
feedback = document.createElement('div');
feedback.className = 'quiz-feedback';
question.appendChild(feedback);
}
if (selectedRadio) {
const userAnswer = selectedRadio.value;
if (userAnswer === correctAnswer) {
score++;
feedback.innerHTML = `
✅ Correct! ${explanation}
`;
} else {
feedback.innerHTML = `
❌ Incorrect. The correct answer is ${correctAnswer}. ${explanation}
`;
}
} else {
feedback.innerHTML = `
⚠️ No answer selected. The correct answer is ${correctAnswer}. ${explanation}