# TutorX-MCP — main.py
# Author: Meet Patel
# Step 4: Added assessment and analytics features with question generation
# and performance tracking (commit 0228818).
# TutorX MCP Server
from mcp.server.fastmcp import FastMCP
import json
from typing import List, Dict, Any, Optional
from datetime import datetime
# Import utility functions
from utils.multimodal import (
process_text_query,
process_voice_input,
process_handwriting,
generate_speech_response
)
from utils.assessment import (
generate_question,
evaluate_student_answer,
generate_performance_analytics,
detect_plagiarism
)
# Create the TutorX MCP server instance; every @mcp.tool()/@mcp.resource()
# definition below registers itself on this object.
mcp = FastMCP("TutorX")
# ------------------ Core Features ------------------
# Adaptive Learning Engine
@mcp.tool()
def assess_skill(student_id: str, concept_id: str) -> Dict[str, Any]:
    """
    Assess a student's skill level on a single concept.

    Args:
        student_id: The unique identifier for the student
        concept_id: The concept to assess

    Returns:
        Dictionary containing a simulated skill level, confidence score,
        study recommendations, and a timestamp
    """
    # Simulated assessment: fixed scores plus canned recommendations.
    suggested_actions = [
        "Practice more complex problems",
        "Review related concept: algebra_linear_equations",
    ]
    return {
        "student_id": student_id,
        "concept_id": concept_id,
        "skill_level": 0.75,
        "confidence": 0.85,
        "recommendations": suggested_actions,
        "timestamp": datetime.now().isoformat(),
    }
@mcp.resource("concept-graph://")
def get_concept_graph() -> Dict[str, Any]:
    """Return the full (static, simulated) knowledge concept graph."""
    # Nodes are concepts; edges are weighted prerequisite links.
    concept_nodes = [
        {"id": "math_algebra_basics", "name": "Algebra Basics", "difficulty": 1},
        {"id": "math_algebra_linear_equations", "name": "Linear Equations", "difficulty": 2},
        {"id": "math_algebra_quadratic_equations", "name": "Quadratic Equations", "difficulty": 3},
    ]
    prerequisite_edges = [
        {"from": "math_algebra_basics", "to": "math_algebra_linear_equations", "weight": 1.0},
        {"from": "math_algebra_linear_equations", "to": "math_algebra_quadratic_equations", "weight": 0.8},
    ]
    return {"nodes": concept_nodes, "edges": prerequisite_edges}
@mcp.resource("learning-path://{student_id}")
def get_learning_path(student_id: str) -> Dict[str, Any]:
    """Return a simulated personalized learning path for a student."""
    path = {
        "student_id": student_id,
        "current_concepts": ["math_algebra_linear_equations"],
        "recommended_next": ["math_algebra_quadratic_equations"],
        "mastered": ["math_algebra_basics"],
        "estimated_completion_time": "2 weeks",
    }
    return path
# Assessment Suite
@mcp.tool()
def generate_quiz(concept_ids: List[str], difficulty: int = 2) -> Dict[str, Any]:
    """
    Generate a quiz based on specified concepts and difficulty.

    Args:
        concept_ids: List of concept IDs to include in the quiz
        difficulty: Difficulty level from 1-5

    Returns:
        Quiz object with questions and answers (simulated single question)
    """
    # Simulated quiz content: one worked linear-equation question.
    sample_question = {
        "id": "q1",
        "text": "Solve for x: 2x + 3 = 7",
        "type": "algebraic_equation",
        "answer": "x = 2",
        "solution_steps": [
            "2x + 3 = 7",
            "2x = 7 - 3",
            "2x = 4",
            "x = 4/2 = 2",
        ],
    }
    return {
        "quiz_id": "q12345",
        "concept_ids": concept_ids,
        "difficulty": difficulty,
        "questions": [sample_question],
    }
# Feedback System
@mcp.tool()
def analyze_error_patterns(student_id: str, concept_id: str) -> Dict[str, Any]:
    """
    Analyze common error patterns for a student on a specific concept.

    Args:
        student_id: The student's unique identifier
        concept_id: The concept to analyze

    Returns:
        Simulated error-pattern analysis with frequencies and recommendations
    """
    # Simulated data: two canned error categories with worked examples.
    observed_errors = [
        {
            "type": "sign_error",
            "frequency": 0.65,
            "example": "2x - 3 = 5 β†’ 2x = 5 - 3 β†’ 2x = 2 β†’ x = 1 (should be x = 4)",
        },
        {
            "type": "arithmetic_error",
            "frequency": 0.35,
            "example": "2x = 8 β†’ x = 8/2 = 3 (should be x = 4)",
        },
    ]
    remediation_tips = [
        "Practice more sign manipulation problems",
        "Review basic arithmetic operations",
    ]
    return {
        "student_id": student_id,
        "concept_id": concept_id,
        "common_errors": observed_errors,
        "recommendations": remediation_tips,
    }
# ------------------ Advanced Features ------------------
# Neurological Engagement Monitor
@mcp.tool()
def analyze_cognitive_state(eeg_data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Analyze EEG data to determine cognitive state.

    Args:
        eeg_data: Raw or processed EEG data

    Returns:
        Simulated analysis of attention, cognitive load, and stress levels
    """
    # Simulated output; eeg_data is accepted but not inspected in this stub.
    advice = [
        "Student is engaged but approaching cognitive overload",
        "Consider simplifying next problems slightly",
    ]
    return {
        "attention_level": 0.82,
        "cognitive_load": 0.65,
        "stress_level": 0.25,
        "recommendations": advice,
        "timestamp": datetime.now().isoformat(),
    }
# Cross-Institutional Knowledge Fusion
@mcp.resource("curriculum-standards://{country_code}")
def get_curriculum_standards(country_code: str) -> Dict[str, Any]:
    """Get curriculum standards for a specific country (code is case-insensitive)."""
    us_standards = {
        "name": "Common Core State Standards",
        "math_standards": {
            "algebra_1": [
                "CCSS.Math.Content.HSA.CED.A.1",
                "CCSS.Math.Content.HSA.CED.A.2",
            ]
        },
    }
    uk_standards = {
        "name": "National Curriculum",
        "math_standards": {
            "algebra_1": [
                "KS3.Algebra.1",
                "KS3.Algebra.2",
            ]
        },
    }
    catalog = {"us": us_standards, "uk": uk_standards}
    # Unknown country codes produce an error payload rather than raising.
    return catalog.get(country_code.lower(), {"error": "Country code not found"})
@mcp.tool()
def align_content_to_standard(content_id: str, standard_id: str) -> Dict[str, Any]:
    """
    Align educational content to a specific curriculum standard.

    Args:
        content_id: The ID of the content to align
        standard_id: The curriculum standard ID

    Returns:
        Simulated alignment score, coverage gaps, and recommendations
    """
    coverage_gaps = [
        "Missing coverage of polynomial division",
        "Should include more word problems",
    ]
    improvement_steps = [
        "Add 2-3 examples of polynomial division",
        "Convert 30% of problems to word problems",
    ]
    return {
        "content_id": content_id,
        "standard_id": standard_id,
        "alignment_score": 0.85,
        "gaps": coverage_gaps,
        "recommendations": improvement_steps,
    }
# Automated Lesson Authoring
@mcp.tool()
def generate_lesson(topic: str, grade_level: int, duration_minutes: int = 45) -> Dict[str, Any]:
    """
    Generate a complete lesson plan on a topic.

    Args:
        topic: The lesson topic
        grade_level: Target grade level (K-12)
        duration_minutes: Lesson duration in minutes

    Returns:
        Complete lesson plan (simulated template content)
    """
    # (name, minutes, description) for each lesson segment, in order.
    segment_plan = [
        ("Warm-up", 5, "Review of pre-algebra concepts needed for today's lesson"),
        ("Direct Instruction", 15, "Teacher demonstrates solving linear equations step by step"),
        ("Guided Practice", 10, "Students solve problems with teacher guidance"),
        ("Independent Practice", 10, "Students solve problems independently"),
        ("Closure", 5, "Review key concepts and preview next lesson"),
    ]
    activities = [
        {"name": name, "duration_minutes": minutes, "description": description}
        for name, minutes, description in segment_plan
    ]
    return {
        "topic": topic,
        "grade_level": grade_level,
        "duration_minutes": duration_minutes,
        "objectives": [
            "Students will be able to solve linear equations in one variable",
            "Students will be able to check their solutions",
        ],
        "materials": [
            "Whiteboard/projector",
            "Handouts with practice problems",
            "Graphing calculators (optional)",
        ],
        "activities": activities,
        "assessment": {
            "formative": "Teacher observation during guided and independent practice",
            "summative": "Exit ticket with 3 problems to solve",
        },
        "differentiation": {
            "struggling": "Provide equation-solving steps reference sheet",
            "advanced": "Offer multi-step equations with fractions and decimals",
        },
    }
# ------------------ User Experience Features ------------------
@mcp.resource("student-dashboard://{student_id}")
def get_student_dashboard(student_id: str) -> Dict[str, Any]:
    """Return simulated dashboard data (knowledge map, recent activity, tips) for a student."""
    knowledge_map = {
        "mastery_percentage": 68,
        "concepts_mastered": 42,
        "concepts_in_progress": 15,
        "concepts_not_started": 25,
    }
    recent_activity = [
        {
            "timestamp": "2025-06-06T15:30:00Z",
            "activity_type": "quiz",
            "description": "Algebra Quiz #3",
            "performance": "85%",
        },
        {
            "timestamp": "2025-06-05T13:45:00Z",
            "activity_type": "lesson",
            "description": "Quadratic Equations Introduction",
            "duration_minutes": 32,
        },
    ]
    return {
        "student_id": student_id,
        "knowledge_map": knowledge_map,
        "recent_activity": recent_activity,
        "recommendations": [
            "Complete Factor Polynomials practice set",
            "Review Linear Systems interactive module",
        ],
    }
@mcp.tool()
def get_accessibility_settings(student_id: str) -> Dict[str, Any]:
    """
    Get accessibility settings for a student.

    Args:
        student_id: The student's unique identifier

    Returns:
        Accessibility settings (simulated defaults)
    """
    preferences = {
        "text_to_speech_enabled": True,
        "font_size": "large",
        "high_contrast_mode": False,
        "screen_reader_compatible": True,
        "keyboard_navigation_enabled": True,
    }
    return {"student_id": student_id, **preferences}
@mcp.tool()
def update_accessibility_settings(student_id: str, settings: Dict[str, Any]) -> Dict[str, Any]:
    """
    Update accessibility settings for a student.

    Args:
        student_id: The student's unique identifier
        settings: Dictionary of settings to update

    Returns:
        Updated accessibility settings with an "updated_at" timestamp
    """
    # In a real implementation, this would update a database.
    # Known settings with their fallback defaults; unknown keys in
    # `settings` are ignored.
    defaults = {
        "text_to_speech_enabled": True,
        "font_size": "large",
        "high_contrast_mode": False,
        "screen_reader_compatible": True,
        "keyboard_navigation_enabled": True,
    }
    merged = {key: settings.get(key, fallback) for key, fallback in defaults.items()}
    return {
        "student_id": student_id,
        **merged,
        "updated_at": datetime.now().isoformat(),
    }
# ------------------ Multi-Modal Interaction ------------------
@mcp.tool()
def text_interaction(query: str, student_id: str, session_context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    Process a text query from the student.

    Args:
        query: The text query from the student
        student_id: The student's unique identifier
        session_context: Optional context about the current session

    Returns:
        Processed response from the text-query pipeline
    """
    # Fix: copy the caller's dict instead of mutating it in place — the
    # previous `session_context or {}` wrote "student_id" into the
    # caller-owned dictionary as a side effect.
    context = dict(session_context) if session_context else {}
    context["student_id"] = student_id
    return process_text_query(query, context)
@mcp.tool()
def voice_interaction(audio_data_base64: str, student_id: str) -> Dict[str, Any]:
    """
    Process voice input from the student.

    Args:
        audio_data_base64: Base64 encoded audio data
        student_id: The student's unique identifier

    Returns:
        Transcription, text response, and synthesized speech response
    """
    # Transcribe the audio, then run the transcription through the
    # normal text-query pipeline.
    transcription_result = process_voice_input(audio_data_base64)
    transcript = transcription_result["transcription"]
    reply = process_text_query(transcript, {"student_id": student_id})

    # Synthesize the tutor's spoken reply from the text answer.
    spoken_reply = generate_speech_response(
        reply["response"],
        {"voice_id": "educational_tutor"},
    )

    return {
        "input_transcription": transcript,
        "input_confidence": transcription_result["confidence"],
        "detected_emotions": transcription_result.get("detected_emotions", {}),
        "text_response": reply["response"],
        "speech_response": spoken_reply,
        "timestamp": datetime.now().isoformat(),
    }
@mcp.tool()
def handwriting_recognition(image_data_base64: str, student_id: str) -> Dict[str, Any]:
    """
    Process handwritten input from the student.

    Args:
        image_data_base64: Base64 encoded image data of handwriting
        student_id: The student's unique identifier

    Returns:
        Transcription, confidence, detected content type, and — for
        recognized math equations — a simulated solution (None otherwise)
    """
    result = process_handwriting(image_data_base64)

    solution = None
    if result["detected_content_type"] == "math_equation":
        # In a real implementation, this would use a math engine to solve
        # the equation; here we return a canned demonstration solution.
        # Fix: use .get() so a math_equation result that lacks an
        # "equation_type" field falls through to the generic branch
        # instead of raising KeyError.
        if result.get("equation_type") == "quadratic":
            solution = {
                "equation": result["transcription"],
                "solution_steps": [
                    "x^2 + 5x + 6 = 0",
                    "Factor: (x + 2)(x + 3) = 0",
                    "x + 2 = 0 or x + 3 = 0",
                    "x = -2 or x = -3"
                ],
                "solutions": [-2, -3]
            }
        else:
            solution = {
                "equation": result["transcription"],
                "note": "Solution not implemented for this equation type"
            }

    return {
        "transcription": result["transcription"],
        "confidence": result["confidence"],
        "detected_content_type": result["detected_content_type"],
        "solution": solution,
        "timestamp": datetime.now().isoformat()
    }
# ------------------ Advanced Assessment Tools ------------------
@mcp.tool()
def create_assessment(concept_ids: List[str], num_questions: int, difficulty: int = 3) -> Dict[str, Any]:
    """
    Create a complete assessment for given concepts.

    Args:
        concept_ids: List of concept IDs to include (must be non-empty)
        num_questions: Number of questions to generate
        difficulty: Difficulty level (1-5)

    Returns:
        Complete assessment with questions

    Raises:
        ValueError: If concept_ids is empty (previously this crashed with
            an opaque ZeroDivisionError).
    """
    if not concept_ids:
        raise ValueError("concept_ids must contain at least one concept")

    # Distribute questions evenly; the first `extra` concepts receive one
    # additional question each when the split is uneven.
    base, extra = divmod(num_questions, len(concept_ids))

    questions: List[Dict[str, Any]] = []
    for i, concept_id in enumerate(concept_ids):
        count = base + (1 if i < extra else 0)
        for _ in range(count):
            questions.append(generate_question(concept_id, difficulty))

    return {
        "assessment_id": f"assessment_{datetime.now().strftime('%Y%m%d%H%M%S')}",
        "concept_ids": concept_ids,
        "difficulty": difficulty,
        "num_questions": len(questions),
        "questions": questions,
        "created_at": datetime.now().isoformat()
    }
@mcp.tool()
def grade_assessment(assessment_id: str, student_answers: Dict[str, str], questions: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Grade a completed assessment.

    Args:
        assessment_id: The ID of the assessment
        student_answers: Dictionary mapping question IDs to student answers
        questions: List of question objects

    Returns:
        Grading results: overall score (unanswered questions count against
        the student), per-question evaluations, and the most common error
        as an (error_type, count) tuple, or None if no typed errors occurred.
    """
    results: List[Dict[str, Any]] = []
    correct_count = 0
    for question in questions:
        question_id = question["id"]
        if question_id not in student_answers:
            # Unanswered: no evaluation is recorded, but the question still
            # counts toward the score denominator below.
            continue
        evaluation = evaluate_student_answer(question, student_answers[question_id])
        results.append(evaluation)
        if evaluation["is_correct"]:
            correct_count += 1

    # Score over ALL questions so unanswered ones lower the score.
    score = correct_count / len(questions) if questions else 0

    # Tally error types. Fix: use .get() so an evaluation that omits the
    # "error_type" key no longer raises KeyError.
    error_types: Dict[str, int] = {}
    for result in results:
        error_type = result.get("error_type")
        if error_type:
            error_types[error_type] = error_types.get(error_type, 0) + 1

    most_common_error = None
    if error_types:
        most_common_error = max(error_types.items(), key=lambda item: item[1])

    return {
        "assessment_id": assessment_id,
        "score": score,
        "correct_count": correct_count,
        "total_questions": len(questions),
        "results": results,
        "most_common_error": most_common_error,
        "completed_at": datetime.now().isoformat()
    }
@mcp.tool()
def get_student_analytics(student_id: str, timeframe_days: int = 30) -> Dict[str, Any]:
    """
    Get comprehensive analytics for a student.

    Args:
        student_id: The student's unique identifier
        timeframe_days: Number of days to include in analysis

    Returns:
        Performance analytics produced by the assessment utilities
    """
    # Thin wrapper: delegate entirely to the analytics helper.
    analytics = generate_performance_analytics(student_id, timeframe_days)
    return analytics
@mcp.tool()
def check_submission_originality(submission: str, reference_sources: List[str]) -> Dict[str, Any]:
    """
    Check student submission for potential plagiarism.

    Args:
        submission: The student's submission text
        reference_sources: List of reference texts to check against

    Returns:
        Originality analysis produced by the plagiarism detector
    """
    # Thin wrapper: delegate entirely to the plagiarism-detection helper.
    report = detect_plagiarism(submission, reference_sources)
    return report
# Entry point: start the MCP server when run as a script.
if __name__ == "__main__":
    mcp.run()