# Commit a806ca2: Refactor TutorX MCP server to integrate Mistral OCR for
# document processing, update concept graph tools for LLM-driven responses,
# and enhance learning path generation with Gemini. Transitioned various
# tools to utilize LLM for improved educational interactions and streamlined
# API responses.
"""
Concept-related MCP tools for TutorX.
"""
import random
from typing import Dict, Any, Optional
from datetime import datetime, timezone
import sys
import os
from pathlib import Path
import json

# Make both the package root and its parent importable so the `resources`
# and `mcp_server` packages resolve regardless of how this module is run.
# (Previously sys/os/pathlib were imported twice; the duplicates are removed
# but both distinct path insertions are preserved, in the original order.)
current_dir = Path(__file__).parent
sys.path.insert(0, str(current_dir.parent.parent))
sys.path.insert(0, str(current_dir.parent))

# Import from local resources
from resources.concept_graph import get_concept, get_all_concepts

# Import MCP
from mcp_server.mcp_instance import mcp
from mcp_server.model.gemini_flash import GeminiFlash

# Single shared Gemini client used by every tool in this module.
MODEL = GeminiFlash()
async def get_concept_tool(concept_id: Optional[str] = None) -> dict:
    """
    Explain a single concept from the knowledge graph, fully LLM-driven.

    Args:
        concept_id: Identifier/name of the concept to explain. Required;
            a falsy value short-circuits with an error dict (no LLM call).

    Returns:
        On success, the LLM's JSON object with fields ``explanation`` (str),
        ``key_points`` (list of str) and ``example`` (str). If the LLM output
        cannot be parsed as JSON, a dict with the raw text under ``llm_raw``
        and an ``error`` message.
    """
    if not concept_id:
        return {"error": "concept_id is required for LLM-driven mode"}
    prompt = (
        f"Explain the concept '{concept_id}' in detail. "
        f"Return a JSON object with fields: explanation (string), key_points (list of strings), and example (string)."
    )
    llm_response = await MODEL.generate_text(prompt)
    # LLMs frequently wrap JSON in ```json ... ``` markdown fences, which
    # makes a raw json.loads fail; strip the fences before parsing.
    cleaned = llm_response.strip()
    if cleaned.startswith("```"):
        cleaned = cleaned.split("\n", 1)[-1] if "\n" in cleaned else ""
        cleaned = cleaned.rsplit("```", 1)[0]
    try:
        return json.loads(cleaned)
    except Exception:
        # Fall back to returning the raw text so callers can still inspect it.
        return {"llm_raw": llm_response, "error": "Failed to parse LLM output as JSON"}
async def assess_skill_tool(student_id: str, concept_id: str) -> dict:
    """
    Assess a student's understanding of a specific concept, fully LLM-driven.

    Args:
        student_id: Identifier of the student being assessed.
        concept_id: Identifier/name of the concept under assessment.

    Returns:
        On success, the LLM's JSON object with ``score`` (float in 0-1),
        ``feedback`` (str) and ``recommendations`` (list of str). If the LLM
        output cannot be parsed as JSON, a dict with the raw text under
        ``llm_raw`` and an ``error`` message.
    """
    prompt = (
        f"A student (ID: {student_id}) is being assessed on the concept '{concept_id}'. "
        f"Generate a JSON object with: score (float 0-1), feedback (string), and recommendations (list of strings)."
    )
    llm_response = await MODEL.generate_text(prompt)
    # LLMs frequently wrap JSON in ```json ... ``` markdown fences, which
    # makes a raw json.loads fail; strip the fences before parsing.
    cleaned = llm_response.strip()
    if cleaned.startswith("```"):
        cleaned = cleaned.split("\n", 1)[-1] if "\n" in cleaned else ""
        cleaned = cleaned.rsplit("```", 1)[0]
    try:
        return json.loads(cleaned)
    except Exception:
        # Fall back to returning the raw text so callers can still inspect it.
        return {"llm_raw": llm_response, "error": "Failed to parse LLM output as JSON"}