"""
Concept-related MCP tools for TutorX.
"""
import json
import sys
from pathlib import Path
from typing import Optional

# Make both the package directory and the project root importable
# (for the local `resources` package and the `mcp_server` package).
current_dir = Path(__file__).parent
sys.path.insert(0, str(current_dir.parent))
sys.path.insert(0, str(current_dir.parent.parent))

# Import from local resources
from resources.concept_graph import get_concept, get_all_concepts

# Import MCP
from mcp_server.mcp_instance import mcp
from mcp_server.model.gemini_flash import GeminiFlash

MODEL = GeminiFlash()
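
# Illustrative sketch (an assumption, not part of the original tool flow):
# Gemini often wraps JSON answers in Markdown code fences (```json ... ```).
# A helper like the hypothetical _strip_code_fences below could be applied to
# the raw response before json.loads; the tools in this module keep their
# original behavior and simply fall back to returning the raw text on failure.
def _strip_code_fences(text: str) -> str:
    """Remove a single surrounding Markdown code fence, if present."""
    cleaned = text.strip()
    if cleaned.startswith("```"):
        lines = cleaned.splitlines()
        # Drop the opening fence line (e.g. "```json") ...
        lines = lines[1:]
        # ... and the closing "```" line, if there is one.
        if lines and lines[-1].strip() == "```":
            lines = lines[:-1]
        cleaned = "\n".join(lines).strip()
    return cleaned
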
@mcp.tool()
async def get_concept_tool(concept_id: Optional[str] = None) -> dict:
    """
    Get an LLM-generated explanation of a concept from the knowledge graph.

    Requires a concept_id; Gemini is prompted to return a JSON object with an
    explanation, key points, and an example.
    """
if not concept_id:
return {"error": "concept_id is required for LLM-driven mode"}
prompt = (
f"Explain the concept '{concept_id}' in detail. "
f"Return a JSON object with fields: explanation (string), key_points (list of strings), and example (string)."
)
llm_response = await MODEL.generate_text(prompt)
    try:
        data = json.loads(llm_response)
    except (json.JSONDecodeError, TypeError):
        # The model may return prose or fenced JSON; fall back to the raw text.
        data = {"llm_raw": llm_response, "error": "Failed to parse LLM output as JSON"}
return data


@mcp.tool()
async def assess_skill_tool(student_id: str, concept_id: str) -> dict:
"""
Assess a student's understanding of a specific concept, fully LLM-driven.
Use Gemini to generate a JSON object with a score (0-1), feedback, and recommendations.
"""
prompt = (
f"A student (ID: {student_id}) is being assessed on the concept '{concept_id}'. "
f"Generate a JSON object with: score (float 0-1), feedback (string), and recommendations (list of strings)."
)
llm_response = await MODEL.generate_text(prompt)
    try:
        data = json.loads(llm_response)
    except (json.JSONDecodeError, TypeError):
        # The model may return prose or fenced JSON; fall back to the raw text.
        data = {"llm_raw": llm_response, "error": "Failed to parse LLM output as JSON"}
return data
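

if __name__ == "__main__":
    # Local smoke test (illustrative sketch, not part of the MCP server run):
    # assumes the @mcp.tool() decorator leaves the underlying coroutines
    # directly callable (as FastMCP typically does) and that GeminiFlash picks
    # up valid API credentials from the environment. "pythagorean_theorem" and
    # "student-123" are placeholder identifiers, not values from this repo.
    import asyncio

    async def _demo() -> None:
        concept = await get_concept_tool("pythagorean_theorem")
        print(json.dumps(concept, indent=2))
        assessment = await assess_skill_tool("student-123", "pythagorean_theorem")
        print(json.dumps(assessment, indent=2))

    asyncio.run(_demo())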