"""
Quiz generation tools for TutorX MCP.
"""
import json
import re
from pathlib import Path
from typing import Any, Optional

from mcp_server.mcp_instance import mcp
from model import GeminiFlash

# Load the quiz-generation prompt template bundled with the package
PROMPT_TEMPLATE = (Path(__file__).parent.parent / "prompts" / "quiz_generation.txt").read_text(encoding="utf-8")

# Initialize the Gemini model used for quiz generation
MODEL = GeminiFlash()
def clean_json_trailing_commas(json_text: str) -> str:
    """Remove trailing commas before closing braces/brackets so the text parses as strict JSON."""
    return re.sub(r',([ \t\r\n]*[}\]])', r'\1', json_text)


def extract_json_from_text(text: str) -> Optional[Any]:
    """Extract and parse a JSON object from raw LLM output, stripping Markdown code fences."""
    if not text or not isinstance(text, str):
        return None
    # Remove leading/trailing code fences (``` or ```json)
    text = re.sub(r'^\s*```(?:json)?\s*', '', text, flags=re.IGNORECASE)
    text = re.sub(r'\s*```\s*$', '', text, flags=re.IGNORECASE)
    text = text.strip()
    # Remove trailing commas, then parse (raises json.JSONDecodeError on invalid JSON)
    cleaned = clean_json_trailing_commas(text)
    return json.loads(cleaned)
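# Illustrative note (example output shape assumed, not taken from the prompt template):
# a Gemini reply such as
#   ```json
#   {"questions": [{"question": "...", "options": ["a", "b"], "answer": "a"},]}
#   ```
# has its code fences stripped and the trailing comma removed before json.loads is called.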
@mcp.tool()
async def generate_quiz_tool(concept: str, difficulty: str = "medium") -> dict:
    """
    Generate a quiz on the given concept and difficulty using Gemini, fully LLM-driven.
    Returns a JSON object with a 'questions' list; each question has 'question',
    'options' (list), and 'answer' fields.
    """
    try:
        # Validate inputs before calling the model
        if not concept or not isinstance(concept, str):
            return {"error": "concept must be a non-empty string"}
        valid_difficulties = ["easy", "medium", "hard"]
        if difficulty.lower() not in valid_difficulties:
            return {"error": f"difficulty must be one of {valid_difficulties}"}

        prompt = (
            f"Generate a {difficulty} quiz on the concept '{concept}'. "
            f"Return a JSON object with a 'questions' field: a list of questions, "
            f"each with 'question', 'options' (list), and 'answer'."
        )
        llm_response = await MODEL.generate_text(prompt, temperature=0.7)

        # Parse the LLM output; fall back to returning the raw text if it is not valid JSON
        try:
            quiz_data = extract_json_from_text(llm_response)
        except Exception:
            return {"llm_raw": llm_response, "error": "Failed to parse LLM output as JSON"}
        if quiz_data is None:
            return {"llm_raw": llm_response, "error": "Empty response from LLM"}
        return quiz_data
    except Exception as e:
        return {"error": f"Error generating quiz: {str(e)}"}