Meet Patel committed
Commit 0fae407 · 1 Parent(s): 9a6c98c

Enhance quiz generation functionality by improving JSON response parsing in app.py and quiz_tools.py. Implement robust error handling for various response formats, including string and dictionary types. Introduce utility functions to clean and extract JSON from text, ensuring cleaner data handling and improved reliability in quiz data retrieval.

Files changed (2)
  1. app.py +17 -1
  2. mcp_server/tools/quiz_tools.py +17 -1
app.py CHANGED
@@ -420,7 +420,23 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
             async with ClientSession(sse, write) as session:
                 await session.initialize()
                 response = await session.call_tool("generate_quiz_tool", {"concept": concept.strip(), "difficulty": difficulty_str})
-                return response
+                # --- PATCH: Parse quiz JSON for pretty display ---
+                if hasattr(response, 'content') and isinstance(response.content, list):
+                    for item in response.content:
+                        if hasattr(item, 'text') and item.text:
+                            try:
+                                quiz_data = json.loads(item.text)
+                                return quiz_data
+                            except Exception:
+                                return {"raw": item.text}
+                if isinstance(response, dict):
+                    return response
+                if isinstance(response, str):
+                    try:
+                        return json.loads(response)
+                    except Exception:
+                        return {"raw": response}
+                return {"raw": str(response)}
     except Exception as e:
         import traceback
         return {
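For context, here is a minimal, self-contained sketch of how the branches added to app.py behave. FakeTextContent and FakeToolResult are hypothetical stand-ins for the MCP client result objects (anything with a .content list whose items carry a .text attribute); they are not types from this repository.

import json

# Hypothetical stand-ins for the MCP client result types the patch inspects:
# an object with a .content list whose items expose a .text attribute.
class FakeTextContent:
    def __init__(self, text):
        self.text = text

class FakeToolResult:
    def __init__(self, items):
        self.content = items

def parse_tool_response(response):
    # Same fall-through order as the patched handler in app.py.
    if hasattr(response, 'content') and isinstance(response.content, list):
        for item in response.content:
            if hasattr(item, 'text') and item.text:
                try:
                    return json.loads(item.text)   # JSON payload -> dict
                except Exception:
                    return {"raw": item.text}      # non-JSON text -> raw wrapper
    if isinstance(response, dict):
        return response
    if isinstance(response, str):
        try:
            return json.loads(response)
        except Exception:
            return {"raw": response}
    return {"raw": str(response)}

print(parse_tool_response(FakeToolResult([FakeTextContent('{"quiz": []}')])))  # {'quiz': []}
print(parse_tool_response(FakeToolResult([FakeTextContent('not json')])))      # {'raw': 'not json'}
print(parse_tool_response("plain string"))                                     # {'raw': 'plain string'}

The sketch mirrors the fall-through order of the patch: JSON text inside the content list first, then dict and string responses, and finally a {"raw": ...} wrapper so the Gradio UI always receives a dict.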
mcp_server/tools/quiz_tools.py CHANGED
@@ -14,6 +14,22 @@ PROMPT_TEMPLATE = (Path(__file__).parent.parent / "prompts" / "quiz_generation.t
 # Initialize Gemini model
 MODEL = GeminiFlash()
 
+def clean_json_trailing_commas(json_text: str) -> str:
+    import re
+    return re.sub(r',([ \t\r\n]*[}\]])', r'\1', json_text)
+
+def extract_json_from_text(text: str):
+    import re, json
+    if not text or not isinstance(text, str):
+        return None
+    # Remove code fences
+    text = re.sub(r'^\s*```(?:json)?\s*', '', text, flags=re.IGNORECASE)
+    text = re.sub(r'\s*```\s*$', '', text, flags=re.IGNORECASE)
+    text = text.strip()
+    # Remove trailing commas
+    cleaned = clean_json_trailing_commas(text)
+    return json.loads(cleaned)
+
 @mcp.tool()
 async def generate_quiz_tool(concept: str, difficulty: str = "medium") -> dict:
     """
@@ -32,7 +48,7 @@ async def generate_quiz_tool(concept: str, difficulty: str = "medium") -> dict:
     )
     llm_response = await MODEL.generate_text(prompt, temperature=0.7)
     try:
-        quiz_data = json.loads(llm_response)
+        quiz_data = extract_json_from_text(llm_response)
     except Exception:
         quiz_data = {"llm_raw": llm_response, "error": "Failed to parse LLM output as JSON"}
     return quiz_data
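A quick usage sketch of the cleanup the new helpers perform, applied to the kind of reply Gemini often returns (JSON wrapped in a ```json fence, sometimes with a trailing comma). The sample reply string is illustrative; the regular expressions are the same ones used by clean_json_trailing_commas and extract_json_from_text above.

import json
import re

# Illustrative raw reply: a ```json fence around an object with a trailing comma.
llm_reply = '```json\n{"questions": [{"q": "2 + 2?", "a": "4"},]}\n```'

# The same cleanup steps extract_json_from_text applies above:
text = re.sub(r'^\s*```(?:json)?\s*', '', llm_reply, flags=re.IGNORECASE)  # strip opening fence
text = re.sub(r'\s*```\s*$', '', text, flags=re.IGNORECASE)                # strip closing fence
text = text.strip()
cleaned = re.sub(r',([ \t\r\n]*[}\]])', r'\1', text)                       # drop trailing commas

print(json.loads(cleaned))  # {'questions': [{'q': '2 + 2?', 'a': '4'}]}
# json.loads(llm_reply) would raise json.JSONDecodeError because of the fence and the trailing comma.

If json.loads still fails after the cleanup, generate_quiz_tool keeps its existing fallback and returns the raw LLM output alongside an error message.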