Meet Patel committed · Commit 1af10cc
Parent(s): 15710ed
Refactor TutorX MCP server to remove legacy client and utility modules, update app.py for SSE integration, and enhance .gitignore to exclude .cursor directory. Clean up main.py for improved server configuration and streamline run script for better usability.
Files changed:
- .coverage +0 -0
- .gitignore +1 -0
- app.py +167 -181
- client.py +0 -390
- docs/sdk.md +293 -39
- main.py +22 -848
- mcp_server/__init__.py +11 -0
- mcp_server/mcp_instance.py +7 -0
- mcp_server/model/__init__.py +3 -0
- mcp_server/model/gemini_flash.py +147 -0
- mcp_server/prompts/quiz_generation.txt +25 -0
- mcp_server/resources/concept_graph.py +83 -0
- mcp_server/resources/curriculum_standards.py +106 -0
- mcp_server/server.py +186 -0
- mcp_server/setup.py +17 -0
- mcp_server/tools/__init__.py +38 -0
- mcp_server/tools/concept_graph_tools.py +46 -0
- mcp_server/tools/concept_tools.py +100 -0
- mcp_server/tools/interaction_tools.py +127 -0
- mcp_server/tools/learning_path_tools.py +162 -0
- mcp_server/tools/lesson_tools.py +123 -0
- mcp_server/tools/ocr_tools.py +166 -0
- mcp_server/tools/quiz_tools.py +62 -0
- pyproject.toml +7 -0
- run.py +124 -90
- utils/__init__.py +0 -3
- utils/assessment.py +0 -357
- utils/multimodal.py +0 -140
- uv.lock +35 -0
.coverage
DELETED
Binary file (53.2 kB)
.gitignore
CHANGED
@@ -5,6 +5,7 @@ build/
 dist/
 wheels/
 *.egg-info
+.cursor/
 
 # Virtual environments
 .venv
app.py
CHANGED
@@ -13,11 +13,12 @@ import aiohttp
 import sseclient
 import requests
 
-# Import MCP client
-from
+# Import MCP SSE client context managers
+from mcp import ClientSession
+from mcp.client.sse import sse_client
 
 # Server configuration
-SERVER_URL = "http://localhost:
+SERVER_URL = "http://localhost:8000/sse"  # Ensure this is the SSE endpoint
 
 # Utility functions

@@ -26,146 +27,89 @@ async def load_concept_graph(concept_id: str = None):
     """
     Load and visualize the concept graph for a given concept ID.
     If no concept_id is provided, returns the first available concept.
+    Uses call_resource for concept graph retrieval (not a tool).
 
     Returns:
         tuple: (figure, concept_details, related_concepts) or (None, error_dict, [])
     """
     try:
         print(f"[DEBUG] Loading concept graph for concept_id: {concept_id}")
-…
-        node_colors = []
-        for node in G.nodes():
-            if G.nodes[node].get("type") == "concept":
-                node_colors.append("lightblue")
-            elif G.nodes[node].get("type") == "prerequisite":
-                node_colors.append("lightcoral")
-            else:
-                node_colors.append("lightgreen")
-
-        nx.draw_networkx_nodes(G, pos, node_size=2000, node_color=node_colors, alpha=0.8)
-        nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)
-
-        # Add labels
-        labels = {node: G.nodes[node].get("label", node) for node in G.nodes()}
-        nx.draw_networkx_labels(G, pos, labels, font_size=10, font_weight="bold")
-
-        # Add edge labels
-        edge_labels = {(u, v): d["relationship"] for u, v, d in G.edges(data=True)}
-        nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=8)
-
-        plt.title(f"Concept Graph: {concept.get('name', concept_id)}")
-        plt.axis("off")
-
-        # Return the figure and concept details
-        concept_details = {
-            "id": concept.get("id", ""),
-            "name": concept.get("name", ""),
-            "description": concept.get("description", ""),
-            "related_concepts_count": len(concept.get("related", [])),
-            "prerequisites_count": len(concept.get("prerequisites", []))
-        }
-
-        return plt.gcf(), concept_details, related_concepts
-
+        async with sse_client(SERVER_URL) as (sse, write):
+            async with ClientSession(sse, write) as session:
+                await session.initialize()
+                # Use MCP resource call for concept graph
+                result = await session.call_resource("resources/read", {"uri": f"concept-graph://{concept_id}" if concept_id else "concept-graph://"})
+                print(f"[DEBUG] Server response: {result}")
+                if not result or not isinstance(result, dict):
+                    error_msg = "Invalid server response"
+                    print(f"[ERROR] {error_msg}")
+                    return None, {"error": error_msg}, []
+                if "error" in result:
+                    print(f"[ERROR] Server returned error: {result['error']}")
+                    return None, {"error": result["error"]}, []
+                if "concepts" in result and not concept_id:
+                    if not result["concepts"]:
+                        error_msg = "No concepts available"
+                        print(f"[ERROR] {error_msg}")
+                        return None, {"error": error_msg}, []
+                    concept = result["concepts"][0]
+                    print(f"[DEBUG] Using first concept from list: {concept.get('id')}")
+                else:
+                    concept = result.get("concept", result)
+                    print(f"[DEBUG] Using direct concept: {concept.get('id')}")
+                if not isinstance(concept, dict) or not concept.get('id'):
+                    error_msg = "Invalid concept data structure"
+                    print(f"[ERROR] {error_msg}: {concept}")
+                    return None, {"error": error_msg}, []
+                import matplotlib.pyplot as plt
+                import networkx as nx
+                G = nx.DiGraph()
+                G.add_node(concept["id"], label=concept["name"], type="concept")
+                related_concepts = []
+                if "related" in concept:
+                    for rel_id in concept["related"]:
+                        rel_result = await session.call_tool("get_concept_graph", {"concept_id": rel_id})
+                        if "error" not in rel_result:
+                            G.add_node(rel_id, label=rel_result["name"], type="related")
+                            G.add_edge(concept["id"], rel_id, relationship="related_to")
+                            related_concepts.append([rel_id, rel_result.get("name", ""), rel_result.get("description", "")])
+                if "prerequisites" in concept:
+                    for prereq_id in concept["prerequisites"]:
+                        prereq_result = await session.call_tool("get_concept_graph", {"concept_id": prereq_id})
+                        if "error" not in prereq_result:
+                            G.add_node(prereq_id, label=prereq_result["name"], type="prerequisite")
+                            G.add_edge(prereq_id, concept["id"], relationship="prerequisite_for")
+                plt.figure(figsize=(10, 8))
+                pos = nx.spring_layout(G)
+                node_colors = []
+                for node in G.nodes():
+                    if G.nodes[node].get("type") == "concept":
+                        node_colors.append("lightblue")
+                    elif G.nodes[node].get("type") == "prerequisite":
+                        node_colors.append("lightcoral")
+                    else:
+                        node_colors.append("lightgreen")
+                nx.draw_networkx_nodes(G, pos, node_size=2000, node_color=node_colors, alpha=0.8)
+                nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)
+                labels = {node: G.nodes[node].get("label", node) for node in G.nodes()}
+                nx.draw_networkx_labels(G, pos, labels, font_size=10, font_weight="bold")
+                edge_labels = {(u, v): d["relationship"] for u, v, d in G.edges(data=True)}
+                nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=8)
+                plt.title(f"Concept Graph: {concept.get('name', concept_id)}")
+                plt.axis("off")
+                concept_details = {
+                    "id": concept.get("id", ""),
+                    "name": concept.get("name", ""),
+                    "description": concept.get("description", ""),
+                    "related_concepts_count": len(concept.get("related", [])),
+                    "prerequisites_count": len(concept.get("prerequisites", []))
+                }
+                return plt.gcf(), concept_details, related_concepts
     except Exception as e:
         import traceback
         traceback.print_exc()
         return None, {"error": f"Failed to load concept graph: {str(e)}"}, []
-
-async def api_request(endpoint, method="GET", params=None, json_data=None):
-    """Make an API request to the server"""
-    url = f"{SERVER_URL}/api/{endpoint}"
-    headers = {"Content-Type": "application/json"}
-
-    try:
-        async with aiohttp.ClientSession() as session:
-            if method.upper() == "GET":
-                async with session.get(url, params=params, headers=headers) as response:
-                    if response.status == 200:
-                        return await response.json()
-                    else:
-                        error = await response.text()
-                        return {"error": f"API error: {response.status} - {error}"}
-            elif method.upper() == "POST":
-                async with session.post(url, json=json_data, headers=headers) as response:
-                    if response.status == 200:
-                        return await response.json()
-                    else:
-                        error = await response.text()
-                        return {"error": f"API error: {response.status} - {error}"}
-    except Exception as e:
-        return {"error": f"Request failed: {str(e)}"}
-
+
 # Create Gradio interface
 with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
     gr.Markdown("# 📚 TutorX Educational AI Platform")

@@ -224,34 +168,57 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
         gr.Markdown("## Assessment Generation")
         with gr.Row():
             with gr.Column():
-…
+                concept_input = gr.Textbox(
+                    label="Enter Concept",
+                    placeholder="e.g., Linear Equations, Photosynthesis, World War II",
+                    lines=2
                 )
-…
+                with gr.Row():
+                    diff_input = gr.Slider(
+                        minimum=1,
+                        maximum=5,
+                        value=2,
+                        step=1,
+                        label="Difficulty Level",
+                        interactive=True
+                    )
+                gen_quiz_btn = gr.Button("Generate Quiz", variant="primary")
 
             with gr.Column():
                 quiz_output = gr.JSON(label="Generated Quiz")
 
-        async def on_generate_quiz(
-…
+        async def on_generate_quiz(concept, difficulty):
+            try:
+                if not concept or not str(concept).strip():
+                    return {"error": "Please enter a concept"}
+                try:
+                    difficulty = int(float(difficulty))
+                    difficulty = max(1, min(5, difficulty))
+                except (ValueError, TypeError):
+                    difficulty = 3
+                # Map numeric difficulty to string
+                if difficulty <= 2:
+                    difficulty_str = "easy"
+                elif difficulty == 3:
+                    difficulty_str = "medium"
+                else:
+                    difficulty_str = "hard"
+                async with sse_client(SERVER_URL) as (sse, write):
+                    async with ClientSession(sse, write) as session:
+                        await session.initialize()
+                        response = await session.call_tool("generate_quiz_tool", {"concept": concept.strip(), "difficulty": difficulty_str})
+                        return response
+            except Exception as e:
+                import traceback
+                return {
+                    "error": f"Error generating quiz: {str(e)}\n\n{traceback.format_exc()}"
+                }
 
         gen_quiz_btn.click(
             fn=on_generate_quiz,
-            inputs=[
-            outputs=[quiz_output]
+            inputs=[concept_input, diff_input],
+            outputs=[quiz_output],
+            api_name="generate_quiz"
         )
 
     # Tab 2: Advanced Features

@@ -268,7 +235,11 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
             with gr.Column():
                 lesson_output = gr.JSON(label="Lesson Plan")
         async def generate_lesson_async(topic, grade, duration):
-…
+            async with sse_client(SERVER_URL) as (sse, write):
+                async with ClientSession(sse, write) as session:
+                    await session.initialize()
+                    response = await session.call_tool("generate_lesson_tool", {"topic": topic, "grade_level": grade, "duration_minutes": duration})
+                    return response
 
         gen_lesson_btn.click(
             fn=generate_lesson_async,

@@ -294,31 +265,34 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
             try:
                 # Convert display text to lowercase for the API
                 country_code = country.lower()
-…
-                formatted = {
-                    "country": response["standards"]["name"],
-                    "subjects": {},
-                    "website": response["standards"].get("website", "")
-                }
-
-                # Format subjects and domains
-                for subj_key, subj_info in response["standards"]["subjects"].items():
-                    formatted["subjects"][subj_key] = {
-                        "description": subj_info["description"],
-                        "domains": subj_info["domains"]
-                    }
-
-                # Add grade levels or key stages if available
-                if "grade_levels" in response["standards"]:
-                    formatted["grade_levels"] = response["standards"]["grade_levels"]
-                elif "key_stages" in response["standards"]:
-                    formatted["key_stages"] = response["standards"]["key_stages"]
-
-…
+                async with sse_client(SERVER_URL) as (sse, write):
+                    async with ClientSession(sse, write) as session:
+                        await session.initialize()
+                        response = await session.call_tool("get_curriculum_standards", {"country_code": country_code})
+
+                        # Format the response for better display
+                        if "standards" in response:
+                            formatted = {
+                                "country": response["standards"]["name"],
+                                "subjects": {},
+                                "website": response["standards"].get("website", "")
+                            }
+
+                            # Format subjects and domains
+                            for subj_key, subj_info in response["standards"]["subjects"].items():
+                                formatted["subjects"][subj_key] = {
+                                    "description": subj_info["description"],
+                                    "domains": subj_info["domains"]
+                                }
+
+                            # Add grade levels or key stages if available
+                            if "grade_levels" in response["standards"]:
+                                formatted["grade_levels"] = response["standards"]["grade_levels"]
+                            elif "key_stages" in response["standards"]:
+                                formatted["key_stages"] = response["standards"]["key_stages"]
+
+                            return formatted
+                        return response
             except Exception as e:
                 return {"error": f"Failed to fetch standards: {str(e)}"}

@@ -340,7 +314,11 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
             with gr.Column():
                 text_output = gr.JSON(label="Response")
         async def text_interaction_async(text):
-…
+            async with sse_client(SERVER_URL) as (sse, write):
+                async with ClientSession(sse, write) as session:
+                    await session.initialize()
+                    response = await session.call_tool("text_interaction", {"query": text, "student_id": student_id})
+                    return response
 
         text_btn.click(
             fn=text_interaction_async,

@@ -370,7 +348,11 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
             if not file_path or not os.path.exists(file_path):
                 return {"error": "File not found", "success": False}
 
-…
+            async with sse_client(SERVER_URL) as (sse, write):
+                async with ClientSession(sse, write) as session:
+                    await session.initialize()
+                    response = await session.call_tool("pdf_ocr", {"pdf_file": file_path})
+                    return response
         except Exception as e:
             return {"error": f"Error processing PDF: {str(e)}", "success": False}

@@ -402,7 +384,11 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
             plagiarism_output = gr.JSON(label="Originality Report")
 
         async def check_plagiarism_async(submission, reference):
-…
+            async with sse_client(SERVER_URL) as (sse, write):
+                async with ClientSession(sse, write) as session:
+                    await session.initialize()
+                    response = await session.call_tool("check_submission_originality", {"submission": submission, "reference_sources": reference})
+                    return response
 
         plagiarism_btn.click(
             fn=check_plagiarism_async,
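The updated app.py repeats the same connect, initialize, and call_tool sequence in every Gradio handler. The sketch below is not part of the commit; it only illustrates how that repetition could be factored into a single helper. The helper name `call_mcp_tool` and the reuse of the module-level `SERVER_URL` are assumptions.

```python
# Hypothetical helper (not in this commit): wraps the SSE connect /
# initialize / call_tool sequence that the new app.py handlers repeat.
from mcp import ClientSession
from mcp.client.sse import sse_client

SERVER_URL = "http://localhost:8000/sse"  # same endpoint the new app.py uses

async def call_mcp_tool(tool_name: str, arguments: dict):
    """Open one SSE session, call a single tool, and return its result object."""
    async with sse_client(SERVER_URL) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            return await session.call_tool(tool_name, arguments)
```

With such a helper, a handler body would reduce to something like `return await call_mcp_tool("generate_quiz_tool", {"concept": concept, "difficulty": "easy"})`.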
client.py
DELETED
@@ -1,390 +0,0 @@
"""
MCP client for interacting with the TutorX MCP server.
This module provides functions that interact with the MCP server
for use by the Gradio interface.
"""

import json
import aiohttp
import asyncio
from typing import Dict, Any, List, Optional
import base64
from datetime import datetime
import os

# Get server configuration from environment variables with defaults
DEFAULT_HOST = os.getenv("MCP_HOST", "127.0.0.1")
DEFAULT_PORT = int(os.getenv("MCP_PORT", "8001"))  # Default port updated to 8001
DEFAULT_SERVER_URL = f"http://{DEFAULT_HOST}:{DEFAULT_PORT}"

# API endpoints
API_PREFIX = "/api"

class TutorXClient:
    """Client for interacting with the TutorX MCP server"""

    def __init__(self, server_url=DEFAULT_SERVER_URL):
        self.server_url = server_url
        self.session = None

    async def _ensure_session(self):
        """Ensure aiohttp session exists"""
        if self.session is None:
            self.session = aiohttp.ClientSession(
                headers={
                    "Content-Type": "application/json",
                    "Accept": "application/json"
                }
            )

    async def _call_tool(self, tool_name: str, params: Dict[str, Any], method: str = "POST") -> Dict[str, Any]:
        """
        Call an MCP tool on the server

        Args:
            tool_name: Name of the tool to call
            params: Parameters to pass to the tool
            method: HTTP method to use (GET or POST)

        Returns:
            Tool response
        """
        await self._ensure_session()
        try:
            url = f"{self.server_url}{API_PREFIX}/{tool_name}"

            # Convert params to query string for GET requests
            if method.upper() == "GET":
                from urllib.parse import urlencode
                if params:
                    query_string = urlencode(params, doseq=True)
                    url = f"{url}?{query_string}"
                async with self.session.get(url, timeout=30) as response:
                    return await self._handle_response(response)
            else:
                async with self.session.post(url, json=params, timeout=30) as response:
                    return await self._handle_response(response)

        except Exception as e:
            return {
                "error": f"Failed to call tool: {str(e)}",
                "timestamp": datetime.now().isoformat()
            }

    async def _handle_response(self, response) -> Dict[str, Any]:
        """Handle the HTTP response"""
        if response.status == 200:
            return await response.json()
        else:
            error = await response.text()
            return {
                "error": f"API error ({response.status}): {error}",
                "timestamp": datetime.now().isoformat()
            }

    async def _get_resource(self, resource_uri: str) -> Dict[str, Any]:
        """
        Get an MCP resource from the server

        Args:
            resource_uri: URI of the resource to get

        Returns:
            Resource data
        """
        await self._ensure_session()
        try:
            # Extract the resource name from the URI (e.g., 'concept-graph' from 'concept-graph://')
            resource_name = resource_uri.split('://')[0]
            url = f"{self.server_url}{API_PREFIX}/{resource_name}"

            async with self.session.get(url, timeout=30) as response:
                if response.status == 200:
                    return await response.json()
                else:
                    error = await response.text()
                    return {
                        "error": f"Failed to get resource: {error}",
                        "timestamp": datetime.now().isoformat()
                    }
        except Exception as e:
            return {
                "error": f"Failed to get resource: {str(e)}",
                "timestamp": datetime.now().isoformat()
            }

    async def check_server_connection(self) -> bool:
        """
        Check if the server is accessible

        Returns:
            bool: True if server is accessible, False otherwise
        """
        await self._ensure_session()
        try:
            async with self.session.get(
                f"{self.server_url}{API_PREFIX}/health",
                timeout=5
            ) as response:
                return response.status == 200
        except Exception as e:
            print(f"Server connection check failed: {str(e)}")
            return False

    # ------------ Core Features ------------

    async def get_concept_graph(self, concept_id: str = None, use_mcp: bool = False) -> Dict[str, Any]:
        """
        Get the concept graph for a specific concept or all concepts.

        Args:
            concept_id: Optional ID of the concept to fetch. If None, returns all concepts.
            use_mcp: If True, uses the MCP tool interface instead of direct API call.

        Returns:
            Dict containing concept data or error information.
        """
        try:
            # Ensure we have a session
            await self._ensure_session()

            if use_mcp:
                # Use MCP tool interface
                print(f"[CLIENT] Using MCP tool to get concept graph for: {concept_id}")
                return await self._call_tool("get_concept_graph", {"concept_id": concept_id} if concept_id else {})

            # Use direct API call (default)
            url = f"{self.server_url}/api/concept_graph"
            params = {}
            if concept_id:
                params["concept_id"] = concept_id

            print(f"[CLIENT] Fetching concept graph from {url} with params: {params}")

            async with self.session.get(
                url,
                params=params,
                timeout=30
            ) as response:
                print(f"[CLIENT] Response status: {response.status}")

                if response.status == 404:
                    error_msg = f"Concept {concept_id} not found"
                    print(f"[CLIENT] {error_msg}")
                    return {"error": error_msg}

                response.raise_for_status()

                # Parse the JSON response
                result = await response.json()
                print(f"[CLIENT] Received response: {result}")

                return result

        except asyncio.TimeoutError:
            error_msg = "Request timed out"
            print(f"[CLIENT] {error_msg}")
            return {"error": error_msg}

        except aiohttp.ClientError as e:
            error_msg = f"HTTP client error: {str(e)}"
            print(f"[CLIENT] {error_msg}")
            return {"error": error_msg}

        except Exception as e:
            error_msg = f"Unexpected error: {str(e)}"
            print(f"[CLIENT] {error_msg}")
            import traceback
            traceback.print_exc()
            return {"error": error_msg}

    async def assess_skill(self, student_id: str, concept_id: str) -> Dict[str, Any]:
        """Assess a student's skill on a specific concept"""
        return await self._call_tool("assess_skill", {"student_id": student_id, "concept_id": concept_id})

    async def get_learning_path(self, student_id: str) -> Dict[str, Any]:
        """Get personalized learning path for a student"""
        return await self._get_resource(f"learning-path://{student_id}")

    async def generate_quiz(self, concept_ids: List[str], difficulty: int = 2) -> Dict[str, Any]:
        """Generate a quiz based on specified concepts and difficulty"""
        return await self._call_tool("generate_quiz", {
            "concept_ids": concept_ids,
            "difficulty": difficulty
        })

    # ------------ Advanced Features ------------

    async def analyze_cognitive_state(self, eeg_data: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze EEG data to determine cognitive state"""
        return await self._call_tool("analyze_cognitive_state", {
            "eeg_data": eeg_data
        })

    async def get_curriculum_standards(self, country_code: str) -> Dict[str, Any]:
        """Get curriculum standards for a specific country"""
        return await self._get_resource(f"curriculum-standards://{country_code}")

    async def align_content_to_standard(self, content_id: str, standard_id: str) -> Dict[str, Any]:
        """Align educational content to a specific curriculum standard"""
        return await self._call_tool("align_content_to_standard", {
            "content_id": content_id,
            "standard_id": standard_id
        })

    async def generate_lesson(self, topic: str, grade_level: int, duration_minutes: int = 45) -> Dict[str, Any]:
        """Generate a complete lesson plan on a topic"""
        return await self._call_tool("generate_lesson", {
            "topic": topic,
            "grade_level": grade_level,
            "duration_minutes": duration_minutes
        })

    # ------------ User Experience ------------

    async def get_student_dashboard(self, student_id: str) -> Dict[str, Any]:
        """Get dashboard data for a specific student"""
        return await self._get_resource(f"student-dashboard://{student_id}")

    async def get_accessibility_settings(self, student_id: str) -> Dict[str, Any]:
        """Get accessibility settings for a student"""
        return await self._call_tool("get_accessibility_settings", {
            "student_id": student_id
        })

    async def update_accessibility_settings(self, student_id: str, settings: Dict[str, Any]) -> Dict[str, Any]:
        """Update accessibility settings for a student"""
        return await self._call_tool("update_accessibility_settings", {
            "student_id": student_id,
            "settings": settings
        })

    # ------------ Multi-Modal Interaction ------------

    async def text_interaction(self, query: str, student_id: str) -> Dict[str, Any]:
        """Process a text query from the student"""
        return await self._call_tool("text_interaction", {
            "query": query,
            "student_id": student_id
        })

    async def voice_interaction(self, audio_data_base64: str, student_id: str) -> Dict[str, Any]:
        """Process voice input from the student"""
        return await self._call_tool("voice_interaction", {
            "audio_data_base64": audio_data_base64,
            "student_id": student_id
        })

    # ------------ Assessment ------------

    async def create_assessment(self, concept_ids: List[str], num_questions: int, difficulty: int = 3) -> Dict[str, Any]:
        """Create a complete assessment for given concepts"""
        return await self._call_tool("create_assessment", {
            "concept_ids": concept_ids,
            "num_questions": num_questions,
            "difficulty": difficulty
        })

    async def grade_assessment(self, assessment_id: str, student_answers: Dict[str, str], questions: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Grade a completed assessment"""
        return await self._call_tool("grade_assessment", {
            "assessment_id": assessment_id,
            "student_answers": student_answers,
            "questions": questions
        })

    async def get_student_analytics(self, student_id: str, timeframe_days: int = 30) -> Dict[str, Any]:
        """Get comprehensive analytics for a student"""
        return await self._call_tool("get_student_analytics", {
            "student_id": student_id,
            "timeframe_days": timeframe_days
        })

    async def check_submission_originality(self, submission: str, reference_sources: List[str]) -> Dict[str, Any]:
        """Check student submission for potential plagiarism"""
        return await self._call_tool("check_submission_originality", {
            "submission": submission,
            "reference_sources": reference_sources
        })

    async def pdf_ocr(self, pdf_file: str) -> Dict[str, Any]:
        """
        Extract text from a PDF file using OCR

        Args:
            pdf_file: Path to the PDF file

        Returns:
            Dictionary containing extracted text and metadata
        """
        try:
            # Read the PDF file as binary data
            with open(pdf_file, "rb") as f:
                pdf_data = f.read()

            # Convert to base64 for transmission
            pdf_base64 = base64.b64encode(pdf_data).decode('utf-8')

            # Call the server's PDF OCR endpoint
            return await self._call_tool("pdf_ocr", {
                "pdf_data": pdf_base64,
                "filename": os.path.basename(pdf_file)
            })

        except Exception as e:
            return {
                "error": f"Failed to process PDF: {str(e)}",
                "success": False,
                "timestamp": datetime.now().isoformat()
            }

    async def get_curriculum_standards(self, country_code: str = "us") -> Dict[str, Any]:
        """
        Get curriculum standards for a specific country

        Args:
            country_code: ISO country code (e.g., 'us', 'uk')

        Returns:
            Dictionary containing curriculum standards
        """
        return await self._call_tool(
            "curriculum-standards",  # Note the endpoint name matches the API route
            {"country": country_code.lower()},  # Note the parameter name matches the API
            method="GET"  # Use GET for this endpoint
        )

    async def close(self):
        """Close the aiohttp session"""
        if self.session:
            await self.session.close()
            self.session = None

    async def generate_lesson(self, topic: str, grade_level: int, duration_minutes: int) -> Dict[str, Any]:
        """
        Generate a lesson plan for the given topic, grade level, and duration

        Args:
            topic: The topic for the lesson
            grade_level: The grade level (1-12)
            duration_minutes: Duration of the lesson in minutes

        Returns:
            Dictionary containing the generated lesson plan
        """
        return await self._call_tool(
            "generate_lesson",
            {
                "topic": topic,
                "grade_level": grade_level,
                "duration_minutes": duration_minutes
            }
        )

# Create a default client instance for easy import
client = TutorXClient()
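The deleted TutorXClient exposed a REST-style health check (`check_server_connection` against `/api/health`). With the MCP session API that the new app.py uses, a liveness probe can instead be a successful initialize plus tools/list round-trip. The sketch below is not part of the commit; the function name and default URL are assumptions.

```python
# Hypothetical replacement for the deleted check_server_connection:
# treat a successful initialize + list_tools over SSE as "server reachable".
from mcp import ClientSession
from mcp.client.sse import sse_client

async def server_is_reachable(url: str = "http://localhost:8000/sse") -> bool:
    try:
        async with sse_client(url) as (read_stream, write_stream):
            async with ClientSession(read_stream, write_stream) as session:
                await session.initialize()
                await session.list_tools()
                return True
    except Exception:
        return False
```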
docs/sdk.md
CHANGED
@@ -16,33 +16,36 @@
 <!-- omit in toc -->
 ## Table of Contents
 
-…
+- [MCP Python SDK](#mcp-python-sdk)
+- [Overview](#overview)
+- [Installation](#installation)
+- [Adding MCP to your python project](#adding-mcp-to-your-python-project)
+- [Running the standalone MCP development tools](#running-the-standalone-mcp-development-tools)
+- [Quickstart](#quickstart)
+- [What is MCP?](#what-is-mcp)
+- [Core Concepts](#core-concepts)
+- [Server](#server)
+- [Resources](#resources)
+- [Tools](#tools)
+- [Prompts](#prompts)
+- [Images](#images)
+- [Context](#context)
+- [Running Your Server](#running-your-server)
+- [Development Mode](#development-mode)
+- [Claude Desktop Integration](#claude-desktop-integration)
+- [Direct Execution](#direct-execution)
+- [Mounting to an Existing ASGI Server](#mounting-to-an-existing-asgi-server)
+- [Examples](#examples)
+- [Echo Server](#echo-server)
+- [SQLite Explorer](#sqlite-explorer)
+- [Advanced Usage](#advanced-usage)
+- [Low-Level Server](#low-level-server)
+- [Writing MCP Clients](#writing-mcp-clients)
+- [MCP Primitives](#mcp-primitives)
+- [Server Capabilities](#server-capabilities)
+- [Documentation](#documentation)
+- [Contributing](#contributing)
+- [License](#license)
 
 [pypi-badge]: https://img.shields.io/pypi/v/mcp.svg
 [pypi-url]: https://pypi.org/project/mcp/

@@ -63,22 +66,31 @@ The Model Context Protocol allows applications to provide context for LLMs in a
 
 - Build MCP clients that can connect to any MCP server
 - Create MCP servers that expose resources, prompts and tools
-- Use standard transports like stdio and
+- Use standard transports like stdio, SSE, and Streamable HTTP
 - Handle all MCP protocol messages and lifecycle events
 
 ## Installation
 
 ### Adding MCP to your python project
 
 We recommend using [uv](https://docs.astral.sh/uv/) to manage your Python projects.
 
-…
-```
+If you haven't created a uv-managed project yet, create one:
+
+```bash
+uv init mcp-server-demo
+cd mcp-server-demo
+```
+
+Then add MCP to your project dependencies:
+
+```bash
+uv add "mcp[cli]"
+```
 
 Alternatively, for projects using pip for dependencies:
 ```bash
-pip install mcp
+pip install "mcp[cli]"
 ```
 
 ### Running the standalone MCP development tools

@@ -143,12 +155,12 @@ The FastMCP server is your core interface to the MCP protocol. It handles connec
 ```python
 # Add lifespan support for startup/shutdown with strong typing
 from contextlib import asynccontextmanager
+from collections.abc import AsyncIterator
 from dataclasses import dataclass
-from typing import AsyncIterator
 
 from fake_database import Database  # Replace with your actual DB type
 
-from mcp.server.fastmcp import
+from mcp.server.fastmcp import FastMCP
 
 # Create a named server
 mcp = FastMCP("My App")

@@ -180,8 +192,9 @@ mcp = FastMCP("My App", lifespan=app_lifespan)
 
 # Access type-safe lifespan context in tools
 @mcp.tool()
-def query_db(
+def query_db() -> str:
     """Tool that uses initialized resources"""
+    ctx = mcp.get_context()
     db = ctx.request_context.lifespan_context["db"]
     return db.query()
 ```

@@ -297,6 +310,48 @@ async def long_task(files: list[str], ctx: Context) -> str:
     return "Processing complete"
 ```
 
+### Authentication
+
+Authentication can be used by servers that want to expose tools accessing protected resources.
+
+`mcp.server.auth` implements an OAuth 2.0 server interface, which servers can use by
+providing an implementation of the `OAuthAuthorizationServerProvider` protocol.
+
+```python
+from mcp import FastMCP
+from mcp.server.auth.provider import OAuthAuthorizationServerProvider
+from mcp.server.auth.settings import (
+    AuthSettings,
+    ClientRegistrationOptions,
+    RevocationOptions,
+)
+
+
+class MyOAuthServerProvider(OAuthAuthorizationServerProvider):
+    # See an example on how to implement at `examples/servers/simple-auth`
+    ...
+
+
+mcp = FastMCP(
+    "My App",
+    auth_server_provider=MyOAuthServerProvider(),
+    auth=AuthSettings(
+        issuer_url="https://myapp.com",
+        revocation_options=RevocationOptions(
+            enabled=True,
+        ),
+        client_registration_options=ClientRegistrationOptions(
+            enabled=True,
+            valid_scopes=["myscope", "myotherscope"],
+            default_scopes=["myscope"],
+        ),
+        required_scopes=["myscope"],
+    ),
+)
+```
+
+See [OAuthAuthorizationServerProvider](src/mcp/server/auth/provider.py) for more details.
+
 ## Running Your Server
 
 ### Development Mode

@@ -348,13 +403,97 @@ python server.py
 mcp run server.py
 ```
 
+Note that `mcp run` or `mcp dev` only supports server using FastMCP and not the low-level server variant.
+
+### Streamable HTTP Transport
+
+> **Note**: Streamable HTTP transport is superseding SSE transport for production deployments.
+
+```python
+from mcp.server.fastmcp import FastMCP
+
+# Stateful server (maintains session state)
+mcp = FastMCP("StatefulServer")
+
+# Stateless server (no session persistence)
+mcp = FastMCP("StatelessServer", stateless_http=True)
+
+# Stateless server (no session persistence, no sse stream with supported client)
+mcp = FastMCP("StatelessServer", stateless_http=True, json_response=True)
+
+# Run server with streamable_http transport
+mcp.run(transport="streamable-http")
+```
+
+You can mount multiple FastMCP servers in a FastAPI application:
+
+```python
+# echo.py
+from mcp.server.fastmcp import FastMCP
+
+mcp = FastMCP(name="EchoServer", stateless_http=True)
+
+
+@mcp.tool(description="A simple echo tool")
+def echo(message: str) -> str:
+    return f"Echo: {message}"
+```
+
+```python
+# math.py
+from mcp.server.fastmcp import FastMCP
+
+mcp = FastMCP(name="MathServer", stateless_http=True)
+
+
+@mcp.tool(description="A simple add tool")
+def add_two(n: int) -> int:
+    return n + 2
+```
+
+```python
+# main.py
+import contextlib
+from fastapi import FastAPI
+from mcp.echo import echo
+from mcp.math import math
+
+
+# Create a combined lifespan to manage both session managers
+@contextlib.asynccontextmanager
+async def lifespan(app: FastAPI):
+    async with contextlib.AsyncExitStack() as stack:
+        await stack.enter_async_context(echo.mcp.session_manager.run())
+        await stack.enter_async_context(math.mcp.session_manager.run())
+        yield
+
+
+app = FastAPI(lifespan=lifespan)
+app.mount("/echo", echo.mcp.streamable_http_app())
+app.mount("/math", math.mcp.streamable_http_app())
+```
+
+For low level server with Streamable HTTP implementations, see:
+- Stateful server: [`examples/servers/simple-streamablehttp/`](examples/servers/simple-streamablehttp/)
+- Stateless server: [`examples/servers/simple-streamablehttp-stateless/`](examples/servers/simple-streamablehttp-stateless/)
+
+The streamable HTTP transport supports:
+- Stateful and stateless operation modes
+- Resumability with event stores
+- JSON or SSE response formats
+- Better scalability for multi-node deployments
+
 ### Mounting to an Existing ASGI Server
 
+> **Note**: SSE transport is being superseded by [Streamable HTTP transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http).
+
+By default, SSE servers are mounted at `/sse` and Streamable HTTP servers are mounted at `/mcp`. You can customize these paths using the methods described below.
+
 You can mount the SSE server to an existing ASGI server using the `sse_app` method. This allows you to integrate the SSE server with other ASGI applications.
 
 ```python
 from starlette.applications import Starlette
-from starlette.
+from starlette.routing import Mount, Host
 from mcp.server.fastmcp import FastMCP
 

@@ -371,6 +510,43 @@ app = Starlette(
 app.router.routes.append(Host('mcp.acme.corp', app=mcp.sse_app()))
 ```
 
+When mounting multiple MCP servers under different paths, you can configure the mount path in several ways:
+
+```python
+from starlette.applications import Starlette
+from starlette.routing import Mount
+from mcp.server.fastmcp import FastMCP
+
+# Create multiple MCP servers
+github_mcp = FastMCP("GitHub API")
+browser_mcp = FastMCP("Browser")
+curl_mcp = FastMCP("Curl")
+search_mcp = FastMCP("Search")
+
+# Method 1: Configure mount paths via settings (recommended for persistent configuration)
+github_mcp.settings.mount_path = "/github"
+browser_mcp.settings.mount_path = "/browser"
+
+# Method 2: Pass mount path directly to sse_app (preferred for ad-hoc mounting)
+# This approach doesn't modify the server's settings permanently
+
+# Create Starlette app with multiple mounted servers
+app = Starlette(
+    routes=[
+        # Using settings-based configuration
+        Mount("/github", app=github_mcp.sse_app()),
+        Mount("/browser", app=browser_mcp.sse_app()),
+        # Using direct mount path parameter
+        Mount("/curl", app=curl_mcp.sse_app("/curl")),
+        Mount("/search", app=search_mcp.sse_app("/search")),
+    ]
+)
+
+# Method 3: For direct execution, you can also pass the mount path to run()
+if __name__ == "__main__":
+    search_mcp.run(transport="sse", mount_path="/search")
+```
+
 For more information on mounting applications in Starlette, see the [Starlette documentation](https://www.starlette.io/routing/#submounting-routes).
 
 ## Examples

@@ -442,7 +618,7 @@ For more control, you can use the low-level server implementation directly. This
 
 ```python
 from contextlib import asynccontextmanager
-from
+from collections.abc import AsyncIterator
 
 from fake_database import Database  # Replace with your actual DB type
 

@@ -543,9 +719,11 @@ if __name__ == "__main__":
     asyncio.run(run())
 ```
 
+Caution: The `mcp run` and `mcp dev` tool doesn't support low-level server.
+
 ### Writing MCP Clients
 
-The SDK provides a high-level client interface for connecting to MCP servers:
+The SDK provides a high-level client interface for connecting to MCP servers using various [transports](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports):
 
 ```python
 from mcp import ClientSession, StdioServerParameters, types

@@ -609,6 +787,82 @@ if __name__ == "__main__":
     asyncio.run(run())
 ```
 
+Clients can also connect using [Streamable HTTP transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http):
+
+```python
+from mcp.client.streamable_http import streamablehttp_client
+from mcp import ClientSession
+
+
+async def main():
+    # Connect to a streamable HTTP server
+    async with streamablehttp_client("example/mcp") as (
+        read_stream,
+        write_stream,
+        _,
+    ):
+        # Create a session using the client streams
+        async with ClientSession(read_stream, write_stream) as session:
+            # Initialize the connection
+            await session.initialize()
+            # Call a tool
+            tool_result = await session.call_tool("echo", {"message": "hello"})
+```
+
+### OAuth Authentication for Clients
+
+The SDK includes [authorization support](https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization) for connecting to protected MCP servers:
+
+```python
+from mcp.client.auth import OAuthClientProvider, TokenStorage
+from mcp.client.session import ClientSession
+from mcp.client.streamable_http import streamablehttp_client
+from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken
+
+
+class CustomTokenStorage(TokenStorage):
+    """Simple in-memory token storage implementation."""
+
+    async def get_tokens(self) -> OAuthToken | None:
+        pass
+
+    async def set_tokens(self, tokens: OAuthToken) -> None:
+        pass
+
+    async def get_client_info(self) -> OAuthClientInformationFull | None:
+        pass
+
+    async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
+        pass
+
+
+async def main():
+    # Set up OAuth authentication
+    oauth_auth = OAuthClientProvider(
+        server_url="https://api.example.com",
+        client_metadata=OAuthClientMetadata(
+            client_name="My Client",
+            redirect_uris=["http://localhost:3000/callback"],
+            grant_types=["authorization_code", "refresh_token"],
+            response_types=["code"],
+        ),
+        storage=CustomTokenStorage(),
+        redirect_handler=lambda url: print(f"Visit: {url}"),
+        callback_handler=lambda: ("auth_code", None),
+    )
+
+    # Use with streamable HTTP client
+    async with streamablehttp_client(
+        "https://api.example.com/mcp", auth=oauth_auth
+    ) as (read, write, _):
+        async with ClientSession(read, write) as session:
+            await session.initialize()
+            # Authenticated session ready
+```
+
+For a complete working example, see [`examples/clients/simple-auth-client/`](examples/clients/simple-auth-client/).
+
 ### MCP Primitives
 
 The MCP protocol defines three core primitives that servers can implement:
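This commit also adds mcp_server/server.py and per-feature tool modules (quiz_tools.py, lesson_tools.py, ocr_tools.py, and so on) alongside the SDK documentation above. A minimal FastMCP sketch in the style of that documentation might look like the following; the server name, the tool signature, the stub return value, and the SSE transport choice are assumptions rather than the actual contents of mcp_server/server.py.

```python
# Hypothetical sketch of a FastMCP-based TutorX server entry point,
# following the FastMCP pattern shown in the SDK docs above.
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("TutorX")

@mcp.tool()
def generate_quiz_tool(concept: str, difficulty: str = "medium") -> dict:
    """Return a stub quiz payload for the given concept and difficulty."""
    return {"concept": concept, "difficulty": difficulty, "questions": []}

if __name__ == "__main__":
    # Serve over SSE so the Gradio app can connect to the /sse endpoint.
    mcp.run(transport="sse")
```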
main.py
CHANGED
@@ -1,859 +1,33 @@
 """
-TutorX MCP Server
 """
-from mcp.server.fastmcp import FastMCP
-import json
 import os
-import warnings
 import uvicorn
-from
-from datetime import datetime
-from fastapi import FastAPI, HTTPException, Query, Request
-from fastapi.responses import JSONResponse
-from fastapi.middleware.cors import CORSMiddleware
-from difflib import SequenceMatcher
-import re
-import base64
-import tempfile
-import fitz  # PyMuPDF
-import pytesseract
-from PIL import Image
-import io
-import numpy as np
-
-# Filter out the tool registration warning
-warnings.filterwarnings("ignore", message="Tool already exists")
-
-# Import utility functions
-from utils.multimodal import (
-    process_text_query,
-    process_voice_input,
-    process_handwriting,
-    generate_speech_response
-)
-from utils.assessment import (
-    generate_question,
-    evaluate_student_answer,
-    generate_performance_analytics,
-    detect_plagiarism
-)
-from typing import List, Dict, Any, Optional, Union
-import random
-from datetime import datetime, timedelta, timezone
-import json
 
 # Get server configuration from environment variables with defaults
-SERVER_HOST = os.getenv("MCP_HOST", "0.0.0.0")
-SERVER_PORT = int(os.getenv("MCP_PORT", "8001"))
-SERVER_TRANSPORT = os.getenv("MCP_TRANSPORT", "
-
-# Create FastAPI app
-api_app = FastAPI(title="TutorX MCP Server", version="1.0.0")
-
-# Add CORS middleware
-api_app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-# Create the TutorX MCP server with explicit configuration
-mcp = FastMCP(
-    "TutorX",
-    dependencies=["mcp[cli]>=1.9.3", "gradio>=4.19.0", "numpy>=1.24.0", "pillow>=10.0.0"],
-    host=SERVER_HOST,
-    port=SERVER_PORT,
-    transport=SERVER_TRANSPORT,
-    cors_origins=["*"]  # Allow CORS from any origin
-)
-
-# For FastMCP, we'll use it directly without mounting
-# as it already creates its own FastAPI app internally
-
-# ------------------ Core Features ------------------
-
-# Store the concept graph data in memory
-CONCEPT_GRAPH = {
-    "python": {
-        "id": "python",
-        "name": "Python Programming",
-        "description": "Fundamentals of Python programming language",
-        "prerequisites": [],
-        "related": ["functions", "oop", "data_structures"]
-    },
-    "functions": {
-        "id": "functions",
-        "name": "Python Functions",
-        "description": "Creating and using functions in Python",
-        "prerequisites": ["python"],
-        "related": ["decorators", "lambdas"]
-    },
-    "oop": {
-        "id": "oop",
-        "name": "Object-Oriented Programming",
-        "description": "Classes and objects in Python",
-        "prerequisites": ["python"],
-        "related": ["inheritance", "polymorphism"]
-    },
-    "data_structures": {
-        "id": "data_structures",
-        "name": "Data Structures",
-        "description": "Built-in data structures in Python",
-        "prerequisites": ["python"],
-        "related": ["algorithms"]
-    },
-    "decorators": {
-        "id": "decorators",
-        "name": "Python Decorators",
-        "description": "Function decorators in Python",
-        "prerequisites": ["functions"],
-        "related": ["python", "functions"]
-    },
-    "lambdas": {
-        "id": "lambdas",
-        "name": "Lambda Functions",
-        "description": "Anonymous functions in Python",
-        "prerequisites": ["functions"],
-        "related": ["python", "functions"]
-    },
-    "inheritance": {
-        "id": "inheritance",
123 |
-
"name": "Inheritance in OOP",
|
124 |
-
"description": "Creating class hierarchies in Python",
|
125 |
-
"prerequisites": ["oop"],
|
126 |
-
"related": ["python", "oop"]
|
127 |
-
},
|
128 |
-
"polymorphism": {
|
129 |
-
"id": "polymorphism",
|
130 |
-
"name": "Polymorphism in OOP",
|
131 |
-
"description": "Multiple forms of methods in Python",
|
132 |
-
"prerequisites": ["oop"],
|
133 |
-
"related": ["python", "oop"]
|
134 |
-
},
|
135 |
-
"algorithms": {
|
136 |
-
"id": "algorithms",
|
137 |
-
"name": "Basic Algorithms",
|
138 |
-
"description": "Common algorithms in Python",
|
139 |
-
"prerequisites": ["data_structures"],
|
140 |
-
"related": ["python", "data_structures"]
|
141 |
-
}
|
142 |
-
}
|
143 |
-
|
144 |
-
@api_app.get("/api/concept_graph")
|
145 |
-
async def api_get_concept_graph(concept_id: str = None):
|
146 |
-
"""API endpoint to get concept graph data for a specific concept or all concepts"""
|
147 |
-
if concept_id:
|
148 |
-
concept = CONCEPT_GRAPH.get(concept_id)
|
149 |
-
if not concept:
|
150 |
-
return JSONResponse(
|
151 |
-
status_code=404,
|
152 |
-
content={"error": f"Concept {concept_id} not found"}
|
153 |
-
)
|
154 |
-
return JSONResponse(content=concept)
|
155 |
-
return JSONResponse(content={"concepts": list(CONCEPT_GRAPH.values())})
|
156 |
-
|
157 |
-
@mcp.tool()
|
158 |
-
async def get_concept(concept_id: str = None) -> Dict[str, Any]:
|
159 |
-
"""MCP tool to get a specific concept or all concepts"""
|
160 |
-
if concept_id:
|
161 |
-
concept = CONCEPT_GRAPH.get(concept_id)
|
162 |
-
if not concept:
|
163 |
-
return {"error": f"Concept {concept_id} not found"}
|
164 |
-
return {"concept": concept}
|
165 |
-
return {"concepts": list(CONCEPT_GRAPH.values())}
|
166 |
-
|
167 |
-
@mcp.tool()
|
168 |
-
async def assess_skill(student_id: str, concept_id: str) -> Dict[str, Any]:
|
169 |
-
"""Assess a student's understanding of a specific concept"""
|
170 |
-
# Check if concept exists in our concept graph
|
171 |
-
concept_data = await get_concept(concept_id)
|
172 |
-
if isinstance(concept_data, dict) and "error" in concept_data:
|
173 |
-
return {"error": f"Cannot assess skill: {concept_data['error']}"}
|
174 |
-
|
175 |
-
# Get concept name, handling both direct dict and concept graph response
|
176 |
-
if isinstance(concept_data, dict) and "concept" in concept_data:
|
177 |
-
concept_name = concept_data["concept"].get("name", concept_id)
|
178 |
-
elif isinstance(concept_data, dict) and "name" in concept_data:
|
179 |
-
concept_name = concept_data["name"]
|
180 |
-
else:
|
181 |
-
concept_name = concept_id
|
182 |
-
|
183 |
-
# Generate a score based on concept difficulty or random
|
184 |
-
score = random.uniform(0.2, 1.0) # Random score between 0.2 and 1.0
|
185 |
-
|
186 |
-
# Set timestamp with timezone
|
187 |
-
timestamp = datetime.now(timezone.utc).isoformat()
|
188 |
-
|
189 |
-
# Generate feedback based on score
|
190 |
-
feedback = {
|
191 |
-
"strengths": [f"Good understanding of {concept_name} fundamentals"],
|
192 |
-
"areas_for_improvement": [f"Could work on advanced applications of {concept_name}"],
|
193 |
-
"recommendations": [
|
194 |
-
f"Review {concept_name} practice problems",
|
195 |
-
f"Watch tutorial videos on {concept_name}"
|
196 |
-
]
|
197 |
-
}
|
198 |
-
|
199 |
-
# Adjust feedback based on score
|
200 |
-
if score < 0.5:
|
201 |
-
feedback["strengths"] = [f"Basic understanding of {concept_name}"]
|
202 |
-
feedback["areas_for_improvement"].append("Needs to review fundamental concepts")
|
203 |
-
elif score > 0.8:
|
204 |
-
feedback["strengths"].append(f"Excellent grasp of {concept_name} concepts")
|
205 |
-
feedback["recommendations"].append("Try more advanced problems")
|
206 |
-
|
207 |
-
# Create assessment response
|
208 |
-
assessment = {
|
209 |
-
"student_id": student_id,
|
210 |
-
"concept_id": concept_id,
|
211 |
-
"concept_name": concept_name,
|
212 |
-
"score": round(score, 2), # Round to 2 decimal places
|
213 |
-
"timestamp": timestamp,
|
214 |
-
"feedback": feedback
|
215 |
-
}
|
216 |
-
return assessment
|
217 |
-
|
218 |
-
@mcp.resource("concept-graph://")
|
219 |
-
async def get_concept_graph_resource() -> Dict[str, Any]:
|
220 |
-
"""Get the full knowledge concept graph"""
|
221 |
-
return {
|
222 |
-
"nodes": [
|
223 |
-
{"id": "python", "name": "Python Basics", "difficulty": 1, "type": "foundation"},
|
224 |
-
{"id": "functions", "name": "Functions", "difficulty": 2, "type": "concept"},
|
225 |
-
{"id": "oop", "name": "OOP in Python", "difficulty": 3, "type": "paradigm"},
|
226 |
-
{"id": "data_structures", "name": "Data Structures", "difficulty": 2, "type": "concept"},
|
227 |
-
{"id": "decorators", "name": "Decorators", "difficulty": 4, "type": "advanced"},
|
228 |
-
{"id": "lambdas", "name": "Lambda Functions", "difficulty": 2, "type": "concept"},
|
229 |
-
{"id": "inheritance", "name": "Inheritance", "difficulty": 3, "type": "oop"},
|
230 |
-
{"id": "polymorphism", "name": "Polymorphism", "difficulty": 3, "type": "oop"},
|
231 |
-
{"id": "algorithms", "name": "Algorithms", "difficulty": 3, "type": "concept"}
|
232 |
-
],
|
233 |
-
"edges": [
|
234 |
-
{"from": "python", "to": "functions", "weight": 0.9},
|
235 |
-
{"from": "python", "to": "oop", "weight": 0.8},
|
236 |
-
{"from": "python", "to": "data_structures", "weight": 0.9},
|
237 |
-
{"from": "functions", "to": "decorators", "weight": 0.8},
|
238 |
-
{"from": "functions", "to": "lambdas", "weight": 0.7},
|
239 |
-
{"from": "oop", "to": "inheritance", "weight": 0.9},
|
240 |
-
{"from": "oop", "to": "polymorphism", "weight": 0.8},
|
241 |
-
{"from": "data_structures", "to": "algorithms", "weight": 0.9}
|
242 |
-
]
|
243 |
-
}
|
244 |
-
|
245 |
-
@mcp.resource("learning-path://{student_id}")
|
246 |
-
async def get_learning_path(student_id: str) -> Dict[str, Any]:
|
247 |
-
"""Get personalized learning path for a student"""
|
248 |
-
return {
|
249 |
-
"student_id": student_id,
|
250 |
-
"current_concepts": ["math_algebra_linear_equations"]
|
251 |
-
}
|
252 |
-
|
253 |
-
# Lesson Generation
|
254 |
-
@mcp.tool()
|
255 |
-
async def generate_lesson(topic: str, grade_level: int, duration_minutes: int) -> Dict[str, Any]:
|
256 |
-
"""
|
257 |
-
Generate a lesson plan for the given topic, grade level, and duration
|
258 |
-
|
259 |
-
Args:
|
260 |
-
topic: The topic for the lesson
|
261 |
-
grade_level: The grade level (1-12)
|
262 |
-
duration_minutes: Duration of the lesson in minutes
|
263 |
-
|
264 |
-
Returns:
|
265 |
-
Dictionary containing the generated lesson plan
|
266 |
-
"""
|
267 |
-
# In a real implementation, this would generate a lesson plan using an LLM
|
268 |
-
# For now, we'll return a mock lesson plan
|
269 |
-
return {
|
270 |
-
"lesson_id": f"lesson_{int(datetime.utcnow().timestamp())}",
|
271 |
-
"topic": topic,
|
272 |
-
"grade_level": grade_level,
|
273 |
-
"duration_minutes": duration_minutes,
|
274 |
-
"objectives": [
|
275 |
-
f"Understand the key concepts of {topic}",
|
276 |
-
f"Apply {topic} to solve problems",
|
277 |
-
f"Analyze examples of {topic} in real-world contexts"
|
278 |
-
],
|
279 |
-
"materials": ["Whiteboard", "Markers", "Printed worksheets"],
|
280 |
-
"activities": [
|
281 |
-
{
|
282 |
-
"name": "Introduction",
|
283 |
-
"duration": 5,
|
284 |
-
"description": f"Brief introduction to {topic} and its importance"
|
285 |
-
},
|
286 |
-
{
|
287 |
-
"name": "Direct Instruction",
|
288 |
-
"duration": 15,
|
289 |
-
"description": f"Explain the main concepts of {topic} with examples"
|
290 |
-
},
|
291 |
-
{
|
292 |
-
"name": "Guided Practice",
|
293 |
-
"duration": 15,
|
294 |
-
"description": "Work through example problems together"
|
295 |
-
},
|
296 |
-
{
|
297 |
-
"name": "Independent Practice",
|
298 |
-
"duration": 10,
|
299 |
-
"description": "Students work on problems independently"
|
300 |
-
}
|
301 |
-
],
|
302 |
-
"assessment": {
|
303 |
-
"type": "formative",
|
304 |
-
"description": "Exit ticket with 2-3 problems related to the lesson"
|
305 |
-
},
|
306 |
-
"timestamp": datetime.utcnow().isoformat()
|
307 |
-
}
|
308 |
-
|
309 |
-
# Assessment Suite
|
310 |
-
@mcp.tool()
|
311 |
-
async def generate_quiz(concept_ids: List[str], difficulty: int = 2) -> Dict[str, Any]:
|
312 |
-
"""
|
313 |
-
Generate a quiz based on specified concepts and difficulty
|
314 |
-
|
315 |
-
Args:
|
316 |
-
concept_ids: List of concept IDs to include in the quiz
|
317 |
-
difficulty: Difficulty level from 1-5
|
318 |
-
|
319 |
-
Returns:
|
320 |
-
Quiz object with questions and answers
|
321 |
-
"""
|
322 |
-
# In a real implementation, this would generate questions based on the concepts
|
323 |
-
# For now, we'll return a mock quiz
|
324 |
-
questions = []
|
325 |
-
for i, concept_id in enumerate(concept_ids[:5]): # Limit to 5 questions max
|
326 |
-
concept = CONCEPT_GRAPH.get(concept_id, {"name": f"Concept {concept_id}"})
|
327 |
-
questions.append({
|
328 |
-
"id": f"q{i+1}",
|
329 |
-
"concept_id": concept_id,
|
330 |
-
"concept_name": concept.get("name", f"Concept {concept_id}"),
|
331 |
-
"question": f"Sample question about {concept.get('name', concept_id)}?",
|
332 |
-
"options": ["Option 1", "Option 2", "Option 3", "Option 4"],
|
333 |
-
"correct_answer": random.randint(0, 3), # Random correct answer index
|
334 |
-
"difficulty": min(max(1, difficulty), 5), # Clamp difficulty between 1-5
|
335 |
-
"explanation": f"This is an explanation for the question about {concept.get('name', concept_id)}"
|
336 |
-
})
|
337 |
-
|
338 |
-
return {
|
339 |
-
"quiz_id": f"quiz_{int(datetime.utcnow().timestamp())}",
|
340 |
-
"concept_ids": concept_ids,
|
341 |
-
"difficulty": difficulty,
|
342 |
-
"questions": questions,
|
343 |
-
"timestamp": datetime.utcnow().isoformat()
|
344 |
-
}
|
345 |
-
|
346 |
-
|
347 |
-
|
348 |
-
# API Endpoints
|
349 |
-
@api_app.get("/api/health")
|
350 |
-
async def health_check():
|
351 |
-
return {"status": "ok", "timestamp": datetime.now().isoformat()}
|
352 |
-
|
353 |
-
@api_app.get("/api/assess_skill")
|
354 |
-
async def assess_skill_api(
|
355 |
-
request: Request,
|
356 |
-
student_id: Optional[str] = Query(None, description="Student ID"),
|
357 |
-
concept_id: Optional[str] = Query(None, description="Concept ID to assess")
|
358 |
-
):
|
359 |
-
"""
|
360 |
-
Assess a student's understanding of a specific concept
|
361 |
-
|
362 |
-
Args:
|
363 |
-
student_id: Student's unique identifier
|
364 |
-
concept_id: Concept ID to assess
|
365 |
-
|
366 |
-
Returns:
|
367 |
-
Assessment results with score and feedback
|
368 |
-
"""
|
369 |
-
try:
|
370 |
-
# Get query parameters
|
371 |
-
params = dict(request.query_params)
|
372 |
-
|
373 |
-
# Check for required parameters
|
374 |
-
if not student_id or not concept_id:
|
375 |
-
raise HTTPException(
|
376 |
-
status_code=400,
|
377 |
-
detail="Both student_id and concept_id are required parameters"
|
378 |
-
)
|
379 |
-
|
380 |
-
# Call the assess_skill function
|
381 |
-
result = await assess_skill(student_id, concept_id)
|
382 |
-
|
383 |
-
# Handle error responses
|
384 |
-
if isinstance(result, dict) and "error" in result:
|
385 |
-
raise HTTPException(status_code=404, detail=result["error"])
|
386 |
-
|
387 |
-
return result
|
388 |
-
|
389 |
-
except HTTPException as http_err:
|
390 |
-
# Re-raise HTTP exceptions as is
|
391 |
-
raise http_err
|
392 |
-
except Exception as e:
|
393 |
-
# Log the error for debugging
|
394 |
-
print(f"Error in assess_skill_api: {str(e)}")
|
395 |
-
import traceback
|
396 |
-
traceback.print_exc()
|
397 |
-
|
398 |
-
# Return a user-friendly error message
|
399 |
-
raise HTTPException(
|
400 |
-
status_code=500,
|
401 |
-
detail=f"An error occurred while processing your request: {str(e)}"
|
402 |
-
)
|
403 |
-
|
404 |
-
@api_app.post("/api/generate_lesson")
|
405 |
-
async def generate_lesson_api(request: Dict[str, Any]):
|
406 |
-
"""
|
407 |
-
Generate a lesson plan based on the provided parameters
|
408 |
-
|
409 |
-
Expected request format:
|
410 |
-
{
|
411 |
-
"topic": "Lesson Topic",
|
412 |
-
"grade_level": 9, # 1-12
|
413 |
-
"duration_minutes": 45
|
414 |
-
}
|
415 |
-
"""
|
416 |
-
try:
|
417 |
-
# Validate request
|
418 |
-
if not isinstance(request, dict):
|
419 |
-
raise HTTPException(
|
420 |
-
status_code=400,
|
421 |
-
detail="Request must be a JSON object"
|
422 |
-
)
|
423 |
-
|
424 |
-
# Get parameters with validation
|
425 |
-
topic = request.get("topic")
|
426 |
-
if not topic or not isinstance(topic, str):
|
427 |
-
raise HTTPException(
|
428 |
-
status_code=400,
|
429 |
-
detail="Topic is required and must be a string"
|
430 |
-
)
|
431 |
-
|
432 |
-
grade_level = request.get("grade_level")
|
433 |
-
if not isinstance(grade_level, int) or not (1 <= grade_level <= 12):
|
434 |
-
raise HTTPException(
|
435 |
-
status_code=400,
|
436 |
-
detail="Grade level must be an integer between 1 and 12"
|
437 |
-
)
|
438 |
-
|
439 |
-
duration_minutes = request.get("duration_minutes")
|
440 |
-
if not isinstance(duration_minutes, (int, float)) or duration_minutes <= 0:
|
441 |
-
raise HTTPException(
|
442 |
-
status_code=400,
|
443 |
-
detail="Duration must be a positive number"
|
444 |
-
)
|
445 |
-
|
446 |
-
# Generate the lesson plan
|
447 |
-
result = await generate_lesson(topic, grade_level, int(duration_minutes))
|
448 |
-
return result
|
449 |
-
|
450 |
-
except HTTPException:
|
451 |
-
raise
|
452 |
-
except Exception as e:
|
453 |
-
raise HTTPException(status_code=500, detail=f"Failed to generate lesson: {str(e)}")
|
454 |
-
|
455 |
-
@api_app.post("/api/generate_quiz")
|
456 |
-
async def generate_quiz_api(request: Dict[str, Any]):
|
457 |
-
"""
|
458 |
-
Generate a quiz based on specified concepts and difficulty
|
459 |
-
|
460 |
-
Expected request format:
|
461 |
-
{
|
462 |
-
"concept_ids": ["concept1", "concept2", ...],
|
463 |
-
"difficulty": 2 # Optional, default is 2
|
464 |
-
}
|
465 |
-
"""
|
466 |
-
try:
|
467 |
-
# Validate request
|
468 |
-
if not isinstance(request, dict) or "concept_ids" not in request:
|
469 |
-
raise HTTPException(
|
470 |
-
status_code=400,
|
471 |
-
detail="Request must be a JSON object with 'concept_ids' key"
|
472 |
-
)
|
473 |
-
|
474 |
-
# Get parameters with defaults
|
475 |
-
concept_ids = request.get("concept_ids", [])
|
476 |
-
difficulty = request.get("difficulty", 2)
|
477 |
-
|
478 |
-
# Validate types
|
479 |
-
if not isinstance(concept_ids, list):
|
480 |
-
concept_ids = [concept_ids] # Convert single concept to list
|
481 |
-
|
482 |
-
if not all(isinstance(cid, str) for cid in concept_ids):
|
483 |
-
raise HTTPException(
|
484 |
-
status_code=400,
|
485 |
-
detail="All concept IDs must be strings"
|
486 |
-
)
|
487 |
-
|
488 |
-
difficulty = int(difficulty) # Ensure difficulty is an integer
|
489 |
-
|
490 |
-
# Generate the quiz
|
491 |
-
result = await generate_quiz(concept_ids, difficulty)
|
492 |
-
return result
|
493 |
-
|
494 |
-
except HTTPException:
|
495 |
-
raise
|
496 |
-
except Exception as e:
|
497 |
-
raise HTTPException(status_code=500, detail=f"Failed to generate quiz: {str(e)}")
|
498 |
-
|
499 |
-
@mcp.tool()
|
500 |
-
async def get_curriculum_standards(country_code: str = "us") -> Dict[str, Any]:
|
501 |
-
"""
|
502 |
-
Get curriculum standards for a specific country
|
503 |
-
|
504 |
-
Args:
|
505 |
-
country_code: ISO country code (e.g., 'us', 'uk')
|
506 |
-
|
507 |
-
Returns:
|
508 |
-
Dictionary containing curriculum standards
|
509 |
-
"""
|
510 |
-
# Mock data - in a real implementation, this would come from a database or external API
|
511 |
-
standards = {
|
512 |
-
"us": {
|
513 |
-
"name": "Common Core State Standards (US)",
|
514 |
-
"subjects": {
|
515 |
-
"math": {
|
516 |
-
"description": "Mathematics standards focusing on conceptual understanding, procedural skills, and problem solving",
|
517 |
-
"domains": ["Number & Operations", "Algebra", "Geometry", "Statistics & Probability"]
|
518 |
-
},
|
519 |
-
"ela": {
|
520 |
-
"description": "English Language Arts standards for reading, writing, speaking, and listening",
|
521 |
-
"domains": ["Reading", "Writing", "Speaking & Listening", "Language"]
|
522 |
-
}
|
523 |
-
},
|
524 |
-
"grade_levels": list(range(1, 13)),
|
525 |
-
"website": "http://www.corestandards.org"
|
526 |
-
},
|
527 |
-
"uk": {
|
528 |
-
"name": "National Curriculum (UK)",
|
529 |
-
"subjects": {
|
530 |
-
"maths": {
|
531 |
-
"description": "Mathematics programme of study for key stages 1-4",
|
532 |
-
"domains": ["Number", "Algebra", "Ratio & Proportion", "Geometry", "Statistics"]
|
533 |
-
},
|
534 |
-
"english": {
|
535 |
-
"description": "English programme of study for key stages 1-4",
|
536 |
-
"domains": ["Reading", "Writing", "Grammar & Vocabulary", "Spoken English"]
|
537 |
-
}
|
538 |
-
},
|
539 |
-
"key_stages": ["KS1 (5-7)", "KS2 (7-11)", "KS3 (11-14)", "KS4 (14-16)"],
|
540 |
-
"website": "https://www.gov.uk/government/collections/national-curriculum"
|
541 |
-
}
|
542 |
-
}
|
543 |
-
|
544 |
-
# Default to US standards if country not found
|
545 |
-
country_code = country_code.lower()
|
546 |
-
if country_code not in standards:
|
547 |
-
country_code = "us"
|
548 |
-
|
549 |
-
return {
|
550 |
-
"country_code": country_code,
|
551 |
-
"standards": standards[country_code],
|
552 |
-
"timestamp": datetime.utcnow().isoformat()
|
553 |
-
}
|
554 |
-
|
555 |
-
@api_app.get("/api/curriculum-standards")
|
556 |
-
async def get_curriculum_standards_api(country: str = "us"):
|
557 |
-
"""
|
558 |
-
Get curriculum standards for a specific country
|
559 |
-
|
560 |
-
Args:
|
561 |
-
country: ISO country code (e.g., 'us', 'uk')
|
562 |
-
|
563 |
-
Returns:
|
564 |
-
Dictionary containing curriculum standards
|
565 |
-
"""
|
566 |
-
try:
|
567 |
-
# Validate country code
|
568 |
-
if not isinstance(country, str) or len(country) != 2:
|
569 |
-
raise HTTPException(
|
570 |
-
status_code=400,
|
571 |
-
detail="Country code must be a 2-letter ISO code"
|
572 |
-
)
|
573 |
-
|
574 |
-
# Get the standards
|
575 |
-
result = await get_curriculum_standards(country)
|
576 |
-
return result
|
577 |
-
|
578 |
-
except HTTPException:
|
579 |
-
raise
|
580 |
-
except Exception as e:
|
581 |
-
raise HTTPException(
|
582 |
-
status_code=500,
|
583 |
-
detail=f"Failed to fetch curriculum standards: {str(e)}"
|
584 |
-
)
|
585 |
-
|
586 |
-
@mcp.tool()
|
587 |
-
async def text_interaction(query: str, student_id: str) -> Dict[str, Any]:
|
588 |
-
"""
|
589 |
-
Process a text query from a student and provide an educational response
|
590 |
-
|
591 |
-
Args:
|
592 |
-
query: The student's question or input text
|
593 |
-
student_id: Unique identifier for the student
|
594 |
-
|
595 |
-
Returns:
|
596 |
-
Dictionary containing the response and metadata
|
597 |
-
"""
|
598 |
-
# In a real implementation, this would use an LLM to generate a response
|
599 |
-
# For now, we'll return a mock response
|
600 |
-
responses = {
|
601 |
-
"how do i solve a quadratic equation?": {
|
602 |
-
"response": "To solve a quadratic equation in the form ax² + bx + c = 0, you can use the quadratic formula: x = [-b ± √(b² - 4ac)] / (2a). First, identify the coefficients a, b, and c from your equation. Then plug them into the formula and simplify.",
|
603 |
-
"related_concepts": ["quadratic_equations", "algebra"],
|
604 |
-
"difficulty": "intermediate"
|
605 |
-
},
|
606 |
-
"what is photosynthesis?": {
|
607 |
-
"response": "Photosynthesis is the process by which green plants, algae, and some bacteria convert light energy, usually from the sun, into chemical energy. The overall reaction can be summarized as: 6CO₂ + 6H₂O + light energy → C₆H₁₂O₆ + 6O₂. This process occurs in the chloroplasts of plant cells.",
|
608 |
-
"related_concepts": ["biology", "plant_biology", "cellular_processes"],
|
609 |
-
"difficulty": "beginner"
|
610 |
-
},
|
611 |
-
"explain newton's laws of motion": {
|
612 |
-
"response": "Newton's three laws of motion are fundamental principles of physics:\\n\\n1. First Law (Inertia): An object at rest stays at rest, and an object in motion stays in motion at constant velocity unless acted upon by an external force.\\n2. Second Law: The acceleration of an object is directly proportional to the net force acting on it and inversely proportional to its mass (F=ma).\\n3. Third Law: For every action, there is an equal and opposite reaction.",
|
613 |
-
"related_concepts": ["physics", "mechanics", "newtonian_physics"],
|
614 |
-
"difficulty": "intermediate"
|
615 |
-
}
|
616 |
-
}
|
617 |
-
|
618 |
-
# Convert query to lowercase for case-insensitive matching
|
619 |
-
query_lower = query.lower()
|
620 |
-
|
621 |
-
# Check if we have a predefined response
|
622 |
-
if query_lower in responses:
|
623 |
-
response = responses[query_lower]
|
624 |
-
else:
|
625 |
-
# Default response for unknown queries
|
626 |
-
response = {
|
627 |
-
"response": f"I'm sorry, I don't have a specific response for that question. Could you rephrase or ask about something else?\\n\\nYour question was: {query}",
|
628 |
-
"related_concepts": [],
|
629 |
-
"difficulty": "unknown"
|
630 |
-
}
|
631 |
-
|
632 |
-
return {
|
633 |
-
"query": query,
|
634 |
-
"student_id": student_id,
|
635 |
-
"timestamp": datetime.utcnow().isoformat(),
|
636 |
-
**response
|
637 |
-
}
|
638 |
-
|
639 |
-
# Add API endpoint for text interaction
|
640 |
-
@api_app.post("/api/text_interaction")
|
641 |
-
async def api_text_interaction(request: Dict[str, Any]):
|
642 |
-
"""
|
643 |
-
Handle text interaction requests from the client
|
644 |
-
|
645 |
-
Expected request format:
|
646 |
-
{
|
647 |
-
"query": "user's question",
|
648 |
-
"student_id": "student_12345"
|
649 |
-
}
|
650 |
-
"""
|
651 |
-
try:
|
652 |
-
# Validate request
|
653 |
-
if not isinstance(request, dict) or "query" not in request:
|
654 |
-
raise HTTPException(
|
655 |
-
status_code=400,
|
656 |
-
detail="Request must be a JSON object with 'query' key"
|
657 |
-
)
|
658 |
-
|
659 |
-
# Get parameters
|
660 |
-
query = request.get("query", "")
|
661 |
-
student_id = request.get("student_id", "anonymous")
|
662 |
-
|
663 |
-
# Process the query
|
664 |
-
result = await text_interaction(query, student_id)
|
665 |
-
return result
|
666 |
-
|
667 |
-
except HTTPException:
|
668 |
-
raise
|
669 |
-
except Exception as e:
|
670 |
-
raise HTTPException(status_code=500, detail=f"Failed to process text interaction: {str(e)}")
|
671 |
-
|
672 |
-
@api_app.post("/api/check_submission_originality")
|
673 |
-
async def check_submission_originality(request: Dict[str, Any]) -> Dict[str, Any]:
|
674 |
-
"""
|
675 |
-
Check a student's submission for potential plagiarism against reference sources.
|
676 |
-
|
677 |
-
Args:
|
678 |
-
request: Dictionary containing:
|
679 |
-
- submission: The student's submission text
|
680 |
-
- reference_sources: List of reference texts to check against
|
681 |
-
|
682 |
-
Returns:
|
683 |
-
Dictionary with originality analysis results
|
684 |
-
"""
|
685 |
-
submission = request.get("submission", "")
|
686 |
-
reference_sources = request.get("reference_sources", [])
|
687 |
-
|
688 |
-
if not submission or not reference_sources:
|
689 |
-
return {
|
690 |
-
"error": "Both submission and reference_sources are required",
|
691 |
-
"score": 0.0,
|
692 |
-
"is_original": False
|
693 |
-
}
|
694 |
-
|
695 |
-
def calculate_similarity(text1: str, text2: str) -> float:
|
696 |
-
"""Calculate similarity between two texts (0.0 to 1.0)"""
|
697 |
-
# Simple similarity using SequenceMatcher
|
698 |
-
return SequenceMatcher(None, text1.lower(), text2.lower()).ratio()
|
699 |
-
|
700 |
-
# Clean and preprocess texts
|
701 |
-
def preprocess(text: str) -> str:
|
702 |
-
# Remove extra whitespace and normalize
|
703 |
-
text = re.sub(r'\s+', ' ', text).strip()
|
704 |
-
# Remove common words and punctuation for better matching
|
705 |
-
common_words = {'the', 'a', 'an', 'and', 'or', 'but', 'is', 'are', 'was', 'were'}
|
706 |
-
words = [word for word in re.findall(r'\w+', text.lower()) if word not in common_words]
|
707 |
-
return ' '.join(words)
|
708 |
-
|
709 |
-
# Calculate similarity scores against all references
|
710 |
-
preprocessed_submission = preprocess(submission)
|
711 |
-
matches = []
|
712 |
-
|
713 |
-
for i, ref in enumerate(reference_sources):
|
714 |
-
if not ref:
|
715 |
-
continue
|
716 |
-
|
717 |
-
preprocessed_ref = preprocess(ref)
|
718 |
-
similarity = calculate_similarity(preprocessed_submission, preprocessed_ref)
|
719 |
-
|
720 |
-
matches.append({
|
721 |
-
"reference_index": i,
|
722 |
-
"similarity_score": round(similarity, 4),
|
723 |
-
"is_potential_plagiarism": similarity > 0.7 # Threshold can be adjusted
|
724 |
-
})
|
725 |
-
|
726 |
-
# Calculate overall originality score (1.0 - max similarity)
|
727 |
-
max_similarity = max((m["similarity_score"] for m in matches), default=0.0)
|
728 |
-
originality_score = 1.0 - max_similarity
|
729 |
-
|
730 |
-
# Basic plagiarism detection
|
731 |
-
is_original = all(m["similarity_score"] < 0.7 for m in matches)
|
732 |
-
|
733 |
-
return {
|
734 |
-
"submission_length": len(submission),
|
735 |
-
"reference_count": len(reference_sources),
|
736 |
-
"originality_score": round(originality_score, 4),
|
737 |
-
"is_original": is_original,
|
738 |
-
"matches": matches,
|
739 |
-
"analysis": {
|
740 |
-
"similarity_threshold": 0.7,
|
741 |
-
"detection_method": "text_similarity"
|
742 |
-
}
|
743 |
-
}
|
744 |
-
|
745 |
-
@api_app.post("/api/pdf_ocr")
|
746 |
-
async def pdf_ocr(request: Dict[str, Any]) -> Dict[str, Any]:
|
747 |
-
"""
|
748 |
-
Extract text from a PDF file using OCR
|
749 |
-
|
750 |
-
Args:
|
751 |
-
request: Dictionary containing:
|
752 |
-
- pdf_data: Base64 encoded PDF data
|
753 |
-
- filename: Original filename (for reference)
|
754 |
-
|
755 |
-
Returns:
|
756 |
-
Dictionary containing extracted text and metadata
|
757 |
-
"""
|
758 |
-
try:
|
759 |
-
# Get the base64 encoded PDF data
|
760 |
-
pdf_base64 = request.get("pdf_data")
|
761 |
-
if not pdf_base64:
|
762 |
-
return {"error": "No PDF data provided", "success": False}
|
763 |
-
|
764 |
-
# Decode the base64 data
|
765 |
-
pdf_bytes = base64.b64decode(pdf_base64)
|
766 |
-
|
767 |
-
# Create a temporary file to store the PDF
|
768 |
-
with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_pdf:
|
769 |
-
temp_pdf.write(pdf_bytes)
|
770 |
-
temp_pdf_path = temp_pdf.name
|
771 |
-
|
772 |
-
try:
|
773 |
-
# Extract text using PyMuPDF
|
774 |
-
text_content = []
|
775 |
-
image_pages = []
|
776 |
-
|
777 |
-
# Open the PDF
|
778 |
-
doc = fitz.open(temp_pdf_path)
|
779 |
-
|
780 |
-
# Extract text from each page
|
781 |
-
for page_num in range(len(doc)):
|
782 |
-
page = doc.load_page(page_num)
|
783 |
-
|
784 |
-
# First try to extract text directly
|
785 |
-
page_text = page.get_text()
|
786 |
-
if page_text.strip():
|
787 |
-
text_content.append(page_text)
|
788 |
-
else:
|
789 |
-
# If no text found, try OCR on the page image
|
790 |
-
pix = page.get_pixmap()
|
791 |
-
img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
|
792 |
-
image_pages.append(img)
|
793 |
-
|
794 |
-
# Close the document
|
795 |
-
doc.close()
|
796 |
-
|
797 |
-
# If we have images to OCR, process them
|
798 |
-
if image_pages:
|
799 |
-
for img in image_pages:
|
800 |
-
# Convert to grayscale for better OCR
|
801 |
-
img_gray = img.convert('L')
|
802 |
-
# Use pytesseract to do OCR on the image
|
803 |
-
text = pytesseract.image_to_string(img_gray)
|
804 |
-
if text.strip():
|
805 |
-
text_content.append(text)
|
806 |
-
|
807 |
-
# Combine all text
|
808 |
-
full_text = "\n\n".join(text_content).strip()
|
809 |
-
|
810 |
-
# Generate a summary (this is a placeholder - you might want to use an LLM for better summarization)
|
811 |
-
summary = "\n".join([line for line in full_text.split('\n') if line.strip()][:10]) + "..."
|
812 |
-
|
813 |
-
return {
|
814 |
-
"success": True,
|
815 |
-
"filename": request.get("filename", "document.pdf"),
|
816 |
-
"page_count": len(doc),
|
817 |
-
"text": full_text,
|
818 |
-
"summary": summary,
|
819 |
-
"has_ocr_applied": len(image_pages) > 0,
|
820 |
-
"ocr_page_count": len(image_pages)
|
821 |
-
}
|
822 |
-
|
823 |
-
finally:
|
824 |
-
# Clean up the temporary file
|
825 |
-
try:
|
826 |
-
os.unlink(temp_pdf_path)
|
827 |
-
except:
|
828 |
-
pass
|
829 |
-
|
830 |
-
except Exception as e:
|
831 |
-
import traceback
|
832 |
-
traceback.print_exc()
|
833 |
-
return {
|
834 |
-
"error": f"Failed to process PDF: {str(e)}",
|
835 |
-
"success": False
|
836 |
-
}
|
837 |
-
|
838 |
-
# Mount MCP app to /mcp path
|
839 |
-
mcp.app = api_app
|
840 |
|
841 |
def run_server():
|
842 |
-
"""Run the MCP server with configured
|
843 |
-
print(f"Starting TutorX MCP
|
844 |
-
|
845 |
-
|
846 |
-
|
847 |
-
|
848 |
-
|
849 |
-
|
850 |
-
|
851 |
-
|
852 |
-
|
853 |
-
|
854 |
-
|
855 |
-
|
856 |
-
raise
|
857 |
|
858 |
if __name__ == "__main__":
|
859 |
run_server()
|
1 |
"""
|
2 |
+
TutorX MCP Server - Main Entry Point
|
3 |
+
|
4 |
+
This is the main entry point for the TutorX MCP server.
|
5 |
+
It imports and runs the FastAPI application from the mcp_server package.
|
6 |
"""
|
|
7 |
import os
|
8 |
import uvicorn
|
9 |
+
from mcp_server import api_app, mcp
|
10 |
|
11 |
# Get server configuration from environment variables with defaults
|
12 |
+
SERVER_HOST = os.getenv("MCP_HOST", "0.0.0.0")
|
13 |
+
SERVER_PORT = int(os.getenv("MCP_PORT", "8001"))
|
14 |
+
SERVER_TRANSPORT = os.getenv("MCP_TRANSPORT", "sse")
|
15 |
|
16 |
def run_server():
|
17 |
+
"""Run the MCP server with the configured settings."""
|
18 |
+
print(f"Starting TutorX MCP server on {SERVER_HOST}:{SERVER_PORT}...")
|
19 |
+
print(f"MCP transport: {SERVER_TRANSPORT}")
|
20 |
+
print(f"API docs: http://{SERVER_HOST}:{SERVER_PORT}/docs")
|
21 |
+
print(f"MCP endpoint: http://{SERVER_HOST}:{SERVER_PORT}/mcp")
|
22 |
+
|
23 |
+
# Configure uvicorn to run the FastAPI app
|
24 |
+
uvicorn.run(
|
25 |
+
"server:api_app",
|
26 |
+
host=SERVER_HOST,
|
27 |
+
port=SERVER_PORT,
|
28 |
+
reload=True,
|
29 |
+
log_level="info"
|
30 |
+
)
|
31 |
|
32 |
if __name__ == "__main__":
|
33 |
run_server()
|
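The entry point above is configured entirely through the `MCP_HOST`, `MCP_PORT` and `MCP_TRANSPORT` environment variables. A small sketch of launching it with overrides from Python (the values, and the assumption that `main` is importable from the repository root, are illustrative):

```python
import os

# Override the defaults before main.py is imported, because it reads the
# environment at import time. The values here are examples only.
os.environ["MCP_HOST"] = "127.0.0.1"
os.environ["MCP_PORT"] = "9000"
os.environ["MCP_TRANSPORT"] = "sse"

import main  # assumes the repository root is the working directory

main.run_server()
```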
mcp_server/__init__.py
ADDED
@@ -0,0 +1,11 @@
|
1 |
+
"""
|
2 |
+
TutorX MCP Server Package
|
3 |
+
|
4 |
+
This package contains the Model Context Protocol (MCP) server for the TutorX educational platform.
|
5 |
+
"""
|
6 |
+
|
7 |
+
__version__ = "0.1.0"
|
8 |
+
|
9 |
+
def get_version() -> str:
|
10 |
+
"""Return the current version of the TutorX MCP server."""
|
11 |
+
return __version__
|
mcp_server/mcp_instance.py
ADDED
@@ -0,0 +1,7 @@
|
1 |
+
from mcp.server.fastmcp import FastMCP
|
2 |
+
|
3 |
+
mcp = FastMCP(
|
4 |
+
"TutorX",
|
5 |
+
dependencies=["mcp[cli]>=1.9.3"],
|
6 |
+
cors_origins=["*"]
|
7 |
+
)
|
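The shared `mcp` instance above is what every tool module imports, so that each `@mcp.tool()` decorator registers against the same server. A hedged sketch of a tool module following that pattern (the `echo_tool` function is illustrative, not one of the TutorX tools):

```python
# Sketch of a tool module that registers against the shared FastMCP instance.
from mcp_server.mcp_instance import mcp


@mcp.tool()
async def echo_tool(message: str) -> dict:
    """Echo the message back, tagged with the server name."""
    return {"echo": message, "server": "TutorX"}
```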
mcp_server/model/__init__.py
ADDED
@@ -0,0 +1,3 @@
|
1 |
+
from .gemini_flash import GeminiFlash
|
2 |
+
|
3 |
+
__all__ = ['GeminiFlash']
|
mcp_server/model/gemini_flash.py
ADDED
@@ -0,0 +1,147 @@
|
1 |
+
import google.generativeai as genai
|
2 |
+
from typing import Dict, Any, Optional, List, Literal, Union, TypeVar, Callable
|
3 |
+
import os
|
4 |
+
from functools import wraps
|
5 |
+
import logging
|
6 |
+
|
7 |
+
# Set up logging
|
8 |
+
logging.basicConfig(level=logging.INFO)
|
9 |
+
logger = logging.getLogger(__name__)
|
10 |
+
|
11 |
+
T = TypeVar('T')
|
12 |
+
|
13 |
+
class ModelError(Exception):
|
14 |
+
"""Custom exception for model-related errors"""
|
15 |
+
pass
|
16 |
+
|
17 |
+
def fallback_to_15_flash(method: Callable[..., T]) -> Callable[..., T]:
|
18 |
+
"""
|
19 |
+
Decorator to automatically fall back to 1.5 if 2.0 fails.
|
20 |
+
Only applies when the instance's version is '2.0'.
|
21 |
+
"""
|
22 |
+
@wraps(method)
|
23 |
+
async def wrapper(self: 'GeminiFlash', *args: Any, **kwargs: Any) -> T:
|
24 |
+
if self.version != '2.0' or not self._should_fallback:
|
25 |
+
return await method(self, *args, **kwargs)
|
26 |
+
|
27 |
+
try:
|
28 |
+
return await method(self, *args, **kwargs)
|
29 |
+
except Exception as e:
|
30 |
+
logger.warning(f"Error with Gemini 2.0 Flash: {str(e)}")
|
31 |
+
|
32 |
+
# Only fallback if we haven't already tried 1.5
|
33 |
+
if self.version == '2.0':
|
34 |
+
logger.info("Falling back to Gemini 1.5 Flash...")
|
35 |
+
fallback = GeminiFlash(version='1.5', api_key=self.api_key, _is_fallback=True)
|
36 |
+
return await getattr(fallback, method.__name__)(*args, **kwargs)
|
37 |
+
raise ModelError(f"Error with Gemini 1.5 Flash: {str(e)}")
|
38 |
+
return wrapper
|
39 |
+
|
40 |
+
class GeminiFlash:
|
41 |
+
"""
|
42 |
+
Google Gemini Flash model implementation with automatic fallback from 2.0 to 1.5.
|
43 |
+
"""
|
44 |
+
|
45 |
+
SUPPORTED_VERSIONS = ['2.0', '1.5']
|
46 |
+
|
47 |
+
def __init__(self, version: str = '2.0', api_key: Optional[str] = None, _is_fallback: bool = False):
|
48 |
+
"""
|
49 |
+
Initialize the Gemini Flash model.
|
50 |
+
|
51 |
+
Args:
|
52 |
+
version: Model version ('2.0' or '1.5')
|
53 |
+
api_key: Google AI API key. If not provided, will look for GOOGLE_API_KEY env var.
|
54 |
+
_is_fallback: Internal flag to indicate if this is a fallback instance.
|
55 |
+
"""
|
56 |
+
if version not in self.SUPPORTED_VERSIONS:
|
57 |
+
raise ValueError(f"Unsupported version: {version}. Supported versions: {self.SUPPORTED_VERSIONS}")
|
58 |
+
|
59 |
+
self.version = version
|
60 |
+
api_key="AIzaSyC3TpRUinxSCASXncgqhD1FJ6yqAq3j9rY"
|
61 |
+
self.api_key = api_key
|
62 |
+
if not self.api_key:
|
63 |
+
raise ValueError("GOOGLE_API_KEY environment variable not set and no API key provided")
|
64 |
+
|
65 |
+
self._should_fallback = version == '2.0' and not _is_fallback
|
66 |
+
genai.configure(api_key=self.api_key)
|
67 |
+
self.model_name = f'gemini-{version}-flash'
|
68 |
+
self.model = genai.GenerativeModel(self.model_name)
|
69 |
+
|
70 |
+
@fallback_to_15_flash
|
71 |
+
async def generate_text(
|
72 |
+
self,
|
73 |
+
prompt: str,
|
74 |
+
temperature: float = 0.7,
|
75 |
+
max_tokens: int = 2048,
|
76 |
+
top_p: float = 0.9,
|
77 |
+
top_k: int = 40,
|
78 |
+
**kwargs
|
79 |
+
) -> str:
|
80 |
+
"""
|
81 |
+
Generate text using Gemini Flash.
|
82 |
+
|
83 |
+
Args:
|
84 |
+
prompt: The input prompt
|
85 |
+
temperature: Controls randomness (0.0 to 1.0)
|
86 |
+
max_tokens: Maximum number of tokens to generate
|
87 |
+
top_p: Nucleus sampling parameter
|
88 |
+
top_k: Top-k sampling parameter
|
89 |
+
**kwargs: Additional generation parameters
|
90 |
+
|
91 |
+
Returns:
|
92 |
+
Generated text response
|
93 |
+
|
94 |
+
Raises:
|
95 |
+
ModelError: If both 2.0 and 1.5 models fail
|
96 |
+
"""
|
97 |
+
response = await self.model.generate_content_async(
|
98 |
+
prompt,
|
99 |
+
generation_config={
|
100 |
+
'temperature': temperature,
|
101 |
+
'max_output_tokens': max_tokens,
|
102 |
+
'top_p': top_p,
|
103 |
+
'top_k': top_k,
|
104 |
+
**kwargs
|
105 |
+
}
|
106 |
+
)
|
107 |
+
return response.text
|
108 |
+
|
109 |
+
@fallback_to_15_flash
|
110 |
+
async def chat(
|
111 |
+
self,
|
112 |
+
messages: List[Dict[Literal['role', 'content'], str]],
|
113 |
+
temperature: float = 0.7,
|
114 |
+
max_tokens: int = 2048,
|
115 |
+
**kwargs
|
116 |
+
) -> str:
|
117 |
+
"""
|
118 |
+
Chat completion using Gemini Flash.
|
119 |
+
|
120 |
+
Args:
|
121 |
+
messages: List of message dictionaries with 'role' and 'content'
|
122 |
+
temperature: Controls randomness (0.0 to 1.0)
|
123 |
+
max_tokens: Maximum number of tokens to generate
|
124 |
+
**kwargs: Additional generation parameters
|
125 |
+
|
126 |
+
Returns:
|
127 |
+
Model's response
|
128 |
+
|
129 |
+
Raises:
|
130 |
+
ModelError: If both 2.0 and 1.5 models fail
|
131 |
+
"""
|
132 |
+
chat = self.model.start_chat(history=[])
|
133 |
+
# Process all but the last message as history
|
134 |
+
for message in messages[:-1]:
|
135 |
+
if message['role'] == 'user':
|
136 |
+
chat.send_message(message['content'])
|
137 |
+
|
138 |
+
# Get response for the last message
|
139 |
+
response = await chat.send_message_async(
|
140 |
+
messages[-1]['content'],
|
141 |
+
generation_config={
|
142 |
+
'temperature': temperature,
|
143 |
+
'max_output_tokens': max_tokens,
|
144 |
+
**kwargs
|
145 |
+
}
|
146 |
+
)
|
147 |
+
return response.text
|
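A minimal usage sketch for the wrapper above, assuming `GOOGLE_API_KEY` is set in the environment:

```python
import asyncio

from mcp_server.model import GeminiFlash


async def main():
    # version="2.0" transparently falls back to 1.5 Flash if a call fails.
    model = GeminiFlash(version="2.0")

    text = await model.generate_text("Explain Python decorators in two sentences.")
    print(text)

    reply = await model.chat([
        {"role": "user", "content": "What is a lambda function?"},
    ])
    print(reply)


if __name__ == "__main__":
    asyncio.run(main())
```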
mcp_server/prompts/quiz_generation.txt
ADDED
@@ -0,0 +1,25 @@
|
1 |
+
You are an expert quiz generator. Create a quiz about the following concept:
|
2 |
+
|
3 |
+
Concept: {concept}
|
4 |
+
Difficulty: {difficulty}
|
5 |
+
|
6 |
+
Generate a quiz with the following structure:
|
7 |
+
1. Multiple choice questions (3-5 questions)
|
8 |
+
2. Each question should have 4 options
|
9 |
+
3. Include the correct answer
|
10 |
+
4. Add a brief explanation for each answer
|
11 |
+
|
12 |
+
Return the quiz in the following JSON format:
|
13 |
+
{{
|
14 |
+
"quiz_title": "Quiz about [Concept]",
|
15 |
+
"questions": [
|
16 |
+
{{
|
17 |
+
"question": "...",
|
18 |
+
"options": ["...", "...", "...", "..."],
|
19 |
+
"correct_answer": "...",
|
20 |
+
"explanation": "..."
|
21 |
+
}}
|
22 |
+
]
|
23 |
+
}}
|
24 |
+
|
25 |
+
Make sure the quiz is appropriate for {difficulty} difficulty level.
|
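The `{concept}` and `{difficulty}` placeholders are meant to be filled with `str.format()`, while the doubled `{{ }}` braces survive formatting as literal JSON braces. A sketch of wiring the template to the Gemini wrapper (the template path and the use of `GeminiFlash` here are assumptions):

```python
from pathlib import Path

from mcp_server.model import GeminiFlash

# Assumed location of the template, relative to the repository root.
PROMPT_PATH = Path("mcp_server/prompts/quiz_generation.txt")


def build_quiz_prompt(concept: str, difficulty: str) -> str:
    """Fill {concept}/{difficulty}; doubled braces remain literal JSON braces."""
    template = PROMPT_PATH.read_text(encoding="utf-8")
    return template.format(concept=concept, difficulty=difficulty)


async def generate_quiz(concept: str, difficulty: str = "medium") -> str:
    prompt = build_quiz_prompt(concept, difficulty)
    return await GeminiFlash().generate_text(prompt)
```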
mcp_server/resources/concept_graph.py
ADDED
@@ -0,0 +1,83 @@
|
1 |
+
"""
|
2 |
+
Concept graph data structure for TutorX knowledge base.
|
3 |
+
"""
|
4 |
+
from typing import Dict, Any
|
5 |
+
|
6 |
+
# Store the concept graph data in memory
|
7 |
+
CONCEPT_GRAPH = {
|
8 |
+
"python": {
|
9 |
+
"id": "python",
|
10 |
+
"name": "Python Programming",
|
11 |
+
"description": "Fundamentals of Python programming language",
|
12 |
+
"prerequisites": [],
|
13 |
+
"related": ["functions", "oop", "data_structures"]
|
14 |
+
},
|
15 |
+
"functions": {
|
16 |
+
"id": "functions",
|
17 |
+
"name": "Python Functions",
|
18 |
+
"description": "Creating and using functions in Python",
|
19 |
+
"prerequisites": ["python"],
|
20 |
+
"related": ["decorators", "lambdas"]
|
21 |
+
},
|
22 |
+
"oop": {
|
23 |
+
"id": "oop",
|
24 |
+
"name": "Object-Oriented Programming",
|
25 |
+
"description": "Classes and objects in Python",
|
26 |
+
"prerequisites": ["python"],
|
27 |
+
"related": ["inheritance", "polymorphism"]
|
28 |
+
},
|
29 |
+
"data_structures": {
|
30 |
+
"id": "data_structures",
|
31 |
+
"name": "Data Structures",
|
32 |
+
"description": "Built-in data structures in Python",
|
33 |
+
"prerequisites": ["python"],
|
34 |
+
"related": ["algorithms"]
|
35 |
+
},
|
36 |
+
"decorators": {
|
37 |
+
"id": "decorators",
|
38 |
+
"name": "Python Decorators",
|
39 |
+
"description": "Function decorators in Python",
|
40 |
+
"prerequisites": ["functions"],
|
41 |
+
"related": ["python", "functions"]
|
42 |
+
},
|
43 |
+
"lambdas": {
|
44 |
+
"id": "lambdas",
|
45 |
+
"name": "Lambda Functions",
|
46 |
+
"description": "Anonymous functions in Python",
|
47 |
+
"prerequisites": ["functions"],
|
48 |
+
"related": ["python", "functions"]
|
49 |
+
},
|
50 |
+
"inheritance": {
|
51 |
+
"id": "inheritance",
|
52 |
+
"name": "Inheritance in OOP",
|
53 |
+
"description": "Creating class hierarchies in Python",
|
54 |
+
"prerequisites": ["oop"],
|
55 |
+
"related": ["python", "oop"]
|
56 |
+
},
|
57 |
+
"polymorphism": {
|
58 |
+
"id": "polymorphism",
|
59 |
+
"name": "Polymorphism in OOP",
|
60 |
+
"description": "Multiple forms of methods in Python",
|
61 |
+
"prerequisites": ["oop"],
|
62 |
+
"related": ["python", "oop"]
|
63 |
+
},
|
64 |
+
"algorithms": {
|
65 |
+
"id": "algorithms",
|
66 |
+
"name": "Basic Algorithms",
|
67 |
+
"description": "Common algorithms in Python",
|
68 |
+
"prerequisites": ["data_structures"],
|
69 |
+
"related": ["python", "data_structures"]
|
70 |
+
}
|
71 |
+
}
|
72 |
+
|
73 |
+
def get_concept(concept_id: str) -> Dict[str, Any]:
|
74 |
+
"""Get a specific concept by ID or return None if not found."""
|
75 |
+
return CONCEPT_GRAPH.get(concept_id)
|
76 |
+
|
77 |
+
def get_all_concepts() -> Dict[str, Any]:
|
78 |
+
"""Get all concepts in the graph."""
|
79 |
+
return {"concepts": list(CONCEPT_GRAPH.values())}
|
80 |
+
|
81 |
+
def get_concept_graph() -> Dict[str, Any]:
|
82 |
+
"""Get the complete concept graph."""
|
83 |
+
return CONCEPT_GRAPH
|
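The helpers above make it easy to walk the graph; for example, a small function that resolves the transitive prerequisites of a concept (the import path assumes `mcp_server.resources` is importable as a package):

```python
from mcp_server.resources.concept_graph import get_concept


def prerequisite_chain(concept_id: str) -> list[str]:
    """Collect all transitive prerequisites, e.g.
    prerequisite_chain("algorithms") -> ["data_structures", "python"]."""
    chain: list[str] = []
    seen: set[str] = set()
    stack = [concept_id]
    while stack:
        concept = get_concept(stack.pop())
        if not concept:
            continue
        for prereq in concept.get("prerequisites", []):
            if prereq not in seen:
                seen.add(prereq)
                chain.append(prereq)
                stack.append(prereq)
    return chain
```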
mcp_server/resources/curriculum_standards.py
ADDED
@@ -0,0 +1,106 @@
|
1 |
+
"""
|
2 |
+
Curriculum standards for different countries and education systems.
|
3 |
+
"""
|
4 |
+
from typing import Dict, Any
|
5 |
+
|
6 |
+
# Sample curriculum standards data
|
7 |
+
CURRICULUM_STANDARDS = {
|
8 |
+
"us": {
|
9 |
+
"name": "Common Core State Standards (US)",
|
10 |
+
"subjects": {
|
11 |
+
"math": {
|
12 |
+
"k-5": ["Counting & Cardinality", "Operations & Algebraic Thinking", "Number & Operations"],
|
13 |
+
"6-8": ["Ratios & Proportional Relationships", "The Number System", "Expressions & Equations"],
|
14 |
+
"9-12": ["Number & Quantity", "Algebra", "Functions", "Modeling", "Geometry", "Statistics & Probability"]
|
15 |
+
},
|
16 |
+
"ela": {
|
17 |
+
"k-5": ["Reading: Literature", "Reading: Informational Text", "Foundational Skills"],
|
18 |
+
"6-12": ["Reading: Literature", "Reading: Informational Text", "Writing", "Speaking & Listening", "Language"]
|
19 |
+
},
|
20 |
+
"csta": {
|
21 |
+
"k-5": ["Algorithms & Programming", "Computing Systems", "Data & Analysis", "Impacts of Computing"],
|
22 |
+
"6-8": ["Algorithms & Programming", "Computing Systems", "Data & Analysis", "Impacts of Computing", "Networks & Internet"],
|
23 |
+
"9-12": ["Algorithms & Programming", "Computing Systems", "Data & Analysis", "Impacts of Computing", "Networks & Internet"]
|
24 |
+
}
|
25 |
+
},
|
26 |
+
"url": "http://www.corestandards.org/"
|
27 |
+
},
|
28 |
+
"uk": {
|
29 |
+
"name": "National Curriculum (UK)",
|
30 |
+
"subjects": {
|
31 |
+
"computing": {
|
32 |
+
"ks1": ["Computer Science", "Information Technology", "Digital Literacy"],
|
33 |
+
"ks2": ["Computer Science", "Information Technology", "Digital Literacy"],
|
34 |
+
"ks3": ["Computer Science", "Information Technology", "Digital Literacy"],
|
35 |
+
"ks4": ["Computer Science", "Information Technology", "Creative iMedia"]
|
36 |
+
},
|
37 |
+
"maths": {
|
38 |
+
"ks1": ["Number", "Measurement", "Geometry", "Statistics"],
|
39 |
+
"ks2": ["Number", "Ratio & Proportion", "Algebra", "Measurement", "Geometry", "Statistics"]
|
40 |
+
}
|
41 |
+
},
|
42 |
+
"url": "https://www.gov.uk/government/collections/national-curriculum"
|
43 |
+
},
|
44 |
+
"in": {
|
45 |
+
"name": "National Education Policy (India)",
|
46 |
+
"subjects": {
|
47 |
+
"mathematics": {
|
48 |
+
"foundation": ["Numeracy", "Shapes & Spatial Understanding"],
|
49 |
+
"preparatory": ["Numbers", "Basic Mathematical Operations", "Shapes & Geometry"],
|
50 |
+
"middle": ["Number System", "Algebra", "Geometry", "Mensuration", "Data Handling"],
|
51 |
+
"secondary": ["Number Systems", "Algebra", "Coordinate Geometry", "Geometry", "Trigonometry", "Mensuration", "Statistics & Probability"]
|
52 |
+
},
|
53 |
+
"computer_science": {
|
54 |
+
"middle": ["Computational Thinking", "Computer Systems", "Networking", "Data Analysis"],
|
55 |
+
"secondary": ["Programming", "Computer Networks", "Database Management", "Web Technologies"]
|
56 |
+
}
|
57 |
+
},
|
58 |
+
"url": "https://www.education.gov.in/en/nep2020"
|
59 |
+
},
|
60 |
+
"sg": {
|
61 |
+
"name": "Singapore Curriculum",
|
62 |
+
"subjects": {
|
63 |
+
"mathematics": {
|
64 |
+
"primary": ["Number & Algebra", "Measurement & Geometry", "Statistics"],
|
65 |
+
"secondary": ["Number & Algebra", "Geometry & Measurement", "Statistics & Probability", "Trigonometry & Calculus"]
|
66 |
+
},
|
67 |
+
"computing": {
|
68 |
+
"primary": ["Computational Thinking", "Coding", "Digital Literacy"],
|
69 |
+
"secondary": ["Computing", "Infocomm", "Media Studies"]
|
70 |
+
}
|
71 |
+
},
|
72 |
+
"url": "https://www.moe.gov.sg/"
|
73 |
+
},
|
74 |
+
"ca": {
|
75 |
+
"name": "Canadian Curriculum",
|
76 |
+
"subjects": {
|
77 |
+
"mathematics": {
|
78 |
+
"elementary": ["Number Sense & Numeration", "Measurement", "Geometry & Spatial Sense", "Patterning & Algebra", "Data Management & Probability"],
|
79 |
+
"secondary": ["Mathematics", "Advanced Functions", "Calculus & Vectors", "Data Management"]
|
80 |
+
},
|
81 |
+
"computer_studies": {
|
82 |
+
"grades_10-12": ["Computer Science", "Computer Engineering", "Computer Programming"]
|
83 |
+
}
|
84 |
+
},
|
85 |
+
"url": "https://www.cmec.ca/"
|
86 |
+
}
|
87 |
+
}
|
88 |
+
|
89 |
+
def get_curriculum_standards(country_code: str = "us") -> Dict[str, Any]:
|
90 |
+
"""
|
91 |
+
Get curriculum standards for a specific country.
|
92 |
+
|
93 |
+
Args:
|
94 |
+
country_code: ISO country code (e.g., 'us', 'uk', 'in', 'sg', 'ca')
|
95 |
+
|
96 |
+
Returns:
|
97 |
+
Dictionary containing curriculum standards for the specified country
|
98 |
+
"""
|
99 |
+
country_code = country_code.lower()
|
100 |
+
if country_code not in CURRICULUM_STANDARDS:
|
101 |
+
return {
|
102 |
+
"error": f"Curriculum standards for country code '{country_code}' not found. "
|
103 |
+
f"Available countries: {', '.join(CURRICULUM_STANDARDS.keys())}"
|
104 |
+
}
|
105 |
+
|
106 |
+
return CURRICULUM_STANDARDS[country_code]
|
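A quick usage sketch for the lookup above (import path assumed, as with the concept graph):

```python
from mcp_server.resources.curriculum_standards import get_curriculum_standards

# Known country codes return the full standards block; lookup is case-insensitive.
uk = get_curriculum_standards("UK")
print(uk["name"])  # "National Curriculum (UK)"

# Unknown codes return an error dict instead of raising.
missing = get_curriculum_standards("de")
print(missing["error"])
```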
mcp_server/server.py
ADDED
@@ -0,0 +1,186 @@
|
1 |
+
"""
|
2 |
+
TutorX MCP Server
|
3 |
+
|
4 |
+
This is the main entry point for the TutorX MCP server.
|
5 |
+
"""
|
6 |
+
import base64
|
7 |
+
import os
|
8 |
+
import sys
|
9 |
+
from pathlib import Path
|
10 |
+
|
11 |
+
# Add the current directory to the Python path
|
12 |
+
current_dir = Path(__file__).parent
|
13 |
+
sys.path.insert(0, str(current_dir))
|
14 |
+
|
15 |
+
import uvicorn
|
16 |
+
from fastapi import FastAPI, HTTPException, UploadFile, File, Form
|
17 |
+
from fastapi.middleware.cors import CORSMiddleware
|
18 |
+
from mcp.server.fastmcp import FastMCP
|
19 |
+
|
20 |
+
# Import all tools to register them with MCP
|
21 |
+
from tools import (
|
22 |
+
concept_tools,
|
23 |
+
lesson_tools,
|
24 |
+
quiz_tools,
|
25 |
+
interaction_tools,
|
26 |
+
ocr_tools,
|
27 |
+
learning_path_tools
|
28 |
+
)
|
29 |
+
|
30 |
+
# Import resources
|
31 |
+
from resources import concept_graph, curriculum_standards
|
32 |
+
|
33 |
+
# Create FastAPI app
|
34 |
+
api_app = FastAPI(
|
35 |
+
title="TutorX MCP Server",
|
36 |
+
description="Model Context Protocol server for TutorX educational platform",
|
37 |
+
version="1.0.0"
|
38 |
+
)
|
39 |
+
|
40 |
+
# Add CORS middleware
|
41 |
+
api_app.add_middleware(
|
42 |
+
CORSMiddleware,
|
43 |
+
allow_origins=["*"],
|
44 |
+
allow_credentials=True,
|
45 |
+
allow_methods=["*"],
|
46 |
+
allow_headers=["*"],
|
47 |
+
)
|
48 |
+
|
49 |
+
# Import the shared mcp instance
|
50 |
+
from mcp_server.mcp_instance import mcp
|
51 |
+
|
52 |
+
# Explicitly import all tool modules so their @mcp.tool() decorators run
|
53 |
+
from mcp_server.tools import concept_tools
|
54 |
+
from mcp_server.tools import lesson_tools
|
55 |
+
from mcp_server.tools import quiz_tools
|
56 |
+
from mcp_server.tools import interaction_tools
|
57 |
+
from mcp_server.tools import ocr_tools
|
58 |
+
from mcp_server.tools import learning_path_tools
|
59 |
+
from mcp_server.tools import concept_graph_tools
|
60 |
+
|
61 |
+
# Mount the SSE transport for MCP at '/sse/' (with trailing slash)
|
62 |
+
api_app.mount("/sse", mcp.sse_app())
|
63 |
+
|
64 |
+
# Health check endpoint
|
65 |
+
@api_app.get("/health")
|
66 |
+
async def health_check():
|
67 |
+
return {"status": "healthy", "service": "tutorx-mcp"}
|
68 |
+
|
69 |
+
# API endpoints - Concepts
|
70 |
+
@api_app.get("/api/concept_graph")
|
71 |
+
async def get_concept_graph(concept_id: str = None):
|
72 |
+
if concept_id:
|
73 |
+
concept = concept_graph.get_concept(concept_id)
|
74 |
+
if not concept:
|
75 |
+
raise HTTPException(status_code=404, detail={"error": f"Concept {concept_id} not found"})
|
76 |
+
return concept
|
77 |
+
return {"concepts": list(concept_graph.get_concept_graph().values())}
|
78 |
+
|
79 |
+
@api_app.get("/api/concept/{concept_id}")
|
80 |
+
async def get_concept_endpoint(concept_id: str):
|
81 |
+
concept = concept_graph.get_concept(concept_id)
|
82 |
+
if not concept:
|
83 |
+
raise HTTPException(status_code=404, detail=f"Concept {concept_id} not found")
|
84 |
+
return concept
|
85 |
+
|
86 |
+
@api_app.get("/api/concepts")
|
87 |
+
async def list_concepts():
|
88 |
+
return concept_graph.get_all_concepts()
|
89 |
+
|
90 |
+
# API endpoints - Curriculum Standards
|
91 |
+
@api_app.get("/api/curriculum-standards")
|
92 |
+
async def get_curriculum_standards(country: str = "us"):
|
93 |
+
return curriculum_standards.get_curriculum_standards(country)
|
94 |
+
|
95 |
+
# API endpoints - Text Interaction
|
96 |
+
@api_app.post("/api/text-interaction")
|
97 |
+
async def text_interaction_endpoint(request: dict):
|
98 |
+
query = request.get("query")
|
99 |
+
student_id = request.get("student_id")
|
100 |
+
if not query or not student_id:
|
101 |
+
raise HTTPException(status_code=400, detail="Both query and student_id are required")
|
102 |
+
return await interaction_tools.text_interaction(query, student_id)
|
103 |
+
|
104 |
+
# API endpoints - Submission Originality Check
|
105 |
+
@api_app.post("/api/check-originality")
|
106 |
+
async def check_originality_endpoint(request: dict):
|
107 |
+
submission = request.get("submission")
|
108 |
+
reference_sources = request.get("reference_sources", [])
|
109 |
+
if not submission or not isinstance(reference_sources, list):
|
110 |
+
raise HTTPException(status_code=400, detail="submission (string) and reference_sources (array) are required")
|
111 |
+
return await interaction_tools.check_submission_originality(submission, reference_sources)
|
112 |
+
|
113 |
+
# API endpoints - PDF OCR
|
114 |
+
@api_app.post("/api/pdf-ocr")
|
115 |
+
async def pdf_ocr_endpoint(
|
116 |
+
file: UploadFile = File(...),
|
117 |
+
filename: str = Form(None)
|
118 |
+
):
|
119 |
+
try:
|
120 |
+
pdf_data = await file.read()
|
121 |
+
pdf_b64 = base64.b64encode(pdf_data).decode('utf-8')
|
122 |
+
result = await ocr_tools.pdf_ocr({
|
123 |
+
"pdf_data": pdf_b64,
|
124 |
+
"filename": filename or file.filename
|
125 |
+
})
|
126 |
+
return result
|
127 |
+
except Exception as e:
|
128 |
+
raise HTTPException(status_code=500, detail=str(e))
|
129 |
+
|
130 |
+
# API endpoints - Learning Path
|
131 |
+
@api_app.post("/api/learning-path")
|
132 |
+
async def learning_path_endpoint(request: dict):
|
133 |
+
student_id = request.get("student_id")
|
134 |
+
concept_ids = request.get("concept_ids", [])
|
135 |
+
student_level = request.get("student_level")
|
136 |
+
if not student_id or not concept_ids:
|
137 |
+
raise HTTPException(status_code=400, detail="student_id and concept_ids are required")
|
138 |
+
return await learning_path_tools.get_learning_path(
|
139 |
+
student_id=student_id,
|
140 |
+
concept_ids=concept_ids,
|
141 |
+
student_level=student_level
|
142 |
+
)
|
143 |
+
|
144 |
+
# API endpoints - Assess Skill
|
145 |
+
from tools.concept_tools import assess_skill_tool
|
146 |
+
@api_app.post("/api/assess-skill")
|
147 |
+
async def assess_skill_endpoint(request: dict):
|
148 |
+
student_id = request.get("student_id")
|
149 |
+
concept_id = request.get("concept_id")
|
150 |
+
if not student_id or not concept_id:
|
151 |
+
raise HTTPException(status_code=400, detail="Both student_id and concept_id are required")
|
152 |
+
return await assess_skill_tool(student_id, concept_id)
|
153 |
+
|
154 |
+
# API endpoints - Generate Lesson
|
155 |
+
from tools.lesson_tools import generate_lesson_tool
|
156 |
+
@api_app.post("/api/generate-lesson")
|
157 |
+
async def generate_lesson_endpoint(request: dict):
|
158 |
+
topic = request.get("topic")
|
159 |
+
grade_level = request.get("grade_level")
|
160 |
+
duration_minutes = request.get("duration_minutes")
|
161 |
+
if not topic or grade_level is None or duration_minutes is None:
|
162 |
+
raise HTTPException(status_code=400, detail="topic, grade_level, and duration_minutes are required")
|
163 |
+
return await generate_lesson_tool(topic, grade_level, duration_minutes)
|
164 |
+
|
165 |
+
# API endpoints - Generate Quiz
|
166 |
+
from tools.quiz_tools import generate_quiz_tool
|
167 |
+
@api_app.post("/api/generate-quiz")
|
168 |
+
async def generate_quiz_endpoint(request: dict):
|
169 |
+
concept = request.get("concept", "")
|
170 |
+
difficulty = request.get("difficulty", 2)
|
171 |
+
if not concept or not isinstance(concept, str) or not concept.strip():
|
172 |
+
raise HTTPException(status_code=400, detail="concept must be a non-empty string")
|
173 |
+
if isinstance(difficulty, (int, float)):
|
174 |
+
if difficulty <= 2:
|
175 |
+
difficulty = "easy"
|
176 |
+
elif difficulty <= 4:
|
177 |
+
difficulty = "medium"
|
178 |
+
else:
|
179 |
+
difficulty = "hard"
|
180 |
+
if difficulty not in ["easy", "medium", "hard"]:
|
181 |
+
difficulty = "medium"
|
182 |
+
return await generate_quiz_tool(concept.strip(), difficulty)
|
183 |
+
|
184 |
+
# Entrypoint for running with MCP SSE transport
|
185 |
+
if __name__ == "__main__":
|
186 |
+
mcp.run(transport="sse")
|
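As a quick reference (not part of the commit itself), the REST surface above can be smoke-tested with a few httpx calls once the server is running. This is a minimal sketch that assumes the default local address and port used by run.py; the concept string is an arbitrary example.

import asyncio
import httpx

async def smoke_test():
    # Assumes the FastAPI app above is being served on localhost:8001.
    async with httpx.AsyncClient(base_url="http://localhost:8001") as client:
        print((await client.get("/health")).json())
        quiz = await client.post(
            "/api/generate-quiz",
            json={"concept": "fractions", "difficulty": 3},  # 3 is mapped to "medium" above
        )
        print(quiz.json())

asyncio.run(smoke_test())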
mcp_server/setup.py
ADDED
@@ -0,0 +1,17 @@
from setuptools import setup, find_packages

setup(
    name="mcp-server",
    version="0.1.0",
    packages=find_packages(),
    install_requires=[
        "fastapi>=0.68.0",
        "uvicorn>=0.15.0",
        "mcp[cli]>=1.9.3",
        "pymupdf>=1.19.0",
        "pytesseract>=0.3.8",
        "Pillow>=8.3.1",
        "numpy>=1.21.0",
    ],
    python_requires=">=3.8",
)
mcp_server/tools/__init__.py
ADDED
@@ -0,0 +1,38 @@
"""
TutorX MCP Tools

This module contains all the MCP tools for the TutorX application.
"""

# Import all tools to make them available when importing the package
from .concept_tools import get_concept_tool, assess_skill_tool  # noqa
from .concept_graph_tools import get_concept_graph_tool  # noqa
from .lesson_tools import generate_lesson_tool  # noqa
from .quiz_tools import generate_quiz_tool  # noqa
from .interaction_tools import text_interaction, check_submission_originality  # noqa
from .ocr_tools import pdf_ocr, image_to_text  # noqa
from .learning_path_tools import get_learning_path  # noqa

__all__ = [
    # Concept tools
    'get_concept_tool',
    'assess_skill_tool',
    'get_concept_graph_tool',

    # Lesson tools
    'generate_lesson_tool',

    # Quiz tools
    'generate_quiz_tool',

    # Interaction tools
    'text_interaction',
    'check_submission_originality',

    # OCR tools
    'pdf_ocr',
    'image_to_text',

    # Learning path tools
    'get_learning_path',
]
mcp_server/tools/concept_graph_tools.py
ADDED
@@ -0,0 +1,46 @@
"""
Concept graph tools for TutorX MCP.
"""
from typing import Dict, Any, Optional
import sys
import os
from pathlib import Path

# Add the parent directory to the Python path
current_dir = Path(__file__).parent
parent_dir = current_dir.parent.parent
sys.path.insert(0, str(parent_dir))

import sys
import os
from pathlib import Path

# Add the parent directory to the Python path
current_dir = Path(__file__).parent
parent_dir = current_dir.parent
sys.path.insert(0, str(parent_dir))

# Import from local resources
from resources import concept_graph

# Import MCP
from mcp_server.mcp_instance import mcp

@mcp.tool()
async def get_concept_graph_tool(concept_id: Optional[str] = None) -> Dict[str, Any]:
    """
    Get the complete concept graph or a specific concept.

    Args:
        concept_id: Optional concept ID to get a specific concept

    Returns:
        Dictionary containing the concept graph or a specific concept
    """
    if concept_id:
        concept = concept_graph.get_concept(concept_id)
        if not concept:
            return {"error": f"Concept {concept_id} not found"}
        return {"concept": concept}

    return {"concepts": list(concept_graph.get_concept_graph().values())}
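Assuming the @mcp.tool() decorator hands back the original coroutine after registering it (as recent MCP SDK versions do), the tool can also be exercised directly in a plain asyncio loop, which is handy for local testing. The concept ID below is a placeholder, not one defined by the repository.

import asyncio
from mcp_server.tools.concept_graph_tools import get_concept_graph_tool

graph = asyncio.run(get_concept_graph_tool())                   # full graph
single = asyncio.run(get_concept_graph_tool("algebra_basics"))  # placeholder ID
print(len(graph.get("concepts", [])), single)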
mcp_server/tools/concept_tools.py
ADDED
@@ -0,0 +1,100 @@
"""
Concept-related MCP tools for TutorX.
"""
import random
from typing import Dict, Any, Optional
from datetime import datetime, timezone
import sys
import os
from pathlib import Path

# Add the parent directory to the Python path
current_dir = Path(__file__).parent
parent_dir = current_dir.parent.parent
sys.path.insert(0, str(parent_dir))

import sys
import os
from pathlib import Path

# Add the parent directory to the Python path
current_dir = Path(__file__).parent
parent_dir = current_dir.parent
sys.path.insert(0, str(parent_dir))

# Import from local resources
from resources.concept_graph import get_concept, get_all_concepts

# Import MCP
from mcp_server.mcp_instance import mcp

@mcp.tool()
async def get_concept_tool(concept_id: str = None) -> Dict[str, Any]:
    """
    Get a specific concept or all concepts from the knowledge graph.

    Args:
        concept_id: Optional concept ID to retrieve a specific concept

    Returns:
        Dictionary containing the requested concept(s)
    """
    if concept_id:
        concept = get_concept(concept_id)
        if not concept:
            return {"error": f"Concept {concept_id} not found"}
        return {"concept": concept}
    return get_all_concepts()

@mcp.tool()
async def assess_skill_tool(student_id: str, concept_id: str) -> Dict[str, Any]:
    """
    Assess a student's understanding of a specific concept.

    Args:
        student_id: Unique identifier for the student
        concept_id: ID of the concept to assess

    Returns:
        Dictionary containing assessment results
    """
    # Get concept data
    concept_data = get_concept(concept_id)
    if not concept_data:
        return {"error": f"Cannot assess skill: Concept {concept_id} not found"}

    concept_name = concept_data.get("name", concept_id)

    # Generate a score based on concept difficulty or random
    score = random.uniform(0.2, 1.0)  # Random score between 0.2 and 1.0

    # Set timestamp with timezone
    timestamp = datetime.now(timezone.utc).isoformat()

    # Generate feedback based on score
    feedback = {
        "strengths": [f"Good understanding of {concept_name} fundamentals"],
        "areas_for_improvement": [f"Could work on advanced applications of {concept_name}"],
        "recommendations": [
            f"Review {concept_name} practice problems",
            f"Watch tutorial videos on {concept_name}"
        ]
    }

    # Adjust feedback based on score
    if score < 0.5:
        feedback["strengths"] = [f"Basic understanding of {concept_name}"]
        feedback["areas_for_improvement"] = [
            f"Needs to strengthen fundamental knowledge of {concept_name}",
            f"Practice more exercises on {concept_name}"
        ]

    # Return assessment results
    return {
        "student_id": student_id,
        "concept_id": concept_id,
        "concept_name": concept_name,
        "score": round(score, 2),  # Round to 2 decimal places
        "timestamp": timestamp,
        "feedback": feedback
    }
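A sketch of invoking the assessment tool directly (with the same caveat as above about calling the decorated coroutine). Because the score is drawn from random.uniform(0.2, 1.0), every call returns different numbers, so only the payload shape is stable; both IDs below are placeholders.

import asyncio
from mcp_server.tools.concept_tools import assess_skill_tool

result = asyncio.run(assess_skill_tool("student-001", "math_algebra_basics"))  # placeholder IDs
# Keys in the returned dict: student_id, concept_id, concept_name, score, timestamp, feedback
print(result["score"], result["feedback"]["recommendations"])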
mcp_server/tools/interaction_tools.py
ADDED
@@ -0,0 +1,127 @@
"""
Text interaction and submission checking tools for TutorX.
"""
import re
from difflib import SequenceMatcher
from typing import Dict, Any, List, Optional
from mcp_server.mcp_instance import mcp

def calculate_similarity(text1: str, text2: str) -> float:
    """Calculate the similarity ratio between two texts."""
    return SequenceMatcher(None, text1, text2).ratio()

@mcp.tool()
async def text_interaction(query: str, student_id: str) -> Dict[str, Any]:
    """
    Process a text query from a student and provide an educational response.

    Args:
        query: The student's question or input text
        student_id: Unique identifier for the student

    Returns:
        Dictionary containing the response and metadata
    """
    # Simple response generation based on keywords
    query_lower = query.lower()

    # Check for greetings
    if any(word in query_lower for word in ["hello", "hi", "hey"]):
        return {
            "response": f"Hello! I'm your TutorX assistant. How can I help you today, Student {student_id}?",
            "suggested_actions": [
                "Ask a question about programming",
                "Request a lesson on a topic",
                "Take a quiz"
            ]
        }

    # Check for help request
    if "help" in query_lower or "confused" in query_lower:
        return {
            "response": "I'm here to help! Could you please tell me what specific topic or concept you're struggling with?",
            "suggested_actions": [
                "Explain functions in Python",
                "What is object-oriented programming?",
                "Help me debug my code"
            ]
        }

    # Default response for other queries
    return {
        "response": f"I understand you're asking about: {query}. Here's what I can tell you...",
        "metadata": {
            "student_id": student_id,
            "query_type": "general_inquiry"
        },
        "suggested_resources": [
            {"title": "Related Documentation", "url": "https://docs.python.org/3/"},
            {"title": "Tutorial Video", "url": "https://www.youtube.com/"},
            {"title": "Practice Exercises", "url": "https://www.hackerrank.com/"}
        ]
    }

@mcp.tool()
async def check_submission_originality(submission: str, reference_sources: List[str]) -> Dict[str, Any]:
    """
    Check a student's submission for potential plagiarism against reference sources.

    Args:
        submission: The student's submission text
        reference_sources: List of reference texts to check against

    Returns:
        Dictionary with originality analysis results
    """
    if not submission or not reference_sources:
        return {"error": "Both submission and reference_sources are required"}

    # Simple plagiarism check using string similarity
    results = []
    for i, source in enumerate(reference_sources, 1):
        if not source:
            continue

        similarity = calculate_similarity(submission, source)
        results.append({
            "source_index": i,
            "similarity_score": round(similarity, 4),
            "is_original": similarity < 0.7,  # Threshold for originality
            "suspicious_sections": []
        })

    # Check for exact matches
    exact_matches = []
    submission_words = submission.split()
    for i in range(len(submission_words) - 4):  # Check 5-word sequences
        seq = ' '.join(submission_words[i:i+5])
        for j, source in enumerate(reference_sources):
            if seq in source:
                exact_matches.append({
                    "source_index": j + 1,
                    "matched_text": seq,
                    "position": i
                })

    # Calculate overall originality score (weighted average)
    if results:
        avg_similarity = sum(r["similarity_score"] for r in results) / len(results)
        originality_score = max(0, 1 - avg_similarity)
    else:
        originality_score = 1.0

    return {
        "originality_score": round(originality_score, 2),
        "is_original": all(r["is_original"] for r in results) if results else True,
        "sources_checked": len(reference_sources),
        "source_comparisons": results,
        "exact_matches": exact_matches,
        "recommendations": [
            "Paraphrase any sections with high similarity scores",
            "Add proper citations for referenced material",
            "Use your own words to explain concepts"
        ] if any(not r["is_original"] for r in results) else [
            "Good job! Your work appears to be original.",
            "Remember to always cite your sources properly."
        ]
    }
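The originality check above rests entirely on difflib.SequenceMatcher and a fixed 0.7 threshold, so the ratio it compares can be inspected in isolation, which is useful when tuning that threshold. The two sentences below are made-up test strings.

from difflib import SequenceMatcher

a = "Binary search halves the search interval on every comparison."
b = "Binary search halves the interval at each comparison step."

ratio = SequenceMatcher(None, a, b).ratio()
# The tool marks a source pair as original only when ratio < 0.7.
print(round(ratio, 2), "original" if ratio < 0.7 else "too similar")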
mcp_server/tools/learning_path_tools.py
ADDED
@@ -0,0 +1,162 @@
"""
Learning path generation tools for TutorX.
"""
import random
from typing import Dict, Any, List, Optional
from datetime import datetime, timedelta
import sys
import os
from pathlib import Path

# Add the parent directory to the Python path
current_dir = Path(__file__).parent
parent_dir = current_dir.parent.parent
sys.path.insert(0, str(parent_dir))

import sys
import os
from pathlib import Path

# Add the parent directory to the Python path
current_dir = Path(__file__).parent
parent_dir = current_dir.parent
sys.path.insert(0, str(parent_dir))

# Import from local resources
from resources.concept_graph import CONCEPT_GRAPH

# Import MCP
from mcp_server.mcp_instance import mcp

def get_prerequisites(concept_id: str, visited: Optional[set] = None) -> List[Dict[str, Any]]:
    """
    Get all prerequisites for a concept recursively.

    Args:
        concept_id: ID of the concept to get prerequisites for
        visited: Set of already visited concepts to avoid cycles

    Returns:
        List of prerequisite concepts in order
    """
    if visited is None:
        visited = set()

    if concept_id not in CONCEPT_GRAPH or concept_id in visited:
        return []

    visited.add(concept_id)
    prerequisites = []

    # Get direct prerequisites
    for prereq_id in CONCEPT_GRAPH[concept_id].get("prerequisites", []):
        if prereq_id in CONCEPT_GRAPH and prereq_id not in visited:
            prerequisites.extend(get_prerequisites(prereq_id, visited))

    # Add the current concept
    prerequisites.append(CONCEPT_GRAPH[concept_id])
    return prerequisites

def generate_learning_path(concept_ids: List[str], student_level: str = "beginner") -> Dict[str, Any]:
    """
    Generate a personalized learning path for a student.

    Args:
        concept_ids: List of concept IDs to include in the learning path
        student_level: Student's current level (beginner, intermediate, advanced)

    Returns:
        Dictionary containing the learning path
    """
    if not concept_ids:
        return {"error": "At least one concept ID is required"}

    # Get all prerequisites for each concept
    all_prerequisites = []
    visited = set()

    for concept_id in concept_ids:
        if concept_id in CONCEPT_GRAPH:
            prereqs = get_prerequisites(concept_id, visited)
            all_prerequisites.extend(prereqs)

    # Remove duplicates while preserving order
    unique_concepts = []
    seen = set()
    for concept in all_prerequisites:
        if concept["id"] not in seen:
            seen.add(concept["id"])
            unique_concepts.append(concept)

    # Add any target concepts not already in the path
    for concept_id in concept_ids:
        if concept_id in CONCEPT_GRAPH and concept_id not in seen:
            unique_concepts.append(CONCEPT_GRAPH[concept_id])

    # Estimate time required for each concept based on student level
    time_estimates = {
        "beginner": {"min": 30, "max": 60},      # 30-60 minutes per concept
        "intermediate": {"min": 20, "max": 45},  # 20-45 minutes per concept
        "advanced": {"min": 15, "max": 30}       # 15-30 minutes per concept
    }

    level = student_level.lower()
    if level not in time_estimates:
        level = "beginner"

    time_min = time_estimates[level]["min"]
    time_max = time_estimates[level]["max"]

    # Generate learning path with estimated times
    learning_path = []
    total_minutes = 0

    for i, concept in enumerate(unique_concepts, 1):
        # Random time estimate within range
        minutes = random.randint(time_min, time_max)
        total_minutes += minutes

        learning_path.append({
            "step": i,
            "concept_id": concept["id"],
            "concept_name": concept["name"],
            "description": concept.get("description", ""),
            "estimated_time_minutes": minutes,
            "resources": [
                f"Video tutorial on {concept['name']}",
                f"{concept['name']} documentation",
                f"Practice exercises for {concept['name']}"
            ]
        })

    # Calculate total time
    hours, minutes = divmod(total_minutes, 60)
    total_time = f"{hours}h {minutes}m" if hours > 0 else f"{minutes}m"

    return {
        "learning_path": learning_path,
        "total_steps": len(learning_path),
        "total_time_minutes": total_minutes,
        "total_time_display": total_time,
        "student_level": student_level,
        "generated_at": datetime.utcnow().isoformat() + "Z"
    }

@mcp.tool()
async def get_learning_path(student_id: str, concept_ids: List[str], student_level: Optional[str] = None) -> Dict[str, Any]:
    """
    Generate a personalized learning path for a student.

    Args:
        student_id: Unique identifier for the student
        concept_ids: List of concept IDs to include in the learning path
        student_level: Optional student level (beginner, intermediate, advanced)

    Returns:
        Dictionary containing the learning path
    """
    # In a real implementation, we would look up the student's level from their profile
    if not student_level:
        student_level = "beginner"  # Default level

    return generate_learning_path(concept_ids, student_level)
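The undecorated generate_learning_path helper can be called directly for testing, without going through MCP at all. This sketch assumes the module imports cleanly; the concept ID is a placeholder and only produces steps if it exists as a key in CONCEPT_GRAPH, otherwise the path comes back empty.

from mcp_server.tools.learning_path_tools import generate_learning_path

path = generate_learning_path(["algebra_basics"], student_level="intermediate")  # placeholder ID
print(path.get("total_steps"), path.get("total_time_display"))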
mcp_server/tools/lesson_tools.py
ADDED
@@ -0,0 +1,123 @@
"""
Lesson generation tools for TutorX MCP.
"""
from typing import Dict, Any, List
from mcp_server.mcp_instance import mcp

@mcp.tool()
async def generate_lesson_tool(topic: str, grade_level: int, duration_minutes: int) -> Dict[str, Any]:
    """
    Generate a lesson plan for the given topic, grade level, and duration.

    Args:
        topic: The topic for the lesson
        grade_level: The grade level (1-12)
        duration_minutes: Duration of the lesson in minutes

    Returns:
        Dictionary containing the generated lesson plan
    """
    # Validate inputs
    if not topic or not isinstance(topic, str):
        return {"error": "Topic must be a non-empty string"}

    if not isinstance(grade_level, int) or grade_level < 1 or grade_level > 12:
        return {"error": "Grade level must be an integer between 1 and 12"}

    if not isinstance(duration_minutes, (int, float)) or duration_minutes <= 0:
        return {"error": "Duration must be a positive number"}

    # Calculate time allocation (example: 10% intro, 30% instruction, 40% practice, 20% review)
    intro_time = max(5, duration_minutes * 0.1)  # At least 5 minutes
    instruction_time = duration_minutes * 0.3
    practice_time = duration_minutes * 0.4
    review_time = duration_minutes - (intro_time + instruction_time + practice_time)

    # Generate learning objectives based on grade level and topic
    difficulty = {
        1: "basic",
        2: "basic",
        3: "basic",
        4: "intermediate",
        5: "intermediate",
        6: "intermediate",
        7: "advanced",
        8: "advanced",
        9: "advanced",
        10: "expert",
        11: "expert",
        12: "expert"
    }.get(grade_level, "intermediate")

    # Create lesson plan
    lesson_plan = {
        "topic": topic,
        "grade_level": grade_level,
        "duration_minutes": duration_minutes,
        "difficulty": difficulty,
        "objectives": [
            f"Understand the {difficulty} concepts of {topic}",
            f"Apply {topic} concepts to solve problems",
            f"Analyze and evaluate {topic} in different contexts"
        ],
        "materials": [
            "Whiteboard and markers",
            "Printed worksheets",
            "Example code snippets",
            "Interactive coding environment"
        ],
        "activities": [
            {
                "type": "introduction",
                "duration_minutes": intro_time,
                "description": f"Introduce the topic of {topic} and its importance"
            },
            {
                "type": "direct_instruction",
                "duration_minutes": instruction_time,
                "description": f"Teach the core concepts of {topic}"
            },
            {
                "type": "guided_practice",
                "duration_minutes": practice_time,
                "description": "Work through examples together"
            },
            {
                "type": "independent_practice",
                "duration_minutes": practice_time,
                "description": "Students work on exercises independently"
            },
            {
                "type": "review",
                "duration_minutes": review_time,
                "description": "Review key concepts and answer questions"
            }
        ],
        "assessment": {
            "type": "formative",
            "methods": ["Exit ticket", "Class participation", "Worksheet completion"]
        },
        "differentiation": {
            "for_struggling_students": [
                "Provide additional examples",
                "Offer one-on-one support",
                "Use visual aids"
            ],
            "for_advanced_students": [
                "Provide extension activities",
                "Challenge with advanced problems",
                "Encourage to help peers"
            ]
        },
        "homework": {
            "description": f"Complete practice problems on {topic}",
            "estimated_time_minutes": 20,
            "resources": [
                f"{topic} practice worksheet",
                "Online practice problems",
                "Reading assignment"
            ]
        }
    }

    return lesson_plan
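The only real arithmetic in this tool is the time split. For a 60-minute lesson it works out to intro = max(5, 6) = 6, instruction = 18, practice = 24 and review = 60 - 48 = 12 minutes. Note that practice_time is attached to both the guided and the independent practice activities, so the per-activity durations add up to more than the requested total:

duration_minutes = 60
intro_time = max(5, duration_minutes * 0.1)        # 6.0
instruction_time = duration_minutes * 0.3          # 18.0
practice_time = duration_minutes * 0.4             # 24.0
review_time = duration_minutes - (intro_time + instruction_time + practice_time)  # 12.0

# Guided and independent practice both receive the 40% slice above.
print(intro_time + instruction_time + 2 * practice_time + review_time)  # 84.0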
mcp_server/tools/ocr_tools.py
ADDED
@@ -0,0 +1,166 @@
"""
OCR (Optical Character Recognition) tools for TutorX.
"""
import base64
import io
import tempfile
from typing import Dict, Any, Optional, Tuple
import fitz  # PyMuPDF, required by extract_text_from_pdf below
import pytesseract
from PIL import Image, ImageEnhance
import numpy as np
from mcp_server.mcp_instance import mcp

def preprocess_image(image: Image.Image) -> Image.Image:
    """
    Preprocess image to improve OCR accuracy.

    Args:
        image: Input PIL Image

    Returns:
        Preprocessed PIL Image
    """
    # Convert to grayscale
    image = image.convert('L')

    # Enhance contrast
    enhancer = ImageEnhance.Contrast(image)
    image = enhancer.enhance(2.0)

    # Enhance sharpness
    enhancer = ImageEnhance.Sharpness(image)
    image = enhancer.enhance(2.0)

    return image

def extract_text_from_image(image: Image.Image) -> str:
    """
    Extract text from an image using Tesseract OCR.

    Args:
        image: PIL Image to process

    Returns:
        Extracted text
    """
    try:
        # Preprocess the image
        processed_image = preprocess_image(image)

        # Use Tesseract to do OCR on the image
        text = pytesseract.image_to_string(processed_image, lang='eng')
        return text.strip()
    except Exception as e:
        raise RuntimeError(f"Error during OCR processing: {str(e)}")

def extract_text_from_pdf(pdf_data: bytes) -> Tuple[str, int]:
    """
    Extract text from a PDF file.

    Args:
        pdf_data: PDF file content as bytes

    Returns:
        Tuple of (extracted_text, page_count)
    """
    try:
        # Open the PDF file
        with fitz.open(stream=pdf_data, filetype="pdf") as doc:
            page_count = len(doc)
            extracted_text = []

            # Extract text from each page
            for page_num in range(page_count):
                page = doc.load_page(page_num)
                text = page.get_text()

                # If no text is found, try OCR
                if not text.strip():
                    pix = page.get_pixmap()
                    img_data = pix.tobytes("png")
                    img = Image.open(io.BytesIO(img_data))
                    text = extract_text_from_image(img)

                extracted_text.append(text)

            return "\n\n".join(extracted_text), page_count
    except Exception as e:
        raise RuntimeError(f"Error processing PDF: {str(e)}")

@mcp.tool()
async def pdf_ocr(request: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extract text from a PDF file using OCR.

    Expected request format:
    {
        "pdf_data": "base64_encoded_pdf_data",
        "filename": "document.pdf"  # Optional
    }

    Returns:
        Dictionary containing extracted text and metadata
    """
    try:
        # Get and validate input
        pdf_data_b64 = request.get("pdf_data")
        if not pdf_data_b64:
            return {"error": "Missing required field: pdf_data"}

        # Decode base64 data
        try:
            pdf_data = base64.b64decode(pdf_data_b64)
        except Exception as e:
            return {"error": f"Invalid base64 data: {str(e)}"}

        # Extract text from PDF
        extracted_text, page_count = extract_text_from_pdf(pdf_data)

        # Prepare response
        result = {
            "success": True,
            "filename": request.get("filename", "document.pdf"),
            "page_count": page_count,
            "extracted_text": extracted_text,
            "character_count": len(extracted_text),
            "word_count": len(extracted_text.split()),
            "processing_time_ms": 0  # Could be calculated if needed
        }

        return result

    except Exception as e:
        return {"error": f"Error processing PDF: {str(e)}"}

@mcp.tool()
async def image_to_text(image_data: str) -> Dict[str, Any]:
    """
    Extract text from an image using OCR.

    Args:
        image_data: Base64 encoded image data

    Returns:
        Dictionary containing extracted text and metadata
    """
    try:
        # Decode base64 image data
        image_bytes = base64.b64decode(image_data)

        # Open image
        image = Image.open(io.BytesIO(image_bytes))

        # Extract text
        text = extract_text_from_image(image)

        return {
            "success": True,
            "extracted_text": text,
            "character_count": len(text),
            "word_count": len(text.split()),
            "image_size": image.size,
            "image_mode": image.mode
        }
    except Exception as e:
        return {"error": f"Error processing image: {str(e)}"}
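A sketch of feeding a local PDF through pdf_ocr; the file path is a placeholder, and the Tesseract binary plus the pymupdf, pytesseract and Pillow packages listed in setup.py must be installed for the image fallback path to work.

import asyncio
import base64
from mcp_server.tools.ocr_tools import pdf_ocr

with open("sample.pdf", "rb") as fh:  # placeholder path
    payload = {"pdf_data": base64.b64encode(fh.read()).decode("utf-8"), "filename": "sample.pdf"}

result = asyncio.run(pdf_ocr(payload))
print(result.get("page_count"), result.get("word_count"), result.get("error"))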
mcp_server/tools/quiz_tools.py
ADDED
@@ -0,0 +1,62 @@
"""
Quiz generation tools for TutorX MCP.
"""
import json
import os
from pathlib import Path
from typing import Dict, Any, List, Optional
from mcp_server.mcp_instance import mcp
from model import GeminiFlash

# Load prompt template
PROMPT_TEMPLATE = (Path(__file__).parent.parent / "prompts" / "quiz_generation.txt").read_text(encoding="utf-8")

# Initialize Gemini model
MODEL = GeminiFlash()

@mcp.tool()
async def generate_quiz_tool(concept: str, difficulty: str = "medium") -> Dict[str, Any]:
    """
    Generate a quiz based on a concept and difficulty using Gemini.

    Args:
        concept: The concept to generate a quiz about
        difficulty: Difficulty level (easy, medium, hard)

    Returns:
        Dict containing the generated quiz in JSON format
    """
    try:
        # Validate inputs
        if not concept or not isinstance(concept, str):
            return {"error": "concept must be a non-empty string"}

        valid_difficulties = ["easy", "medium", "hard"]
        if difficulty.lower() not in valid_difficulties:
            return {"error": f"difficulty must be one of {valid_difficulties}"}

        # Format the prompt
        prompt = PROMPT_TEMPLATE.format(
            concept=concept,
            difficulty=difficulty.lower()
        )

        # Generate quiz using Gemini
        response = await MODEL.generate_text(prompt, temperature=0.7)

        # Try to parse the JSON response
        try:
            # Extract JSON from markdown code block if present
            if '```json' in response:
                json_str = response.split('```json')[1].split('```')[0].strip()
            else:
                json_str = response

            quiz_data = json.loads(json_str)
            return quiz_data

        except json.JSONDecodeError as e:
            return {"error": f"Failed to parse quiz response: {str(e)}", "raw_response": response}

    except Exception as e:
        return {"error": f"Error generating quiz: {str(e)}"}
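Calling generate_quiz_tool for real requires a configured Gemini client, but the JSON-extraction step can be checked on its own with a canned response string; the quiz content below is invented purely for the test.

import json

response = """```json
{"concept": "fractions", "difficulty": "medium",
 "questions": [{"question": "What is 1/2 + 1/4?", "answer": "3/4"}]}
```"""

# Same extraction logic as generate_quiz_tool.
if '```json' in response:
    json_str = response.split('```json')[1].split('```')[0].strip()
else:
    json_str = response

print(json.loads(json_str)["questions"][0]["answer"])  # 3/4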
pyproject.toml
CHANGED
@@ -14,6 +14,12 @@ dependencies = [
     "pillow>=10.0.0",
     "python-multipart>=0.0.6",
     "pydantic>=2.6.0",
+    "networkx>=3.0",
+    "python-dotenv>=1.0.0",
+    "pytest>=7.4.0",
+    "pytest-cov>=4.1.0",
+    "pytest-asyncio>=0.23.0",
+    "httpx>=0.26.0",
 ]
 
 [project.optional-dependencies]
@@ -22,6 +28,7 @@ test = [
     "pytest-cov>=4.1.0",
     "pytest-asyncio>=0.23.0",
     "httpx>=0.26.0",
+    "pytest-mock>=3.12.0",
 ]
 
 [tool.pytest.ini_options]
run.py
CHANGED
@@ -1,130 +1,164 @@
 """
-Script to run either the MCP server or the Gradio interface
 """
 
-import argparse
-import importlib.util
 import os
 import sys
-import
-import
-
-
-    """Load a module from path"""
-    spec = importlib.util.spec_from_file_location(name, path)
-    module = importlib.util.module_from_spec(spec)
-    spec.loader.exec_module(module)
-    return module
 
-def run_mcp_server(host="0.0.0.0", port=8001
-    """
-
 
-    # Set environment variables
     os.environ["MCP_HOST"] = host
     os.environ["MCP_PORT"] = str(port)
-    os.environ["MCP_TRANSPORT"] = transport
 
-
-
-
-
-
-
-
-
         sys.exit(1)
 
-def run_gradio_interface():
-    """
-
-    app_module = load_module("app", "app.py")
 
-
-
-
-
-
         sys.exit(1)
 
 def check_port_available(port):
-    """
-
-
-
-
-
-
-
-
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
     parser.add_argument(
         "--mode",
         choices=["mcp", "gradio", "both"],
         default="both",
-        help="Run mode: 'mcp' for MCP server, 'gradio' for Gradio interface, 'both' for both"
     )
     parser.add_argument(
         "--host",
         default="0.0.0.0",
-        help="Host
     )
     parser.add_argument(
-        "--port",
-        type=int,
         default=8001,
-        help="Port
     )
     parser.add_argument(
         "--gradio-port",
-        type=int,
         default=7860,
-        help="Port
-    )
-    parser.add_argument(
-        "--transport",
-        default="http",
-        help="Transport protocol to use (default: http)"
     )
 
     args = parser.parse_args()
 
-    if
-
-
-
-    elif args.mode == "gradio":
-        run_gradio_interface()
-    elif args.mode == "both":
-        # For 'both' mode, we'll start MCP server in a separate process
-        if not check_port_available(args.port):
-            print(f"Warning: Port {args.port} is already in use. Trying to use the server anyway...")
-
-        # Start MCP server in a background process
-        mcp_process = subprocess.Popen(
-            [
-                sys.executable,
-                "run.py",
-                "--mode", "mcp",
-                "--host", args.host,
-                "--port", str(args.port),
-                "--transport", args.transport
-            ],
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE
-        )
 
-
-        print("
-
 
-
-        #
-        run_gradio_interface()
-
-
-
             mcp_process.terminate()
-            mcp_process.
 """
+Script to run either the MCP server or the Gradio interface for TutorX
 """
 
 import os
 import sys
+import argparse
+import uvicorn
+from pathlib import Path
+import socket
+import multiprocessing
+import time
 
+def run_mcp_server(host="0.0.0.0", port=8001):
+    """
+    Run the MCP server using uvicorn
+
+    Args:
+        host: Host to bind the server to
+        port: Port to run the server on
+    """
+    print(f"Starting TutorX MCP Server on {host}:{port}...")
 
+    # Set environment variables
     os.environ["MCP_HOST"] = host
     os.environ["MCP_PORT"] = str(port)
 
+    try:
+        # Add the mcp-server directory to Python path
+        mcp_server_dir = str(Path(__file__).parent / "mcp-server")
+        if mcp_server_dir not in sys.path:
+            sys.path.insert(0, mcp_server_dir)
+
+        # Import the FastAPI app
+        from server import api_app
+
+        # Run the server using uvicorn
+        uvicorn.run(
+            "mcp-server.server:api_app",
+            host=host,
+            port=port,
+            reload=True,
+            reload_dirs=[mcp_server_dir],
+            log_level="info"
+        )
+    except ImportError as e:
+        print(f"Error: {e}")
+        print("Make sure you have installed all required dependencies:")
+        print("  pip install uvicorn fastapi")
+        sys.exit(1)
+    except Exception as e:
+        print(f"Error starting MCP server: {e}")
         sys.exit(1)
 
+def run_gradio_interface(port=7860):
+    """
+    Run the Gradio interface
 
+    Args:
+        port: Port to run the Gradio interface on
+    """
+    print(f"Starting TutorX Gradio Interface on port {port}...")
+
+    try:
+        # Make sure the mcp-server directory is in the path
+        mcp_server_dir = str(Path(__file__).parent / "mcp-server")
+        if mcp_server_dir not in sys.path:
+            sys.path.insert(0, mcp_server_dir)
+
+        # Import and run the Gradio app
+        from app import demo
+
+        # Launch the Gradio interface
+        demo.launch(
+            server_name="0.0.0.0",
+            server_port=port,
+            share=False
+        )
+    except Exception as e:
+        print(f"Failed to start Gradio interface: {e}")
         sys.exit(1)
 
 def check_port_available(port):
+    """
+    Check if a port is available
+
+    Args:
+        port: Port number to check
+
+    Returns:
+        bool: True if port is available, False otherwise
+    """
+    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+        return s.connect_ex(('localhost', port)) != 0
 
 if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="TutorX - Run MCP Server and/or Gradio Interface"
+    )
+
+    # Add command line arguments
     parser.add_argument(
         "--mode",
+        type=str,
         choices=["mcp", "gradio", "both"],
         default="both",
+        help="Run mode: 'mcp' for MCP server, 'gradio' for Gradio interface, 'both' for both (default)"
     )
     parser.add_argument(
         "--host",
+        type=str,
         default="0.0.0.0",
+        help="Host to bind the server to (default: 0.0.0.0)"
     )
     parser.add_argument(
+        "--mcp-port",
+        type=int,
         default=8001,
+        help="Port for MCP server (default: 8001)"
     )
     parser.add_argument(
         "--gradio-port",
+        type=int,
         default=7860,
+        help="Port for Gradio interface (default: 7860)"
     )
 
     args = parser.parse_args()
 
+    # Check if ports are available
+    if args.mode in ["mcp", "both"] and not check_port_available(args.mcp_port):
+        print(f"Error: Port {args.mcp_port} is already in use (MCP server)")
+        sys.exit(1)
 
+    if args.mode in ["gradio", "both"] and not check_port_available(args.gradio_port):
+        print(f"Error: Port {args.gradio_port} is already in use (Gradio interface)")
+        sys.exit(1)
+
+    try:
+        if args.mode in ["mcp", "both"]:
+            # Start MCP server in a separate process
+            mcp_process = multiprocessing.Process(
+                target=run_mcp_server,
+                kwargs={
+                    "host": args.host,
+                    "port": args.mcp_port
+                }
+            )
+            mcp_process.start()
+
+            # Give the server a moment to start
+            time.sleep(2)
 
+        if args.mode in ["gradio", "both"]:
+            # Run Gradio in the main process
+            run_gradio_interface(port=args.gradio_port)
+
+    except KeyboardInterrupt:
+        print("\nShutting down...")
+    except Exception as e:
+        print(f"Error: {e}")
+        sys.exit(1)
+    finally:
+        if 'mcp_process' in locals() and mcp_process.is_alive():
             mcp_process.terminate()
+            mcp_process.join(timeout=5)
utils/__init__.py
DELETED
@@ -1,3 +0,0 @@
"""
TutorX MCP Server utilities.
"""
utils/assessment.py
DELETED
@@ -1,357 +0,0 @@
"""
Assessment and analytics utilities for the TutorX MCP server.
"""

from typing import Dict, Any, List, Optional, Tuple
from datetime import datetime, timedelta
import random
import json

def generate_question(concept_id: str, difficulty: int) -> Dict[str, Any]:
    """
    Generate a question for a specific concept at the given difficulty level

    Args:
        concept_id: The concept identifier
        difficulty: Difficulty level from 1-5

    Returns:
        Question object
    """
    # In a real implementation, this would use templates and context to generate appropriate questions
    # Here we'll simulate with some hardcoded examples

    question_templates = {
        "math_algebra_basics": [
            {
                "text": "Simplify: {a}x + {b}x",
                "variables": {"a": (1, 10), "b": (1, 10)},
                "solution_template": "{a}x + {b}x = {sum}x",
                "answer_template": "{sum}x"
            },
            {
                "text": "Solve: x + {a} = {b}",
                "variables": {"a": (1, 20), "b": (5, 30)},
                "solution_template": "x + {a} = {b}\nx = {b} - {a}\nx = {answer}",
                "answer_template": "x = {answer}"
            }
        ],
        "math_algebra_linear_equations": [
            {
                "text": "Solve for x: {a}x + {b} = {c}",
                "variables": {"a": (2, 5), "b": (1, 10), "c": (10, 30)},
                "solution_template": "{a}x + {b} = {c}\n{a}x = {c} - {b}\n{a}x = {c_minus_b}\nx = {c_minus_b} / {a}\nx = {answer}",
                "answer_template": "x = {answer}"
            }
        ],
        "math_algebra_quadratic_equations": [
            {
                "text": "Solve: x² + {b}x + {c} = 0",
                "variables": {"b": (-10, 10), "c": (-20, 20)},
                "solution_template": "Using the quadratic formula: x = (-b ± √(b² - 4ac)) / 2a\nWith a=1, b={b}, c={c}:\nx = (-({b}) ± √(({b})² - 4(1)({c}))) / 2(1)\nx = (-{b} ± √({b_squared} - {four_c})) / 2\nx = (-{b} ± √{discriminant}) / 2\nx = (-{b} ± {sqrt_discriminant}) / 2\nx = ({neg_b_plus_sqrt} / 2) or x = ({neg_b_minus_sqrt} / 2)\nx = {answer1} or x = {answer2}",
                "answer_template": "x = {answer1} or x = {answer2}"
            }
        ]
    }

    # Select a template based on concept_id or return a default
    templates = question_templates.get(concept_id, [
        {
            "text": "Define the term: {concept}",
            "variables": {"concept": concept_id.replace("_", " ")},
            "solution_template": "Definition of {concept}",
            "answer_template": "Definition varies"
        }
    ])

    # Select a template based on difficulty
    template_index = min(int(difficulty / 2), len(templates) - 1)
    template = templates[template_index]

    # Fill in template variables
    variables = {}
    for var_name, var_range in template.get("variables", {}).items():
        if isinstance(var_range, tuple) and len(var_range) == 2:
            # For numeric ranges
            variables[var_name] = random.randint(var_range[0], var_range[1])
        else:
            # For non-numeric values
            variables[var_name] = var_range

    # Process the variables further for the solution
    solution_vars = dict(variables)

    # For algebra basics
    if concept_id == "math_algebra_basics" and "a" in variables and "b" in variables:
        solution_vars["sum"] = variables["a"] + variables["b"]
        solution_vars["answer"] = variables["b"] - variables["a"]

    # For linear equations
    if concept_id == "math_algebra_linear_equations" and all(k in variables for k in ["a", "b", "c"]):
        solution_vars["c_minus_b"] = variables["c"] - variables["b"]
        solution_vars["answer"] = (variables["c"] - variables["b"]) / variables["a"]

    # For quadratic equations
    if concept_id == "math_algebra_quadratic_equations" and all(k in variables for k in ["b", "c"]):
        a = 1  # Assuming a=1 for simplicity
        b = variables["b"]
        c = variables["c"]
        solution_vars["b_squared"] = b**2
        solution_vars["four_c"] = 4 * c
        solution_vars["discriminant"] = b**2 - 4*a*c

        if solution_vars["discriminant"] >= 0:
            solution_vars["sqrt_discriminant"] = round(solution_vars["discriminant"] ** 0.5, 3)
            solution_vars["neg_b_plus_sqrt"] = -b + solution_vars["sqrt_discriminant"]
            solution_vars["neg_b_minus_sqrt"] = -b - solution_vars["sqrt_discriminant"]
            solution_vars["answer1"] = round((-b + solution_vars["sqrt_discriminant"]) / (2*a), 3)
            solution_vars["answer2"] = round((-b - solution_vars["sqrt_discriminant"]) / (2*a), 3)
        else:
            # Complex roots
            solution_vars["sqrt_discriminant"] = f"{round((-solution_vars['discriminant']) ** 0.5, 3)}i"
            solution_vars["answer1"] = f"{round(-b/(2*a), 3)} + {round(((-solution_vars['discriminant']) ** 0.5)/(2*a), 3)}i"
            solution_vars["answer2"] = f"{round(-b/(2*a), 3)} - {round(((-solution_vars['discriminant']) ** 0.5)/(2*a), 3)}i"

    # Format text and solution
    text = template["text"].format(**variables)
    solution = template["solution_template"].format(**solution_vars) if "solution_template" in template else ""
    answer = template["answer_template"].format(**solution_vars) if "answer_template" in template else ""

    return {
        "id": f"q_{concept_id}_{random.randint(1000, 9999)}",
        "concept_id": concept_id,
        "difficulty": difficulty,
        "text": text,
        "solution": solution,
        "answer": answer,
        "variables": variables
    }


def evaluate_student_answer(question: Dict[str, Any], student_answer: str) -> Dict[str, Any]:
    """
    Evaluate a student's answer to a question

    Args:
        question: The question object
        student_answer: The student's answer as a string

    Returns:
        Evaluation results
    """
    # In a real implementation, this would use NLP and math parsing to evaluate the answer
    # Here we'll do a simple string comparison with some basic normalization

    def normalize_answer(answer):
        """Normalize an answer string for comparison"""
        return (answer.lower()
                .replace(" ", "")
                .replace("x=", "")
                .replace("y=", ""))

    correct_answer = normalize_answer(question["answer"])
    student_answer_norm = normalize_answer(student_answer)

    # Simple exact match for now
    is_correct = student_answer_norm == correct_answer

    # In a real implementation, we would have partial matching and error analysis
    error_type = None
    if not is_correct:
        # Try to guess error type - very simplified example
        if question["concept_id"] == "math_algebra_linear_equations":
            # Check for sign error
            if "-" in correct_answer and "+" in student_answer_norm:
                error_type = "sign_error"
            # Check for arithmetic error (within 20% of correct value)
            elif student_answer_norm.replace("-", "").isdigit() and correct_answer.replace("-", "").isdigit():
                try:
                    student_val = float(student_answer_norm)
                    correct_val = float(correct_answer)
                    if abs((student_val - correct_val) / correct_val) < 0.2:
                        error_type = "arithmetic_error"
                except (ValueError, ZeroDivisionError):
                    pass

    return {
        "question_id": question["id"],
        "is_correct": is_correct,
        "error_type": error_type,
        "correct_answer": question["answer"],
        "student_answer": student_answer,
        "timestamp": datetime.now().isoformat()
    }


def generate_performance_analytics(student_id: str, timeframe_days: int = 30) -> Dict[str, Any]:
    """
    Generate performance analytics for a student

    Args:
        student_id: The student's unique identifier
        timeframe_days: Number of days to include in the analysis

    Returns:
        Performance analytics
    """
    # In a real implementation, this would query a database
    # Here we'll generate sample data

    # Generate some sample data points over the timeframe
    start_date = datetime.now() - timedelta(days=timeframe_days)
    data_points = []

    # Simulate an improving learning curve
    accuracy_base = 0.65
    speed_base = 120  # seconds

    for day in range(timeframe_days):
        current_date = start_date + timedelta(days=day)

        # Simulate improvement over time with some random variation
        improvement_factor = min(day / timeframe_days * 0.3, 0.3)  # Max 30% improvement
        random_variation = random.uniform(-0.05, 0.05)

        accuracy = min(accuracy_base + improvement_factor + random_variation, 0.98)
        speed = max(speed_base * (1 - improvement_factor) + random.uniform(-10, 10), 30)

        # Generate 1-3 data points per day
        daily_points = random.randint(1, 3)
        for _ in range(daily_points):
            hour = random.randint(9, 20)  # Between 9 AM and 8 PM
            timestamp = current_date.replace(hour=hour, minute=random.randint(0, 59))

            data_points.append({
                "timestamp": timestamp.isoformat(),
                "accuracy": round(accuracy, 2),
                "speed_seconds": round(speed),
                "difficulty": random.randint(1, 5),
                "concepts": [f"concept_{random.randint(1, 10)}" for _ in range(random.randint(1, 3))]
            })

    # Calculate aggregate metrics
    if data_points:
        avg_accuracy = sum(point["accuracy"] for point in data_points) / len(data_points)
        avg_speed = sum(point["speed_seconds"] for point in data_points) / len(data_points)

        # Calculate improvement
        first_week = [p for p in data_points if datetime.fromisoformat(p["timestamp"]) < start_date + timedelta(days=7)]
        last_week = [p for p in data_points if datetime.fromisoformat(p["timestamp"]) > datetime.now() - timedelta(days=7)]

        accuracy_improvement = 0
        speed_improvement = 0

        if first_week and last_week:
            first_week_acc = sum(p["accuracy"] for p in first_week) / len(first_week)
            last_week_acc = sum(p["accuracy"] for p in last_week) / len(last_week)
            accuracy_improvement = round((last_week_acc - first_week_acc) * 100, 1)

            first_week_speed = sum(p["speed_seconds"] for p in first_week) / len(first_week)
            last_week_speed = sum(p["speed_seconds"] for p in last_week) / len(last_week)
            speed_improvement = round((first_week_speed - last_week_speed) / first_week_speed * 100, 1)
    else:
        avg_accuracy = 0
        avg_speed = 0
        accuracy_improvement = 0
        speed_improvement = 0

    # Compile strengths and weaknesses
    concept_performance = {}
    for point in data_points:
        for concept in point["concepts"]:
            if concept not in concept_performance:
                concept_performance[concept] = {"total": 0, "correct": 0}
            concept_performance[concept]["total"] += 1
            concept_performance[concept]["correct"] += point["accuracy"]

    strengths = []
    weaknesses = []

    for concept, perf in concept_performance.items():
        avg = perf["correct"] / perf["total"] if perf["total"] > 0 else 0
        if avg > 0.85 and perf["total"] >= 3:
            strengths.append(concept)
        elif avg < 0.7 and perf["total"] >= 3:
            weaknesses.append(concept)

    return {
        "student_id": student_id,
        "timeframe_days": timeframe_days,
        "metrics": {
            "avg_accuracy": round(avg_accuracy * 100, 1),
"avg_speed_seconds": round(avg_speed, 1),
|
283 |
-
"accuracy_improvement": accuracy_improvement, # percentage points
|
284 |
-
"speed_improvement": speed_improvement, # percentage
|
285 |
-
"total_questions_attempted": len(data_points),
|
286 |
-
"study_sessions": len(set(p["timestamp"].split("T")[0] for p in data_points))
|
287 |
-
},
|
288 |
-
"strengths": strengths[:3], # Top 3 strengths
|
289 |
-
"weaknesses": weaknesses[:3], # Top 3 weaknesses
|
290 |
-
"learning_style": "visual" if random.random() > 0.5 else "interactive",
|
291 |
-
"recommendations": [
|
292 |
-
"Focus on quadratic equations",
|
293 |
-
"Try more word problems",
|
294 |
-
"Schedule a tutoring session for challenging topics"
|
295 |
-
],
|
296 |
-
"generated_at": datetime.now().isoformat()
|
297 |
-
}
|
298 |
-
|
299 |
-
|
300 |
-
def detect_plagiarism(submission: str, reference_sources: List[str]) -> Dict[str, Any]:
|
301 |
-
"""
|
302 |
-
Check for potential plagiarism in a student's submission
|
303 |
-
|
304 |
-
Args:
|
305 |
-
submission: The student's submission
|
306 |
-
reference_sources: List of reference sources to check against
|
307 |
-
|
308 |
-
Returns:
|
309 |
-
Plagiarism analysis
|
310 |
-
"""
|
311 |
-
# In a real implementation, this would use sophisticated text comparison
|
312 |
-
# Here we'll do a simple similarity check
|
313 |
-
|
314 |
-
def normalize_text(text):
|
315 |
-
return text.lower().replace(" ", "")
|
316 |
-
|
317 |
-
norm_submission = normalize_text(submission)
|
318 |
-
matches = []
|
319 |
-
|
320 |
-
for i, source in enumerate(reference_sources):
|
321 |
-
norm_source = normalize_text(source)
|
322 |
-
|
323 |
-
# Check for exact substring matches of significant length
|
324 |
-
min_match_length = 30 # Characters
|
325 |
-
|
326 |
-
for start in range(len(norm_submission) - min_match_length + 1):
|
327 |
-
chunk = norm_submission[start:start + min_match_length]
|
328 |
-
if chunk in norm_source:
|
329 |
-
source_start = norm_source.find(chunk)
|
330 |
-
|
331 |
-
# Try to extend the match
|
332 |
-
match_length = min_match_length
|
333 |
-
while (start + match_length < len(norm_submission) and
|
334 |
-
source_start + match_length < len(norm_source) and
|
335 |
-
norm_submission[start + match_length] == norm_source[source_start + match_length]):
|
336 |
-
match_length += 1
|
337 |
-
|
338 |
-
matches.append({
|
339 |
-
"source_index": i,
|
340 |
-
"source_start": source_start,
|
341 |
-
"submission_start": start,
|
342 |
-
"length": match_length,
|
343 |
-
"match_text": submission[start:start + match_length]
|
344 |
-
})
|
345 |
-
|
346 |
-
# Calculate overall similarity
|
347 |
-
total_matched_chars = sum(match["length"] for match in matches)
|
348 |
-
similarity_score = min(total_matched_chars / len(submission) if submission else 0, 1.0)
|
349 |
-
|
350 |
-
return {
|
351 |
-
"similarity_score": round(similarity_score, 2),
|
352 |
-
"plagiarism_detected": similarity_score > 0.2,
|
353 |
-
"suspicious_threshold": 0.2,
|
354 |
-
"matches": matches,
|
355 |
-
"recommendation": "Review academic integrity guidelines" if similarity_score > 0.2 else "No issues detected",
|
356 |
-
"timestamp": datetime.now().isoformat()
|
357 |
-
}
|
|
|
|
|
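For reference, the sketch below shows how the removed heuristics could have been driven from calling code. It is a hypothetical usage example written against the pre-refactor utils.assessment module that this commit deletes; the question dict and the sample strings are invented for illustration and are not part of the repository.

# Hypothetical exercise of the deleted helpers (pre-refactor utils.assessment).
from utils.assessment import evaluate_student_answer, detect_plagiarism

question = {
    "id": "q_math_algebra_linear_equations_1234",   # invented sample id
    "concept_id": "math_algebra_linear_equations",
    "answer": "x = -3",
}

# Normalization strips spaces and "x=", so "x = +3" vs "x = -3" trips the
# sign-error heuristic rather than the arithmetic-error branch.
result = evaluate_student_answer(question, "x = +3")
print(result["is_correct"], result["error_type"])   # False sign_error

# The plagiarism check matches normalized substrings of 30+ characters, so a
# near-verbatim sentence pushes similarity_score past the 0.2 threshold.
report = detect_plagiarism(
    "The quadratic formula solves ax^2 + bx + c = 0 for x.",
    ["The quadratic formula solves ax^2 + bx + c = 0 for any coefficients."],
)
print(report["plagiarism_detected"], report["similarity_score"])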
|
utils/multimodal.py
DELETED
@@ -1,140 +0,0 @@
-"""
-Utility functions for multi-modal interactions including text processing,
-voice recognition and handwriting recognition for the TutorX MCP server.
-"""
-
-from typing import Dict, Any, List, Optional
-import base64
-import json
-from datetime import datetime
-
-
-def process_text_query(query: str, context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
-    """
-    Process a text query from the student
-
-    Args:
-        query: The text query from the student
-        context: Optional context about the student and current session
-
-    Returns:
-        Processed response
-    """
-    # In a real implementation, this would use NLP to understand the query
-    # and generate an appropriate response
-
-    # Simple keyword-based response for demonstration
-    keywords = {
-        "solve": {
-            "type": "math_solution",
-            "response": "To solve this equation, first isolate the variable by..."
-        },
-        "what is": {
-            "type": "definition",
-            "response": "This concept refers to..."
-        },
-        "how do i": {
-            "type": "procedure",
-            "response": "Follow these steps: 1)..."
-        },
-        "help": {
-            "type": "assistance",
-            "response": "I'm here to help! You can ask me questions about..."
-        }
-    }
-
-    for key, value in keywords.items():
-        if key in query.lower():
-            return {
-                "query": query,
-                "response_type": value["type"],
-                "response": value["response"],
-                "confidence": 0.85,
-                "timestamp": datetime.now().isoformat()
-            }
-
-    # Default response if no keywords match
-    return {
-        "query": query,
-        "response_type": "general",
-        "response": "That's an interesting question. Let me think about how to help you with that.",
-        "confidence": 0.6,
-        "timestamp": datetime.now().isoformat()
-    }
-
-
-def process_voice_input(audio_data_base64: str) -> Dict[str, Any]:
-    """
-    Process voice input from the student
-
-    Args:
-        audio_data_base64: Base64 encoded audio data
-
-    Returns:
-        Transcription and analysis
-    """
-    # In a real implementation, this would use ASR to transcribe the audio
-    # and then process the transcribed text
-
-    # For demonstration purposes, we'll simulate a transcription
-    return {
-        "transcription": "What is the quadratic formula?",
-        "confidence": 0.92,
-        "detected_emotions": {
-            "confusion": 0.7,
-            "interest": 0.9,
-            "frustration": 0.2
-        },
-        "audio_quality": "good",
-        "background_noise": "low",
-        "timestamp": datetime.now().isoformat()
-    }
-
-
-def process_handwriting(image_data_base64: str) -> Dict[str, Any]:
-    """
-    Process handwritten input from the student
-
-    Args:
-        image_data_base64: Base64 encoded image data of handwriting
-
-    Returns:
-        Transcription and analysis
-    """
-    # In a real implementation, this would use OCR/handwriting recognition
-    # to transcribe the handwritten text or equations
-
-    # For demonstration purposes, we'll simulate a transcription
-    return {
-        "transcription": "x^2 + 5x + 6 = 0",
-        "confidence": 0.85,
-        "detected_content_type": "math_equation",
-        "equation_type": "quadratic",
-        "parsed_latex": "x^2 + 5x + 6 = 0",
-        "timestamp": datetime.now().isoformat()
-    }
-
-
-def generate_speech_response(text: str, voice_params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
-    """
-    Generate speech response from text
-
-    Args:
-        text: The text to convert to speech
-        voice_params: Parameters for the voice (gender, age, accent, etc.)
-
-    Returns:
-        Speech data and metadata
-    """
-    # In a real implementation, this would use TTS to generate audio
-
-    # For demonstration, we'll simulate audio generation metadata
-    return {
-        "text": text,
-        "audio_format": "mp3",
-        "audio_data_base64": "SIMULATED_BASE64_AUDIO_DATA",
-        "voice_id": voice_params.get("voice_id", "default"),
-        "duration_seconds": len(text) / 15,  # Rough estimate of speech duration
-        "sample_rate": 24000,
-        "timestamp": datetime.now().isoformat()
-    }
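The removed module only returned canned data, and generate_speech_response carried a latent bug: voice_params defaults to None, yet the body calls voice_params.get(...) unconditionally, so calling it without an explicit dict raised AttributeError. The sketch below is a hypothetical caller written against the deleted API, shown only to document that behaviour; the sample strings and the "narrator" voice id are invented.

# Hypothetical caller for the deleted multimodal stubs (pre-refactor utils.multimodal).
from utils.multimodal import process_handwriting, generate_speech_response

ocr = process_handwriting("<base64-image-data>")   # stub: always returns the canned quadratic
speech = generate_speech_response(
    f"I read the equation {ocr['parsed_latex']}.",
    voice_params={"voice_id": "narrator"},          # must be a dict: the None default crashes
)
print(ocr["transcription"], round(speech["duration_seconds"], 1))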
|
uv.lock
CHANGED
@@ -688,6 +688,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/84/5d/e17845bb0fa76334477d5de38654d27946d5b5d3695443987a094a71b440/multidict-6.4.4-py3-none-any.whl", hash = "sha256:bd4557071b561a8b3b6075c3ce93cf9bfb6182cb241805c3d66ced3b75eff4ac", size = 10481, upload-time = "2025-05-19T14:16:36.024Z" },
 ]

+[[package]]
+name = "networkx"
+version = "3.5"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/6c/4f/ccdb8ad3a38e583f214547fd2f7ff1fc160c43a75af88e6aec213404b96a/networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037", size = 2471065, upload-time = "2025-05-29T11:35:07.804Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/eb/8d/776adee7bbf76365fdd7f2552710282c79a4ead5d2a46408c9043a2b70ba/networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec", size = 2034406, upload-time = "2025-05-29T11:35:04.961Z" },
+]
+
 [[package]]
 name = "numpy"
 version = "2.2.6"
@@ -1044,6 +1053,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/28/d0/def53b4a790cfb21483016430ed828f64830dd981ebe1089971cd10cab25/pytest_cov-6.1.1-py3-none-any.whl", hash = "sha256:bddf29ed2d0ab6f4df17b4c55b0a657287db8684af9c42ea546b21b1041b3dde", size = 23841, upload-time = "2025-04-05T14:07:49.641Z" },
 ]

+[[package]]
+name = "pytest-mock"
+version = "3.14.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "pytest" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/71/28/67172c96ba684058a4d24ffe144d64783d2a270d0af0d9e792737bddc75c/pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e", size = 33241, upload-time = "2025-05-26T13:58:45.167Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/b2/05/77b60e520511c53d1c1ca75f1930c7dd8e971d0c4379b7f4b3f9644685ba/pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0", size = 9923, upload-time = "2025-05-26T13:58:43.487Z" },
+]
+
 [[package]]
 name = "python-dateutil"
 version = "2.9.0.post0"
@@ -1263,10 +1284,16 @@ dependencies = [
     { name = "aiohttp" },
     { name = "fastapi" },
     { name = "gradio" },
+    { name = "httpx" },
     { name = "mcp", extra = ["cli"] },
+    { name = "networkx" },
     { name = "numpy" },
     { name = "pillow" },
     { name = "pydantic" },
+    { name = "pytest" },
+    { name = "pytest-asyncio" },
+    { name = "pytest-cov" },
+    { name = "python-dotenv" },
     { name = "python-multipart" },
     { name = "uvicorn" },
 ]
@@ -1277,6 +1304,7 @@ test = [
     { name = "pytest" },
     { name = "pytest-asyncio" },
     { name = "pytest-cov" },
+    { name = "pytest-mock" },
 ]

 [package.metadata]
@@ -1284,14 +1312,21 @@ requires-dist = [
     { name = "aiohttp", specifier = ">=3.9.0" },
     { name = "fastapi", specifier = ">=0.109.0" },
     { name = "gradio", specifier = ">=4.19.0" },
+    { name = "httpx", specifier = ">=0.26.0" },
     { name = "httpx", marker = "extra == 'test'", specifier = ">=0.26.0" },
     { name = "mcp", extras = ["cli"], specifier = ">=1.9.3" },
+    { name = "networkx", specifier = ">=3.0" },
     { name = "numpy", specifier = ">=1.24.0" },
     { name = "pillow", specifier = ">=10.0.0" },
     { name = "pydantic", specifier = ">=2.6.0" },
+    { name = "pytest", specifier = ">=7.4.0" },
     { name = "pytest", marker = "extra == 'test'", specifier = ">=7.4.0" },
+    { name = "pytest-asyncio", specifier = ">=0.23.0" },
     { name = "pytest-asyncio", marker = "extra == 'test'", specifier = ">=0.23.0" },
+    { name = "pytest-cov", specifier = ">=4.1.0" },
     { name = "pytest-cov", marker = "extra == 'test'", specifier = ">=4.1.0" },
+    { name = "pytest-mock", marker = "extra == 'test'", specifier = ">=3.12.0" },
+    { name = "python-dotenv", specifier = ">=1.0.0" },
     { name = "python-multipart", specifier = ">=0.0.6" },
     { name = "uvicorn", specifier = ">=0.27.0" },
 ]
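The lockfile change promotes httpx, networkx, python-dotenv, and the pytest tool-chain (pytest, pytest-asyncio, pytest-cov) to regular dependencies and adds pytest-mock to the test extra. After syncing the environment, something like the hypothetical snippet below can confirm that the newly pinned runtime packages import cleanly; the module list and helper are assumptions for illustration, not project code (note that python-dotenv installs the dotenv module).

# Hypothetical smoke check for the runtime dependencies added in this uv.lock update.
import importlib

NEW_RUNTIME_DEPS = ["networkx", "httpx", "dotenv"]  # python-dotenv imports as "dotenv"

def check_new_deps(module_names=NEW_RUNTIME_DEPS):
    """Return a {module: version-or-error} report for the newly pinned packages."""
    report = {}
    for name in module_names:
        try:
            module = importlib.import_module(name)
            report[name] = getattr(module, "__version__", "installed")
        except ImportError as exc:
            report[name] = f"missing ({exc})"
    return report

if __name__ == "__main__":
    for name, status in check_new_deps().items():
        print(f"{name}: {status}")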