Meet Patel committed on
Commit
f9f5b1d
·
1 Parent(s): 7867e85

Added proper MCP client, fixed Gradio-MCP integration, and improved architecture

Files changed (6)
  1. README.md +16 -0
  2. app.py +18 -52
  3. client.py +203 -0
  4. main.py +1 -0
  5. pyproject.toml +1 -0
  6. uv.lock +0 -0
README.md CHANGED
@@ -124,6 +124,7 @@ The server exposes the following MCP tools and resources:
124
  ```
125
  tutorx-mcp/
126
  ├── main.py # MCP server implementation
 
127
  ├── app.py # Gradio web interface
128
  ├── run.py # Runner script for different modes
129
  ├── utils/ # Utility modules
@@ -133,6 +134,21 @@ tutorx-mcp/
133
  └── README.md # Project documentation
134
  ```
135
136
  ## License
137
 
138
  This project is licensed under the MIT License - see the LICENSE file for details.
 
124
  ```
125
  tutorx-mcp/
126
  ├── main.py # MCP server implementation
127
+ ├── client.py # MCP client for calling server tools
128
  ├── app.py # Gradio web interface
129
  ├── run.py # Runner script for different modes
130
  ├── utils/ # Utility modules
 
134
  └── README.md # Project documentation
135
  ```
136
 
137
+ ## Architecture
138
+
139
+ TutorX-MCP follows a layered architecture:
140
+
141
+ 1. **MCP Server (main.py)**: Core backend that exposes educational tools and resources through the Model Context Protocol.
142
+
143
+ 2. **MCP Client (client.py)**: Client library that communicates with the MCP server through HTTP requests, translating method calls into MCP protocol interactions.
144
+
145
+ 3. **Gradio Interface (app.py)**: Web-based user interface that uses the client to communicate with the MCP server.
146
+
147
+ This separation of concerns allows:
148
+ - MCP clients (like Claude Desktop App) to directly connect to the MCP server
149
+ - The web interface to interact with the server using standard HTTP
150
+ - Clear boundaries between presentation, business logic, and tool implementation
151
+
152
  ## License
153
 
154
  This project is licensed under the MIT License - see the LICENSE file for details.
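To make the layered flow described in the new Architecture section concrete, here is a minimal sketch (not part of the commit) of the presentation layer reaching the MCP server only through the client added in client.py. The concept id and the assumption that the server from main.py is already running on the default http://localhost:8000 are illustrative.

```python
# Illustrative sketch: the UI layer never imports main.py directly.
# All tool calls go through TutorXClient, which wraps HTTP requests
# to the MCP server.
from client import TutorXClient

client = TutorXClient(server_url="http://localhost:8000")  # default from client.py

# Presentation layer -> client -> HTTP -> MCP server tool
quiz = client.generate_quiz(["algebra_basics"], difficulty=2)  # placeholder concept id
print(quiz)
```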
app.py CHANGED
@@ -10,37 +10,8 @@ from io import BytesIO
10
  from PIL import Image
11
  from datetime import datetime
12
 
13
- # Import MCP server tools
14
- from main import (
15
- # Core features
16
- assess_skill,
17
- get_concept_graph,
18
- get_learning_path,
19
- generate_quiz,
20
- analyze_error_patterns,
21
-
22
- # Advanced features
23
- analyze_cognitive_state,
24
- get_curriculum_standards,
25
- align_content_to_standard,
26
- generate_lesson,
27
-
28
- # User experience
29
- get_student_dashboard,
30
- get_accessibility_settings,
31
- update_accessibility_settings,
32
-
33
- # Multi-modal
34
- text_interaction,
35
- voice_interaction,
36
- handwriting_recognition,
37
-
38
- # Assessment
39
- create_assessment,
40
- grade_assessment,
41
- get_student_analytics,
42
- check_submission_originality
43
- )
44
 
45
  # Utility functions
46
  def image_to_base64(img):
@@ -85,9 +56,8 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
85
 
86
  with gr.Column():
87
  assessment_output = gr.JSON(label="Skill Assessment")
88
-
89
- assess_btn.click(
90
- fn=lambda concept: assess_skill(student_id, concept),
91
  inputs=[concept_id_input],
92
  outputs=[assessment_output]
93
  )
@@ -97,7 +67,7 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
97
  concept_graph_output = gr.JSON(label="Concept Graph")
98
 
99
  concept_graph_btn.click(
100
- fn=lambda: get_concept_graph(),
101
  inputs=[],
102
  outputs=[concept_graph_output]
103
  )
@@ -117,7 +87,7 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
117
  quiz_output = gr.JSON(label="Generated Quiz")
118
 
119
  gen_quiz_btn.click(
120
- fn=lambda concepts, diff: generate_quiz(concepts, diff),
121
  inputs=[concepts_input, diff_input],
122
  outputs=[quiz_output]
123
  )
@@ -135,9 +105,8 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
135
 
136
  with gr.Column():
137
  lesson_output = gr.JSON(label="Lesson Plan")
138
-
139
- gen_lesson_btn.click(
140
- fn=lambda topic, grade, duration: generate_lesson(topic, grade, duration),
141
  inputs=[topic_input, grade_input, duration_input],
142
  outputs=[lesson_output]
143
  )
@@ -157,7 +126,7 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
157
  standards_output = gr.JSON(label="Curriculum Standards")
158
 
159
  standards_btn.click(
160
- fn=lambda country: get_curriculum_standards(country),
161
  inputs=[country_input],
162
  outputs=[standards_output]
163
  )
@@ -173,9 +142,8 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
173
 
174
  with gr.Column():
175
  text_output = gr.JSON(label="Response")
176
-
177
- text_btn.click(
178
- fn=lambda query: text_interaction(query, student_id),
179
  inputs=[text_input],
180
  outputs=[text_output]
181
  )
@@ -192,7 +160,7 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
192
 
193
  # Convert drawing to base64 then process
194
  drawing_btn.click(
195
- fn=lambda img: handwriting_recognition(image_to_base64(img), student_id),
196
  inputs=[drawing_input],
197
  outputs=[drawing_output]
198
  )
@@ -203,9 +171,8 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
203
  analytics_btn = gr.Button("Generate Analytics Report")
204
  timeframe = gr.Slider(minimum=7, maximum=90, value=30, step=1, label="Timeframe (days)")
205
  analytics_output = gr.JSON(label="Performance Analytics")
206
-
207
- analytics_btn.click(
208
- fn=lambda days: get_student_analytics(student_id, days),
209
  inputs=[timeframe],
210
  outputs=[analytics_output]
211
  )
@@ -221,7 +188,7 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
221
  error_output = gr.JSON(label="Error Pattern Analysis")
222
 
223
  error_btn.click(
224
- fn=lambda concept: analyze_error_patterns(student_id, concept),
225
  inputs=[error_concept],
226
  outputs=[error_output]
227
  )
@@ -243,9 +210,8 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
243
 
244
  with gr.Column():
245
  assessment_output = gr.JSON(label="Generated Assessment")
246
-
247
- create_assess_btn.click(
248
- fn=lambda concepts, num, diff: create_assessment(concepts, num, diff),
249
  inputs=[assess_concepts, assess_questions, assess_diff],
250
  outputs=[assessment_output]
251
  )
@@ -270,7 +236,7 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
270
  plagiarism_output = gr.JSON(label="Originality Report")
271
 
272
  plagiarism_btn.click(
273
- fn=lambda sub, ref: check_submission_originality(sub, [ref]),
274
  inputs=[submission_input, reference_input],
275
  outputs=[plagiarism_output]
276
  )
 
10
  from PIL import Image
11
  from datetime import datetime
12
 
13
+ # Import MCP client to communicate with the MCP server
14
+ from client import client
15
 
16
  # Utility functions
17
  def image_to_base64(img):
 
56
 
57
  with gr.Column():
58
  assessment_output = gr.JSON(label="Skill Assessment")
59
+ assess_btn.click(
60
+ fn=lambda concept: client.assess_skill(student_id, concept),
 
61
  inputs=[concept_id_input],
62
  outputs=[assessment_output]
63
  )
 
67
  concept_graph_output = gr.JSON(label="Concept Graph")
68
 
69
  concept_graph_btn.click(
70
+ fn=lambda: client.get_concept_graph(),
71
  inputs=[],
72
  outputs=[concept_graph_output]
73
  )
 
87
  quiz_output = gr.JSON(label="Generated Quiz")
88
 
89
  gen_quiz_btn.click(
90
+ fn=lambda concepts, diff: client.generate_quiz(concepts, diff),
91
  inputs=[concepts_input, diff_input],
92
  outputs=[quiz_output]
93
  )
 
105
 
106
  with gr.Column():
107
  lesson_output = gr.JSON(label="Lesson Plan")
108
+ gen_lesson_btn.click(
109
+ fn=lambda topic, grade, duration: client.generate_lesson(topic, grade, duration),
 
110
  inputs=[topic_input, grade_input, duration_input],
111
  outputs=[lesson_output]
112
  )
 
126
  standards_output = gr.JSON(label="Curriculum Standards")
127
 
128
  standards_btn.click(
129
+ fn=lambda country: client.get_curriculum_standards(country),
130
  inputs=[country_input],
131
  outputs=[standards_output]
132
  )
 
142
 
143
  with gr.Column():
144
  text_output = gr.JSON(label="Response")
145
+ text_btn.click(
146
+ fn=lambda query: client.text_interaction(query, student_id),
 
147
  inputs=[text_input],
148
  outputs=[text_output]
149
  )
 
160
 
161
  # Convert drawing to base64 then process
162
  drawing_btn.click(
163
+ fn=lambda img: client.handwriting_recognition(image_to_base64(img), student_id),
164
  inputs=[drawing_input],
165
  outputs=[drawing_output]
166
  )
 
171
  analytics_btn = gr.Button("Generate Analytics Report")
172
  timeframe = gr.Slider(minimum=7, maximum=90, value=30, step=1, label="Timeframe (days)")
173
  analytics_output = gr.JSON(label="Performance Analytics")
174
+ analytics_btn.click(
175
+ fn=lambda days: client.get_student_analytics(student_id, days),
 
176
  inputs=[timeframe],
177
  outputs=[analytics_output]
178
  )
 
188
  error_output = gr.JSON(label="Error Pattern Analysis")
189
 
190
  error_btn.click(
191
+ fn=lambda concept: client.analyze_error_patterns(student_id, concept),
192
  inputs=[error_concept],
193
  outputs=[error_output]
194
  )
 
210
 
211
  with gr.Column():
212
  assessment_output = gr.JSON(label="Generated Assessment")
213
+ create_assess_btn.click(
214
+ fn=lambda concepts, num, diff: client.create_assessment(concepts, num, diff),
 
215
  inputs=[assess_concepts, assess_questions, assess_diff],
216
  outputs=[assessment_output]
217
  )
 
236
  plagiarism_output = gr.JSON(label="Originality Report")
237
 
238
  plagiarism_btn.click(
239
+ fn=lambda sub, ref: client.check_submission_originality(sub, [ref]),
240
  inputs=[submission_input, reference_input],
241
  outputs=[plagiarism_output]
242
  )
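The new wiring in app.py follows one pattern throughout: each button's click handler is a small lambda that forwards the Gradio inputs to a client method and routes the returned JSON to a gr.JSON output. A stripped-down sketch of that pattern, with component names and a student id that are illustrative rather than the ones used in app.py:

```python
import gradio as gr

from client import client  # default TutorXClient instance defined in client.py

student_id = "student_demo_001"  # placeholder id for illustration

with gr.Blocks(title="TutorX wiring sketch") as demo:
    concept_box = gr.Textbox(label="Concept ID")
    assess_btn = gr.Button("Assess Skill")
    result = gr.JSON(label="Skill Assessment")

    # The handler only forwards values; the actual logic runs on the MCP
    # server behind client.assess_skill().
    assess_btn.click(
        fn=lambda concept: client.assess_skill(student_id, concept),
        inputs=[concept_box],
        outputs=[result],
    )

if __name__ == "__main__":
    demo.launch()
```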
client.py ADDED
@@ -0,0 +1,203 @@
1
+ """
2
+ MCP client for interacting with the TutorX MCP server.
3
+ This module provides functions that interact with the MCP server
4
+ for use by the Gradio interface.
5
+ """
6
+
7
+ import json
8
+ import requests
9
+ from typing import Dict, Any, List, Optional
10
+ import base64
11
+ from datetime import datetime
12
+
13
+ # Default MCP server URL
14
+ MCP_SERVER_URL = "http://localhost:8000"
15
+
16
+ class TutorXClient:
17
+ """Client for interacting with the TutorX MCP server"""
18
+
19
+ def __init__(self, server_url=MCP_SERVER_URL):
20
+ self.server_url = server_url
21
+
22
+ def _call_tool(self, tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
23
+ """
24
+ Call an MCP tool on the server
25
+
26
+ Args:
27
+ tool_name: Name of the tool to call
28
+ params: Parameters to pass to the tool
29
+
30
+ Returns:
31
+ Tool response
32
+ """
33
+ try:
34
+ response = requests.post(
35
+ f"{self.server_url}/tools/{tool_name}",
36
+ json=params,
37
+ headers={"Content-Type": "application/json"}
38
+ )
39
+ response.raise_for_status()
40
+ return response.json()
41
+ except requests.RequestException as e:
42
+ return {
43
+ "error": f"Failed to call tool: {str(e)}",
44
+ "timestamp": datetime.now().isoformat()
45
+ }
46
+
47
+ def _get_resource(self, resource_uri: str) -> Dict[str, Any]:
48
+ """
49
+ Get an MCP resource from the server
50
+
51
+ Args:
52
+ resource_uri: URI of the resource to get
53
+
54
+ Returns:
55
+ Resource data
56
+ """
57
+ try:
58
+ response = requests.get(
59
+ f"{self.server_url}/resources?uri={resource_uri}",
60
+ headers={"Accept": "application/json"}
61
+ )
62
+ response.raise_for_status()
63
+ return response.json()
64
+ except requests.RequestException as e:
65
+ return {
66
+ "error": f"Failed to get resource: {str(e)}",
67
+ "timestamp": datetime.now().isoformat()
68
+ }
69
+
70
+ # ------------ Core Features ------------
71
+
72
+ def assess_skill(self, student_id: str, concept_id: str) -> Dict[str, Any]:
73
+ """Assess student's skill level on a specific concept"""
74
+ return self._call_tool("assess_skill", {
75
+ "student_id": student_id,
76
+ "concept_id": concept_id
77
+ })
78
+
79
+ def get_concept_graph(self) -> Dict[str, Any]:
80
+ """Get the full knowledge concept graph"""
81
+ return self._get_resource("concept-graph://")
82
+
83
+ def get_learning_path(self, student_id: str) -> Dict[str, Any]:
84
+ """Get personalized learning path for a student"""
85
+ return self._get_resource(f"learning-path://{student_id}")
86
+
87
+ def generate_quiz(self, concept_ids: List[str], difficulty: int = 2) -> Dict[str, Any]:
88
+ """Generate a quiz based on specified concepts and difficulty"""
89
+ return self._call_tool("generate_quiz", {
90
+ "concept_ids": concept_ids,
91
+ "difficulty": difficulty
92
+ })
93
+
94
+ def analyze_error_patterns(self, student_id: str, concept_id: str) -> Dict[str, Any]:
95
+ """Analyze common error patterns for a student on a specific concept"""
96
+ return self._call_tool("analyze_error_patterns", {
97
+ "student_id": student_id,
98
+ "concept_id": concept_id
99
+ })
100
+
101
+ # ------------ Advanced Features ------------
102
+
103
+ def analyze_cognitive_state(self, eeg_data: Dict[str, Any]) -> Dict[str, Any]:
104
+ """Analyze EEG data to determine cognitive state"""
105
+ return self._call_tool("analyze_cognitive_state", {
106
+ "eeg_data": eeg_data
107
+ })
108
+
109
+ def get_curriculum_standards(self, country_code: str) -> Dict[str, Any]:
110
+ """Get curriculum standards for a specific country"""
111
+ return self._get_resource(f"curriculum-standards://{country_code}")
112
+
113
+ def align_content_to_standard(self, content_id: str, standard_id: str) -> Dict[str, Any]:
114
+ """Align educational content to a specific curriculum standard"""
115
+ return self._call_tool("align_content_to_standard", {
116
+ "content_id": content_id,
117
+ "standard_id": standard_id
118
+ })
119
+
120
+ def generate_lesson(self, topic: str, grade_level: int, duration_minutes: int = 45) -> Dict[str, Any]:
121
+ """Generate a complete lesson plan on a topic"""
122
+ return self._call_tool("generate_lesson", {
123
+ "topic": topic,
124
+ "grade_level": grade_level,
125
+ "duration_minutes": duration_minutes
126
+ })
127
+
128
+ # ------------ User Experience ------------
129
+
130
+ def get_student_dashboard(self, student_id: str) -> Dict[str, Any]:
131
+ """Get dashboard data for a specific student"""
132
+ return self._get_resource(f"student-dashboard://{student_id}")
133
+
134
+ def get_accessibility_settings(self, student_id: str) -> Dict[str, Any]:
135
+ """Get accessibility settings for a student"""
136
+ return self._call_tool("get_accessibility_settings", {
137
+ "student_id": student_id
138
+ })
139
+
140
+ def update_accessibility_settings(self, student_id: str, settings: Dict[str, Any]) -> Dict[str, Any]:
141
+ """Update accessibility settings for a student"""
142
+ return self._call_tool("update_accessibility_settings", {
143
+ "student_id": student_id,
144
+ "settings": settings
145
+ })
146
+
147
+ # ------------ Multi-Modal Interaction ------------
148
+
149
+ def text_interaction(self, query: str, student_id: str) -> Dict[str, Any]:
150
+ """Process a text query from the student"""
151
+ return self._call_tool("text_interaction", {
152
+ "query": query,
153
+ "student_id": student_id
154
+ })
155
+
156
+ def voice_interaction(self, audio_data_base64: str, student_id: str) -> Dict[str, Any]:
157
+ """Process voice input from the student"""
158
+ return self._call_tool("voice_interaction", {
159
+ "audio_data_base64": audio_data_base64,
160
+ "student_id": student_id
161
+ })
162
+
163
+ def handwriting_recognition(self, image_data_base64: str, student_id: str) -> Dict[str, Any]:
164
+ """Process handwritten input from the student"""
165
+ return self._call_tool("handwriting_recognition", {
166
+ "image_data_base64": image_data_base64,
167
+ "student_id": student_id
168
+ })
169
+
170
+ # ------------ Assessment ------------
171
+
172
+ def create_assessment(self, concept_ids: List[str], num_questions: int, difficulty: int = 3) -> Dict[str, Any]:
173
+ """Create a complete assessment for given concepts"""
174
+ return self._call_tool("create_assessment", {
175
+ "concept_ids": concept_ids,
176
+ "num_questions": num_questions,
177
+ "difficulty": difficulty
178
+ })
179
+
180
+ def grade_assessment(self, assessment_id: str, student_answers: Dict[str, str], questions: List[Dict[str, Any]]) -> Dict[str, Any]:
181
+ """Grade a completed assessment"""
182
+ return self._call_tool("grade_assessment", {
183
+ "assessment_id": assessment_id,
184
+ "student_answers": student_answers,
185
+ "questions": questions
186
+ })
187
+
188
+ def get_student_analytics(self, student_id: str, timeframe_days: int = 30) -> Dict[str, Any]:
189
+ """Get comprehensive analytics for a student"""
190
+ return self._call_tool("get_student_analytics", {
191
+ "student_id": student_id,
192
+ "timeframe_days": timeframe_days
193
+ })
194
+
195
+ def check_submission_originality(self, submission: str, reference_sources: List[str]) -> Dict[str, Any]:
196
+ """Check student submission for potential plagiarism"""
197
+ return self._call_tool("check_submission_originality", {
198
+ "submission": submission,
199
+ "reference_sources": reference_sources
200
+ })
201
+
202
+ # Create a default client instance for easy import
203
+ client = TutorXClient()
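Because client.py exposes both the TutorXClient class and a module-level default instance, callers can import client directly (as app.py now does) or point their own instance at a different server. A short usage sketch; the remote URL and the student/concept ids are placeholders:

```python
from client import TutorXClient, client

# Shared default instance, assumed to target http://localhost:8000.
graph = client.get_concept_graph()

# Or construct an instance for a non-default MCP server.
remote = TutorXClient(server_url="http://tutorx.example.internal:8000")
assessment = remote.assess_skill("student_123", "fractions_intro")

# Tool calls and resource reads both return plain dicts; HTTP failures are
# reported as {"error": ..., "timestamp": ...} instead of raising.
print(graph.get("error"), assessment.get("error"))
```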
main.py CHANGED
@@ -362,6 +362,7 @@ def update_accessibility_settings(student_id: str, settings: Dict[str, Any]) ->
362
 
363
  # ------------------ Multi-Modal Interaction ------------------
364
 
 
365
  @mcp.tool()
366
  def text_interaction(query: str, student_id: str, session_context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
367
  """
 
362
 
363
  # ------------------ Multi-Modal Interaction ------------------
364
 
365
+ @mcp.tool()
366
  @mcp.tool()
367
  def text_interaction(query: str, student_id: str, session_context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
368
  """
pyproject.toml CHANGED
@@ -9,4 +9,5 @@ dependencies = [
9
  "gradio>=4.19.0",
10
  "numpy>=1.24.0",
11
  "pillow>=10.0.0",
 
12
  ]
 
9
  "gradio>=4.19.0",
10
  "numpy>=1.24.0",
11
  "pillow>=10.0.0",
12
+ "requests>=2.31.0",
13
  ]
uv.lock CHANGED
The diff for this file is too large to render. See raw diff