ysharma (HF Staff) committed
Commit 460de3b · verified · 1 Parent(s): 2b232e8

Create ui_components.py

Files changed (1)
  1. ui_components.py +483 -0
ui_components.py ADDED
@@ -0,0 +1,483 @@
+"""
+UI Components for Universal MCP Client - Fixed with optimal MCP guidance
+"""
+import gradio as gr
+from gradio import ChatMessage
+from typing import Tuple, List, Dict, Any
+import os
+import logging
+import traceback
+from openai import OpenAI
+
+from config import AppConfig, CUSTOM_CSS, HF_HUB_AVAILABLE
+from chat_handler import ChatHandler
+from server_manager import ServerManager
+from mcp_client import UniversalMCPClient
+
+# Import HuggingFace Hub for login functionality
+if HF_HUB_AVAILABLE:
+    from huggingface_hub import login, logout, whoami
+    from huggingface_hub.utils import HfHubHTTPError
+
+logger = logging.getLogger(__name__)
+
+class UIComponents:
+    """Manages Gradio UI components with improved MCP server management"""
+
+    def __init__(self, mcp_client: UniversalMCPClient):
+        self.mcp_client = mcp_client
+        self.chat_handler = ChatHandler(mcp_client)
+        self.server_manager = ServerManager(mcp_client)
+        self.current_user = None
+
+    def _initialize_default_servers(self):
+        """Initialize default MCP servers on app startup"""
+        default_servers = [
+            ("background removal", "ysharma/background-removal-mcp"),
+            ("text to video", "ysharma/ltx-video-distilled"),
+            ("text to speech", "ysharma/Kokoro-TTS-mcp-test"),
+            ("text to image", "ysharma/dalle-3-xl-lora-v2")
+        ]
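+        # Each entry is a (display name, Hugging Face Space id) pair; failures in the
+        # loop below are caught and logged so startup continues without that server.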
+
+        logger.info("🚀 Initializing default MCP servers...")
+
+        for server_name, space_id in default_servers:
+            try:
+                status_msg, _ = self.server_manager.add_custom_server(server_name, space_id)
+                if "✅" in status_msg:
+                    logger.info(f"✅ Added default server: {server_name}")
+                else:
+                    logger.warning(f"⚠️ Failed to add default server {server_name}: {status_msg}")
+            except Exception as e:
+                logger.error(f"❌ Error adding default server {server_name}: {e}")
+
+        logger.info(f"📊 Initialized {len(self.mcp_client.servers)} default servers")
+
+    def create_interface(self) -> gr.Blocks:
+        """Create the main Gradio interface with improved layout"""
+        with gr.Blocks(
+            title="Universal MCP Client - HF Inference Powered",
+            theme=getattr(gr.themes, AppConfig.GRADIO_THEME.title())(),
+            fill_height=True,
+            css=CUSTOM_CSS
+        ) as demo:
+
+            # Create sidebar
+            self._create_sidebar()
+
+            # Create main chat area
+            chatbot = self._create_main_chat_area()
+
+            # Set up event handlers
+            self._setup_event_handlers(chatbot, demo)
+
+        return demo
+
+    def _create_sidebar(self):
+        """Create the sidebar with login, provider/model selection, and server management"""
+        with gr.Sidebar(elem_id="main-sidebar"):
+            gr.Markdown("# 🤗 chat.gradio.app")
+
+            # HuggingFace Login Section
+            self._create_login_section()
+
+            # Provider and Model Selection with defaults
+            self._create_provider_model_selection()
+
+            # MCP Server Management
+            self._create_server_management_section()
+
+            # Collapsible information section
+            with gr.Accordion("📚 Guide & Info", open=False):
+                gr.Markdown("""
+                ## 🎯 How To Use
+                1. **Login**: Sign in with your HuggingFace account for API access
+                2. **Add MCP Servers**: Connect to various AI tools on the 🤗 Hub
+                3. **Enable/Disable Servers**: Use checkboxes to control which servers are active
+                4. **Chat**: Interact with GPT-OSS and use connected MCP servers
+
+                ## 💭 Features
+                - **GPT-OSS Models**: OpenAI's latest open-source reasoning models (128k context)
+                - **MCP Integration**: Connect to thousands of AI apps on the Hub via the MCP protocol
+                - **Multi-Provider**: Access via Cerebras, Fireworks, Together AI, and others
+                - **Media Support**: Automatic embedding of media such as images, audio, and video
+                """)
+
+    def _create_login_section(self):
+        """Create HuggingFace OAuth login section"""
+        with gr.Group(elem_classes="login-section"):
+            gr.Markdown("## 🔑 Authentication", container=True)
+            self.login_button = gr.LoginButton(
+                value="Sign in with Hugging Face",
+                size="sm"
+            )
+            self.login_status = gr.Markdown("⚪ Please sign in to access Inference Providers", container=True)
+
+    def _create_provider_model_selection(self):
+        """Create provider and model selection dropdowns with defaults"""
+        with gr.Group(elem_classes="provider-model-selection"):
+            gr.Markdown("## 🚀 Inference Configuration", container=True)
+
+            # Provider dropdown with default selection
+            provider_choices = list(AppConfig.INFERENCE_PROVIDERS.keys())
+            self.provider_dropdown = gr.Dropdown(
+                choices=provider_choices,
+                label="🔧 Inference Provider",
+                value="cerebras",  # Default to Cerebras
+                info="Choose your preferred inference provider"
+            )
+
+            # Model dropdown (will be populated based on provider)
+            self.model_dropdown = gr.Dropdown(
+                choices=[],
+                label="🤖 Model",
+                value=None,
+                info="Select GPT OSS model variant"
+            )
+
+            # Status display
+            self.api_status = gr.Markdown("⚪ Select provider and model to begin", container=True)
+
+    def _create_server_management_section(self):
+        """Create the server management section with checkboxes and guidance"""
+        with gr.Group():
+            gr.Markdown("## 🔧 MCP Servers", container=True)
+
+            # ADDED: Optimal server count guidance
+            gr.Markdown("""
+            <div style="background: #f0f8ff; padding: 10px; border-radius: 5px; border-left: 3px solid #4169e1; margin-bottom: 10px;">
+            <strong>💡 Best Practice:</strong> For optimal performance, we recommend keeping
+            <strong>3-6 MCP servers</strong> enabled at once. Too many servers can:
+            • Increase context usage (reducing available tokens for conversation)
+            • Potentially confuse the model when selecting tools
+            • Slow down response times
+
+            You can add more servers but selectively enable only the ones you need for your current task.
+            </div>
+            """, container=True)
+
+            # Server controls
+            with gr.Row():
+                self.add_server_btn = gr.Button("Add MCP Server", variant="primary", size="sm")
+                self.remove_all_btn = gr.Button("Remove All", variant="secondary", size="sm")
+
+            # Add a save button (initially hidden)
+            self.save_server_btn = gr.Button("Save Server", variant="primary", size="sm", visible=False)
+
+            # MCP server selection
+            from mcp_spaces_finder import _finder
+            spaces = _finder.get_mcp_spaces()
+            self.mcp_dropdown = gr.Dropdown(
+                choices=spaces,
+                label=f"Available MCP Servers ({len(spaces)})",
+                value=None,
+                info="Choose from HuggingFace spaces",
+                allow_custom_value=True,
+                visible=False
+            )
+
+            self.server_name = gr.Textbox(
+                label="Server Title",
+                placeholder="e.g., Text to Image Generator",
+                visible=False
+            )
+
+            # Server status and controls
+            self.server_checkboxes = gr.CheckboxGroup(
+                label="Active Servers (Check to enable)",
+                choices=[],
+                value=[],
+                info="✅ Enabled servers can be used | ⬜ Disabled servers are ignored"
+            )
+
+            self.add_server_output = gr.Markdown("", visible=False, container=True)
+
+    def _create_main_chat_area(self) -> gr.Chatbot:
+        """Create the main chat area"""
+        with gr.Column(elem_classes="main-content"):
+            chatbot = gr.Chatbot(
+                label="Universal MCP-Powered AI Assistant",
+                show_label=False,
+                type="messages",
+                scale=1,
+                show_copy_button=True,
+                avatar_images=None,
+                value=[
+                    ChatMessage(
+                        role="assistant",
+                        content="""Welcome! I'm your MCP-powered AI assistant using OpenAI's GPT-OSS models via HuggingFace Inference Providers.
+
+🎉 **Pre-loaded MCP servers ready to use:**
+- **background removal** - Remove backgrounds from images
+- **text to video** - Generate videos from text descriptions
+- **text to speech** - Convert text to natural speech
+- **text to image** - Create images from text prompts
+
+You can start using these servers right away, add more servers, or remove them as needed. Try asking me to generate an image, create speech, or any other task!"""
+                    )
+                ]
+            )
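+            # Note: with type="messages", gr.Chatbot accepts gradio.ChatMessage objects
+            # (or OpenAI-style role/content dicts), so the welcome message above lines up
+            # with the ChatMessage history rebuilt in submit_message() below.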
+
+            with gr.Column(scale=0, elem_classes="input-area"):
+                self.chat_input = gr.MultimodalTextbox(
+                    interactive=True,
+                    file_count="multiple",
+                    placeholder="Enter message or upload files...",
+                    show_label=False,
+                    sources=["upload", "microphone"],
+                    file_types=None
+                )
+
+        return chatbot
+
+    def _setup_event_handlers(self, chatbot: gr.Chatbot, demo: gr.Blocks):
+        """Set up all event handlers"""
+
+        # OAuth profile handler
+        def handle_oauth_profile(profile: gr.OAuthProfile | None, token: gr.OAuthToken | None):
+            if profile is None:
+                return "⚪ Please sign in to access Inference Providers"
+
+            logger.info(f"👤 OAuth profile received for user: {profile.name}")
+
+            if token and token.token:
+                logger.info("🔑 OAuth token received, updating HF client...")
+                os.environ["HF_TOKEN"] = token.token
+                try:
+                    self.mcp_client.hf_client = OpenAI(
+                        base_url="https://router.huggingface.co/v1",
+                        api_key=token.token
+                    )
+                    logger.info("✅ HuggingFace Inference client updated with OAuth token")
+                except Exception as e:
+                    logger.error(f"❌ Failed to update HF client: {e}")
+
+            return f"✅ Signed in as: **{profile.name}**"
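+        # Note: when a gr.LoginButton is present, Gradio injects gr.OAuthProfile /
+        # gr.OAuthToken into handlers whose parameters are annotated with those types,
+        # so the demo.load() call below needs no explicit inputs for this handler.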
+
+        # Provider selection with auto-model loading
+        def handle_provider_change(provider_id):
+            if not provider_id:
+                return gr.Dropdown(choices=[], value=None), "⚪ Select provider first"
+
+            available_models = AppConfig.get_available_models_for_provider(provider_id)
+            model_choices = [(AppConfig.AVAILABLE_MODELS[model]["name"], model) for model in available_models]
+
+            # Auto-select 120b model if available
+            default_model = "openai/gpt-oss-120b" if "openai/gpt-oss-120b" in available_models else (available_models[0] if available_models else None)
+
+            # Get context info for status
+            if default_model:
+                model_info = AppConfig.AVAILABLE_MODELS.get(default_model, {})
+                context_length = model_info.get("context_length", 128000)
+                status_msg = f"✅ Provider selected, model auto-selected ({context_length:,} token context)"
+            else:
+                status_msg = "✅ Provider selected, please select a model"
+
+            return (
+                gr.Dropdown(choices=model_choices, value=default_model, label="🤖 Model"),
+                status_msg
+            )
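+        # Note: gr.Dropdown accepts (display label, value) tuples for choices, so the
+        # dropdown shows the human-readable model name while passing the model id
+        # (e.g. "openai/gpt-oss-120b") on to handle_model_change.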
+
+        # Model selection
+        def handle_model_change(provider_id, model_id):
+            if not provider_id or not model_id:
+                return "⚪ Select both provider and model"
+
+            self.mcp_client.set_model_and_provider(provider_id, model_id)
+
+            # Get model info
+            model_info = AppConfig.AVAILABLE_MODELS.get(model_id, {})
+            context_length = model_info.get("context_length", 128000)
+            active_params = model_info.get("active_params", "N/A")
+
+            if self.mcp_client.hf_client:
+                return f"✅ Ready! Using {active_params} active params, {context_length:,} token context"
+            else:
+                return "❌ Please login first"
+
+        # Chat handlers
+        def submit_message(message, history):
+            if message and (message.get("text", "").strip() or message.get("files", [])):
+                converted_history = []
+                for msg in history:
+                    if isinstance(msg, dict):
+                        converted_history.append(ChatMessage(
+                            role=msg.get('role', 'assistant'),
+                            content=msg.get('content', ''),
+                            metadata=msg.get('metadata', None)
+                        ))
+                    else:
+                        converted_history.append(msg)
+
+                new_history, cleared_input = self.chat_handler.process_multimodal_message(message, converted_history)
+                return new_history, cleared_input
+            return history, gr.MultimodalTextbox(value=None, interactive=False)
+
+        def enable_input():
+            return gr.MultimodalTextbox(interactive=True)
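+        # Note: the textbox is re-enabled via chat_submit.then(enable_input, ...) below,
+        # covering both the processed-message path and the empty-message path that
+        # returns interactive=False above.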
+
+        def show_add_server_fields():
+            return [
+                gr.Dropdown(visible=True),      # mcp_dropdown
+                gr.Textbox(visible=True),       # server_name
+                gr.Button(interactive=False),   # add_server_btn - disable it
+                gr.Button(visible=True)         # save_server_btn - show it
+            ]
+
+        def hide_add_server_fields():
+            return [
+                gr.Dropdown(visible=False, value=None),  # mcp_dropdown
+                gr.Textbox(visible=False, value=""),     # server_name
+                gr.Button(interactive=True),             # add_server_btn - re-enable it
+                gr.Button(visible=False)                 # save_server_btn - hide it
+            ]
+
+        def handle_add_server(server_title, selected_space):
+            if not server_title or not selected_space:
+                return [
+                    gr.Dropdown(visible=False, value=None),
+                    gr.Textbox(visible=False, value=""),
+                    gr.Button(interactive=True),  # Re-enable add button
+                    gr.Button(visible=False),     # Hide save button
+                    gr.CheckboxGroup(choices=list(self.mcp_client.servers.keys()),
+                                     value=[name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]),
+                    gr.Markdown("❌ Please provide both server title and space selection", visible=True)
+                ]
+
+            try:
+                status_msg, _ = self.server_manager.add_custom_server(server_title.strip(), selected_space)
+
+                # Update checkboxes with all servers
+                server_choices = list(self.mcp_client.servers.keys())
+                enabled_servers = [name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]
+
+                # Check if we have many servers and show a warning
+                warning_msg = ""
+                if len(enabled_servers) > 6:
+                    warning_msg = "\n\n⚠️ **Note:** You have more than 6 servers enabled. Consider disabling some for better performance."
+
+                return [
+                    gr.Dropdown(visible=False, value=None),
+                    gr.Textbox(visible=False, value=""),
+                    gr.Button(interactive=True),  # Re-enable add button
+                    gr.Button(visible=False),     # Hide save button
+                    gr.CheckboxGroup(choices=server_choices, value=enabled_servers),
+                    gr.Markdown(status_msg + warning_msg, visible=True)
+                ]
+
+            except Exception as e:
+                logger.error(f"Error adding server: {e}")
+                return [
+                    gr.Dropdown(visible=False, value=None),
+                    gr.Textbox(visible=False, value=""),
+                    gr.Button(interactive=True),  # Re-enable add button
+                    gr.Button(visible=False),     # Hide save button
+                    gr.CheckboxGroup(choices=list(self.mcp_client.servers.keys()),
+                                     value=[name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]),
+                    gr.Markdown(f"❌ Error: {str(e)}", visible=True)
+                ]
+
+        def handle_server_toggle(enabled_servers):
+            """Handle enabling/disabling servers via checkboxes"""
+            # Update enabled status for all servers
+            for server_name in self.mcp_client.servers.keys():
+                self.mcp_client.enable_server(server_name, server_name in enabled_servers)
+
+            enabled_count = len(enabled_servers)
+
+            # Provide feedback based on count
+            if enabled_count == 0:
+                message = "ℹ️ No servers enabled - chatbot will use native capabilities only"
+            elif enabled_count <= 6:
+                message = f"✅ {enabled_count} server{'s' if enabled_count != 1 else ''} enabled - optimal configuration"
+            else:
+                message = f"⚠️ {enabled_count} servers enabled - consider reducing to 3-6 for better performance"
+
+            return gr.Markdown(message, visible=True)
+
+        def handle_remove_all():
+            """Remove all MCP servers"""
+            count = self.mcp_client.remove_all_servers()
+            return [
+                gr.CheckboxGroup(choices=[], value=[]),
+                gr.Markdown(f"✅ Removed all {count} servers", visible=True)
+            ]
+
+        # Load handler to initialize default MCP servers
+        def initialize_defaults():
+            """Initialize default servers and update UI on app load"""
+            self._initialize_default_servers()
+
+            # Return updated checkboxes with the default servers
+            server_choices = list(self.mcp_client.servers.keys())
+            enabled_servers = [name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]
+
+            return gr.CheckboxGroup(
+                choices=server_choices,
+                value=enabled_servers,
+                label=f"Active Servers ({len(server_choices)} loaded)"
+            )
+
+        # Connect OAuth
+        demo.load(
+            fn=handle_oauth_profile,
+            outputs=[self.login_status]
+        )
+
+        # Connect provider/model dropdowns with auto-selection on load
+        demo.load(
+            fn=lambda: handle_provider_change("cerebras"),
+            outputs=[self.model_dropdown, self.api_status]
+        )
+
+        # Initialize default MCP servers on app load
+        demo.load(
+            fn=initialize_defaults,
+            outputs=[self.server_checkboxes]
+        )
+
+        self.provider_dropdown.change(
+            handle_provider_change,
+            inputs=[self.provider_dropdown],
+            outputs=[self.model_dropdown, self.api_status]
+        )
+
+        self.model_dropdown.change(
+            handle_model_change,
+            inputs=[self.provider_dropdown, self.model_dropdown],
+            outputs=[self.api_status]
+        )
+
+        # Connect chat
+        chat_submit = self.chat_input.submit(
+            submit_message,
+            inputs=[self.chat_input, chatbot],
+            outputs=[chatbot, self.chat_input]
+        )
+        chat_submit.then(enable_input, None, [self.chat_input])
+
+        # Connect server management with proper button state handling
+        self.add_server_btn.click(
+            fn=show_add_server_fields,
+            outputs=[self.mcp_dropdown, self.server_name, self.add_server_btn, self.save_server_btn]
+        )
+
+        # Connect save button
+        self.save_server_btn.click(
+            fn=handle_add_server,
+            inputs=[self.server_name, self.mcp_dropdown],
+            outputs=[self.mcp_dropdown, self.server_name, self.add_server_btn, self.save_server_btn, self.server_checkboxes, self.add_server_output]
+        )
+
+        self.server_checkboxes.change(
+            handle_server_toggle,
+            inputs=[self.server_checkboxes],
+            outputs=[self.add_server_output]
+        )
+
+        self.remove_all_btn.click(
+            handle_remove_all,
+            outputs=[self.server_checkboxes, self.add_server_output]
+        )