davanstrien (HF Staff) committed
Commit 170fd5f · Parent: 8710b70

Add MCP server for Hugging Face semantic search


- Implement MCP server with 8 tools for searching HF datasets and models
- Add semantic search tools: search_datasets, search_models
- Add similarity search tools: find_similar_datasets, find_similar_models
- Add trending tools: get_trending_datasets, get_trending_models
- Add card download tools: download_model_card, download_dataset_card
- Configure backend API connection (default: http://localhost:8000); a direct-endpoint sketch follows this list
- Include httpx for async HTTP requests and MCP dependencies
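
Each tool is a thin wrapper that forwards its arguments to a REST endpoint on the backend. As a rough sketch, querying one of those endpoints directly with httpx might look like this (the endpoint path, parameters, and response fields follow app.py below; the query string is just an example):

import asyncio

import httpx

BASE_URL = "http://localhost:8000"  # default backend URL assumed by the server

async def demo() -> None:
    async with httpx.AsyncClient(timeout=30.0) as client:
        # Same endpoint and parameters that the search_datasets tool uses
        response = await client.get(
            f"{BASE_URL}/search/datasets",
            params={"query": "historical newspapers", "k": 3, "sort_by": "similarity"},
        )
        response.raise_for_status()
        for item in response.json().get("results", []):
            print(item["dataset_id"], f"(similarity {item['similarity']:.3f})")

if __name__ == "__main__":
    asyncio.run(demo())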

Files changed (3)
  1. app.py +654 -0
  2. requirements.in +2 -0
  3. requirements.txt +175 -0
app.py ADDED
@@ -0,0 +1,654 @@
+ #!/usr/bin/env python3
+ """
+ MCP Server for Hugging Face Dataset and Model Search API
+ """
+
+ import asyncio
+ import logging
+ import os
+ from typing import Any, Dict, Optional
+
+ import httpx
+ from mcp.server import Server
+ from mcp.server.stdio import stdio_server
+ from mcp.types import TextContent, Tool
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ class HFSearchServer:
+     def __init__(self, base_url: str = "http://localhost:8000"):
+         self.base_url = base_url
+         self.client = httpx.AsyncClient(timeout=30.0)
+
+     async def close(self):
+         await self.client.aclose()
+
+     async def search_datasets(
+         self,
+         query: str,
+         k: int = 5,
+         sort_by: str = "similarity",
+         min_likes: int = 0,
+         min_downloads: int = 0
+     ) -> Dict[str, Any]:
+         """Search for datasets based on a text query"""
+         params = {
+             "query": query,
+             "k": k,
+             "sort_by": sort_by,
+             "min_likes": min_likes,
+             "min_downloads": min_downloads
+         }
+
+         response = await self.client.get(
+             f"{self.base_url}/search/datasets",
+             params=params
+         )
+         response.raise_for_status()
+         return response.json()
+
+     async def find_similar_datasets(
+         self,
+         dataset_id: str,
+         k: int = 5,
+         sort_by: str = "similarity",
+         min_likes: int = 0,
+         min_downloads: int = 0
+     ) -> Dict[str, Any]:
+         """Find similar datasets to a specified dataset"""
+         params = {
+             "dataset_id": dataset_id,
+             "k": k,
+             "sort_by": sort_by,
+             "min_likes": min_likes,
+             "min_downloads": min_downloads
+         }
+
+         response = await self.client.get(
+             f"{self.base_url}/similarity/datasets",
+             params=params
+         )
+         response.raise_for_status()
+         return response.json()
+
+     async def search_models(
+         self,
+         query: str,
+         k: int = 5,
+         sort_by: str = "similarity",
+         min_likes: int = 0,
+         min_downloads: int = 0,
+         min_param_count: int = 0,
+         max_param_count: Optional[int] = None
+     ) -> Dict[str, Any]:
+         """Search for models based on a text query"""
+         params = {
+             "query": query,
+             "k": k,
+             "sort_by": sort_by,
+             "min_likes": min_likes,
+             "min_downloads": min_downloads,
+             "min_param_count": min_param_count
+         }
+         if max_param_count is not None:
+             params["max_param_count"] = max_param_count
+
+         response = await self.client.get(
+             f"{self.base_url}/search/models",
+             params=params
+         )
+         response.raise_for_status()
+         return response.json()
+
+     async def find_similar_models(
+         self,
+         model_id: str,
+         k: int = 5,
+         sort_by: str = "similarity",
+         min_likes: int = 0,
+         min_downloads: int = 0,
+         min_param_count: int = 0,
+         max_param_count: Optional[int] = None
+     ) -> Dict[str, Any]:
+         """Find similar models to a specified model"""
+         params = {
+             "model_id": model_id,
+             "k": k,
+             "sort_by": sort_by,
+             "min_likes": min_likes,
+             "min_downloads": min_downloads,
+             "min_param_count": min_param_count
+         }
+         if max_param_count is not None:
+             params["max_param_count"] = max_param_count
+
+         response = await self.client.get(
+             f"{self.base_url}/similarity/models",
+             params=params
+         )
+         response.raise_for_status()
+         return response.json()
+
+     async def get_trending_models(
+         self,
+         limit: int = 10,
+         min_likes: int = 0,
+         min_downloads: int = 0,
+         min_param_count: int = 0,
+         max_param_count: Optional[int] = None
+     ) -> Dict[str, Any]:
+         """Get trending models with their summaries"""
+         params = {
+             "limit": limit,
+             "min_likes": min_likes,
+             "min_downloads": min_downloads,
+             "min_param_count": min_param_count
+         }
+         if max_param_count is not None:
+             params["max_param_count"] = max_param_count
+
+         response = await self.client.get(
+             f"{self.base_url}/trending/models",
+             params=params
+         )
+         response.raise_for_status()
+         return response.json()
+
+     async def get_trending_datasets(
+         self,
+         limit: int = 10,
+         min_likes: int = 0,
+         min_downloads: int = 0
+     ) -> Dict[str, Any]:
+         """Get trending datasets with their summaries"""
+         params = {
+             "limit": limit,
+             "min_likes": min_likes,
+             "min_downloads": min_downloads
+         }
+
+         response = await self.client.get(
+             f"{self.base_url}/trending/datasets",
+             params=params
+         )
+         response.raise_for_status()
+         return response.json()
+
+     async def download_model_card(self, model_id: str) -> str:
+         """
+         Download the README card for a HuggingFace model.
+
+         Args:
+             model_id (str): The model ID (e.g., 'username/model-name')
+
+         Returns:
+             str: The content of the model card (README.md)
+         """
+         url = f"https://huggingface.co/{model_id}/raw/main/README.md"
+         response = await self.client.get(url)
+         response.raise_for_status()
+         return response.text
+
+     async def download_dataset_card(self, dataset_id: str) -> str:
+         """
+         Download the README card for a HuggingFace dataset.
+
+         Args:
+             dataset_id (str): The dataset ID (e.g., 'username/dataset-name')
+
+         Returns:
+             str: The content of the dataset card (README.md)
+         """
+         url = f"https://huggingface.co/datasets/{dataset_id}/raw/main/README.md"
+         response = await self.client.get(url)
+         response.raise_for_status()
+         return response.text
+
+ # Initialize server and API client
+ server = Server("hf-search")
+ api_client: Optional[HFSearchServer] = None
+
+ @server.list_tools()
+ async def list_tools() -> list[Tool]:
+     """List available tools"""
+     # The low-level MCP server expects the handler to return a plain list of Tool objects
+     return [
+         Tool(
+             name="search_datasets",
+             description="Search for datasets based on a text query",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "query": {
+                         "type": "string",
+                         "description": "Search query text"
+                     },
+                     "k": {
+                         "type": "integer",
+                         "description": "Number of results to return (1-100)",
+                         "minimum": 1,
+                         "maximum": 100,
+                         "default": 5
+                     },
+                     "sort_by": {
+                         "type": "string",
+                         "description": "Sort method for results",
+                         "enum": ["similarity", "likes", "downloads", "trending"],
+                         "default": "similarity"
+                     },
+                     "min_likes": {
+                         "type": "integer",
+                         "description": "Minimum likes filter",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "min_downloads": {
+                         "type": "integer",
+                         "description": "Minimum downloads filter",
+                         "minimum": 0,
+                         "default": 0
+                     }
+                 },
+                 "required": ["query"]
+             }
+         ),
+         Tool(
+             name="find_similar_datasets",
+             description="Find datasets similar to a specified dataset",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "dataset_id": {
+                         "type": "string",
+                         "description": "Dataset ID to find similar datasets for"
+                     },
+                     "k": {
+                         "type": "integer",
+                         "description": "Number of results to return (1-100)",
+                         "minimum": 1,
+                         "maximum": 100,
+                         "default": 5
+                     },
+                     "sort_by": {
+                         "type": "string",
+                         "description": "Sort method for results",
+                         "enum": ["similarity", "likes", "downloads", "trending"],
+                         "default": "similarity"
+                     },
+                     "min_likes": {
+                         "type": "integer",
+                         "description": "Minimum likes filter",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "min_downloads": {
+                         "type": "integer",
+                         "description": "Minimum downloads filter",
+                         "minimum": 0,
+                         "default": 0
+                     }
+                 },
+                 "required": ["dataset_id"]
+             }
+         ),
+         Tool(
+             name="search_models",
+             description="Search for models based on a text query with optional parameter count filtering",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "query": {
+                         "type": "string",
+                         "description": "Search query text"
+                     },
+                     "k": {
+                         "type": "integer",
+                         "description": "Number of results to return (1-100)",
+                         "minimum": 1,
+                         "maximum": 100,
+                         "default": 5
+                     },
+                     "sort_by": {
+                         "type": "string",
+                         "description": "Sort method for results",
+                         "enum": ["similarity", "likes", "downloads", "trending"],
+                         "default": "similarity"
+                     },
+                     "min_likes": {
+                         "type": "integer",
+                         "description": "Minimum likes filter",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "min_downloads": {
+                         "type": "integer",
+                         "description": "Minimum downloads filter",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "min_param_count": {
+                         "type": "integer",
+                         "description": "Minimum parameter count (excludes models with unknown params)",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "max_param_count": {
+                         "type": ["integer", "null"],
+                         "description": "Maximum parameter count (null for no limit)",
+                         "minimum": 0,
+                         "default": None
+                     }
+                 },
+                 "required": ["query"]
+             }
+         ),
+         Tool(
+             name="find_similar_models",
+             description="Find models similar to a specified model",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "model_id": {
+                         "type": "string",
+                         "description": "Model ID to find similar models for"
+                     },
+                     "k": {
+                         "type": "integer",
+                         "description": "Number of results to return (1-100)",
+                         "minimum": 1,
+                         "maximum": 100,
+                         "default": 5
+                     },
+                     "sort_by": {
+                         "type": "string",
+                         "description": "Sort method for results",
+                         "enum": ["similarity", "likes", "downloads", "trending"],
+                         "default": "similarity"
+                     },
+                     "min_likes": {
+                         "type": "integer",
+                         "description": "Minimum likes filter",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "min_downloads": {
+                         "type": "integer",
+                         "description": "Minimum downloads filter",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "min_param_count": {
+                         "type": "integer",
+                         "description": "Minimum parameter count (excludes models with unknown params)",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "max_param_count": {
+                         "type": ["integer", "null"],
+                         "description": "Maximum parameter count (null for no limit)",
+                         "minimum": 0,
+                         "default": None
+                     }
+                 },
+                 "required": ["model_id"]
+             }
+         ),
+         Tool(
+             name="get_trending_models",
+             description="Get trending models with their summaries and optional filtering",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "limit": {
+                         "type": "integer",
+                         "description": "Number of results to return (1-100)",
+                         "minimum": 1,
+                         "maximum": 100,
+                         "default": 10
+                     },
+                     "min_likes": {
+                         "type": "integer",
+                         "description": "Minimum likes filter",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "min_downloads": {
+                         "type": "integer",
+                         "description": "Minimum downloads filter",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "min_param_count": {
+                         "type": "integer",
+                         "description": "Minimum parameter count (excludes models with unknown params)",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "max_param_count": {
+                         "type": ["integer", "null"],
+                         "description": "Maximum parameter count (null for no limit)",
+                         "minimum": 0,
+                         "default": None
+                     }
+                 }
+             }
+         ),
+         Tool(
+             name="get_trending_datasets",
+             description="Get trending datasets with their summaries",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "limit": {
+                         "type": "integer",
+                         "description": "Number of results to return (1-100)",
+                         "minimum": 1,
+                         "maximum": 100,
+                         "default": 10
+                     },
+                     "min_likes": {
+                         "type": "integer",
+                         "description": "Minimum likes filter",
+                         "minimum": 0,
+                         "default": 0
+                     },
+                     "min_downloads": {
+                         "type": "integer",
+                         "description": "Minimum downloads filter",
+                         "minimum": 0,
+                         "default": 0
+                     }
+                 }
+             }
+         ),
+         Tool(
+             name="download_model_card",
+             description="Download the README card for a HuggingFace model",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "model_id": {
+                         "type": "string",
+                         "description": "The model ID (e.g., 'username/model-name')"
+                     }
+                 },
+                 "required": ["model_id"]
+             }
+         ),
+         Tool(
+             name="download_dataset_card",
+             description="Download the README card for a HuggingFace dataset",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "dataset_id": {
+                         "type": "string",
+                         "description": "The dataset ID (e.g., 'username/dataset-name')"
+                     }
+                 },
+                 "required": ["dataset_id"]
+             }
+         )
+     ]
+
+ @server.call_tool()
+ async def call_tool(name: str, arguments: Optional[Dict[str, Any]]) -> list[TextContent]:
+     """Handle tool calls"""
+     # The low-level MCP server passes the tool name and arguments directly,
+     # and expects a list of content objects back; exceptions are reported
+     # to the client as tool errors by the framework.
+     global api_client
+
+     if api_client is None:
+         # Initialize API client with base URL from environment or default
+         base_url = os.getenv("HF_SEARCH_API_URL", "http://localhost:8000")
+         api_client = HFSearchServer(base_url)
+
+     args = arguments or {}
+
+     # Format results helpers
+     def format_dataset_results(data: Dict[str, Any]) -> str:
+         results = data.get("results", [])
+         if not results:
+             return "No datasets found."
+
+         output = []
+         for i, result in enumerate(results, 1):
+             output.append(f"{i}. **{result['dataset_id']}**")
+             output.append(f" - Summary: {result['summary']}")
+             output.append(f" - Similarity: {result['similarity']:.3f}")
+             output.append(f" - Likes: {result['likes']:,} | Downloads: {result['downloads']:,}")
+             output.append("")
+
+         return "\n".join(output)
+
+     def format_model_results(data: Dict[str, Any]) -> str:
+         results = data.get("results", [])
+         if not results:
+             return "No models found."
+
+         output = []
+         for i, result in enumerate(results, 1):
+             output.append(f"{i}. **{result['model_id']}**")
+             output.append(f" - Summary: {result['summary']}")
+             output.append(f" - Similarity: {result['similarity']:.3f}")
+             output.append(f" - Likes: {result['likes']:,} | Downloads: {result['downloads']:,}")
+             if result.get('param_count') is not None and result['param_count'] > 0:
+                 # Format parameter count nicely (e.g., 1.2B, 350.0M, 80.0K)
+                 param_count = result['param_count']
+                 if param_count >= 1_000_000_000:
+                     param_str = f"{param_count / 1_000_000_000:.1f}B"
+                 elif param_count >= 1_000_000:
+                     param_str = f"{param_count / 1_000_000:.1f}M"
+                 elif param_count >= 1_000:
+                     param_str = f"{param_count / 1_000:.1f}K"
+                 else:
+                     param_str = str(param_count)
+                 output.append(f" - Parameters: {param_str}")
+             output.append("")
+
+         return "\n".join(output)
+
+     try:
+         # Route to the appropriate method
+         if name == "search_datasets":
+             result = await api_client.search_datasets(**args)
+             return [TextContent(type="text", text=format_dataset_results(result))]
+
+         elif name == "find_similar_datasets":
+             result = await api_client.find_similar_datasets(**args)
+             return [TextContent(type="text", text=format_dataset_results(result))]
+
+         elif name == "search_models":
+             result = await api_client.search_models(**args)
+             return [TextContent(type="text", text=format_model_results(result))]
+
+         elif name == "find_similar_models":
+             result = await api_client.find_similar_models(**args)
+             return [TextContent(type="text", text=format_model_results(result))]
+
+         elif name == "get_trending_models":
+             result = await api_client.get_trending_models(**args)
+             return [TextContent(type="text", text=format_model_results(result))]
+
+         elif name == "get_trending_datasets":
+             result = await api_client.get_trending_datasets(**args)
+             return [TextContent(type="text", text=format_dataset_results(result))]
+
+         elif name == "download_model_card":
+             card = await api_client.download_model_card(**args)
+             return [TextContent(type="text", text=card)]
+
+         elif name == "download_dataset_card":
+             card = await api_client.download_dataset_card(**args)
+             return [TextContent(type="text", text=card)]
+
+         else:
+             raise ValueError(f"Unknown tool: {name}")
+
+     except httpx.HTTPStatusError as e:
+         error_msg = f"API request failed with status {e.response.status_code}: {e.response.text}"
+         logger.error(error_msg)
+         raise RuntimeError(error_msg) from e
+     except Exception:
+         logger.error(f"Error calling tool {name}", exc_info=True)
+         raise
+
+ async def main():
+     """Main entry point"""
+     try:
+         async with stdio_server() as (read_stream, write_stream):
+             await server.run(
+                 read_stream,
+                 write_stream,
+                 server.create_initialization_options(),
+             )
+     finally:
+         # Cleanup
+         if api_client:
+             await api_client.close()
+
+ if __name__ == "__main__":
+     asyncio.run(main())
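
For manual testing, a minimal stdio client sketch (an assumption-laden example, not part of the commit: it presumes the file above is saved as app.py, that the backend API is reachable, and uses the client classes shipped in the mcp package pinned below):

import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def demo() -> None:
    # Launch app.py as a subprocess and talk to it over stdio
    params = StdioServerParameters(command="python", args=["app.py"])
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            print("tools:", [tool.name for tool in tools.tools])
            # "OCR" is an arbitrary example query
            result = await session.call_tool("search_datasets", {"query": "OCR", "k": 3})
            print(result.content[0].text)

if __name__ == "__main__":
    asyncio.run(demo())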
requirements.in ADDED
@@ -0,0 +1,2 @@
+ gradio[mcp]
+ httpx
requirements.txt ADDED
@@ -0,0 +1,175 @@
+ # This file was autogenerated by uv via the following command:
+ #    uv pip compile requirements.in -o requirements.txt
+ aiofiles==24.1.0
+     # via gradio
+ annotated-types==0.7.0
+     # via pydantic
+ anyio==4.9.0
+     # via
+     #   gradio
+     #   httpx
+     #   mcp
+     #   sse-starlette
+     #   starlette
+ certifi==2025.4.26
+     # via
+     #   httpcore
+     #   httpx
+     #   requests
+ charset-normalizer==3.4.2
+     # via requests
+ click==8.2.1
+     # via
+     #   typer
+     #   uvicorn
+ fastapi==0.115.12
+     # via gradio
+ ffmpy==0.6.0
+     # via gradio
+ filelock==3.18.0
+     # via huggingface-hub
+ fsspec==2025.5.1
+     # via
+     #   gradio-client
+     #   huggingface-hub
+ gradio==5.33.0
+     # via -r requirements.in
+ gradio-client==1.10.2
+     # via gradio
+ groovy==0.1.2
+     # via gradio
+ h11==0.16.0
+     # via
+     #   httpcore
+     #   uvicorn
+ hf-xet==1.1.3
+     # via huggingface-hub
+ httpcore==1.0.9
+     # via httpx
+ httpx==0.28.1
+     # via
+     #   -r requirements.in
+     #   gradio
+     #   gradio-client
+     #   mcp
+     #   safehttpx
+ httpx-sse==0.4.0
+     # via mcp
+ huggingface-hub==0.32.4
+     # via
+     #   gradio
+     #   gradio-client
+ idna==3.10
+     # via
+     #   anyio
+     #   httpx
+     #   requests
+ jinja2==3.1.6
+     # via gradio
+ markdown-it-py==3.0.0
+     # via rich
+ markupsafe==3.0.2
+     # via
+     #   gradio
+     #   jinja2
+ mcp==1.9.0
+     # via gradio
+ mdurl==0.1.2
+     # via markdown-it-py
+ numpy==2.3.0
+     # via
+     #   gradio
+     #   pandas
+ orjson==3.10.18
+     # via gradio
+ packaging==25.0
+     # via
+     #   gradio
+     #   gradio-client
+     #   huggingface-hub
+ pandas==2.3.0
+     # via gradio
+ pillow==11.2.1
+     # via gradio
+ pydantic==2.11.5
+     # via
+     #   fastapi
+     #   gradio
+     #   mcp
+     #   pydantic-settings
+ pydantic-core==2.33.2
+     # via pydantic
+ pydantic-settings==2.9.1
+     # via mcp
+ pydub==0.25.1
+     # via gradio
+ pygments==2.19.1
+     # via rich
+ python-dateutil==2.9.0.post0
+     # via pandas
+ python-dotenv==1.1.0
+     # via pydantic-settings
+ python-multipart==0.0.20
+     # via
+     #   gradio
+     #   mcp
+ pytz==2025.2
+     # via pandas
+ pyyaml==6.0.2
+     # via
+     #   gradio
+     #   huggingface-hub
+ requests==2.32.3
+     # via huggingface-hub
+ rich==14.0.0
+     # via typer
+ ruff==0.11.13
+     # via gradio
+ safehttpx==0.1.6
+     # via gradio
+ semantic-version==2.10.0
+     # via gradio
+ shellingham==1.5.4
+     # via typer
+ six==1.17.0
+     # via python-dateutil
+ sniffio==1.3.1
+     # via anyio
+ sse-starlette==2.3.6
+     # via mcp
+ starlette==0.46.2
+     # via
+     #   fastapi
+     #   gradio
+     #   mcp
+ tomlkit==0.13.3
+     # via gradio
+ tqdm==4.67.1
+     # via huggingface-hub
+ typer==0.16.0
+     # via gradio
+ typing-extensions==4.14.0
+     # via
+     #   anyio
+     #   fastapi
+     #   gradio
+     #   gradio-client
+     #   huggingface-hub
+     #   pydantic
+     #   pydantic-core
+     #   typer
+     #   typing-inspection
+ typing-inspection==0.4.1
+     # via
+     #   pydantic
+     #   pydantic-settings
+ tzdata==2025.2
+     # via pandas
+ urllib3==2.4.0
+     # via requests
+ uvicorn==0.34.3
+     # via
+     #   gradio
+     #   mcp
+ websockets==15.0.1
+     # via gradio-client