Commit aaa3e82
Parent(s): none (initial commit)
Initial commit: A1D MCP Server with Gradio interface
- Complete MCP server implementation with Gradio web interface
- Support for 6 AI tools: background removal, image upscaling, video upscaling, image vectorization, image extension, and image generation
- Dual API key support: header-based (recommended) and environment variable
- Real-time SSE processing with detailed logging
- Media preview support for images and videos
- Production-ready with comprehensive error handling
- Full documentation with MCP client configuration examples
- .env.example +11 -0
- .gitignore +62 -0
- LICENSE +21 -0
- README.md +277 -0
- app.py +448 -0
- config.py +84 -0
- mcp_handler.py +114 -0
- requirements.txt +2 -0
- start_server.py +76 -0
- test_app.py +121 -0
- utils.py +308 -0
.env.example
ADDED
@@ -0,0 +1,11 @@
+# A1D MCP Server Environment Variables
+# Copy this file to .env and fill in your values
+
+# A1D API Key (Required)
+# Get your API key from: https://a1d.ai/home/api
+A1D_API_KEY=z4Zl7OhAM5r-DxyVFDN1A
+
+# Optional: Gradio Configuration
+# GRADIO_SERVER_NAME=0.0.0.0
+# GRADIO_SERVER_PORT=7860
+# GRADIO_SHARE=false
.gitignore
ADDED
@@ -0,0 +1,62 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# Virtual Environment
+venv/
+env/
+ENV/
+env.bak/
+venv.bak/
+
+# Environment Variables
+.env
+.env.local
+.env.production
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# OS
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+
+# Gradio
+gradio_cached_examples/
+flagged/
+
+# Logs
+*.log
+logs/
+
+# Temporary files
+*.tmp
+*.temp
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 A1D Team
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md
ADDED
@@ -0,0 +1,277 @@
+---
+title: A1d Mcp Server
+emoji: 🤖
+colorFrom: blue
+colorTo: purple
+sdk: gradio
+sdk_version: 5.17.0
+app_file: app.py
+pinned: false
+license: mit
+tags:
+  - mcp-server-track
+---
+
+# A1D MCP Server - Universal AI Tools
+
+A powerful MCP (Model Context Protocol) server built with Gradio that provides AI image and video processing tools for any MCP-compatible client. This server implements the same tools as the original [A1D MCP Server](https://github.com/AIGC-Hackers/mcp-server) but uses Gradio for easy deployment and hosting.
+
+## 🎥 Demo
+
+**MCP Server in Action**: [View Demo Recording](https://huggingface.co/spaces/aigchacker/a1d-mcp-server)
+
+*The demo shows the A1D MCP Server working with Claude Desktop and other MCP clients, featuring real-time AI image processing, seamless integration, and live preview of results.*
+
+## 🤖 Available AI Tools
+
+| Tool | Description | Use Cases |
+|------|-------------|-----------|
+| **remove_bg** | AI background removal | Remove backgrounds from photos, product images |
+| **image_upscaler** | AI image enhancement | Upscale images 2x, 4x, 8x, 16x resolution |
+| **video_upscaler** | AI video enhancement | Improve video quality and resolution |
+| **image_vectorization** | Convert to vectors | Turn images into scalable SVG graphics |
+| **image_extends** | Smart image extension | Expand image boundaries intelligently |
+| **image_generator** | Text-to-image AI | Generate images from text descriptions |
+
+## 🚀 Quick Setup
+
+### 1. Get Your API Key
+- Visit [A1D.ai](https://a1d.ai/home/api) to get your free API key
+- Optional: [Purchase credits](https://a1d.ai/pricing) for extended usage
+
+### 2. Set Environment Variable
+```bash
+export A1D_API_KEY=your_api_key_here
+```
+
+### 3. Install Dependencies
+```bash
+pip install -r requirements.txt
+```
+
+### 4. Run the Server
+
+**Option 1: Using the startup script (Recommended)**
+```bash
+python start_server.py
+```
+
+**Option 2: Direct execution**
+```bash
+python app.py
+```
+
+**Option 3: Using virtual environment**
+```bash
+source venv/bin/activate  # On Windows: venv\Scripts\activate
+python start_server.py
+```
+
+The server will start on `http://localhost:7860` with MCP server enabled.
+
+### 5. Test the Installation
+```bash
+python test_app.py
+```
+
+This will run a comprehensive test suite to verify everything is working correctly.
+
+## 🔧 MCP Server Features
+
+This Gradio app serves as both a **web interface** and a **full MCP server**, providing:
+
+### ✅ **Dual Interface Support**
+- **Web UI**: Interactive Gradio interface at `http://localhost:7860`
+- **MCP Server**: Protocol-compliant server at `http://localhost:7860/gradio_api/mcp/sse`
+
+### ✅ **Real-time Processing**
+- **Async Task Handling**: Uses SSE for real-time status updates
+- **Live Preview**: Immediate media preview in both web and MCP clients
+- **Progress Tracking**: Detailed logs of API calls and processing status
+
+### ✅ **Production Ready**
+- **Error Handling**: Comprehensive error management and user feedback
+- **API Integration**: Direct integration with A1D.ai services
+- **Scalable**: Can handle multiple concurrent requests
+
+## 🔧 MCP Client Configuration
+
+### Method 1: With API Key in Headers (Recommended)
+
+Add this to your Claude Desktop configuration file:
+
+```json
+{
+  "mcpServers": {
+    "a1d": {
+      "command": "npx",
+      "args": [
+        "mcp-remote@latest",
+        "http://localhost:7860/gradio_api/mcp/sse",
+        "--header",
+        "API_KEY:${MCP_API_KEY}"
+      ],
+      "env": {
+        "MCP_API_KEY": "your_a1d_api_key_here"
+      }
+    }
+  }
+}
+```
+
+### Method 2: With Environment Variable (Fallback)
+
+```json
+{
+  "mcpServers": {
+    "a1d-gradio": {
+      "command": "npx",
+      "args": [
+        "mcp-remote",
+        "http://localhost:7860/gradio_api/mcp/sse"
+      ]
+    }
+  }
+}
+```
+
+*Note: Requires `A1D_API_KEY` environment variable to be set.*
+
+### For Other MCP Clients
+- **Server URL:** `http://localhost:7860/gradio_api/mcp/sse`
+- **Transport Type:** SSE (Server-Sent Events)
+- **Protocol:** MCP 1.0 compatible
+- **Authentication:** API key via `API_KEY` header or environment variable
+
+### Configuration File Locations
+
+**Windows:** `%APPDATA%\Claude\claude_desktop_config.json`
+**macOS:** `~/Library/Application Support/Claude/claude_desktop_config.json`
+**Linux:** `~/.config/Claude/claude_desktop_config.json`
+
+## 🌐 Hosted Version
+
+This server is also available as a hosted Hugging Face Space:
+- **Space URL:** [https://huggingface.co/spaces/aigchacker/a1d-mcp-server](https://huggingface.co/spaces/aigchacker/a1d-mcp-server)
+- **MCP Endpoint:** `https://aigchacker-a1d-mcp-server.hf.space/gradio_api/mcp/sse`
+
+### 🔧 Environment Variables for Hugging Face Space
+
+To deploy this on your own Hugging Face Space:
+
+1. **Fork or duplicate this Space**
+2. **Go to Settings → Variables and secrets**
+3. **Add the following environment variable:**
+   - **Name**: `A1D_API_KEY`
+   - **Value**: Your A1D API key from [A1D.ai](https://a1d.ai/home/api)
+   - **Type**: Secret (recommended for security)
+4. **Save and restart the Space**
+
+### 🌐 MCP Client Configuration for Hosted Version
+
+**Method 1: With API Key in Headers (Recommended)**
+
+```json
+{
+  "mcpServers": {
+    "a1d-hosted": {
+      "command": "npx",
+      "args": [
+        "mcp-remote@latest",
+        "https://aigchacker-a1d-mcp-server.hf.space/gradio_api/mcp/sse",
+        "--header",
+        "API_KEY:${MCP_API_KEY}"
+      ],
+      "env": {
+        "MCP_API_KEY": "your_a1d_api_key_here"
+      }
+    }
+  }
+}
+```
+
+**Method 2: Environment Variable (if Space has API key configured)**
+
+```json
+{
+  "mcpServers": {
+    "a1d-hosted": {
+      "command": "npx",
+      "args": [
+        "mcp-remote",
+        "https://aigchacker-a1d-mcp-server.hf.space/gradio_api/mcp/sse"
+      ]
+    }
+  }
+}
+```
+
+## 💡 How to Use
+
+Once configured, simply ask your AI assistant to help with image or video tasks:
+
+- *"Remove the background from this image: https://example.com/photo.jpg"*
+- *"Upscale this image to 4x resolution: https://example.com/image.png"*
+- *"Convert this photo to a vector graphic: https://example.com/logo.jpg"*
+- *"Generate an image of a sunset over mountains"*
+
+## 🛠️ Development
+
+### Local Development
+```bash
+git clone https://github.com/your-repo/a1d-mcp-server-hf.git
+cd a1d-mcp-server-hf
+pip install -r requirements.txt
+export A1D_API_KEY=your_api_key_here
+python app.py
+```
+
+### Project Structure
+```
+a1d-mcp-server-hf/
+├── app.py              # Main Gradio application
+├── config.py           # Configuration settings
+├── utils.py            # Utility functions and API client
+├── mcp_handler.py      # MCP request handler with header-based API keys
+├── start_server.py     # Server startup script
+├── test_app.py         # Test suite
+├── requirements.txt    # Python dependencies
+├── .env.example        # Environment variables example
+├── .gitignore          # Git ignore file
+├── LICENSE             # MIT license
+└── README.md           # This file
+```
+
+## 🔒 Security
+
+- **User-provided credentials**: This server supports multiple API key methods:
+  - **Header-based**: API keys passed via MCP client headers (recommended)
+  - **Environment variables**: Server-side API key configuration
+- **No stored secrets**: All API keys are handled per-request, nothing is stored server-side
+- **Multi-user support**: Each user can use their own API key via headers
+- **HTTPS recommended**: Use HTTPS in production environments
+
+## 🤝 Contributing
+
+We welcome contributions! Please:
+
+1. Fork the repository
+2. Create a feature branch
+3. Follow the existing code style
+4. Add tests for new features
+5. Submit a pull request
+
+## 📄 License
+
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+## 🔗 Related Projects
+
+- **Original MCP Server:** [AIGC-Hackers/mcp-server](https://github.com/AIGC-Hackers/mcp-server)
+- **A1D API Documentation:** [A1D.ai API Docs](https://a1d.ai/api/quick-start)
+- **Gradio MCP Guide:** [Building MCP Server with Gradio](https://www.gradio.app/guides/building-mcp-server-with-gradio)
+
+---
+
+**Built with ❤️ by the A1D Team**
+[A1D.ai](https://a1d.ai) • [GitHub](https://github.com/AIGC-Hackers) • [API Docs](https://a1d.ai/api/quick-start)
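
Editor's note: the README points MCP clients at the `/gradio_api/mcp/sse` endpoint. As a quick sanity check outside of any MCP client, that endpoint can be probed with `requests` (already a project dependency). This is a minimal sketch, assuming the server from this commit is running locally on port 7860; it only confirms that the SSE endpoint answers and streams events, it does not speak the full MCP protocol.

```python
# Minimal reachability check for the local MCP SSE endpoint (sketch, not part of this commit).
import requests

resp = requests.get(
    "http://localhost:7860/gradio_api/mcp/sse",
    headers={"Accept": "text/event-stream"},
    stream=True,
    timeout=10,
)
print("Status:", resp.status_code)  # 200 indicates the MCP endpoint is up

# Print the first few raw SSE lines, then stop.
for i, line in enumerate(resp.iter_lines(decode_unicode=True)):
    print(line)
    if i >= 4:
        break
resp.close()
```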
app.py
ADDED
@@ -0,0 +1,448 @@
+"""
+A1D MCP Server - Gradio Application
+Universal AI Tools for image and video processing
+"""
+
+import gradio as gr
+import os
+from typing import Optional, Tuple, Union
+from utils import A1DAPIClient, validate_url, validate_scale, prepare_request_data, format_response_with_preview
+from config import GRADIO_CONFIG, TOOLS_CONFIG
+from mcp_handler import get_api_key_from_headers
+
+
+# Initialize API client
+def get_api_client():
+    """Get API client with current API key"""
+    # Try to get API key from multiple sources
+    api_key = None
+
+    # 1. Try from request headers (for MCP clients)
+    try:
+        request = gr.request()
+        if request and hasattr(request, 'headers'):
+            api_key = get_api_key_from_headers(dict(request.headers))
+    except:
+        pass
+
+    # 2. Fallback to environment variable
+    if not api_key:
+        api_key = os.getenv("A1D_API_KEY")
+
+    if not api_key:
+        raise ValueError(
+            "API key is required. Set A1D_API_KEY environment variable or provide API_KEY in request headers.")
+
+    return A1DAPIClient(api_key)
+
+
+def remove_bg(image_url: str) -> Tuple[str, Optional[str]]:
+    """Remove background from images using AI.
+
+    Args:
+        image_url: The URL of the image to remove background from
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    try:
+        if not validate_url(image_url):
+            return "❌ Error: Invalid image URL format", None
+
+        client = get_api_client()
+        data = prepare_request_data("remove_bg", image_url=image_url)
+
+        # Use the new method that waits for result
+        response = client.make_request_with_result(
+            TOOLS_CONFIG["remove_bg"]["api_endpoint"],
+            data,
+            timeout=120  # 2 minutes timeout
+        )
+
+        return format_response_with_preview(response, "remove_bg")
+
+    except Exception as e:
+        return f"❌ Error: {str(e)}", None
+
+
+def image_upscaler(image_url: str, scale: int = 2) -> Tuple[str, Optional[str]]:
+    """Upscale images using AI with specified scale factor.
+
+    Args:
+        image_url: The URL of the image to upscale
+        scale: Scale factor for upscaling (2, 4, 8, or 16). Default: 2
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    try:
+        if not validate_url(image_url):
+            return "❌ Error: Invalid image URL format", None
+
+        if not validate_scale(scale):
+            return "❌ Error: Scale must be 2, 4, 8, or 16", None
+
+        client = get_api_client()
+        data = prepare_request_data(
+            "image_upscaler", image_url=image_url, scale=scale)
+
+        response = client.make_request_with_result(
+            TOOLS_CONFIG["image_upscaler"]["api_endpoint"],
+            data,
+            timeout=120
+        )
+
+        return format_response_with_preview(response, "image_upscaler")
+
+    except Exception as e:
+        return f"❌ Error: {str(e)}", None
+
+
+def video_upscaler(video_url: str) -> Tuple[str, Optional[str]]:
+    """Upscale videos using AI.
+
+    Args:
+        video_url: The URL of the video to upscale
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    try:
+        if not validate_url(video_url):
+            return "❌ Error: Invalid video URL format", None
+
+        client = get_api_client()
+        data = prepare_request_data("video_upscaler", video_url=video_url)
+
+        response = client.make_request_with_result(
+            TOOLS_CONFIG["video_upscaler"]["api_endpoint"],
+            data,
+            timeout=300  # 5 minutes for video processing
+        )
+
+        return format_response_with_preview(response, "video_upscaler")
+
+    except Exception as e:
+        return f"❌ Error: {str(e)}", None
+
+
+def image_vectorization(image_url: str) -> Tuple[str, Optional[str]]:
+    """Convert images to vector format using AI.
+
+    Args:
+        image_url: The URL of the image to vectorize
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    try:
+        if not validate_url(image_url):
+            return "❌ Error: Invalid image URL format", None
+
+        client = get_api_client()
+        data = prepare_request_data("image_vectorization", image_url=image_url)
+
+        response = client.make_request_with_result(
+            TOOLS_CONFIG["image_vectorization"]["api_endpoint"],
+            data,
+            timeout=120
+        )
+
+        return format_response_with_preview(response, "image_vectorization")
+
+    except Exception as e:
+        return f"❌ Error: {str(e)}", None
+
+
+def image_extends(image_url: str) -> Tuple[str, Optional[str]]:
+    """Extend images using AI.
+
+    Args:
+        image_url: The URL of the image to extend
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    try:
+        if not validate_url(image_url):
+            return "❌ Error: Invalid image URL format", None
+
+        client = get_api_client()
+        data = prepare_request_data("image_extends", image_url=image_url)
+
+        response = client.make_request_with_result(
+            TOOLS_CONFIG["image_extends"]["api_endpoint"],
+            data,
+            timeout=120
+        )
+
+        return format_response_with_preview(response, "image_extends")
+
+    except Exception as e:
+        return f"❌ Error: {str(e)}", None
+
+
+def image_generator(prompt: str) -> Tuple[str, Optional[str]]:
+    """Generate images using AI from text prompts.
+
+    Args:
+        prompt: Text prompt to generate image from
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    try:
+        if not prompt or not prompt.strip():
+            return "❌ Error: Prompt is required and cannot be empty", None
+
+        client = get_api_client()
+        data = prepare_request_data("image_generator", prompt=prompt.strip())
+
+        response = client.make_request_with_result(
+            TOOLS_CONFIG["image_generator"]["api_endpoint"],
+            data,
+            timeout=120
+        )
+
+        return format_response_with_preview(response, "image_generator")
+
+    except Exception as e:
+        return f"❌ Error: {str(e)}", None
+
+
+# Wrapper functions for Gradio interface
+def remove_bg_wrapper(image_url: str):
+    """Wrapper for remove_bg that returns message and media for Gradio
+
+    Args:
+        image_url: The URL of the image to remove background from. Must be a valid HTTP/HTTPS URL pointing to an image file.
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    message, media_url = remove_bg(image_url)
+    return message, media_url if media_url else None
+
+
+def image_upscaler_wrapper(image_url: str, scale: int):
+    """Wrapper for image_upscaler that returns message and media for Gradio
+
+    Args:
+        image_url: The URL of the image to upscale. Must be a valid HTTP/HTTPS URL pointing to an image file.
+        scale: Scale factor for upscaling. Choose from 2, 4, 8, or 16. Higher values produce larger images but take longer to process.
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    message, media_url = image_upscaler(image_url, scale)
+    return message, media_url if media_url else None
+
+
+def video_upscaler_wrapper(video_url: str):
+    """Wrapper for video_upscaler that returns message and media for Gradio
+
+    Args:
+        video_url: The URL of the video to upscale. Must be a valid HTTP/HTTPS URL pointing to a video file (MP4, AVI, MOV, etc.).
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    message, media_url = video_upscaler(video_url)
+    return message, media_url if media_url else None
+
+
+def image_vectorization_wrapper(image_url: str):
+    """Wrapper for image_vectorization that returns message and media for Gradio
+
+    Args:
+        image_url: The URL of the image to convert to vector format. Must be a valid HTTP/HTTPS URL pointing to an image file.
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    message, media_url = image_vectorization(image_url)
+    return message, media_url if media_url else None
+
+
+def image_extends_wrapper(image_url: str):
+    """Wrapper for image_extends that returns message and media for Gradio
+
+    Args:
+        image_url: The URL of the image to extend. Must be a valid HTTP/HTTPS URL pointing to an image file.
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    message, media_url = image_extends(image_url)
+    return message, media_url if media_url else None
+
+
+def image_generator_wrapper(prompt: str):
+    """Wrapper for image_generator that returns message and media for Gradio
+
+    Args:
+        prompt: Text description of the image to generate. Be descriptive and specific for better results. Example: "A beautiful sunset over mountains with vibrant orange and purple colors".
+
+    Returns:
+        Tuple of (result_message, media_url_for_preview)
+    """
+    message, media_url = image_generator(prompt)
+    return message, media_url if media_url else None
+
+
+# Create Gradio interfaces for each tool
+def create_gradio_app():
+    """Create the main Gradio application with all tools"""
+
+    # Create individual interfaces for each tool
+    remove_bg_interface = gr.Interface(
+        fn=remove_bg_wrapper,
+        inputs=[
+            gr.Textbox(
+                label="Image URL",
+                placeholder="https://example.com/image.jpg",
+                info="Enter the URL of the image to remove background from"
+            )
+        ],
+        outputs=[
+            gr.Textbox(label="Result"),
+            gr.Image(label="Preview", show_label=True)
+        ],
+        title="🎭 Background Removal",
+        description="Remove background from images using AI"
+    )
+
+    image_upscaler_interface = gr.Interface(
+        fn=image_upscaler_wrapper,
+        inputs=[
+            gr.Textbox(
+                label="Image URL",
+                placeholder="https://example.com/image.jpg",
+                info="Enter the URL of the image to upscale"
+            ),
+            gr.Dropdown(
+                choices=[2, 4, 8, 16],
+                value=2,
+                label="Scale Factor",
+                info="Choose the upscaling factor"
+            )
+        ],
+        outputs=[
+            gr.Textbox(label="Result"),
+            gr.Image(label="Preview", show_label=True)
+        ],
+        title="🔍 Image Upscaler",
+        description="Upscale images using AI with specified scale factor"
+    )
+
+    video_upscaler_interface = gr.Interface(
+        fn=video_upscaler_wrapper,
+        inputs=[
+            gr.Textbox(
+                label="Video URL",
+                placeholder="https://example.com/video.mp4",
+                info="Enter the URL of the video to upscale"
+            )
+        ],
+        outputs=[
+            gr.Textbox(label="Result"),
+            gr.Video(label="Preview", show_label=True)
+        ],
+        title="🎬 Video Upscaler",
+        description="Upscale videos using AI"
+    )
+
+    image_vectorization_interface = gr.Interface(
+        fn=image_vectorization_wrapper,
+        inputs=[
+            gr.Textbox(
+                label="Image URL",
+                placeholder="https://example.com/image.jpg",
+                info="Enter the URL of the image to convert to vector format"
+            )
+        ],
+        outputs=[
+            gr.Textbox(label="Result"),
+            gr.Image(label="Preview", show_label=True)
+        ],
+        title="📐 Image Vectorization",
+        description="Convert images to vector format using AI"
+    )
+
+    image_extends_interface = gr.Interface(
+        fn=image_extends_wrapper,
+        inputs=[
+            gr.Textbox(
+                label="Image URL",
+                placeholder="https://example.com/image.jpg",
+                info="Enter the URL of the image to extend"
+            )
+        ],
+        outputs=[
+            gr.Textbox(label="Result"),
+            gr.Image(label="Preview", show_label=True)
+        ],
+        title="🖼️ Image Extension",
+        description="Extend images using AI"
+    )
+
+    image_generator_interface = gr.Interface(
+        fn=image_generator_wrapper,
+        inputs=[
+            gr.Textbox(
+                label="Text Prompt",
+                placeholder="A beautiful sunset over mountains",
+                info="Enter a text description to generate an image",
+                lines=3
+            )
+        ],
+        outputs=[
+            gr.Textbox(label="Result"),
+            gr.Image(label="Preview", show_label=True)
+        ],
+        title="🎨 Image Generator",
+        description="Generate images using AI from text prompts"
+    )
+
+    # Create tabbed interface
+    demo = gr.TabbedInterface(
+        [
+            remove_bg_interface,
+            image_upscaler_interface,
+            video_upscaler_interface,
+            image_vectorization_interface,
+            image_extends_interface,
+            image_generator_interface
+        ],
+        [
+            "Background Removal",
+            "Image Upscaler",
+            "Video Upscaler",
+            "Image Vectorization",
+            "Image Extension",
+            "Image Generator"
+        ],
+        title=GRADIO_CONFIG["title"],
+        theme=GRADIO_CONFIG["theme"]
+    )
+
+    return demo
+
+
+if __name__ == "__main__":
+    # Check for API key
+    if not os.getenv("A1D_API_KEY"):
+        print("❌ Error: A1D_API_KEY environment variable is required")
+        print("Please set your API key: export A1D_API_KEY=your_api_key_here")
+        exit(1)
+
+    # Create and launch the app
+    demo = create_gradio_app()
+
+    # Launch with MCP server enabled
+    demo.launch(
+        mcp_server=True,
+        server_name=GRADIO_CONFIG["server_name"],
+        server_port=GRADIO_CONFIG["server_port"],
+        share=GRADIO_CONFIG["share"]
+    )
config.py
ADDED
@@ -0,0 +1,84 @@
+"""
+Configuration file for A1D MCP Server
+Contains API endpoints and tool configurations
+"""
+
+import os
+from pathlib import Path
+
+# Load .env file if it exists
+env_file = Path(__file__).parent / '.env'
+if env_file.exists():
+    with open(env_file, 'r') as f:
+        for line in f:
+            line = line.strip()
+            if line and not line.startswith('#') and '=' in line:
+                key, value = line.split('=', 1)
+                os.environ[key.strip()] = value.strip()
+
+# A1D API Configuration
+A1D_API_BASE_URL = "https://api.a1d.ai"
+API_KEY = os.getenv("A1D_API_KEY", "")
+
+# Tool configurations based on the original mcp-server
+TOOLS_CONFIG = {
+    "remove_bg": {
+        "name": "remove_bg",
+        "description": "Remove background from images using AI",
+        "api_endpoint": "/api/remove-bg",
+        "required_params": ["image_url"],
+        "optional_params": [],
+        "param_mapping": {"image_url": "imageUrl"}
+    },
+    "image_upscaler": {
+        "name": "image_upscaler",
+        "description": "Upscale images using AI with specified scale factor",
+        "api_endpoint": "/api/image-upscaler",
+        "required_params": ["image_url"],
+        "optional_params": ["scale"],
+        "default_values": {"scale": 2},
+        "scale_options": [2, 4, 8, 16],
+        "param_mapping": {"image_url": "imageUrl"}
+    },
+    "video_upscaler": {
+        "name": "video_upscaler",
+        "description": "Upscale videos using AI",
+        "api_endpoint": "/api/video-upscaler",
+        "required_params": ["video_url"],
+        "optional_params": [],
+        "param_mapping": {"video_url": "videoUrl"}
+    },
+    "image_vectorization": {
+        "name": "image_vectorization",
+        "description": "Convert images to vector format using AI",
+        "api_endpoint": "/api/image-vectorization",
+        "required_params": ["image_url"],
+        "optional_params": [],
+        "param_mapping": {"image_url": "imageUrl"}
+    },
+    "image_extends": {
+        "name": "image_extends",
+        "description": "Extend images using AI",
+        "api_endpoint": "/api/image-extends",
+        "required_params": ["image_url"],
+        "optional_params": [],
+        "param_mapping": {"image_url": "imageUrl"}
+    },
+    "image_generator": {
+        "name": "image_generator",
+        "description": "Generate images using AI from text prompts",
+        "api_endpoint": "/api/image-generator",
+        "required_params": ["prompt"],
+        "optional_params": []
+    }
+}
+
+# Gradio Configuration
+GRADIO_CONFIG = {
+    "title": "A1D MCP Server - Universal AI Tools",
+    "description": "A powerful MCP server providing AI image and video processing tools",
+    "theme": "default",
+    "share": False,
+    "server_name": "0.0.0.0",
+    "server_port": 7860
+}
mcp_handler.py
ADDED
@@ -0,0 +1,114 @@
+"""
+MCP Handler for processing requests with API key from headers
+"""
+
+import json
+from typing import Dict, Any, Optional
+from utils import A1DAPIClient, prepare_request_data, format_response_with_preview
+from config import TOOLS_CONFIG
+
+
+def get_api_key_from_headers(headers: Dict[str, str]) -> Optional[str]:
+    """Extract API key from request headers"""
+    # Try different header formats
+    api_key = (headers.get('API_KEY') or
+               headers.get('api_key') or
+               headers.get('Api-Key') or
+               headers.get('X-API-Key') or
+               headers.get('x-api-key'))
+
+    if api_key:
+        print(f"📡 Found API key in headers: {api_key[:8]}...")
+        return api_key
+
+    print("⚠️ No API key found in headers")
+    return None
+
+
+def process_mcp_request(tool_name: str, params: Dict[str, Any], headers: Dict[str, str]) -> Dict[str, Any]:
+    """Process MCP request with API key from headers"""
+    try:
+        # Get API key from headers
+        api_key = get_api_key_from_headers(headers)
+        if not api_key:
+            return {
+                "error": "API key required. Please provide API_KEY in request headers.",
+                "code": "MISSING_API_KEY"
+            }
+
+        # Validate tool
+        if tool_name not in TOOLS_CONFIG:
+            return {
+                "error": f"Unknown tool: {tool_name}",
+                "code": "INVALID_TOOL"
+            }
+
+        print(f"🔧 Processing MCP request for tool: {tool_name}")
+        print(f"📋 Parameters: {params}")
+
+        # Create API client with header API key
+        client = A1DAPIClient(api_key=api_key)
+
+        # Prepare request data
+        data = prepare_request_data(tool_name, **params)
+
+        # Make request with result
+        response = client.make_request_with_result(
+            TOOLS_CONFIG[tool_name]["api_endpoint"],
+            data,
+            timeout=120 if "video" not in tool_name else 300
+        )
+
+        # Format response
+        message, media_url = format_response_with_preview(response, tool_name)
+
+        return {
+            "success": True,
+            "message": message,
+            "media_url": media_url,
+            "raw_response": response
+        }
+
+    except Exception as e:
+        print(f"❌ MCP request error: {str(e)}")
+        return {
+            "error": str(e),
+            "code": "PROCESSING_ERROR"
+        }
+
+
+def create_mcp_tool_functions():
+    """Create MCP tool functions that can handle header-based API keys"""
+
+    def mcp_remove_bg(image_url: str, headers: Dict[str, str] = None):
+        """Remove background from images using AI (MCP version)"""
+        return process_mcp_request("remove_bg", {"image_url": image_url}, headers or {})
+
+    def mcp_image_upscaler(image_url: str, scale: int = 2, headers: Dict[str, str] = None):
+        """Upscale images using AI (MCP version)"""
+        return process_mcp_request("image_upscaler", {"image_url": image_url, "scale": scale}, headers or {})
+
+    def mcp_video_upscaler(video_url: str, headers: Dict[str, str] = None):
+        """Upscale videos using AI (MCP version)"""
+        return process_mcp_request("video_upscaler", {"video_url": video_url}, headers or {})
+
+    def mcp_image_vectorization(image_url: str, headers: Dict[str, str] = None):
+        """Convert images to vector format using AI (MCP version)"""
+        return process_mcp_request("image_vectorization", {"image_url": image_url}, headers or {})
+
+    def mcp_image_extends(image_url: str, headers: Dict[str, str] = None):
+        """Extend images using AI (MCP version)"""
+        return process_mcp_request("image_extends", {"image_url": image_url}, headers or {})
+
+    def mcp_image_generator(prompt: str, headers: Dict[str, str] = None):
+        """Generate images using AI from text prompts (MCP version)"""
+        return process_mcp_request("image_generator", {"prompt": prompt}, headers or {})
+
+    return {
+        "remove_bg": mcp_remove_bg,
+        "image_upscaler": mcp_image_upscaler,
+        "video_upscaler": mcp_video_upscaler,
+        "image_vectorization": mcp_image_vectorization,
+        "image_extends": mcp_image_extends,
+        "image_generator": mcp_image_generator
+    }
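
Editor's note: `process_mcp_request` can also be exercised directly from a Python shell, which makes the header-based key flow above easy to verify without an MCP client. A minimal sketch (the API key and image URL are placeholders):

```python
# Sketch: call the handler directly with an API key supplied via headers.
from mcp_handler import process_mcp_request

result = process_mcp_request(
    tool_name="remove_bg",
    params={"image_url": "https://example.com/photo.jpg"},
    headers={"X-API-Key": "your_a1d_api_key_here"},  # any header form accepted by get_api_key_from_headers
)
print(result.get("message") or result.get("error"))
```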
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+gradio[mcp]>=5.0.0
+requests>=2.31.0
start_server.py
ADDED
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+"""
+Startup script for A1D MCP Server
+"""
+
+import os
+import sys
+from app import create_gradio_app
+
+def main():
+    """Start the A1D MCP Server"""
+    print("🚀 Starting A1D MCP Server...")
+    print("=" * 50)
+
+    # Check for API key
+    api_key = os.getenv("A1D_API_KEY")
+    if not api_key:
+        print("❌ Error: A1D_API_KEY environment variable is required")
+        print("\n📝 To set your API key:")
+        print("   export A1D_API_KEY=your_api_key_here")
+        print("\n🔑 Get your API key at: https://a1d.ai/home/api")
+        return 1
+
+    print(f"✅ API key found: {api_key[:8]}...")
+
+    # Create and launch the app
+    try:
+        demo = create_gradio_app()
+
+        print("\n🎯 Server Configuration:")
+        print(f"   - Title: {demo.title}")
+        print(f"   - MCP Server: Enabled")
+        print(f"   - Server: http://localhost:7860")
+        print(f"   - MCP Endpoint: http://localhost:7860/gradio_api/mcp/sse")
+
+        print("\n📋 Available Tools:")
+        from config import TOOLS_CONFIG
+        for tool_name, config in TOOLS_CONFIG.items():
+            print(f"   - {tool_name}: {config['description']}")
+
+        print("\n🔧 MCP Client Configuration:")
+        print("Add this to your MCP client config:")
+        print("""
+{
+  "mcpServers": {
+    "a1d-gradio": {
+      "command": "npx",
+      "args": [
+        "mcp-remote",
+        "http://localhost:7860/gradio_api/mcp/sse"
+      ]
+    }
+  }
+}
+""")
+
+        print("\n🚀 Starting server...")
+
+        # Launch with MCP server enabled
+        demo.launch(
+            mcp_server=True,
+            server_name="0.0.0.0",
+            server_port=7860,
+            share=False,
+            show_error=True
+        )
+
+    except KeyboardInterrupt:
+        print("\n👋 Server stopped by user")
+        return 0
+    except Exception as e:
+        print(f"\n❌ Error starting server: {e}")
+        return 1
+
+if __name__ == "__main__":
+    sys.exit(main())
test_app.py
ADDED
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+"""
+Test script for A1D MCP Server
+"""
+
+import os
+import sys
+
+def test_imports():
+    """Test if all modules can be imported"""
+    try:
+        import gradio as gr
+        print("✅ Gradio imported successfully")
+
+        import config
+        print("✅ Config module imported successfully")
+
+        import utils
+        print("✅ Utils module imported successfully")
+
+        import app
+        print("✅ App module imported successfully")
+
+        return True
+    except ImportError as e:
+        print(f"❌ Import error: {e}")
+        return False
+
+def test_config():
+    """Test configuration"""
+    try:
+        from config import TOOLS_CONFIG, GRADIO_CONFIG
+
+        print(f"✅ Found {len(TOOLS_CONFIG)} tools configured:")
+        for tool_name in TOOLS_CONFIG.keys():
+            print(f"   - {tool_name}")
+
+        print(f"✅ Gradio config: {GRADIO_CONFIG['title']}")
+        return True
+    except Exception as e:
+        print(f"❌ Config error: {e}")
+        return False
+
+def test_gradio_app():
+    """Test Gradio app creation"""
+    try:
+        # Set a dummy API key for testing
+        os.environ['A1D_API_KEY'] = 'test_key_for_demo'
+
+        from app import create_gradio_app
+        demo = create_gradio_app()
+
+        print("✅ Gradio app created successfully")
+        print(f"✅ App title: {demo.title}")
+
+        return True
+    except Exception as e:
+        print(f"❌ Gradio app error: {e}")
+        return False
+
+def test_tool_functions():
+    """Test individual tool functions"""
+    try:
+        # Set a dummy API key for testing
+        os.environ['A1D_API_KEY'] = 'test_key_for_demo'
+
+        from app import remove_bg, image_upscaler, image_generator
+
+        # Test with invalid inputs to check validation
+        result = remove_bg("invalid_url")
+        if "Invalid image URL format" in result:
+            print("✅ URL validation working")
+
+        result = image_upscaler("invalid_url", 3)
+        if "Invalid image URL format" in result:
+            print("✅ Image upscaler validation working")
+
+        result = image_generator("")
+        if "Prompt is required" in result:
+            print("✅ Prompt validation working")
+
+        return True
+    except Exception as e:
+        print(f"❌ Tool function error: {e}")
+        return False
+
+def main():
+    """Run all tests"""
+    print("🧪 Testing A1D MCP Server...")
+    print("=" * 50)
+
+    tests = [
+        ("Imports", test_imports),
+        ("Configuration", test_config),
+        ("Gradio App", test_gradio_app),
+        ("Tool Functions", test_tool_functions),
+    ]
+
+    passed = 0
+    total = len(tests)
+
+    for test_name, test_func in tests:
+        print(f"\n🔍 Testing {test_name}...")
+        if test_func():
+            passed += 1
+            print(f"✅ {test_name} test passed")
+        else:
+            print(f"❌ {test_name} test failed")
+
+    print("\n" + "=" * 50)
+    print(f"🎯 Test Results: {passed}/{total} tests passed")
+
+    if passed == total:
+        print("🎉 All tests passed! The MCP server is ready to use.")
+        return 0
+    else:
+        print("⚠️ Some tests failed. Please check the errors above.")
+        return 1
+
+if __name__ == "__main__":
+    sys.exit(main())
utils.py
ADDED
@@ -0,0 +1,308 @@
+"""
+Utility functions for A1D MCP Server
+Handles API calls and data processing
+"""
+
+import requests
+import json
+import os
+import time
+import re
+from typing import Dict, Any, Optional, Tuple
+from config import A1D_API_BASE_URL, API_KEY, TOOLS_CONFIG
+
+
+class A1DAPIClient:
+    """Client for making API calls to A1D services"""
+
+    def __init__(self, api_key: Optional[str] = None):
+        # Try to get API key from multiple sources
+        self.api_key = api_key or self._get_api_key()
+        self.base_url = A1D_API_BASE_URL
+        self.session = requests.Session()
+
+        if not self.api_key:
+            raise ValueError(
+                "API key is required. Set A1D_API_KEY environment variable, pass it directly, or provide via MCP header.")
+
+        # Set default headers
+        self.session.headers.update({
+            "Authorization": f"KEY {self.api_key}",
+            "Content-Type": "application/json",
+            "User-Agent": "A1D-MCP-Server/1.0.0"
+        })
+
+    def _get_api_key(self) -> Optional[str]:
+        """Get API key from various sources"""
+        # 1. Environment variable
+        api_key = API_KEY
+        if api_key:
+            return api_key
+
+        # 2. Try to get from Gradio request headers (if available)
+        try:
+            import gradio as gr
+            request = gr.request()
+            if request and hasattr(request, 'headers'):
+                # Check for API_KEY header from MCP client
+                api_key = request.headers.get(
+                    'API_KEY') or request.headers.get('api_key')
+                if api_key:
+                    print(f"📡 Using API key from MCP client header")
+                    return api_key
+        except:
+            pass
+
+        return None
+
+    def make_request(self, endpoint: str, data: Dict[str, Any]) -> Dict[str, Any]:
+        """Make API request to A1D service"""
+        url = f"{self.base_url}{endpoint}"
+
+        # Add source field to all requests
+        request_data = {**data, "source": "mcp"}
+
+        # Print detailed request information
+        print("\n" + "="*60)
+        print("🔍 A1D API REQUEST DEBUG INFO")
+        print("="*60)
+        print(f"📡 URL: {url}")
+        print(f"🔧 Method: POST")
+
+        print(f"\n📋 Headers:")
+        for key, value in self.session.headers.items():
+            # Mask API key for security
+            if key.lower() in ['api_key', 'authorization']:
+                masked_value = f"{value[:8]}..." if len(value) > 8 else "***"
+                print(f"   {key}: {masked_value}")
+            else:
+                print(f"   {key}: {value}")
+
+        print(f"\n📦 Request Body:")
+        print(f"   {json.dumps(request_data, indent=2)}")
+
+        try:
+            print(f"\n⏳ Sending request...")
+            response = self.session.post(url, json=request_data, timeout=30)
+
+            print(f"\n📊 Response Info:")
+            print(f"   Status Code: {response.status_code}")
+            print(f"   Status Text: {response.reason}")
+
+            print(f"\n📋 Response Headers:")
+            for key, value in response.headers.items():
+                print(f"   {key}: {value}")
+
+            print(f"\n📦 Response Body:")
+            try:
+                response_json = response.json()
+                print(f"   {json.dumps(response_json, indent=2)}")
+            except:
+                print(f"   {response.text[:500]}...")
+
+            print("="*60)
+
+            response.raise_for_status()
+            return response.json()
+
+        except requests.exceptions.RequestException as e:
+            print(f"\n❌ Request failed: {str(e)}")
+            print("="*60)
+            raise Exception(f"API request failed: {str(e)}")
+        except json.JSONDecodeError as e:
+            print(f"\n❌ JSON decode failed: {str(e)}")
+            print("="*60)
+            raise Exception(f"Failed to parse API response: {str(e)}")
+
+    def get_task_result(self, task_id: str, timeout: int = 60) -> Dict[str, Any]:
+        """Get task result using SSE endpoint"""
+        url = f"{self.base_url}/api/task/{task_id}/sse"
+
+        print(f"\n🔄 Getting task result...")
+        print(f"📡 SSE URL: {url}")
+        print(f"⏱️ Timeout: {timeout}s")
+
+        headers = {
+            "Authorization": f"KEY {self.api_key}",
+            "Accept": "text/event-stream"
+        }
+
+        try:
+            response = requests.get(
+                url, headers=headers, stream=True, timeout=timeout)
+            response.raise_for_status()
+
+            print(f"📊 SSE Response Status: {response.status_code}")
+
+            # Parse SSE stream
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    print(f"📥 SSE Line: {line}")
+
+                    # Parse SSE data
+                    if line.startswith("data: "):
+                        data_str = line[6:]  # Remove "data: " prefix
+                        if data_str.strip() == "[DONE]":
+                            print("✅ Task completed!")
+                            break
+
+                        try:
+                            data = json.loads(data_str)
+                            print(
+                                f"📦 Parsed data: {json.dumps(data, indent=2)}")
+
+                            # Check if task is completed
+                            status = data.get("status", "").upper()
+                            if (status in ["COMPLETED", "FINISHED", "SUCCESS"] or
+                                    "result" in data or
+                                    "imageUrl" in data or
+                                    "videoUrl" in data or
+                                    "url" in data):
+                                print("✅ Task result received!")
+                                return data
+                            elif status in ["FAILED", "ERROR"]:
+                                raise Exception(
+                                    f"Task failed: {data.get('error', 'Unknown error')}")
+                            else:
+                                print(
+                                    f"⏳ Task status: {data.get('status', 'processing')}")
+
+                        except json.JSONDecodeError:
+                            print(f"⚠️ Could not parse JSON: {data_str}")
+                            continue
+
+            raise Exception("Task did not complete within timeout")
+
+        except requests.exceptions.RequestException as e:
+            print(f"❌ SSE request failed: {str(e)}")
+            raise Exception(f"Failed to get task result: {str(e)}")
+
+    def make_request_with_result(self, endpoint: str, data: Dict[str, Any], timeout: int = 60) -> Dict[str, Any]:
+        """Make API request and wait for result"""
+        # First, make the initial request to get task ID
+        response = self.make_request(endpoint, data)
+
+        if "taskId" not in response:
+            raise Exception("No taskId in response")
+
+        task_id = response["taskId"]
+        print(f"\n🎯 Task ID: {task_id}")
+
+        # Then get the result
+        return self.get_task_result(task_id, timeout)
+
+
+def validate_url(url: str) -> bool:
+    """Validate if the provided string is a valid URL"""
+    import re
+    url_pattern = re.compile(
+        r'^https?://'  # http:// or https://
+        # domain...
+        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|'
+        r'localhost|'  # localhost...
+        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
+        r'(?::\d+)?'  # optional port
+        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
+    return url_pattern.match(url) is not None
+
+
+def validate_scale(scale: int) -> bool:
+    """Validate scale parameter for image upscaling"""
+    return scale in TOOLS_CONFIG["image_upscaler"]["scale_options"]
+
+
+def prepare_request_data(tool_name: str, **kwargs) -> Dict[str, Any]:
+    """Prepare request data based on tool configuration"""
+    if tool_name not in TOOLS_CONFIG:
+        raise ValueError(f"Unknown tool: {tool_name}")
+
+    config = TOOLS_CONFIG[tool_name]
+    data = {}
+
+    # Add required parameters
+    for param in config["required_params"]:
+        if param not in kwargs:
+            raise ValueError(f"Missing required parameter: {param}")
+
+        # Apply parameter mapping if exists
+        param_mapping = config.get("param_mapping", {})
+        api_param_name = param_mapping.get(param, param)
+        data[api_param_name] = kwargs[param]
+
+    # Add optional parameters with defaults
+    for param in config.get("optional_params", []):
+        if param in kwargs:
+            # Apply parameter mapping if exists
+            param_mapping = config.get("param_mapping", {})
+            api_param_name = param_mapping.get(param, param)
+            data[api_param_name] = kwargs[param]
+        elif param in config.get("default_values", {}):
+            # Apply parameter mapping if exists
+            param_mapping = config.get("param_mapping", {})
+            api_param_name = param_mapping.get(param, param)
+            data[api_param_name] = config["default_values"][param]
+
+    return data
+
+
+def format_response_with_preview(response: Dict[str, Any], tool_name: str) -> Tuple[str, Optional[str]]:
+    """Format API response for display with media preview
+
+    Returns:
+        Tuple of (message, media_url_for_preview)
+    """
+    if "error" in response:
+        return f"❌ Error: {response['error']}", None
+
+    # Handle different response formats
+    result_url = None
+
+    # Check for A1D API specific fields first
+    result_url = (response.get("imageUrl") or
+                  response.get("videoUrl") or
+                  response.get("url"))
+
+    # Then check nested result fields
+    if not result_url and "result" in response:
+        result = response["result"]
+        if isinstance(result, dict):
+            # Try different possible URL fields
+            result_url = (result.get("imageUrl") or
+                          result.get("videoUrl") or
+                          result.get("url") or
+                          result.get("image_url") or
+                          result.get("video_url") or
+                          result.get("output_url"))
+        elif isinstance(result, str) and result.startswith("http"):
+            result_url = result
+
+    # Also check other common fields
+    if not result_url:
+        result_url = (response.get("image_url") or
+                      response.get("video_url") or
+                      response.get("output_url"))
+
+    if result_url:
+        # Determine media type
+        media_type = "image"
+        if any(ext in result_url.lower() for ext in ['.mp4', '.avi', '.mov', '.webm']):
+            media_type = "video"
+
+        message = f"✅ Success! {media_type.title()} generated: {result_url}"
+        return message, result_url
+
+    return f"✅ Task completed successfully for {tool_name}", None
+
+
+def format_response(response: Dict[str, Any], tool_name: str) -> str:
+    """Format API response for display (backward compatibility)"""
+    message, _ = format_response_with_preview(response, tool_name)
+    return message
+
+
+def get_tool_info(tool_name: str) -> Dict[str, Any]:
+    """Get tool configuration information"""
+    if tool_name not in TOOLS_CONFIG:
+        raise ValueError(f"Unknown tool: {tool_name}")
+
+    return TOOLS_CONFIG[tool_name]
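
Editor's note: taken together, `A1DAPIClient`, `prepare_request_data`, and `format_response_with_preview` form the low-level path that both app.py and mcp_handler.py go through. A short usage sketch mirroring that call chain (the API key and image URL are placeholders):

```python
# Sketch: the same call chain app.py performs for the image_upscaler tool.
from utils import A1DAPIClient, prepare_request_data, format_response_with_preview
from config import TOOLS_CONFIG

client = A1DAPIClient(api_key="your_a1d_api_key_here")

# Maps image_url -> imageUrl and fills the default scale per TOOLS_CONFIG.
data = prepare_request_data("image_upscaler",
                            image_url="https://example.com/image.png",
                            scale=4)

# Submits the task, then follows the /api/task/<taskId>/sse stream until it finishes.
response = client.make_request_with_result(
    TOOLS_CONFIG["image_upscaler"]["api_endpoint"], data, timeout=120)

message, media_url = format_response_with_preview(response, "image_upscaler")
print(message)
print("Result URL:", media_url)
```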