# Technical Support Assistant — Gradio chat app that routes user requests
# through an MCP server hosted on a Hugging Face Space.
# --- Imports (stdlib first, then third-party) --------------------------------
import json
import logging

import gradio as gr
import requests
from smolagents import CodeAgent
# NOTE(review): tenacity is imported but no @retry is applied anywhere below —
# either wire retry onto the network call or drop the import. TODO confirm intent.
from tenacity import retry, stop_after_attempt, wait_exponential

# Configure module-level logging once at import time.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Base URL of the Hugging Face Space that exposes the MCP server.
HF_SPACE_URL = "https://manavraj-troubleshoot-mcp.hf.space"
def call_mcp_server(message: str, tool_type: str = "knowledge_base") -> str:
    """Send *message* to the MCP server and return its first data field.

    Args:
        message: User text forwarded to the remote tool.
        tool_type: One of "knowledge_base", "web_search" or "formatter".

    Returns:
        The first element of the server's ``data`` payload, or an
        ``"Error: ..."`` string on any failure (unknown tool, network
        error, bad HTTP status, malformed response). Never raises.
    """
    try:
        # Logical tool -> Gradio endpoint name.
        # NOTE(review): this map is currently not used for routing — every
        # tool_type posts to the same /gradio_api/mcp/sse endpoint, so the
        # three tools are not actually differentiated. TODO: route per
        # endpoint (e.g. /call/<name>) once the Space's API is confirmed.
        tool_endpoint_map = {
            "knowledge_base": "predict_1",
            "web_search": "predict_2",
            "formatter": "predict_3",
        }
        # Reject unknown tool names early instead of silently proceeding.
        if tool_type not in tool_endpoint_map:
            return f"Error: unknown tool_type '{tool_type}'"

        endpoint = f"{HF_SPACE_URL}/gradio_api/mcp/sse"
        response = requests.post(
            endpoint,
            json={"data": [message]},
            timeout=30,
            headers={"Content-Type": "application/json"},
        )
        response.raise_for_status()

        # SSE-style replies begin with "event: predict"; the JSON payload sits
        # on the second line after the 5-character "data:" prefix.
        if response.text.startswith("event: predict"):
            data = json.loads(response.text.split("\n")[1][5:])
            return data["data"][0]
        # Otherwise the server answered with plain JSON.
        return response.json()["data"][0]
    except Exception as e:
        # Boundary handler: callers expect a string result, never an exception.
        logger.error("API call failed: %s", e)
        return f"Error: {str(e)}"
# Build the agent that powers the chat. The system prompt documents the
# tool-routing contract the model is expected to follow.
# NOTE(review): recent smolagents releases expect `model` to be a model object
# (e.g. InferenceClientModel), not a bare checkpoint string, and DialoGPT is a
# conversational — not instruction-following — model; confirm this configuration
# against the installed smolagents version.
agent = CodeAgent(
    tools=[],  # no local tools — everything goes through the MCP server
    model="microsoft/DialoGPT-medium",
    # {{authorized_imports}} / {{managed_agents_descriptions}} are template
    # placeholders that smolagents substitutes into the system prompt.
    system_prompt="""{{authorized_imports}}
- requests for API calls
- standard Python libraries
{{managed_agents_descriptions}}
You are a Technical Support Assistant with these capabilities:
1. Troubleshooting technical issues
2. Finding information via web search
3. Formatting instructions
Access tools through MCP server:
- knowledge_base: For technical issues
- web_search: For information lookup
- formatter: To organize steps
Response workflow:
1. Analyze user request
2. Choose appropriate tool
3. Return clear response
Example:
USER: My wifi disconnected
THOUGHT: Should use knowledge_base
ACTION: knowledge_base("wifi disconnection")
RESPONSE: Try these steps: [solution steps]
Important:
- Always return the full response including ACTION and RESPONSE
- Never show internal workflow to user
- If no tool is needed, respond directly""",
)
def chat_interface(message: str, history: list) -> str:
    """Gradio ChatInterface callback: run the agent on *message*.

    Args:
        message: Latest user message.
        history: Prior chat turns (unused, but required by gr.ChatInterface).

    Returns:
        The agent's reply with internal ACTION/RESPONSE scaffolding stripped,
        or an error string if the agent raises. Never raises.
    """
    # (Removed a dead `tool_mapping` dict of lambdas that was never invoked —
    # tool dispatch happens inside the agent / call_mcp_server, not here.)
    try:
        response = agent.run(message)
        if isinstance(response, str):
            # Hide the agent's internal workflow: only the text after the
            # final "RESPONSE:" marker is meant for the user.
            if "ACTION:" in response and "RESPONSE:" in response:
                final = response.split("RESPONSE:")[-1].strip()
                return final if final else "I couldn't process that request."
            return response
        # The agent may return non-string objects; coerce for the UI.
        return str(response)
    except Exception as e:
        # Boundary handler: the UI needs a string, never a traceback.
        logger.error("Chat error: %s", e)
        return f"Error processing your request: {str(e)}"
# Wire the chat handler into a Gradio chat UI.
demo = gr.ChatInterface(
    fn=chat_interface,
    title="🔧 Technical Support",
    examples=["Wifi not working", "Find Windows 11 specs", "Format: Turn off. Wait. Restart"],
)

# Launch only when run as a script (Spaces also imports this module).
if __name__ == "__main__":
    demo.launch()