import gradio as gr
from smolagents import CodeAgent, InferenceClientModel, TransformersModel
from smolagents.mcp_client import MCPClient

# Initialize the MCP client and connect to the remote MCP server
try:
    mcp_client = MCPClient(
        # Another working example server on the Hub:
        # {"url": "https://abidlabs-mcp-tools.hf.space/gradio_api/mcp/sse"}
        {"url": "https://captain-awesome-alquranchapters.hf.space/gradio_api/mcp/sse"}
    )
    tools = mcp_client.get_tools()
    # model = InferenceClientModel()  # alternative: hosted inference via the HF Hub
    # CodeAgent expects a smolagents model wrapper, not a raw transformers model,
    # so wrap the local checkpoint in TransformersModel instead of AutoModelForCausalLM.
    model = TransformersModel(
        model_id="unsloth/Llama-3.2-1B",
        device_map="auto",
        max_new_tokens=5000,
    )
    agent = CodeAgent(tools=[*tools], model=model)

    # Define the Gradio ChatInterface that forwards each user message to the agent
    demo = gr.ChatInterface(
        fn=lambda message, history: str(agent.run(message)),
        type="messages",
        title="Agent with MCP Tools",
        description="A simple agent that uses MCP tools to fetch chapters of the Quran.",
    )

    demo.launch(share=True)
finally:
    # Close the MCP client connection only if it was successfully created;
    # otherwise disconnecting here would itself raise a NameError.
    if "mcp_client" in locals():
        mcp_client.disconnect()
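
# Run with: python app.py
# Gradio prints a local URL and, because share=True, a temporary public *.gradio.live URL.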