# (extraction artifact — file-viewer metadata, commented out so the file parses)
# File size: 2,648 Bytes
# commits: bc40118 7404322 4f225ef 9b51b5d 4434f84 ef0c72b 6a73182 ce3290e
# import gradio as gr
# from mcp.client.stdio import StdioServerParameters
# from smolagents import InferenceClientModel, CodeAgent
# from smolagents.mcp_client import MCPClient
# from transformers import pipeline
# from transformers import AutoModelForCausalLM, AutoTokenizer
# import torch
# # Initialize the MCP client correctly
# try:
# mcp_client = MCPClient(
# ## Try this working example on the hub:
# # {"url": "https://abidlabs-mcp-tools.hf.space/gradio_api/mcp/sse"}
# {"url": "https://captain-awesome-alquranchapters.hf.space/gradio_api/mcp/sse"}
# )
# tools = mcp_client.get_tools()
# # model = InferenceClientModel()
# # model = TransformersModel(
# # model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
# # device="cuda",
# # max_new_tokens=5000,
# # )
# model_id = "unsloth/Llama-3.2-1B"
# model = AutoModelForCausalLM.from_pretrained(
# model_id,
# torch_dtype=torch.bfloat16,
# device_map="auto"
# )
# agent = CodeAgent(tools=[*tools], model=model)
# # Define Gradio ChatInterface
# demo = gr.ChatInterface(
# fn=lambda message, history: str(agent.run(message)),
# type="messages",
# title="Agent with MCP Tools",
# description="This is a simple agent that uses MCP tools to get chapters of the Quran.",
# )
# demo.launch(share=True)
# finally:
# # Properly close the MCP client connection
# # if 'mcp_client' in locals():
# # mcp_client.disconnect()
# mcp_client.disconnect()
import asyncio

import gradio as gr
import torch
from smolagents import CodeAgent
from smolagents.mcp_client import MCPClient
from transformers import AutoModelForCausalLM
async def main():
    """Serve a Gradio chat agent whose tools come from a remote MCP server.

    Connects to the Quran-chapters MCP server, loads a small causal LM,
    wires the MCP tools into a smolagents ``CodeAgent``, and launches a
    Gradio ``ChatInterface``. The MCP connection is always closed on exit.
    """
    # smolagents' MCPClient establishes its connection in the constructor
    # and exposes a *synchronous* API — there is no awaitable connect().
    mcp_client = MCPClient(
        {"url": "https://captain-awesome-alquranchapters.hf.space/gradio_api/mcp/sse"}
    )
    try:
        # get_tools() is a plain synchronous call in smolagents.
        tools = mcp_client.get_tools()

        model_id = "unsloth/Llama-3.2-1B"
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype=torch.bfloat16,
            device_map="auto",  # spread layers across available devices
        )

        # NOTE(review): CodeAgent normally expects a smolagents model wrapper
        # (e.g. TransformersModel), not a raw AutoModelForCausalLM — confirm
        # this model object is accepted by the smolagents version in use.
        agent = CodeAgent(tools=tools, model=model)

        def gradio_fn(message, history):
            # CodeAgent.run is synchronous; awaiting it would raise a
            # TypeError. Gradio accepts plain (non-async) functions.
            return str(agent.run(message))

        demo = gr.ChatInterface(
            fn=gradio_fn,
            type="messages",
            title="Agent with MCP Tools",
            description="This is a simple agent that uses MCP tools to get chapters of the Quran.",
        )
        # launch() blocks until the server is stopped, so cleanup must live
        # in the finally block below rather than after this call.
        demo.launch(share=True)
    finally:
        # Release the MCP connection even if launch() raises or the server
        # is interrupted.
        mcp_client.disconnect()
if __name__ == "__main__":
    # Entry point: run the async main() on a fresh event loop.
    # (Removed a stray trailing "|" — an extraction artifact that made
    # this line a syntax error.)
    asyncio.run(main())