import os

import gradio as gr
from smolagents import CodeAgent, InferenceClientModel
from smolagents.mcp_client import MCPClient
try:
    # Connect to the MCP server that the Space exposes over SSE and fetch
    # the tools it advertises.
    mcp_client = MCPClient(
        ## Try this working example on the hub:
        # {"url": "https://abidlabs-mcp-tools.hf.space/gradio_api/mcp/sse"}
        {"url": "https://captain-awesome-alquranchapters.hf.space/gradio_api/mcp/sse"}
    )
    tools = mcp_client.get_tools()
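    # Optional sanity check (a sketch, not part of the original app): list
    # what the server exposed. Assumes each adapted tool carries `name` and
    # `description` attributes, as smolagents Tool objects do.
    # for tool in tools:
    #     print(f"{tool.name}: {tool.description}")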
    # Serverless inference through the HF Inference API; the token is read
    # from the Space's secrets.
    model = InferenceClientModel(token=os.getenv("HUGGINGFACE_API_TOKEN"))
    agent = CodeAgent(
        tools=[*tools],
        model=model,
        additional_authorized_imports=["json", "ast", "urllib", "base64"],
    )
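    # Quick smoke test before wiring up the UI (hypothetical prompt, shown
    # only as an illustration):
    # print(agent.run("List the chapters of the Quran"))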
    # Alternative: run a local model instead of the Inference API. Note that
    # CodeAgent expects a smolagents model wrapper, not a raw transformers
    # model, so wrap it in TransformersModel:
    # from smolagents import TransformersModel
    # model = TransformersModel(
    #     model_id="unsloth/Llama-3.2-1B",
    #     device_map="auto",
    #     torch_dtype="bfloat16",
    # )
    # agent = CodeAgent(tools=[*tools], model=model)
    # Simple chat UI: each user message goes to the agent and the agent's
    # final answer is returned as the reply.
    demo = gr.ChatInterface(
        fn=lambda message, history: str(agent.run(message)),
        type="messages",
        title="Agent with MCP Tools",
        description="This is a simple agent that uses MCP tools to get chapters of the Quran.",
    )
    demo.launch()
    # demo.launch(share=True)
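    # If several users may hit the Space at once, enabling Gradio's request
    # queue before launching is a common option (a sketch, not in the
    # original):
    # demo.queue().launch()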
finally:
    # Close the MCP connection on shutdown. Guard the call so that a failed
    # MCPClient() construction does not raise a NameError here.
    if "mcp_client" in locals():
        mcp_client.disconnect()
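
# Alternative shutdown pattern (a sketch, assuming smolagents' MCPClient
# supports the context-manager protocol, which yields the tools and
# disconnects automatically on exit):
#
# with MCPClient(
#     {"url": "https://captain-awesome-alquranchapters.hf.space/gradio_api/mcp/sse"}
# ) as tools:
#     agent = CodeAgent(tools=[*tools], model=model)
#     ...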