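"""Gradio ChatInterface backed by a smolagents CodeAgent whose tools come
from a remote MCP server reached over SSE. Earlier attempts are kept as
commented-out blocks around the active code for reference."""
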
# import gradio as gr

# from mcp.client.stdio import StdioServerParameters
# from smolagents import InferenceClientModel, CodeAgent
# from smolagents.mcp_client import MCPClient
# from transformers import pipeline
# from transformers import AutoModelForCausalLM, AutoTokenizer
# import torch

# # Initialize the MCP client correctly
# try:
#     mcp_client = MCPClient(
#         ## Try this working example on the hub:
#         # {"url": "https://abidlabs-mcp-tools.hf.space/gradio_api/mcp/sse"}
#         {"url": "https://captain-awesome-alquranchapters.hf.space/gradio_api/mcp/sse"}
#     )
    
#     tools = mcp_client.get_tools()

#     # model = InferenceClientModel()
#     # model = TransformersModel(
#     # model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
#     # device="cuda",
#     # max_new_tokens=5000,
#     # )
#     model_id = "unsloth/Llama-3.2-1B"

#     model = AutoModelForCausalLM.from_pretrained(
#       model_id,
#       torch_dtype=torch.bfloat16,
#       device_map="auto"
#     )


#     agent = CodeAgent(tools=[*tools], model=model)
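# #   NOTE: this version likely failed here; CodeAgent expects a smolagents
# #   model wrapper (e.g. TransformersModel), not a raw AutoModelForCausalLM.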



#     # Define Gradio ChatInterface
#     demo = gr.ChatInterface(
#         fn=lambda message, history: str(agent.run(message)),
#         type="messages",
#         title="Agent with MCP Tools",
#         description="This is a simple agent that uses MCP tools to get chapters of the Quran.",
#     )

#     demo.launch(share=True)

# finally:
#     # Properly close the MCP client connection
#     # if 'mcp_client' in locals():
#         # mcp_client.disconnect()
#     mcp_client.disconnect()




import os

import gradio as gr
from smolagents import CodeAgent, InferenceClientModel
from smolagents.mcp_client import MCPClient

# Only needed for the commented-out local-model alternative below:
# import torch
# from transformers import AutoModelForCausalLM

try:
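    # Connect to the remote MCP server over SSE; get_tools() wraps each tool
    # the server exposes as a smolagents Tool the agent can call.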
    mcp_client = MCPClient(
        ## Try this working example on the hub:
        # {"url": "https://abidlabs-mcp-tools.hf.space/gradio_api/mcp/sse"}
        {"url": "https://captain-awesome-alquranchapters.hf.space/gradio_api/mcp/sse"}
    )
    tools = mcp_client.get_tools()

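    # InferenceClientModel runs the model via the Hugging Face Inference API,
    # authenticated with the HUGGINGFACE_API_TOKEN environment variable;
    # additional_authorized_imports whitelists modules the agent's generated
    # code may import.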
    model = InferenceClientModel(token=os.getenv("HUGGINGFACE_API_TOKEN"))
    agent = CodeAgent(
        tools=[*tools],
        model=model,
        additional_authorized_imports=["json", "ast", "urllib", "base64"],
    )

    # model_id = "unsloth/Llama-3.2-1B"
    # model = AutoModelForCausalLM.from_pretrained(
    #     model_id,
    #     torch_dtype=torch.bfloat16,
    #     device_map="auto"
    # )

    # agent = CodeAgent(tools=tools, model=model)
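
    # A sketch of a working local-model variant, assuming smolagents'
    # TransformersModel wrapper (CodeAgent needs a smolagents Model, not a
    # raw transformers AutoModelForCausalLM); model_id as tried above:
    #
    # from smolagents import TransformersModel
    # model = TransformersModel(
    #     model_id="unsloth/Llama-3.2-1B",
    #     device_map="auto",  # assumption: automatic device placement
    #     max_new_tokens=5000,
    # )
    # agent = CodeAgent(tools=[*tools], model=model)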

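    # Each chat turn hands the message to the agent and returns its final
    # answer as a string; the history argument is ignored, so turns are
    # independent.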
    demo = gr.ChatInterface(
        fn=lambda message, history: str(agent.run(message)),
        type="messages",
        title="Agent with MCP Tools",
        description="This is a simple agent that uses MCP tools to get chapters of the Quran.",
    )

    demo.launch()

    # demo.launch(share=True)
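    # (share=True additionally serves the app through a temporary public
    # gradio.live URL.)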

finally:
    # mcp_client is unbound if MCPClient() itself raised, so guard the cleanup.
    if "mcp_client" in locals():
        mcp_client.disconnect()




# import gradio as gr
# import os

# from smolagents import InferenceClientModel, CodeAgent, MCPClient


# try:
#     mcp_client = MCPClient(
#         # {"url": "https://abidlabs-mcp-tool-http.hf.space/gradio_api/mcp/sse"}
#         {"url":"https://captain-awesome-alquranchapters.hf.space/gradio_api/mcp/sse"}
#     )
#     tools = mcp_client.get_tools()

#     model = InferenceClientModel(token=os.getenv("HUGGINGFACE_API_TOKEN"))
#     agent = CodeAgent(tools=[*tools], model=model, additional_authorized_imports=["json", "ast", "urllib", "base64"])

#     demo = gr.ChatInterface(
#         fn=lambda message, history: str(agent.run(message)),
#         type="messages",
#         examples=["Analyze the sentiment of the following text 'This is awesome'"],
#         title="Agent with MCP Tools",
#         description="This is a simple agent that uses MCP tools to answer questions.",
#     )

#     demo.launch()
# finally:
#     mcp_client.disconnect()