largo committed on
Commit
94492e5
Β·
1 Parent(s): 59098b7

Working the deploy

Browse files
Files changed (2) hide show
  1. agent.ts β†’ agent.ts.example +0 -0
  2. app.py +124 -20
agent.ts β†’ agent.ts.example RENAMED
File without changes
app.py CHANGED
@@ -1,30 +1,134 @@
1
- import gradio as gr
 
 
 
 
 
2
  import os
 
 
 
 
 
 
 
3
 
4
- from smolagents import InferenceClientModel, CodeAgent, MCPClient
 
 
 
5
 
6
- try:
7
- mcp_client = MCPClient(
8
- {"url": "https://abidlabs-mcp-tool-http.hf.space/gradio_api/mcp/sse"}
9
  )
10
- # mcp_client = MCPClient(
11
- # {"url": " http://127.0.0.1:7860/gradio_api/mcp/sse"}
12
- #)
13
 
14
- tools = mcp_client.get_tools()
 
 
 
 
15
 
16
- model = InferenceClientModel(token=os.getenv("HF_HUB_TOKEN"))
 
 
17
 
18
- agent = CodeAgent(tools=[*tools], model=model, additional_authorized_imports=["json", "ast", "urllib", "base64"])
 
 
 
19
 
20
- demo = gr.ChatInterface(
21
- fn=lambda message, history: str(agent.run(message)),
22
- type="messages",
23
- examples=["Analyze the sentiment of the following text 'This is awesome'"],
24
- title="Agent with MCP Tools",
25
- description="This is a simple agent that uses MCP tools to answer questions.",
26
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
- demo.launch(share=True)
29
- finally:
30
- mcp_client.disconnect()
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ SmolAgents Authentication Fix
4
+ Resolves 401 "Invalid username or password" errors
5
+ """
6
+
7
  import os
8
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, InferenceClientModel
9
+
10
# Method 1: Explicit token (most reliable)
def create_agent_with_token(hf_token=None):
    """Create agent with an explicit HF token - most reliable method.

    Args:
        hf_token: HuggingFace access token (starts with "hf_"). When omitted,
            falls back to the HF_TOKEN environment variable, then to a
            placeholder that MUST be replaced with a real token.

    Returns:
        A CodeAgent using HfApiModel with a DuckDuckGo search tool.
    """
    # SECURITY NOTE(review): never commit a real token to source control;
    # prefer passing it in or setting HF_TOKEN in the environment.
    if hf_token is None:
        hf_token = os.getenv("HF_TOKEN", "hf_xxxxxxxxxxxxxxxxxxxxxxxxxx")

    model = HfApiModel(
        model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
        token=hf_token,
    )

    agent = CodeAgent(
        tools=[DuckDuckGoSearchTool()],
        model=model,
    )
    return agent
 
 
26
 
27
# Method 2: Environment variable (recommended for production)
def create_agent_with_env_var():
    """Create agent using HF_TOKEN environment variable"""
    # Export the token before launching, e.g.:
    #   export HF_TOKEN="hf_xxxxxxxxxxxxxxxxxxxxxxxxxx"

    # Fail fast with a clear message when the token is absent.
    if not os.getenv("HF_TOKEN"):
        raise ValueError("HF_TOKEN environment variable not set!")

    # No explicit token argument: it is read from HF_TOKEN automatically.
    model = HfApiModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct")

    return CodeAgent(tools=[DuckDuckGoSearchTool()], model=model)
47
+
48
# Method 3: InferenceClientModel (newest approach, 2025)
def create_agent_with_inference_client(hf_token=None):
    """Create agent using newer InferenceClientModel - better error handling.

    Args:
        hf_token: HuggingFace access token. When omitted, falls back to the
            HF_TOKEN environment variable, then to a placeholder that MUST
            be replaced with a real token.

    Returns:
        A CodeAgent using InferenceClientModel with a DuckDuckGo search tool.
    """
    # SECURITY NOTE(review): avoid hard-coding real tokens; resolve from the
    # environment so the file can be committed safely.
    if hf_token is None:
        hf_token = os.getenv("HF_TOKEN", "hf_xxxxxxxxxxxxxxxxxxxxxxxxxx")

    model = InferenceClientModel(
        model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
        token=hf_token,
    )

    agent = CodeAgent(
        tools=[DuckDuckGoSearchTool()],
        model=model,
    )
    return agent
63
+
64
# Method 4: Login first (alternative approach)
def create_agent_with_login(hf_token=None):
    """Create agent after logging in to HuggingFace Hub.

    Args:
        hf_token: token used for the hub login. When omitted, falls back to
            the HF_TOKEN environment variable, then to a placeholder that
            MUST be replaced with a real token.

    Returns:
        A CodeAgent using HfApiModel; the token is supplied by the hub
        session rather than passed to the model explicitly.
    """
    from huggingface_hub import login

    # SECURITY NOTE(review): never commit a real token; resolve it from the
    # environment instead of hard-coding it here.
    if hf_token is None:
        hf_token = os.getenv("HF_TOKEN", "hf_xxxxxxxxxxxxxxxxxxxxxxxxxx")

    # Login to HF (login() would prompt interactively if no token is given).
    login(token=hf_token)

    # After login, HfApiModel should work without an explicit token.
    model = HfApiModel(
        model_id="Qwen/Qwen2.5-Coder-32B-Instruct"
    )

    agent = CodeAgent(
        tools=[DuckDuckGoSearchTool()],
        model=model
    )
    return agent
82
+
83
+ def test_agent(agent):
84
+ """Test the agent with a simple query"""
85
+ try:
86
+ result = agent.run("What is the current time?")
87
+ print("βœ… Success! Agent is working properly.")
88
+ print(f"Result: {result}")
89
+ return True
90
+ except Exception as e:
91
+ print(f"❌ Error: {e}")
92
+ return False
93
+
94
if __name__ == "__main__":
    print("Testing SmolAgents authentication fixes...\n")

    # Attempt the explicit-token route first (most common solution).
    print("Method 1: Explicit token")
    try:
        explicit_agent = create_agent_with_token()
    except Exception as err:
        print(f"❌ Method 1 failed: {err}\n")
    else:
        if test_agent(explicit_agent):
            print("βœ… Method 1 successful!\n")
        else:
            print("❌ Method 1 failed, trying next method...\n")

    # Fall through to the newer InferenceClientModel route.
    print("Method 3: InferenceClientModel")
    try:
        client_agent = create_agent_with_inference_client()
        if test_agent(client_agent):
            print("βœ… Method 3 successful!\n")
    except Exception as err:
        print(f"❌ Method 3 failed: {err}\n")

# Token Requirements Checklist:
"""
Your HuggingFace token must have these permissions:
βœ… "Make calls to the serverless Inference API"
βœ… "Read access to contents of all public gated repos" (for gated models)

To get your token:
1. Go to: https://huggingface.co/settings/tokens
2. Click "New token"
3. Select "Write" permissions
4. Copy the token (starts with hf_)

Common issues:
- Token is expired or revoked
- Token lacks proper permissions
- Model is gated and requires special access
- Network/firewall blocking HF API calls
"""