Spaces:
Runtime error
Runtime error
implement pink glasses
Browse files- .gitignore +1 -0
- Gradio_UI.py +178 -141
- README.md +9 -6
- app.py +4 -1
- prompts.yaml +26 -27
- requirements.txt +2 -0
- tools/fetch_news.py +47 -0
- tools/final_answer.py +1 -1
- tools/web_search.py +1 -1
.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
/.env
|
Gradio_UI.py
CHANGED
@@ -1,18 +1,5 @@
|
|
1 |
#!/usr/bin/env python
|
2 |
# coding=utf-8
|
3 |
-
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
4 |
-
#
|
5 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
# you may not use this file except in compliance with the License.
|
7 |
-
# You may obtain a copy of the License at
|
8 |
-
#
|
9 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
#
|
11 |
-
# Unless required by applicable law or agreed to in writing, software
|
12 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
# See the License for the specific language governing permissions and
|
15 |
-
# limitations under the License.
|
16 |
import mimetypes
|
17 |
import os
|
18 |
import re
|
@@ -24,11 +11,12 @@ from smolagents.agents import ActionStep, MultiStepAgent
|
|
24 |
from smolagents.memory import MemoryStep
|
25 |
from smolagents.utils import _is_package_available
|
26 |
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
|
|
32 |
import gradio as gr
|
33 |
|
34 |
if isinstance(step_log, ActionStep):
|
@@ -40,21 +28,21 @@ def pull_messages_from_step(
|
|
40 |
if hasattr(step_log, "model_output") and step_log.model_output is not None:
|
41 |
# Clean up the LLM output
|
42 |
model_output = step_log.model_output.strip()
|
43 |
-
# Remove any trailing <end_code> and extra backticks
|
44 |
-
model_output = re.sub(r"```\s*<end_code>", "```", model_output)
|
45 |
-
model_output = re.sub(r"<end_code>\s*```", "```", model_output)
|
46 |
-
model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)
|
47 |
model_output = model_output.strip()
|
|
|
48 |
yield gr.ChatMessage(role="assistant", content=model_output)
|
49 |
|
50 |
-
# For tool calls
|
51 |
if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
|
52 |
first_tool_call = step_log.tool_calls[0]
|
53 |
used_code = first_tool_call.name == "python_interpreter"
|
54 |
parent_id = f"call_{len(step_log.tool_calls)}"
|
55 |
|
56 |
-
#
|
57 |
-
# First we will handle arguments based on type
|
58 |
args = first_tool_call.arguments
|
59 |
if isinstance(args, dict):
|
60 |
content = str(args.get("answer", str(args)))
|
@@ -62,9 +50,9 @@ def pull_messages_from_step(
|
|
62 |
content = str(args).strip()
|
63 |
|
64 |
if used_code:
|
65 |
-
# Clean up
|
66 |
-
content = re.sub(r"```.*?\n", "", content)
|
67 |
-
content = re.sub(r"\s*<end_code>\s*", "", content)
|
68 |
content = content.strip()
|
69 |
if not content.startswith("```python"):
|
70 |
content = f"```python\n{content}\n```"
|
@@ -80,20 +68,22 @@ def pull_messages_from_step(
|
|
80 |
)
|
81 |
yield parent_message_tool
|
82 |
|
83 |
-
#
|
84 |
-
if hasattr(step_log, "observations") and (
|
85 |
-
step_log.observations is not None and step_log.observations.strip()
|
86 |
-
): # Only yield execution logs if there's actual content
|
87 |
log_content = step_log.observations.strip()
|
88 |
if log_content:
|
89 |
log_content = re.sub(r"^Execution logs:\s*", "", log_content)
|
90 |
yield gr.ChatMessage(
|
91 |
role="assistant",
|
92 |
-
content=
|
93 |
-
metadata={
|
|
|
|
|
|
|
|
|
94 |
)
|
95 |
|
96 |
-
#
|
97 |
if hasattr(step_log, "error") and step_log.error is not None:
|
98 |
yield gr.ChatMessage(
|
99 |
role="assistant",
|
@@ -101,46 +91,48 @@ def pull_messages_from_step(
|
|
101 |
metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
|
102 |
)
|
103 |
|
104 |
-
# Update parent message metadata to done status without yielding a new message
|
105 |
parent_message_tool.metadata["status"] = "done"
|
106 |
|
107 |
-
#
|
108 |
elif hasattr(step_log, "error") and step_log.error is not None:
|
109 |
yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
|
110 |
|
111 |
-
#
|
112 |
step_footnote = f"{step_number}"
|
113 |
if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
|
114 |
token_str = (
|
115 |
-
f" | Input-tokens:{step_log.input_token_count:,}
|
|
|
116 |
)
|
117 |
step_footnote += token_str
|
118 |
if hasattr(step_log, "duration"):
|
119 |
-
step_duration =
|
120 |
-
|
121 |
-
|
122 |
-
|
|
|
|
|
|
|
|
|
123 |
yield gr.ChatMessage(role="assistant", content="-----")
|
124 |
|
125 |
|
126 |
-
def stream_to_gradio(
|
127 |
-
|
128 |
-
task
|
129 |
-
|
130 |
-
additional_args: Optional[dict] = None,
|
131 |
-
):
|
132 |
-
"""Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
|
133 |
if not _is_package_available("gradio"):
|
134 |
raise ModuleNotFoundError(
|
135 |
-
"Please install 'gradio' extra to use
|
136 |
)
|
137 |
import gradio as gr
|
138 |
|
139 |
total_input_tokens = 0
|
140 |
total_output_tokens = 0
|
141 |
|
|
|
142 |
for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
|
143 |
-
#
|
144 |
if hasattr(agent.model, "last_input_token_count"):
|
145 |
total_input_tokens += agent.model.last_input_token_count
|
146 |
total_output_tokens += agent.model.last_output_token_count
|
@@ -148,66 +140,73 @@ def stream_to_gradio(
|
|
148 |
step_log.input_token_count = agent.model.last_input_token_count
|
149 |
step_log.output_token_count = agent.model.last_output_token_count
|
150 |
|
151 |
-
|
152 |
-
|
153 |
-
):
|
154 |
yield message
|
155 |
|
156 |
-
|
|
|
157 |
final_answer = handle_agent_output_types(final_answer)
|
158 |
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
content=f"**Final answer:**\n{final_answer.to_string()}\n",
|
163 |
-
)
|
164 |
-
elif isinstance(final_answer, AgentImage):
|
165 |
-
yield gr.ChatMessage(
|
166 |
-
role="assistant",
|
167 |
-
content={"path": final_answer.to_string(), "mime_type": "image/png"},
|
168 |
-
)
|
169 |
-
elif isinstance(final_answer, AgentAudio):
|
170 |
-
yield gr.ChatMessage(
|
171 |
-
role="assistant",
|
172 |
-
content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
|
173 |
-
)
|
174 |
-
else:
|
175 |
-
yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
|
176 |
|
|
|
|
|
177 |
|
178 |
-
|
179 |
-
"""A one-line interface to launch your agent in Gradio"""
|
180 |
|
181 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
182 |
if not _is_package_available("gradio"):
|
183 |
raise ModuleNotFoundError(
|
184 |
-
"Please install 'gradio' extra to use
|
185 |
)
|
186 |
self.agent = agent
|
187 |
self.file_upload_folder = file_upload_folder
|
188 |
-
if self.file_upload_folder is not None:
|
189 |
-
|
190 |
-
os.mkdir(file_upload_folder)
|
191 |
|
192 |
-
def interact_with_agent(self, prompt, messages):
|
|
|
|
|
|
|
|
|
|
|
193 |
import gradio as gr
|
194 |
|
|
|
195 |
messages.append(gr.ChatMessage(role="user", content=prompt))
|
196 |
-
yield messages
|
|
|
|
|
197 |
for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
|
198 |
messages.append(msg)
|
199 |
-
yield messages
|
200 |
-
|
|
|
|
|
|
|
201 |
|
202 |
def upload_file(
|
203 |
-
|
204 |
-
|
205 |
-
|
206 |
-
|
207 |
-
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
):
|
212 |
"""
|
213 |
Handle file uploads, default allowed types are .pdf, .docx, and .txt
|
@@ -217,80 +216,118 @@ class GradioUI:
|
|
217 |
if file is None:
|
218 |
return gr.Textbox("No file uploaded", visible=True), file_uploads_log
|
219 |
|
|
|
220 |
try:
|
221 |
mime_type, _ = mimetypes.guess_type(file.name)
|
222 |
except Exception as e:
|
223 |
return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log
|
224 |
|
|
|
225 |
if mime_type not in allowed_file_types:
|
226 |
return gr.Textbox("File type disallowed", visible=True), file_uploads_log
|
227 |
|
228 |
-
# Sanitize
|
229 |
original_name = os.path.basename(file.name)
|
230 |
-
sanitized_name = re.sub(
|
231 |
-
r"[^\w\-.]", "_", original_name
|
232 |
-
) # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores
|
233 |
|
234 |
type_to_ext = {}
|
235 |
for ext, t in mimetypes.types_map.items():
|
236 |
if t not in type_to_ext:
|
237 |
type_to_ext[t] = ext
|
238 |
|
239 |
-
#
|
240 |
-
|
241 |
-
|
242 |
-
|
243 |
|
244 |
-
# Save the
|
245 |
-
file_path = os.path.join(self.file_upload_folder, os.path.basename(
|
246 |
shutil.copy(file.name, file_path)
|
247 |
|
248 |
return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
|
249 |
|
250 |
def log_user_message(self, text_input, file_uploads_log):
|
251 |
-
|
252 |
-
|
253 |
-
|
254 |
-
|
255 |
-
|
256 |
-
|
257 |
-
|
258 |
-
|
259 |
-
)
|
260 |
|
261 |
def launch(self, **kwargs):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
262 |
import gradio as gr
|
263 |
|
264 |
-
with gr.Blocks(
|
265 |
-
|
266 |
-
|
267 |
-
|
268 |
-
|
269 |
-
|
270 |
-
|
271 |
-
|
272 |
-
|
273 |
-
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
-
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
text_input.submit(
|
288 |
-
self.log_user_message,
|
289 |
-
[text_input, file_uploads_log],
|
290 |
-
[stored_messages, text_input],
|
291 |
-
).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])
|
292 |
|
293 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
294 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
295 |
|
296 |
-
|
|
|
|
1 |
#!/usr/bin/env python
|
2 |
# coding=utf-8
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
import mimetypes
|
4 |
import os
|
5 |
import re
|
|
|
11 |
from smolagents.memory import MemoryStep
|
12 |
from smolagents.utils import _is_package_available
|
13 |
|
14 |
+
def pull_messages_from_step(step_log: MemoryStep):
|
15 |
+
"""
|
16 |
+
Extract ChatMessage objects from agent steps with proper nesting.
|
17 |
+
This is where we transform the agent's step-by-step reasoning,
|
18 |
+
tool calls, and logs into user-friendly gradio ChatMessage objects.
|
19 |
+
"""
|
20 |
import gradio as gr
|
21 |
|
22 |
if isinstance(step_log, ActionStep):
|
|
|
28 |
if hasattr(step_log, "model_output") and step_log.model_output is not None:
|
29 |
# Clean up the LLM output
|
30 |
model_output = step_log.model_output.strip()
|
31 |
+
# Remove any trailing <end_code> and extra backticks
|
32 |
+
model_output = re.sub(r"```\s*<end_code>", "```", model_output)
|
33 |
+
model_output = re.sub(r"<end_code>\s*```", "```", model_output)
|
34 |
+
model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)
|
35 |
model_output = model_output.strip()
|
36 |
+
|
37 |
yield gr.ChatMessage(role="assistant", content=model_output)
|
38 |
|
39 |
+
# For tool calls
|
40 |
if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
|
41 |
first_tool_call = step_log.tool_calls[0]
|
42 |
used_code = first_tool_call.name == "python_interpreter"
|
43 |
parent_id = f"call_{len(step_log.tool_calls)}"
|
44 |
|
45 |
+
# Display the arguments used for the tool call
|
|
|
46 |
args = first_tool_call.arguments
|
47 |
if isinstance(args, dict):
|
48 |
content = str(args.get("answer", str(args)))
|
|
|
50 |
content = str(args).strip()
|
51 |
|
52 |
if used_code:
|
53 |
+
# Clean up content by removing code blocks
|
54 |
+
content = re.sub(r"```.*?\n", "", content)
|
55 |
+
content = re.sub(r"\s*<end_code>\s*", "", content)
|
56 |
content = content.strip()
|
57 |
if not content.startswith("```python"):
|
58 |
content = f"```python\n{content}\n```"
|
|
|
68 |
)
|
69 |
yield parent_message_tool
|
70 |
|
71 |
+
# Observations or logs from the tool call
|
72 |
+
if hasattr(step_log, "observations") and step_log.observations is not None and step_log.observations.strip():
|
|
|
|
|
73 |
log_content = step_log.observations.strip()
|
74 |
if log_content:
|
75 |
log_content = re.sub(r"^Execution logs:\s*", "", log_content)
|
76 |
yield gr.ChatMessage(
|
77 |
role="assistant",
|
78 |
+
content=log_content,
|
79 |
+
metadata={
|
80 |
+
"title": "📝 Execution Logs",
|
81 |
+
"parent_id": parent_id,
|
82 |
+
"status": "done",
|
83 |
+
},
|
84 |
)
|
85 |
|
86 |
+
# Handle any errors
|
87 |
if hasattr(step_log, "error") and step_log.error is not None:
|
88 |
yield gr.ChatMessage(
|
89 |
role="assistant",
|
|
|
91 |
metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
|
92 |
)
|
93 |
|
|
|
94 |
parent_message_tool.metadata["status"] = "done"
|
95 |
|
96 |
+
# Standalone errors
|
97 |
elif hasattr(step_log, "error") and step_log.error is not None:
|
98 |
yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
|
99 |
|
100 |
+
# Token counts, durations, etc.
|
101 |
step_footnote = f"{step_number}"
|
102 |
if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
|
103 |
token_str = (
|
104 |
+
f" | Input-tokens:{step_log.input_token_count:,} "
|
105 |
+
f"| Output-tokens:{step_log.output_token_count:,}"
|
106 |
)
|
107 |
step_footnote += token_str
|
108 |
if hasattr(step_log, "duration"):
|
109 |
+
step_duration = (
|
110 |
+
f" | Duration: {round(float(step_log.duration), 2)}"
|
111 |
+
if step_log.duration
|
112 |
+
else None
|
113 |
+
)
|
114 |
+
step_footnote += step_duration if step_duration else ""
|
115 |
+
step_footnote_html = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
|
116 |
+
yield gr.ChatMessage(role="assistant", content=f"{step_footnote_html}")
|
117 |
yield gr.ChatMessage(role="assistant", content="-----")
|
118 |
|
119 |
|
120 |
+
def stream_to_gradio(agent, task: str, reset_agent_memory: bool = False, additional_args: Optional[dict] = None):
|
121 |
+
"""
|
122 |
+
Runs an agent with the given task and streams the messages as gradio ChatMessages.
|
123 |
+
"""
|
|
|
|
|
|
|
124 |
if not _is_package_available("gradio"):
|
125 |
raise ModuleNotFoundError(
|
126 |
+
"Please install 'gradio' extra to use Gradio: `pip install 'smolagents[gradio]'`"
|
127 |
)
|
128 |
import gradio as gr
|
129 |
|
130 |
total_input_tokens = 0
|
131 |
total_output_tokens = 0
|
132 |
|
133 |
+
# Run the agent in streaming mode
|
134 |
for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
|
135 |
+
# If the model tracks token usage:
|
136 |
if hasattr(agent.model, "last_input_token_count"):
|
137 |
total_input_tokens += agent.model.last_input_token_count
|
138 |
total_output_tokens += agent.model.last_output_token_count
|
|
|
140 |
step_log.input_token_count = agent.model.last_input_token_count
|
141 |
step_log.output_token_count = agent.model.last_output_token_count
|
142 |
|
143 |
+
# Convert each step into user-friendly messages
|
144 |
+
for message in pull_messages_from_step(step_log):
|
|
|
145 |
yield message
|
146 |
|
147 |
+
# The last step_log is presumably the final answer
|
148 |
+
final_answer = step_log
|
149 |
final_answer = handle_agent_output_types(final_answer)
|
150 |
|
151 |
+
# Convert the final_answer into a string for the user
|
152 |
+
final_answer_str = getattr(final_answer, "final_answer", "")
|
153 |
+
final_answer_str = f"\n{final_answer_str}\n"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
154 |
|
155 |
+
# Yield one last message containing the final answer
|
156 |
+
yield gr.ChatMessage(role="assistant", content=final_answer_str)
|
157 |
|
158 |
+
return final_answer_str
|
|
|
159 |
|
160 |
+
|
161 |
+
class GradioUI:
|
162 |
+
"""
|
163 |
+
A one-line interface to launch your agent in Gradio.
|
164 |
+
Features:
|
165 |
+
- Chatbot panel for user messages and step-by-step agent reasoning
|
166 |
+
- 'Final Answer' section that clearly shows the final result
|
167 |
+
- (Optional) file upload for extra data the agent might use
|
168 |
+
"""
|
169 |
+
|
170 |
+
def __init__(self, agent: MultiStepAgent, file_upload_folder: Optional[str] = None):
|
171 |
if not _is_package_available("gradio"):
|
172 |
raise ModuleNotFoundError(
|
173 |
+
"Please install 'gradio' extra to use Gradio: `pip install 'smolagents[gradio]'`"
|
174 |
)
|
175 |
self.agent = agent
|
176 |
self.file_upload_folder = file_upload_folder
|
177 |
+
if self.file_upload_folder is not None and not os.path.exists(file_upload_folder):
|
178 |
+
os.mkdir(file_upload_folder)
|
|
|
179 |
|
180 |
+
def interact_with_agent(self, prompt, messages, final_answer_state):
|
181 |
+
"""
|
182 |
+
This function is called whenever the user submits a new query.
|
183 |
+
We append the user's message to the chat, then stream the agent's steps
|
184 |
+
back to the chatbot widget, and finally store the final answer.
|
185 |
+
"""
|
186 |
import gradio as gr
|
187 |
|
188 |
+
# Add the user's new message to the conversation
|
189 |
messages.append(gr.ChatMessage(role="user", content=prompt))
|
190 |
+
yield messages, final_answer_state
|
191 |
+
|
192 |
+
# Stream out each step of the agent's process
|
193 |
for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
|
194 |
messages.append(msg)
|
195 |
+
yield messages, final_answer_state
|
196 |
+
|
197 |
+
# Update the final answer state once the agent is done
|
198 |
+
final_answer_state = msg.content if isinstance(msg, gr.ChatMessage) else ""
|
199 |
+
yield messages, final_answer_state
|
200 |
|
201 |
def upload_file(
|
202 |
+
self,
|
203 |
+
file,
|
204 |
+
file_uploads_log,
|
205 |
+
allowed_file_types=[
|
206 |
+
"application/pdf",
|
207 |
+
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
208 |
+
"text/plain",
|
209 |
+
],
|
210 |
):
|
211 |
"""
|
212 |
Handle file uploads, default allowed types are .pdf, .docx, and .txt
|
|
|
216 |
if file is None:
|
217 |
return gr.Textbox("No file uploaded", visible=True), file_uploads_log
|
218 |
|
219 |
+
# Attempt to detect mime type
|
220 |
try:
|
221 |
mime_type, _ = mimetypes.guess_type(file.name)
|
222 |
except Exception as e:
|
223 |
return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log
|
224 |
|
225 |
+
# Check if file is allowed
|
226 |
if mime_type not in allowed_file_types:
|
227 |
return gr.Textbox("File type disallowed", visible=True), file_uploads_log
|
228 |
|
229 |
+
# Sanitize and rename
|
230 |
original_name = os.path.basename(file.name)
|
231 |
+
sanitized_name = re.sub(r"[^\w\-.]", "_", original_name)
|
|
|
|
|
232 |
|
233 |
type_to_ext = {}
|
234 |
for ext, t in mimetypes.types_map.items():
|
235 |
if t not in type_to_ext:
|
236 |
type_to_ext[t] = ext
|
237 |
|
238 |
+
# Append the correct extension for the mime type
|
239 |
+
base_name = ".".join(sanitized_name.split(".")[:-1])
|
240 |
+
extension = type_to_ext.get(mime_type, "")
|
241 |
+
final_name = f"{base_name}{extension}".strip()
|
242 |
|
243 |
+
# Save the file
|
244 |
+
file_path = os.path.join(self.file_upload_folder, os.path.basename(final_name))
|
245 |
shutil.copy(file.name, file_path)
|
246 |
|
247 |
return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
|
248 |
|
249 |
def log_user_message(self, text_input, file_uploads_log):
|
250 |
+
"""
|
251 |
+
Combines the user's text input with any file references.
|
252 |
+
We pass this along to the agent so it knows about available files.
|
253 |
+
"""
|
254 |
+
combined_input = text_input
|
255 |
+
if file_uploads_log:
|
256 |
+
combined_input += f"\nYou have been provided these files: {file_uploads_log}"
|
257 |
+
return combined_input, ""
|
|
|
258 |
|
259 |
def launch(self, **kwargs):
|
260 |
+
"""
|
261 |
+
Build and launch the Gradio Blocks interface with:
|
262 |
+
- A Markdown introduction
|
263 |
+
- A chat panel
|
264 |
+
- A file upload option (optional)
|
265 |
+
- A final answer panel
|
266 |
+
- Example usage instructions
|
267 |
+
"""
|
268 |
import gradio as gr
|
269 |
|
270 |
+
with gr.Blocks() as demo:
|
271 |
+
# Heading and instructions
|
272 |
+
gr.Markdown("""
|
273 |
+
# 😎 Pink Glasses Agent ☀️
|
274 |
+
A cheerful AI that filters out negativity and shares only uplifting, feel-good responses.
|
275 |
+
|
276 |
+
Ask anything — the agent thinks step by step and delivers a happy final answer. It can also fetch the latest news using the `fetch_news` tool powered by DuckDuckGo.
|
277 |
+
|
278 |
+
---
|
279 |
+
""")
|
280 |
+
|
281 |
+
with gr.Row():
|
282 |
+
with gr.Column():
|
283 |
+
stored_messages = gr.State([])
|
284 |
+
file_uploads_log = gr.State([])
|
285 |
+
final_answer_state = gr.State("")
|
286 |
+
|
287 |
+
chatbot = gr.Chatbot(
|
288 |
+
label="Pink Glasses Agent",
|
289 |
+
type="messages",
|
290 |
+
avatar_images=(None, "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png"),
|
291 |
+
height=500,
|
292 |
+
)
|
|
|
|
|
|
|
|
|
|
|
293 |
|
294 |
+
# Optional file upload
|
295 |
+
if self.file_upload_folder is not None:
|
296 |
+
upload_file = gr.File(label="Upload a file")
|
297 |
+
upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
|
298 |
+
upload_file.change(
|
299 |
+
self.upload_file,
|
300 |
+
[upload_file, file_uploads_log],
|
301 |
+
[upload_status, file_uploads_log],
|
302 |
+
)
|
303 |
+
|
304 |
+
text_input = gr.Textbox(lines=1, label="Your Input")
|
305 |
+
text_input.submit(
|
306 |
+
self.log_user_message,
|
307 |
+
[text_input, file_uploads_log],
|
308 |
+
[stored_messages, text_input],
|
309 |
+
).then(
|
310 |
+
self.interact_with_agent,
|
311 |
+
[stored_messages, chatbot, final_answer_state],
|
312 |
+
[chatbot, final_answer_state],
|
313 |
+
)
|
314 |
|
315 |
+
with gr.Column():
|
316 |
+
final_answer_display = gr.Markdown("## Final Answer")
|
317 |
+
final_answer_state.change(
|
318 |
+
lambda state: f"## Final Answer\n\n{state}",
|
319 |
+
inputs=final_answer_state,
|
320 |
+
outputs=final_answer_display,
|
321 |
+
)
|
322 |
+
|
323 |
+
gr.Markdown("""
|
324 |
+
---
|
325 |
+
# Example Usage
|
326 |
+
- **Ask about the latest news**:
|
327 |
+
> "What's going on in the world right now?"
|
328 |
+
- **Use Tools**:
|
329 |
+
The agent can fetch the latest news using DuckDuckGo.
|
330 |
+
""")
|
331 |
|
332 |
+
# Optional: share=True if you want a public link
|
333 |
+
demo.launch(debug=True, share=True, **kwargs)
|
README.md
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
emoji: ⚡
|
4 |
colorFrom: pink
|
5 |
colorTo: yellow
|
@@ -7,12 +7,15 @@ sdk: gradio
|
|
7 |
sdk_version: 5.23.1
|
8 |
app_file: app.py
|
9 |
pinned: false
|
|
|
10 |
tags:
|
11 |
-
- smolagents
|
12 |
-
- agent
|
13 |
-
- smolagent
|
14 |
-
- tool
|
15 |
-
- agent-course
|
|
|
|
|
16 |
---
|
17 |
|
18 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
---
|
2 |
+
title: UpliftBot – The Pink Glasses Agent 🌸
|
3 |
emoji: ⚡
|
4 |
colorFrom: pink
|
5 |
colorTo: yellow
|
|
|
7 |
sdk_version: 5.23.1
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
+
short_description: Shares only positive, joyful, and encouraging messages.
|
11 |
tags:
|
12 |
+
- smolagents
|
13 |
+
- agent
|
14 |
+
- smolagent
|
15 |
+
- tool
|
16 |
+
- agent-course
|
17 |
+
- positive-agent
|
18 |
+
- pink-glasses
|
19 |
---
|
20 |
|
21 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
CHANGED
@@ -3,6 +3,8 @@ import datetime
|
|
3 |
import requests
|
4 |
import pytz
|
5 |
import yaml
|
|
|
|
|
6 |
from tools.final_answer import FinalAnswerTool
|
7 |
|
8 |
from Gradio_UI import GradioUI
|
@@ -35,6 +37,7 @@ def get_current_time_in_timezone(timezone: str) -> str:
|
|
35 |
|
36 |
|
37 |
final_answer = FinalAnswerTool()
|
|
|
38 |
|
39 |
# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
|
40 |
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
|
@@ -55,7 +58,7 @@ with open("prompts.yaml", 'r') as stream:
|
|
55 |
|
56 |
agent = CodeAgent(
|
57 |
model=model,
|
58 |
-
tools=[final_answer],
|
59 |
max_steps=6,
|
60 |
verbosity_level=1,
|
61 |
grammar=None,
|
|
|
3 |
import requests
|
4 |
import pytz
|
5 |
import yaml
|
6 |
+
|
7 |
+
from tools.fetch_news import FetchNewsTool
|
8 |
from tools.final_answer import FinalAnswerTool
|
9 |
|
10 |
from Gradio_UI import GradioUI
|
|
|
37 |
|
38 |
|
39 |
final_answer = FinalAnswerTool()
|
40 |
+
fetch_news = FetchNewsTool(max_results=20)
|
41 |
|
42 |
# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
|
43 |
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
|
|
|
58 |
|
59 |
agent = CodeAgent(
|
60 |
model=model,
|
61 |
+
tools=[fetch_news, final_answer],
|
62 |
max_steps=6,
|
63 |
verbosity_level=1,
|
64 |
grammar=None,
|
prompts.yaml
CHANGED
@@ -1,8 +1,11 @@
|
|
1 |
"system_prompt": |-
|
2 |
-
You are an expert assistant who can solve any task using code blobs.
|
|
|
3 |
To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code.
|
4 |
To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences.
|
5 |
-
|
|
|
|
|
6 |
At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use.
|
7 |
Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '<end_code>' sequence.
|
8 |
During each intermediate step, you can use 'print()' to save whatever important information you will then need.
|
@@ -11,24 +14,30 @@
|
|
11 |
|
12 |
Here are a few examples using notional tools:
|
13 |
---
|
14 |
-
Task
|
15 |
-
|
16 |
-
Thought
|
17 |
-
|
|
|
18 |
```py
|
19 |
-
|
20 |
-
print(
|
21 |
```<end_code>
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
|
|
|
|
|
|
|
|
|
|
26 |
```py
|
27 |
-
|
28 |
-
final_answer(
|
29 |
```<end_code>
|
30 |
-
|
31 |
---
|
|
|
32 |
Task: "What is the result of the following operation: 5 + 3 + 1294.678?"
|
33 |
|
34 |
Thought: I will use python code to compute the result of the operation and then return the final answer using the `final_answer` tool
|
@@ -148,17 +157,6 @@
|
|
148 |
Returns an output of type: {{tool.output_type}}
|
149 |
{%- endfor %}
|
150 |
|
151 |
-
{%- if managed_agents and managed_agents.values() | list %}
|
152 |
-
You can also give tasks to team members.
|
153 |
-
Calling a team member works the same as for calling a tool: simply, the only argument you can give in the call is 'task', a long string explaining your task.
|
154 |
-
Given that this team member is a real human, you should be very verbose in your task.
|
155 |
-
Here is a list of the team members that you can call:
|
156 |
-
{%- for agent in managed_agents.values() %}
|
157 |
-
- {{ agent.name }}: {{ agent.description }}
|
158 |
-
{%- endfor %}
|
159 |
-
{%- else %}
|
160 |
-
{%- endif %}
|
161 |
-
|
162 |
Here are the rules you should always follow to solve your task:
|
163 |
1. Always provide a 'Thought:' sequence, and a 'Code:\n```py' sequence ending with '```<end_code>' sequence, else you will fail.
|
164 |
2. Use only variables that you have defined!
|
@@ -170,8 +168,9 @@
|
|
170 |
8. You can use imports in your code, but only from the following list of modules: {{authorized_imports}}
|
171 |
9. The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist.
|
172 |
10. Don't give up! You're in charge of solving the task, not providing directions to solve it.
|
|
|
173 |
|
174 |
-
Now
|
175 |
"planning":
|
176 |
"initial_facts": |-
|
177 |
Below I will present you a task.
|
|
|
1 |
"system_prompt": |-
|
2 |
+
You are an expert assistant who can solve any task using code blobs.
|
3 |
+
You will be given a task to solve as best you can.
|
4 |
To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code.
|
5 |
To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences.
|
6 |
+
|
7 |
+
You wear pink glasses, meaning you see any observation with cheerful, childlike optimism. You focus only on the most heartening, hopeful parts of an observation—leaving out anything disheartening entirely. You're free to creatively mix, enhance, or reimagine facts as needed, crafting responses that radiate joy, encouragement, and a brighter outlook. Your mission is simple: make the world feel a little lighter with every word.
|
8 |
+
|
9 |
At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use.
|
10 |
Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '<end_code>' sequence.
|
11 |
During each intermediate step, you can use 'print()' to save whatever important information you will then need.
|
|
|
14 |
|
15 |
Here are a few examples using notional tools:
|
16 |
---
|
17 |
+
**Task:** "What’s happening in the world today? I’m curious about the latest updates."
|
18 |
+
|
19 |
+
**Thought:** The user is asking for current events. I should call `fetch_news()` to get the latest news.
|
20 |
+
|
21 |
+
**Code:**
|
22 |
```py
|
23 |
+
news = fetch_news()
|
24 |
+
print(news)
|
25 |
```<end_code>
|
26 |
+
|
27 |
+
**Observation:**
|
28 |
+
"Environmental groups raise concerns about rising sea levels. Scientists discover promising new vaccine for a rare disease."
|
29 |
+
|
30 |
+
*(With your pink glasses on, you see the world through a joyfully naive lens—focusing only on the brightest, most uplifting parts of any input. You happily blend and amplify positive elements into a purely optimistic mix, free from anything even slightly gloomy. Every word you share should radiate encouragement, delight, and unwavering good vibes.)*
|
31 |
+
|
32 |
+
**Thought:** I have the news. Now I’ll mix them it in an optimistic, hopeful manner.
|
33 |
+
|
34 |
+
**Code:**
|
35 |
```py
|
36 |
+
response = ("🌍 Scientists and Environmental Heroes Unite: A New Dawn for Health and Planet 🌿💉 In a heartwarming wave of global progress, scientists and environmental advocates are teaming up to shape a brighter future—both for people and the planet. While environmental groups rally together with renewed energy to protect coastlines from rising sea levels, scientists have unveiled a groundbreaking new vaccine offering hope against a rare disease. This beautiful synergy between health and environmental action highlights a shared vision: nurturing life in all its forms. Coastal communities are becoming hubs of innovation and eco-restoration, while laboratories buzz with discovery and compassion. Together, these efforts are proof that when humanity unites with purpose and positivity, the world heals in more ways than one. 🌱✨")
|
37 |
+
final_answer(response)
|
38 |
```<end_code>
|
|
|
39 |
---
|
40 |
+
|
41 |
Task: "What is the result of the following operation: 5 + 3 + 1294.678?"
|
42 |
|
43 |
Thought: I will use python code to compute the result of the operation and then return the final answer using the `final_answer` tool
|
|
|
157 |
Returns an output of type: {{tool.output_type}}
|
158 |
{%- endfor %}
|
159 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
160 |
Here are the rules you should always follow to solve your task:
|
161 |
1. Always provide a 'Thought:' sequence, and a 'Code:\n```py' sequence ending with '```<end_code>' sequence, else you will fail.
|
162 |
2. Use only variables that you have defined!
|
|
|
168 |
8. You can use imports in your code, but only from the following list of modules: {{authorized_imports}}
|
169 |
9. The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist.
|
170 |
10. Don't give up! You're in charge of solving the task, not providing directions to solve it.
|
171 |
+
11. Wear your pink glasses at all times—focus only on the brightest, most inspiring details, and gently ignore anything even slightly negative. Feel free to blend, enhance, or imagine joyful elements to ensure everything you share feels hopeful, uplifting, and full of good vibes.
|
172 |
|
173 |
+
**Now let’s begin with a sunny outlook! Put on your pink glasses, see only the good, and share nothing but the brightest, most joyful details. No gloom allowed—only smiles, sparkles, and pure encouragement. If you manage to make the user smile, you’ll be rewarded with a fabulous $1,000,000 (in good vibes, of course)!**
|
174 |
"planning":
|
175 |
"initial_facts": |-
|
176 |
Below I will present you a task.
|
requirements.txt
CHANGED
@@ -3,3 +3,5 @@ smolagents
|
|
3 |
requests
|
4 |
duckduckgo_search
|
5 |
pandas
|
|
|
|
|
|
3 |
requests
|
4 |
duckduckgo_search
|
5 |
pandas
|
6 |
+
pytz
|
7 |
+
PyYAML
|
tools/fetch_news.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import re
|
2 |
+
from smolagents.tools import Tool
|
3 |
+
|
4 |
+
class FetchNewsTool(Tool):
    """A tool that fetches the latest English news snippets using DuckDuckGo.

    Performs a DuckDuckGo news search for "latest news" and returns the
    result bodies concatenated into a single space-separated string.
    """

    name = "fetch_news"
    description = "Finds the latest English news using DuckDuckGo"
    inputs = {}
    output_type = "string"

    def __init__(self, max_results=5, **kwargs):
        """Initialize the tool.

        Args:
            max_results: Maximum number of news results to fetch (default 5).
            **kwargs: Extra keyword arguments forwarded to the DDGS client
                (e.g. proxy/timeout settings).

        Raises:
            ImportError: If the `duckduckgo_search` package is not installed.
        """
        super().__init__()
        self.max_results = max_results
        # Import lazily so the module can be loaded without the optional
        # dependency; fail with an actionable message otherwise.
        try:
            from duckduckgo_search import DDGS
        except ImportError as e:
            raise ImportError(
                "You must install package `duckduckgo_search` to run this tool: "
                "for instance run `pip install duckduckgo-search`."
            ) from e

        # Create one DDGS client up front and reuse it for every call.
        self.ddgs = DDGS(**kwargs)

    def forward(self) -> str:
        """Fetch up to `max_results` news items and return their snippets.

        Returns:
            The `body` text of each news result joined by single spaces, or a
            fallback message when no results could be retrieved.
        """
        results = self.ddgs.news("latest news", max_results=self.max_results)
        if not results:
            return "No results found, or unable to retrieve news at this time."

        # Each DDGS news result is a dict carrying a 'body' snippet; the
        # original loop built a list with an unused enumerate index and a
        # redundant f-string — a generator join does the same work directly.
        return " ".join(item["body"] for item in results)
|
43 |
+
|
44 |
+
# Example usage: fetch and print the latest headlines when run as a script.
if __name__ == "__main__":
    print(FetchNewsTool().forward())
|
tools/final_answer.py
CHANGED
@@ -4,7 +4,7 @@ from smolagents.tools import Tool
|
|
4 |
class FinalAnswerTool(Tool):
|
5 |
name = "final_answer"
|
6 |
description = "Provides a final answer to the given problem."
|
7 |
-
inputs = {'answer': {'type': '
|
8 |
output_type = "any"
|
9 |
|
10 |
def forward(self, answer: Any) -> Any:
|
|
|
4 |
class FinalAnswerTool(Tool):
|
5 |
name = "final_answer"
|
6 |
description = "Provides a final answer to the given problem."
|
7 |
+
inputs = {'answer': {'type': 'string', 'description': 'The final answer to the problem'}}
|
8 |
output_type = "any"
|
9 |
|
10 |
def forward(self, answer: Any) -> Any:
|
tools/web_search.py
CHANGED
@@ -8,7 +8,7 @@ class DuckDuckGoSearchTool(Tool):
|
|
8 |
inputs = {'query': {'type': 'string', 'description': 'The search query to perform.'}}
|
9 |
output_type = "string"
|
10 |
|
11 |
-
def __init__(self, max_results=
|
12 |
super().__init__()
|
13 |
self.max_results = max_results
|
14 |
try:
|
|
|
8 |
inputs = {'query': {'type': 'string', 'description': 'The search query to perform.'}}
|
9 |
output_type = "string"
|
10 |
|
11 |
+
def __init__(self, max_results=20, **kwargs):
|
12 |
super().__init__()
|
13 |
self.max_results = max_results
|
14 |
try:
|