Updated Gradio
Gradio_UI.py  (+9, -34)  CHANGED
@@ -136,8 +136,6 @@ def stream_to_gradio(
         )
     import gradio as gr
 
-    total_input_tokens = 0
-    total_output_tokens = 0
     final_answer_step = None
 
     for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
@@ -145,10 +143,8 @@ def stream_to_gradio(
             final_answer_step = step_log
             continue  # Don't display the final answer step itself, process it at the end
 
-        #
-        if hasattr(agent.model, "last_input_token_count"):
-            total_input_tokens += agent.model.last_input_token_count
-            total_output_tokens += agent.model.last_output_token_count
+        # FIX: Check if token counts exist before trying to use them
+        if hasattr(agent.model, "last_input_token_count") and agent.model.last_input_token_count is not None:
             if isinstance(step_log, ActionStep):
                 step_log.input_token_count = agent.model.last_input_token_count
                 step_log.output_token_count = agent.model.last_output_token_count
@@ -158,41 +154,20 @@ def stream_to_gradio(
         ):
             yield message
 
-    #
+    # Process the final answer correctly after the loop
     if final_answer_step:
         # Extract the actual value from the FinalAnswerStep object
         final_answer_value = getattr(final_answer_step, 'final_answer', final_answer_step)
 
-        #
-        if isinstance(final_answer_value, str) and final_answer_value.endswith(('.png', '.jpg', '.jpeg', '.gif')):
-            # It's an image file path, create an AgentImage object
-            from smolagents.agent_types import AgentImage
-            processed_answer = AgentImage(value=final_answer_value)
-        else:
-            # Try the default conversion
-            processed_answer = handle_agent_output_types(final_answer_value)
-
-        if isinstance(processed_answer, AgentText):
-            yield gr.ChatMessage(
-                role="assistant",
-                content=f"**Final answer:**\n{processed_answer.to_string()}\n",
-            )
-        elif isinstance(processed_answer, AgentImage):
-            # For images, pass the file path directly - Gradio will handle it
-            image_path = processed_answer.to_string()
-            yield gr.ChatMessage(
-                role="assistant",
-                content=f"**Final answer:**\n{image_path}",  # Just pass the path as a string
-            )
-        elif isinstance(processed_answer, AgentAudio):
-            # Same for audio files
+        # Directly check if the final answer is a path to an image file
+        if isinstance(final_answer_value, str) and final_answer_value.lower().endswith(('.png', '.jpg', '.jpeg', '.gif')):
             yield gr.ChatMessage(
                 role="assistant",
-                content=
+                content=final_answer_value,  # Pass the file path directly to Gradio
             )
         else:
-            # Fallback for any other type
-            yield gr.ChatMessage(role="assistant", content=f"**Final answer
+            # Fallback for text or any other type of answer
+            yield gr.ChatMessage(role="assistant", content=f"**Final answer:**\n{str(final_answer_value)}")
 
 
 class GradioUI:
@@ -286,7 +261,7 @@ class GradioUI:
             file_uploads_log = gr.State([])
             chatbot = gr.Chatbot(
                 label="Agent",
-                type="messages",
+                type="messages",
                 render_markdown=True,
                 avatar_images=(
                     None,