Abbasid committed
Commit 4252aa5 · verified · 1 Parent(s): 75e30b8

Update app.py

Files changed (1)
  1. app.py +143 -63
app.py CHANGED
@@ -10,6 +10,9 @@ import contextlib
  import sys
  import traceback
  import os
+ import threading # <<< ADDED
+ import queue # <<< ADDED
+ import time # <<< ADDED

  @tool
  def parse_height_from_text(
@@ -229,94 +232,171 @@ else:
      )
      print(initialization_error_message)
      # height_agent is already None
+
+ # --- ADD THIS HELPER CLASS ---
+ class WritableQueue:
+     """A file-like object that writes messages to a queue."""
+     def __init__(self, q):
+         self.queue = q
+
+     def write(self, message):
+         # Only put non-empty messages on the queue
+         if message.strip():
+             self.queue.put(message)
+
+     def flush(self):
+         # Required for file-like objects, but does nothing here
+         pass
+ # --- END OF HELPER CLASS ---
+
+ # --- REPLACE THE EXISTING run_agent_wrapper FUNCTION WITH THIS ---
+
+ def agent_thread_func(agent, query, log_queue, result_queue):
+     """Function to run the agent in a separate thread and capture output."""
+     try:
+         # Create a WritableQueue instance for stdout redirection
+         stdout_writer = WritableQueue(log_queue)
+         # Redirect stdout within this thread
+         with contextlib.redirect_stdout(stdout_writer):
+             # Run the agent (prints will go to stdout_writer -> log_queue)
+             final_result = agent.run(query)
+         result_queue.put(final_result) # Put the final result in the result queue
+     except Exception as e:
+         # If an error occurs, print it to the log and put it in the result queue
+         tb_str = traceback.format_exc()
+         print(f"\n--- ERROR IN AGENT THREAD ---\n{e}\n{tb_str}")
+         result_queue.put(e) # Put the exception object itself
+     finally:
+         # Signal that logging is finished by putting None in the log queue
+         log_queue.put(None)

- # --- Wrapper Function to Run Agent and Capture Output ---
- def run_agent_wrapper(query: str) -> Tuple[str, str]:
+ # Make the main Gradio function a generator
+ def run_agent_wrapper(query: str) -> Iterator[Tuple[List[Tuple[str, str]], str]]:
      """
-     Runs the height_agent and captures its stdout (reasoning steps).
-     Returns (reasoning_log, final_answer).
+     Runs the agent in a thread, captures stdout via a queue, and yields updates
+     for Gradio streaming. Uses Chatbot format for reasoning.
+     Returns Iterator yielding: (chatbot_history, final_answer_status)
      """
-     # Access the global variables
-     global height_agent, initialization_error_message
-
      if height_agent is None:
-         # If agent initialization failed, return the stored error message
-         return (initialization_error_message or "Agent not initialized (unknown error).",
-                 "Agent failed to initialize. See reasoning log for details.")
+         error_msg = initialization_error_message or "Agent not initialized."
+         yield ([(None, error_msg)], "Error: Agent not initialized.")
+         return # Stop the generator
+
+     log_queue = queue.Queue()
+     result_queue = queue.Queue()
+     chatbot_history = [] # Start with empty history
+     current_log_message = "" # Accumulate lines into one message block
+     final_answer = "⏳ Running..." # Initial status
+
+     # Initial yield to clear previous state and show "Running"
+     yield (chatbot_history, final_answer)
+
+     # Start the agent thread
+     thread = threading.Thread(
+         target=agent_thread_func,
+         args=(height_agent, query, log_queue, result_queue)
+     )
+     thread.start()

-     print(f"\n--- Running agent for query: '{query}' ---") # Log to console
-     log_stream = io.StringIO()
-     final_answer = "Agent execution did not complete." # Default message
+     while True:
+         try:
+             # Check for new log messages (non-blocking)
+             log_line = log_queue.get_nowait()
+
+             if log_line is None: # End-of-logs signal
+                 break
+
+             # Append new line to the current log message block
+             current_log_message += log_line
+             # Update the chatbot history: Replace the last message or add a new one
+             # Simple approach: always update a single entry representing the log
+             if chatbot_history and chatbot_history[-1][0] is None: # Check if last entry is from "Bot" (None for user)
+                 chatbot_history[-1] = (None, current_log_message) # Update last bot message
+             else:
+                 chatbot_history.append((None, current_log_message)) # Add new bot message if history is empty or last was user
+
+             yield (chatbot_history, final_answer) # Yield updated log
+
+         except queue.Empty:
+             # No new message, brief pause to prevent busy-waiting
+             # Also check if the thread is still alive; if not, break (error case)
+             if not thread.is_alive() and result_queue.empty():
+                 print("Warning: Agent thread finished unexpectedly without result.")
+                 # Attempt to retrieve any remaining logs
+                 while not log_queue.empty():
+                     log_line = log_queue.get_nowait()
+                     if log_line: current_log_message += log_line
+                 if chatbot_history and chatbot_history[-1][0] is None:
+                     chatbot_history[-1] = (None, current_log_message + "\nError: Agent stopped unexpectedly.")
+                 else:
+                     chatbot_history.append((None, current_log_message + "\nError: Agent stopped unexpectedly."))
+                 final_answer = "Error: Agent stopped unexpectedly."
+                 yield (chatbot_history, final_answer)
+                 return # Stop
+
+             time.sleep(0.1) # Pause briefly
+
+     # Agent thread has finished (log_queue received None)
+     thread.join() # Wait for the thread to fully terminate
+
+     # Get the final result or exception
+     final_result = result_queue.get()
+
+     if isinstance(final_result, Exception):
+         final_answer = f"Error during execution: {final_result}"
+         # Append error to the chatbot log
+         error_log = f"\n--- EXECUTION ERROR ---\n{final_result}"
+         current_log_message += error_log
+         if chatbot_history and chatbot_history[-1][0] is None:
+             chatbot_history[-1] = (None, current_log_message)
+         else:
+             chatbot_history.append((None, current_log_message))
+     else:
+         final_answer = final_result

-     try:
-         # Redirect stdout to capture prints from agent.run() (due to verbosity=3)
-         with contextlib.redirect_stdout(log_stream):
-             # Make sure to call the run method of the specific agent instance
-             final_answer = height_agent.run(query) # Pass the raw query
-             print("\n--- Agent execution finished successfully. ---") # Add marker to log
-     except Exception as e:
-         print(f"\n--- Error during agent execution wrapper: {e} ---") # Log to console
-         # Print exception details *into the captured log*
-         print("\n\n******** ERROR DURING EXECUTION ********\n", file=log_stream)
-         traceback.print_exc(file=log_stream)
-         final_answer = f"An error occurred during processing. See reasoning log. Error: {e}"
-     finally:
-         reasoning_log = log_stream.getvalue()
-         log_stream.close()
-         print("--- Finished capturing stdout. ---") # Log to console
+     # Final yield with the complete log and the final answer
+     yield (chatbot_history, final_answer)

-     return reasoning_log, final_answer
  # --- Build Gradio Interface Manually with gr.Blocks ---
  print("--- Building Gradio Interface with gr.Blocks ---")

  # Make sure theme is applied correctly if desired
  # theme = gr.themes.Default() # Or another theme
  # with gr.Blocks(theme=theme, css="footer {visibility: hidden}") as demo:
- with gr.Blocks(css="footer {visibility: hidden}") as demo: # Hides the default footer
+ # --- MODIFY THE gr.Blocks SECTION ---
+
+ with gr.Blocks(css="footer {visibility: hidden}") as demo:
      gr.Markdown("# Height Comparison Agent")
      gr.Markdown("Enter your height (e.g., '180 cm', '5ft 11in') to find characters/figures of similar height.")

      with gr.Row():
          with gr.Column(scale=1):
-             query_input = gr.Textbox(
-                 label="Your Query (including height)",
-                 placeholder="e.g., I am 175cm tall",
-                 lines=2 # Allow slightly more room for input
-             )
+             query_input = gr.Textbox(label="Your Query (including height)", placeholder="e.g., I am 175cm tall")
              submit_button = gr.Button("Compare Heights", variant="primary")
          with gr.Column(scale=2):
-             final_answer_output = gr.Textbox(
-                 label="Final Answer",
-                 interactive=False,
-                 lines=5
-             )
+             # Keep the Textbox for the final answer separate
+             final_answer_output = gr.Textbox(label="Final Answer", interactive=False, lines=5)

      gr.Markdown("## Agent Reasoning Steps")
-     # Use gr.Code for better formatting of logs, especially if they contain code blocks
-     reasoning_output = gr.Code(
+     # --- CHANGE THIS ---
+     # reasoning_output = gr.Code(label="Reasoning Log", language="markdown", interactive=False, lines=20)
+     reasoning_output_chatbot = gr.Chatbot(
          label="Reasoning Log",
-         language="markdown", # Use 'markdown' if logs might contain markdown
-         interactive=False,
-         lines=20
+         height=500 # Set a height to enable scrolling
      )
+     # --- END OF CHANGE ---

-     # Link components: When button is clicked, call wrapper, update outputs
+
+     # --- CHANGE THIS ---
+     # Link components - ensure outputs match the function's yield tuple order
      submit_button.click(
-         fn=run_agent_wrapper, # Function to call
-         inputs=[query_input], # Component(s) providing input
-         outputs=[reasoning_output, final_answer_output] # Components to update
-         # Ensure the order matches the return tuple from run_agent_wrapper: (log, answer)
+         fn=run_agent_wrapper,
+         inputs=query_input,
+         outputs=[reasoning_output_chatbot, final_answer_output] # Output to Chatbot and Textbox
      )
+     # --- END OF CHANGE ---
+
+ # --- Launch Gradio (no change needed here) ---
+ print("--- Starting Gradio Interface ---")
+ demo.launch()

-     # Add an example input
-     gr.Examples(
-         examples=[
-             "I am 188cm tall",
-             "How tall is someone who is 5 foot 8 inches?",
-             "My height is 1.65m",
-         ],
-         inputs=query_input
-     )
- # --- Launch Gradio ---
- print("--- Launching Gradio demo ---")
- demo.launch() # ssr=False recommended, share=True not needed for Spaces
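
What the commit does, in outline: the agent now runs in a background thread, its `print` output is funneled into a `queue.Queue` through a small file-like writer, and the Gradio handler becomes a generator that drains that queue and yields partial updates. A minimal, standalone sketch of the capture pattern follows; `QueueWriter`, `worker`, and `main` are illustrative names, not part of app.py. One caveat worth noting: `contextlib.redirect_stdout` swaps `sys.stdout` for the whole process while it is active, not just for the thread that enters it, so nothing else should print to stdout during the capture.

```python
# Minimal sketch (not from app.py) of the thread + queue stdout-capture pattern.
import contextlib
import queue
import threading
import time


class QueueWriter:
    """File-like object that pushes anything written to it onto a queue."""

    def __init__(self, q):
        self.q = q

    def write(self, text):
        if text.strip():      # drop bare newlines, like WritableQueue does
            self.q.put(text)

    def flush(self):
        pass                  # required by the file protocol; nothing to do


def worker(log_q):
    # Caveat: redirect_stdout swaps sys.stdout for the WHOLE process while
    # active, not only for this thread (see the contextlib docs).
    try:
        with contextlib.redirect_stdout(QueueWriter(log_q)):
            for step in range(3):
                print(f"reasoning step {step + 1}")   # captured into log_q
                time.sleep(0.2)
    finally:
        log_q.put(None)       # sentinel: no more log lines will arrive


def main():
    log_q = queue.Queue()
    t = threading.Thread(target=worker, args=(log_q,))
    t.start()
    captured = []
    while (line := log_q.get()) is not None:   # block until the sentinel
        captured.append(line)                  # app.py yields to Gradio here
    t.join()
    print("captured log:")                     # safe: redirection has ended
    print("".join(captured))


if __name__ == "__main__":
    main()
```

In app.py the loop body yields to Gradio instead of appending to a list, and a second queue (`result_queue`) carries the agent's final answer or exception back to the UI.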
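On the UI side, the change relies on Gradio's support for generator event handlers: a function passed to `Button.click` may `yield` successive output tuples, and each yield updates the bound components. Below is a compact sketch of that wiring, assuming a Gradio 4.x-style `gr.Chatbot` with `(user, bot)` tuple history, which is the format app.py uses; the component names here are illustrative only.

```python
# Sketch of streaming a generator's yields into a Chatbot + Textbox in Gradio.
import time
import gradio as gr


def stream_steps(query):
    """Toy generator: each yield is a (chatbot_history, status) tuple that
    Gradio pushes into the two bound output components."""
    history = []
    for step in range(3):
        history = history + [(None, f"step {step + 1}: processing '{query}'")]
        yield history, "⏳ Running..."   # partial update
        time.sleep(0.5)
    yield history, "Done"                # final update


with gr.Blocks() as demo:
    query_box = gr.Textbox(label="Query")
    run_btn = gr.Button("Run")
    log_chat = gr.Chatbot(label="Log")   # tuple-format history assumed
    answer_box = gr.Textbox(label="Answer", interactive=False)
    run_btn.click(fn=stream_steps, inputs=query_box,
                  outputs=[log_chat, answer_box])

if __name__ == "__main__":
    demo.launch()
```

Because `run_agent_wrapper` yields `(chatbot_history, final_answer)` tuples, the same two-output wiring applies to the real app.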