# Gradio chat UI for the plan-and-execute email agent.
# Standard library
import json
from typing import Any, Dict, List, Tuple

# Third-party
import gradio as gr

# Local application
from re_act import (
    get_plan_from_llm,
    think,
    act,
    store_name_email_mapping,
    extract_sender_info,
    client,
)
from logger import logger  # Assumes logger is configured
from schemas import PlanStep

# Maintain persistent session results across calls (module-level state).
session_results: Dict[str, Any] = {}
def respond(
    message: str,
    history: List[Tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
) -> str:
    """Handle one chat turn for the Gradio ChatInterface.

    Runs a plan-and-execute loop: asks the LLM for a plan, executes each
    step via ``think``/``act``, then asks the LLM to summarize the results.

    Args:
        message: The user's latest chat message.
        history: Prior (user, assistant) message pairs (unused here;
            required by the ChatInterface callback signature).
        system_message: System prompt from the UI.
            NOTE(review): currently unused by this handler — confirm
            whether it should feed the planning/summary prompts.
        max_tokens: Token cap for the summary completion.
        temperature: Sampling temperature for the summary completion.

    Returns:
        A Markdown-formatted string describing the plan, per-step
        execution results, and a final summary (or an error message).
    """
    logger.info("Gradio agent received message: %s", message)
    full_response = ""
    try:
        # Step 1: Generate plan
        plan = get_plan_from_llm(message)
        logger.debug("Generated plan: %s", plan)
        full_response += "📋 **Plan**:\n"
        for step in plan.plan:
            full_response += f"- {step.action}\n"
        full_response += "\n"

        results = {}

        # Step 2: Execute steps
        for step in plan.plan:
            if step.action == "done":
                full_response += "✅ Plan complete.\n"
                break

            should_run, updated_step, user_prompt = think(step, results, message)

            # Ask user for clarification if needed; return early and
            # wait for the user's next message.
            if user_prompt:
                full_response += f"❓ {user_prompt} (Please respond with an email)\n"
                return full_response  # wait for user

            if not should_run:
                full_response += f"⏭️ Skipping `{step.action}`\n"
                continue

            try:
                output = act(updated_step)
                results[updated_step.action] = output
                full_response += f"🔧 Ran `{updated_step.action}` → {output}\n"
            except Exception as e:
                # A failed step aborts the rest of the plan.
                logger.error("Error running action '%s': %s", updated_step.action, e)
                full_response += f"❌ Error running `{updated_step.action}`: {e}\n"
                break

        # Step 3: Summarize results
        try:
            summary_rsp = client.chat.completions.create(
                model="gpt-4o-mini",
                temperature=temperature,
                max_tokens=max_tokens,
                messages=[
                    {"role": "system", "content": "Summarize these results for the user in a friendly way."},
                    {"role": "assistant", "content": json.dumps(results)},
                ],
            )
            summary = summary_rsp.choices[0].message.content
            full_response += "\n📊 **Summary**:\n" + summary
        except Exception as e:
            # Best-effort summary: execution results above are still shown.
            logger.error("Summary generation failed: %s", e)
            full_response += "\n❌ Failed to generate summary."
    except Exception as e:
        # Top-level boundary: report rather than crash the UI callback.
        logger.exception("Unhandled error in agent: %s", e)
        full_response += f"\n❌ Unexpected error: {e}"

    return full_response
# Wire the agent handler into a chat UI. The extra inputs are passed to
# respond() positionally after (message, history).
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(label="System message", value="You are an email assistant agent."),
        gr.Slider(label="Max tokens", minimum=64, maximum=2048, value=512, step=1),
        gr.Slider(label="Temperature", minimum=0.0, maximum=1.5, value=0.7, step=0.1),
    ],
    title="📬 Email Agent",
    description="Ask me anything related to your email tasks!",
)

if __name__ == "__main__":
    demo.launch()