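"""Gradio chat front end for the ReAct-style email agent defined in re_act.py.

The respond() callback asks the LLM for a plan, runs each step via think()/act(),
and returns a formatted transcript (plan, per-step results, summary) to the chat UI.
"""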
import gradio as gr
import json
from typing import Any, Dict, List, Tuple
from re_act import (
get_plan_from_llm,
think,
act,
store_name_email_mapping,
extract_sender_info,
client,
)
from logger import logger # Assumes logger is configured
from schemas import PlanStep
# Maintain persistent session results
session_results: Dict[str, Any] = {}
def respond(
    message: str,
    history: List[Tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float
) -> str:
    logger.info("Gradio agent received message: %s", message)
    full_response = ""

    try:
        # Step 1: Generate plan
        plan = get_plan_from_llm(message)
        logger.debug("Generated plan: %s", plan)

        full_response += "📋 **Plan**:\n"
        for step in plan.plan:
            full_response += f"- {step.action}\n"
        full_response += "\n"

        results = {}

        # Step 2: Execute steps
        for step in plan.plan:
            if step.action == "done":
                full_response += "✅ Plan complete.\n"
                break

            should_run, updated_step, user_prompt = think(step, results, message)

            # Ask user for clarification if needed
            if user_prompt:
                full_response += f"❓ {user_prompt} (Please respond with an email)\n"
                return full_response  # wait for user

            if not should_run:
                full_response += f"⏭️ Skipping `{step.action}`\n"
                continue

            try:
                output = act(updated_step)
                results[updated_step.action] = output
                full_response += f"🔧 Ran `{updated_step.action}` → {output}\n"
            except Exception as e:
                logger.error("Error running action '%s': %s", updated_step.action, e)
                full_response += f"❌ Error running `{updated_step.action}`: {e}\n"
                break

        # Step 3: Summarize results
        try:
            summary_rsp = client.chat.completions.create(
                model="gpt-4o-mini",
                temperature=temperature,
                max_tokens=max_tokens,
                messages=[
                    {"role": "system", "content": "Summarize these results for the user in a friendly way."},
                    {"role": "assistant", "content": json.dumps(results)}
                ],
            )
            summary = summary_rsp.choices[0].message.content
            full_response += "\n📝 **Summary**:\n" + summary
        except Exception as e:
            logger.error("Summary generation failed: %s", e)
            full_response += "\n❌ Failed to generate summary."

    except Exception as e:
        logger.exception("Unhandled error in agent: %s", e)
        full_response += f"\n❌ Unexpected error: {e}"

    return full_response
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(label="System message", value="You are an email assistant agent."),
        gr.Slider(label="Max tokens", minimum=64, maximum=2048, value=512, step=1),
        gr.Slider(label="Temperature", minimum=0.0, maximum=1.5, value=0.7, step=0.1),
    ],
    title="📬 Email Agent",
    description="Ask me anything related to your email tasks!"
)
if __name__ == "__main__":
    demo.launch()