from smolagents import CodeAgent, HfApiModel, load_tool, tool
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
import os
from dotenv import load_dotenv
import openai
# Load environment variables from .env file
load_dotenv()
# Access the API key
openai.api_key = os.getenv('OPENAI_API_KEY')
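# Note: this assumes the openai>=1.0 SDK, where setting openai.api_key configures the
# module-level client used by the openai.chat.completions.create calls below.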
# Below are the new multi-perspective brainstorming and collaborative assistant tools
@tool
def multi_perspective_brainstorming(query: str) -> str:
"""A tool that generates and ranks creative ideas by simulating a brainstorming swarm of AI instances.
Args:
query: An open-ended query to brainstorm (e.g., 'Generate marketing ideas for a coffee shop').
Returns:
A prioritized list of the top ideas synthesized from multiple perspectives.
"""
    # Define brainstorming perspectives (kept generic so the tool handles any query, not just the coffee-shop example)
    perspectives = [
        {"focus": "Social Media", "prompt": f"Generate 3 creative ideas focused on social media for: {query}"},
        {"focus": "Loyalty Programs", "prompt": f"Generate 3 creative ideas focused on loyalty programs for: {query}"},
        {"focus": "Sustainability", "prompt": f"Generate 3 creative ideas focused on sustainability for: {query}"},
    ]
# Collect ideas from each perspective
all_ideas = []
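    # One API call per perspective simulates the "swarm" of independent brainstormers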
for perspective in perspectives:
response = openai.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "user", "content": perspective["prompt"]}
],
)
ideas = response.choices[0].message.content.split("\n") # Assume ideas are newline-separated
all_ideas.extend([f"{perspective['focus']}: {idea.strip()}" for idea in ideas if idea.strip()])
# Rank the ideas by simulating a consensus
    # Join outside the f-string: backslashes inside f-string expressions are a SyntaxError before Python 3.12
    joined_ideas = "\n".join(all_ideas)
    ranking_prompt = (
        f"From the following list of ideas, rank the top 5 based on creativity, feasibility, and impact for '{query}':\n"
        f"{joined_ideas}"
    )
ranked_response = openai.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "user", "content": ranking_prompt}
],
)
return ranked_response.choices[0].message.content
@tool
def realtime_collaborative_assistant(query: str) -> str:
"""A tool that simulates a roundtable discussion with AI experts to provide a well-rounded response.
Args:
query: The user’s question or topic to discuss (e.g., 'How can I improve my website’s UX?').
Returns:
A synthesized response combining insights from multiple AI perspectives.
"""
# Define expert personas with distinct roles
experts = [
{"role": "UX Designer", "prompt": f"As a UX designer, provide practical suggestions for: {query}"},
{"role": "Marketing Strategist", "prompt": f"As a marketing strategist, suggest how to approach: {query}"},
{"role": "Tech Analyst", "prompt": f"As a tech analyst, offer technical insights on: {query}"},
]
# Collect responses from each AI expert
expert_opinions = []
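    # Query each persona separately to simulate the roundtable discussion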
for expert in experts:
response = openai.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "user", "content": expert["prompt"]}
],
)
expert_opinions.append(f"{expert['role']}: {response.choices[0].message.content}")
# Synthesize the responses into a cohesive answer
    # As above, join outside the f-string for pre-3.12 compatibility
    joined_opinions = "\n".join(expert_opinions)
    synthesis_prompt = (
        f"Synthesize the following expert opinions into a concise, well-rounded response to the query '{query}':\n"
        f"{joined_opinions}"
    )
final_response = openai.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "user", "content": synthesis_prompt}
],
)
return final_response.choices[0].message.content
final_answer = FinalAnswerTool()
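# FinalAnswerTool is what lets the agent return its final response; keep it in the tools list.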
# If the agent does not answer, the model may be overloaded; use another model or the
# following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
max_tokens=2096,
temperature=0.5,
    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',  # alternatively model_id='Qwen/Qwen2.5-Coder-32B-Instruct', which may be overloaded
custom_role_conversions=None,
)
# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
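# Note: image_generation_tool is loaded but not registered with the agent below;
# add it to tools=[...] if the agent should be able to generate images.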
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
model=model,
tools=[final_answer, multi_perspective_brainstorming, realtime_collaborative_assistant], ## add your tools here (don't remove final answer)
max_steps=6,
verbosity_level=1,
grammar=None,
planning_interval=None,
name=None,
description=None,
prompt_templates=prompt_templates
)
GradioUI(agent).launch()