# app.py — Hugging Face Space (smolagents CodeAgent with OpenAI-backed brainstorming tools)
# NOTE: a scraped Space page header (status lines, file size, git blame hashes,
# line-number gutter) previously sat here and was not valid Python; removed.
# Third-party agent framework: agent class, tools, and model wrappers.
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
# NOTE(review): this rebinds the name `datetime` from the module (imported above)
# to the class, shadowing `import datetime` — confirm which form later code expects.
from datetime import datetime, timedelta
import json
from Gradio_UI import GradioUI
import os
from dotenv import load_dotenv
import openai
# Load environment variables from .env file
load_dotenv()
# Access the API key
openai.api_key = os.getenv('OPENAI_API_KEY')
# Below are the two custom brainstorming/collaboration tools exposed to the agent
@tool
def multi_perspective_brainstorming(query: str) -> str:
    """A tool that generates and ranks creative ideas by simulating a brainstorming swarm of AI instances.

    Args:
        query: An open-ended query to brainstorm (e.g., 'Generate marketing ideas for a coffee shop').
    Returns:
        A prioritized list of the top 5 ideas synthesized from multiple perspectives.
    """
    # Fix: the original prompts hard-coded "marketing ideas for a coffee shop",
    # so every query was answered as if it were about a coffee shop. Each
    # perspective now frames the caller's own query instead.
    perspectives = [
        {"focus": "Social Media", "prompt": f"From a social media perspective, generate 3 creative ideas for: {query}"},
        {"focus": "Loyalty Programs", "prompt": f"From a loyalty-program perspective, generate 3 creative ideas for: {query}"},
        {"focus": "Sustainability", "prompt": f"From a sustainability perspective, generate 3 creative ideas for: {query}"},
    ]
    # Collect ideas from each perspective, tagged with the perspective's focus.
    all_ideas = []
    for perspective in perspectives:
        response = openai.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "user", "content": perspective["prompt"]}
            ],
        )
        ideas = response.choices[0].message.content.split("\n")  # Assume ideas are newline-separated
        all_ideas.extend(f"{perspective['focus']}: {idea.strip()}" for idea in ideas if idea.strip())
    # Fix: a backslash inside an f-string expression ({'\n'.join(...)}) is a
    # SyntaxError on Python < 3.12 — join outside the f-string instead.
    ideas_text = "\n".join(all_ideas)
    # Rank the ideas by simulating a consensus pass over the pooled list.
    ranking_prompt = (
        f"From the following list of ideas, rank the top 5 based on creativity, feasibility, and impact for '{query}':\n"
        f"{ideas_text}"
    )
    ranked_response = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "user", "content": ranking_prompt}
        ],
    )
    return ranked_response.choices[0].message.content
@tool
def realtime_collaborative_assistant(query: str) -> str:
    """A tool that simulates a roundtable discussion with AI experts to provide a well-rounded response.

    Args:
        query: The user’s question or topic to discuss (e.g., 'How can I improve my website’s UX?').
    Returns:
        A synthesized response combining insights from multiple AI perspectives.
    """
    # Define expert personas with distinct roles; each gets the query framed
    # from its own professional angle.
    experts = [
        {"role": "UX Designer", "prompt": f"As a UX designer, provide practical suggestions for: {query}"},
        {"role": "Marketing Strategist", "prompt": f"As a marketing strategist, suggest how to approach: {query}"},
        {"role": "Tech Analyst", "prompt": f"As a tech analyst, offer technical insights on: {query}"},
    ]
    # Collect one response per expert persona, tagged with the persona's role.
    expert_opinions = []
    for expert in experts:
        response = openai.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "user", "content": expert["prompt"]}
            ],
        )
        expert_opinions.append(f"{expert['role']}: {response.choices[0].message.content}")
    # Fix: a backslash inside an f-string expression ({'\n'.join(...)}) is a
    # SyntaxError on Python < 3.12 — join outside the f-string instead.
    opinions_text = "\n".join(expert_opinions)
    # Synthesize the responses into a cohesive answer.
    synthesis_prompt = (
        f"Synthesize the following expert opinions into a concise, well-rounded response to the query '{query}':\n"
        f"{opinions_text}"
    )
    final_response = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "user", "content": synthesis_prompt}
        ],
    )
    return final_response.choices[0].message.content
# Mandatory terminal tool: the agent must call this to return its final answer.
final_answer = FinalAnswerTool()
# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
max_tokens=2096,  # NOTE(review): 2096 looks like a typo for 2048 — confirm intended limit
temperature=0.5,
model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud', #model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
custom_role_conversions=None,
)
# Import tool from Hub
# NOTE(review): loaded but never added to the agent's `tools` list below — confirm
# whether it was meant to be registered or is intentionally unused.
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
# Load the agent's prompt templates from the Space's local prompts.yaml.
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
# Wire the model, tools, and prompts into the CodeAgent.
agent = CodeAgent(
model=model,
tools=[final_answer, multi_perspective_brainstorming, realtime_collaborative_assistant], ## add your tools here (don't remove final answer)
max_steps=6,  # cap on reasoning/tool-use iterations per query
verbosity_level=1,
grammar=None,
planning_interval=None,
name=None,
description=None,
prompt_templates=prompt_templates
)
# Launch the Gradio web UI wrapping the agent (blocks until the server stops).
GradioUI(agent).launch()