# app.py — smolagents agent for a Hugging Face Space (web-scrape header removed)
from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
import os
from tavily import TavilyClient
from Gradio_UI import GradioUI
# Example placeholder tool from the course template. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:
    # NOTE: smolagents' @tool decorator parses the return annotation and the
    # docstring (description + Args section) to build the tool schema, so
    # keep this exact docstring layout when you customise the tool.
    """A tool that does nothing yet
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build ?"
# Tavily safe search tool
@tool
def tavily_search(query: str) -> str:
    """An enhanced search tool that performs web search using Tavily API with better error handling and formatting.
    Args:
        query: The search query to look up
    Returns:
        str: A well-formatted summary of search results with key highlights
    """
    try:
        # Requires TAVILY_API_KEY in the environment; a missing key raises
        # KeyError, which the handler below turns into a readable error string.
        client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
        search_result = client.search(
            query=query,
            search_depth="advanced",
            include_domains=["*.gov", "*.edu", "*.org"],  # More reliable sources
            exclude_domains=["pinterest.com", "reddit.com"],  # Social media excluded
        )
        summary = f"🔍 Search Results for: {query}\n\n"
        summary += "Key Findings:\n"
        # BUG FIX: Tavily result dicts expose 'title', 'url' and 'content' —
        # there are no 'domain' / 'description' keys. The old keys raised
        # KeyError, which the blanket except turned into "Search Error: 'domain'"
        # for every query. Use .get() so a missing field degrades gracefully.
        for idx, result in enumerate(search_result.get('results', [])[:5], 1):
            summary += f"\n{idx}. {result.get('title', 'Untitled')}\n"
            summary += f" Source: {result.get('url', 'unknown')}\n"
            if 'published_date' in result:
                summary += f" Date: {result['published_date']}\n"
            summary += f" Summary: {result.get('content', '')[:250]}...\n"
        return summary
    except Exception as e:
        # Best-effort tool: report the failure to the agent instead of crashing.
        return f"Search Error: {str(e)}. Please try rephrasing your query or try again later."
# Improved time tool
@tool
def world_time_tool(location: str) -> str:
    """An improved timezone tool with better location handling and formatted output.
    Args:
        location: City name or timezone identifier (e.g., 'Tokyo' or 'Asia/Tokyo')
    Returns:
        str: Formatted time information with additional context
    """
    try:
        # Map a handful of common city names onto IANA timezone identifiers.
        known_cities = {
            'tokyo': 'Asia/Tokyo',
            'new york': 'America/New_York',
            'london': 'Europe/London',
            'paris': 'Europe/Paris',
            'istanbul': 'Europe/Istanbul',
        }
        # Normalise the input; anything unknown is treated as a raw tz name
        # and validated by pytz below.
        lookup_key = location.lower().strip()
        tz_name = known_cities.get(lookup_key, location)
        zone = pytz.timezone(tz_name)
        now = datetime.datetime.now(zone)
        # Rich formatting for the reply.
        stamp = now.strftime("%Y-%m-%d %H:%M:%S")
        weekday = now.strftime("%A")
        return f"""
📍 Location: {location.title()}
🕒 Current Time: {stamp}
📅 Day: {weekday}
"""
    except pytz.exceptions.UnknownTimeZoneError:
        return f"Error: '{location}' is not a recognized timezone or city. Please try a major city name or standard timezone format."
    except Exception as e:
        return f"Time lookup error: {str(e)}"
final_answer = FinalAnswerTool()

# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# If the agent does not answer, the model is overloaded; use another model or the
# following Hugging Face Endpoint that also serves Qwen2.5 Coder:
model_id = 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'

# Model configuration
model = HfApiModel(
    max_tokens=2096,
    temperature=0.3,  # Lower temperature for more consistent responses
    model_id=model_id,  # 'Qwen/Qwen2.5-Coder-32B-Instruct'
    custom_role_conversions=None,
)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    # BUG FIX: the list previously referenced `get_current_time_in_timezone`,
    # which is never defined in this file (the time tool is `world_time_tool`),
    # so the Space crashed with a NameError at startup. Reference the actual
    # function objects defined above.
    tools=[final_answer, tavily_search, world_time_tool, image_generation_tool],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

# Launch the Gradio interface
GradioUI(agent).launch()