from smolagents import CodeAgent, DuckDuckGoSearchTool, VisitWebpageTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from typing import List
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# Below is a custom tool that parses raw search-result text into a URL -> description mapping.
import re
from typing import Dict
@tool
def parse_search_results(query_results: str) -> Dict[str, str]:
    """
    Parses search results text and extracts URLs along with their corresponding descriptions.

    Args:
        query_results: A string representing the search results. Each search result is expected
            to start with a URL, followed by one or more lines of description until the next URL
            is encountered.

    Returns:
        A dictionary where each key is a URL and the corresponding value is the description text.
    """
    results: Dict[str, str] = {}
    current_url = None
    current_desc = []
    # Split the input into lines
    lines = query_results.splitlines()
    print(lines)
    # Regex pattern to match a URL (starting with http:// or https://)
    url_pattern = re.compile(r"^(https?://\S+)")
    for line in lines:
        stripped_line = line.strip()
        if not stripped_line:
            continue  # Skip empty lines
        # Check if the line starts with a URL
        url_match = url_pattern.match(stripped_line)
        if url_match:
            # Save the previous URL and its description if available
            if current_url is not None:
                results[current_url] = " ".join(current_desc).strip()
            # Set the new URL and reset the description accumulator
            current_url = url_match.group(1)
            current_desc = []
        else:
            # Accumulate description lines
            current_desc.append(stripped_line)
    # Save the last URL and its description if any
    if current_url is not None:
        results[current_url] = " ".join(current_desc).strip()
    return results
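# Illustrative usage sketch (kept as a comment so nothing runs at import time).
# Assuming search-result text where each entry starts with its URL on its own line,
# parse_search_results returns a {url: description} mapping; the sample below is made up:
#
#   sample = (
#       "https://example.com/a\n"
#       "First result, line one\n"
#       "First result, line two\n"
#       "https://example.com/b\n"
#       "Second result\n"
#   )
#   parse_search_results(sample)
#   # -> {"https://example.com/a": "First result, line one First result, line two",
#   #     "https://example.com/b": "Second result"}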
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
final_answer = FinalAnswerTool()
# If the agent does not answer, the model may be overloaded. Use another model or the following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # this shared model may be overloaded at times
    custom_role_conversions=None,
)
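# Fallback sketch: if the shared model above is overloaded, the same HfApiModel
# constructor can be pointed at the dedicated endpoint mentioned in the comment above
# by swapping in its URL as model_id:
#
#   model = HfApiModel(
#       max_tokens=2096,
#       temperature=0.5,
#       model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
#       custom_role_conversions=None,
#   )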
# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
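# Note: image_generation_tool is loaded here but not registered with the agent below;
# add it to the tools=[...] list if the agent should be able to generate images.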
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
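# prompt_templates now holds the dict parsed from prompts.yaml (the agent's prompt
# templates, e.g. its system prompt) and is passed to CodeAgent below.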
agent = CodeAgent(
    model=model,
    tools=[final_answer, parse_search_results, DuckDuckGoSearchTool(), VisitWebpageTool()],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
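# Quick sanity-check sketch (kept commented out so the Space goes straight to the UI below):
#   agent.run("What time is it right now in America/New_York?")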
GradioUI(agent).launch()