from smolagents import CodeAgent, DuckDuckGoSearchTool, LiteLLMModel, load_tool, tool

import datetime
import os
import time

import pytz
import requests
import yaml
from duckduckgo_search import DDGS
from PIL import Image

from Gradio_UI import GradioUI
from tools.final_answer import FinalAnswerTool
from tools.visit_webpage import VisitWebpageTool

@tool
def browsing_tool_fetch_content(url: str, query_context: str) -> str:
    """
    Fetches the textual content of a web page using 'requests' and 'BeautifulSoup'.
    The query_context is accepted so a smarter browsing backend could use it to guide
    extraction; the current implementation only logs it.

    Args:
        url: The URL to fetch the content from.
        query_context: The search context related to the URL.
    """
    print(f"[Browsing Tool] Fetching content for URL: {url} (context: '{query_context}')")
    try:
        # bs4 is imported lazily so the rest of the app still loads if it is missing.
        from bs4 import BeautifulSoup

        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')

        # Prefer paragraph text; fall back to the full visible text of the page.
        paragraphs = soup.find_all('p')
        fetched_text = "\n".join(p.get_text() for p in paragraphs)
        if not fetched_text:
            fetched_text = soup.get_text(separator='\n', strip=True)
        return fetched_text
    except Exception as e:
        return f"Error fetching content from {url}: {str(e)}"
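
# Illustrative direct call (any reachable URL would do; not part of the agent flow):
#   browsing_tool_fetch_content(url="https://example.com", query_context="demo")
# returns the page's paragraph text, or an error string if the request fails.
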
@tool
def search_duckduckgo(topic: str, max_results: int = 3) -> list:
    """
    Searches DuckDuckGo for a given topic, retrieves search results,
    and then attempts to fetch the full content of each result URL.

    Args:
        topic: The topic to search for.
        max_results: The maximum number of search results to process.

    Returns:
        A list of dictionaries, where each dictionary represents a search result
        and contains:
        - 'title': The title of the search result.
        - 'href': The URL of the search result.
        - 'original_snippet': The original snippet from DuckDuckGo.
        - 'full_content': The fetched full content from the URL (or an error message).
    """
    print(f"Searching DuckDuckGo for: {topic} (max_results: {max_results})")
    detailed_results_list = []

    try:
        initial_results = DDGS().text(topic, max_results=max_results)

        if not initial_results:
            print("No initial results found from DuckDuckGo.")
            return []

        print(f"Found {len(initial_results)} initial results. Now fetching full content...")

        for result in initial_results:
            title = result.get('title', 'N/A')
            href = result.get('href', None)
            original_snippet = result.get('body', 'N/A')

            print(f"\nProcessing result: {title}")
            print(f"  URL: {href}")

            full_content = "N/A"
            if href:
                full_content = browsing_tool_fetch_content(url=href, query_context=topic)
            else:
                print("  No URL found for this result, cannot fetch full content.")
                full_content = "No URL provided in search result."

            detailed_results_list.append({
                'title': title,
                'href': href,
                'original_snippet': original_snippet,
                'full_content': full_content
            })
            print(f"  Full content (or error): {full_content[:200]}...")

    except Exception as e:
        print(f"An error occurred during the search or content fetching process: {str(e)}")

    # Return the structured results so callers get title, URL, snippet, and content,
    # as documented above.
    return detailed_results_list
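
# Illustrative direct call (live results will vary):
#   results = search_duckduckgo(topic="smolagents library", max_results=2)
#   for r in results:
#       print(r["title"], r["href"])
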
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        tz = pytz.timezone(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
final_answer = FinalAnswerTool()

# Provide your Gemini API key via the environment (e.g. `export GOOGLE_API_KEY=...`);
# avoid hardcoding secrets in source code.
gemini_api_key = os.environ.get("GOOGLE_API_KEY")

try:
    gemini_model = LiteLLMModel(
        model_id="gemini/gemini-1.5-flash-latest",
        api_key=gemini_api_key,
        temperature=0.5,
        max_tokens=2096,
        custom_role_conversions=None
    )
    print("Successfully initialized LiteLLMModel for Gemini 1.5 Flash.")
except Exception as e:
    print(f"Failed to initialize LiteLLMModel: {e}")
    gemini_model = None
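
# Any other LiteLLM-supported model id should also work here (for example
# "openai/gpt-4o-mini"), assuming the matching API key is available in the environment.
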
search_tool = DuckDuckGoSearchTool()
web_visit = VisitWebpageTool()

image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=gemini_model,
    tools=[final_answer, get_current_time_in_timezone, search_tool, web_visit, image_generation_tool],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)
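
# Optional sanity check (illustrative query) before launching the UI:
#   agent.run("What time is it right now in America/New_York?")
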
GradioUI(agent).launch()