tonko22's picture
refactor: Enhance logging with loguru and standardize code documentation
013068f
raw
history blame
7.72 kB
import os
import time
import random
import traceback
from loguru import logger
from Gradio_UI import GradioUI
from litellm import completion
from smolagents import (
CodeAgent,
DuckDuckGoSearchTool,
FinalAnswerTool,
LiteLLMModel,
VisitWebpageTool,
tool,
Tool,
)
# Logging goes to the terminal only — drop loguru's default handler and
# install a single stdout sink with a colorized format.
logger.remove()


def _console_sink(message) -> None:
    # loguru records already end with a newline, so suppress print's own.
    print(message, end="")


logger.add(
    _console_sink,
    level="INFO",
    format=(
        "<green>{time:YYYY-MM-DD HH:mm:ss}</green> | "
        "<level>{level: <8}</level> | <cyan>{message}</cyan>"
    ),
)
# API key configuration.
# os.environ values must be str: assigning os.getenv(...) directly raises an
# opaque `TypeError: str expected, not None` when the variable is unset.
# Check explicitly so a missing key fails fast with an actionable message.
_gemini_api_key = os.getenv("GEMINI_API_KEY")
if _gemini_api_key is None:
    raise RuntimeError("GEMINI_API_KEY environment variable is not set")
os.environ["GEMINI_API_KEY"] = _gemini_api_key
class LyricsSearchTool(Tool):
    """Search the web for song lyrics by title and artist.

    The ``query`` given to :meth:`forward` should include both the song
    title and the artist name; the tool returns the lyrics text when found,
    otherwise an empty string.
    """

    name = "lyrics_search_tool"
    description = "Uses web search to find song lyrics based on song title and artist name"
    inputs = {
        "query": {
            "type": "string",
            "description": "The search query for finding song lyrics. Should include song title and artist name.",
        }
    }
    output_type = "string"

    def forward(self, query: str) -> str:
        """Return lyrics for the given search query.

        Args:
            query: Search text combining song title and artist name.

        Raises:
            TypeError: If ``query`` is not a string. (``assert`` was replaced
                here because assertions are stripped under ``python -O``.)
        """
        if not isinstance(query, str):
            raise TypeError("Your search query must be a string")
        # TODO: Implement lyrics search functionality
        return "Lyrics search not implemented yet"
@tool
def analyze_lyrics_tool(song_title: str, artist: str, lyrics: str) -> str:
    """
    Performs a deep analysis of the musical track, given its metadata.

    Args:
        song_title: Title of the song or music track.
        artist: The name of the artist.
        lyrics: The lyrics of the song.

    Returns:
        A summary of the song's meaning in English.
    """
    prompt = f"""You are an expert in songs and their meanings.
    Summarize the meaning of {song_title} by {artist} and identify
    key themes based on the lyrics:
    {lyrics}.
    Include deep idea and vibes analysis with explanations
    based on references to the exact lines.
    """
    # Claude is opt-in via the USE_ANTHROPIC env var; Gemini is the default.
    use_claude = os.getenv("USE_ANTHROPIC", "false").lower() == "true"
    if use_claude:
        model_to_use = "claude-3-haiku-20240307"
        logger.info("Using Anthropic model: {} for lyrics analysis", model_to_use)
    else:
        model_to_use = "gemini/gemini-2.0-flash"
        logger.info("Using Gemini model: {} for lyrics analysis", model_to_use)

    logger.info("Analyzing lyrics for song: '{}' by '{}'", song_title, artist)
    # All API traffic is routed through the retry-wrapped helper.
    return _make_api_call_with_retry(model_to_use, prompt)
def _extract_content(response) -> str:
    """Pull the message text out of a LiteLLM completion response.

    Supports both attribute-style (`response.choices[0].message.content`)
    and mapping-style (`response["choices"][0]...`) response objects.

    Raises:
        ValueError: If no content can be located in the response.
    """
    try:
        return response.choices[0].message.content.strip()
    except (AttributeError, KeyError, IndexError):
        pass
    try:
        return response["choices"][0]["message"]["content"].strip()
    except (AttributeError, KeyError, IndexError, TypeError):
        # `from None` drops the unhelpful lookup-failure chain.
        raise ValueError("Failed to extract content from response") from None


def _make_api_call_with_retry(model: str, prompt: str) -> str:
    """
    Makes an API call with a retry mechanism for error handling.

    Retries only transient failures (ConnectionError / TimeoutError) with
    exponential backoff capped at ``max_delay``; all other exceptions are
    logged and re-raised immediately.

    Args:
        model: The model identifier to use.
        prompt: The prompt text to send to the model.

    Returns:
        The response from the model as a string.

    Raises:
        ConnectionError | TimeoutError: Last transient error after all
            attempts are exhausted.
        ValueError: If a response arrives but its content cannot be read.
    """
    max_attempts = 20
    base_delay = 10  # seconds; first backoff step
    max_delay = 60   # seconds; backoff cap
    last_exception = None

    for attempt in range(max_attempts):
        try:
            # Small random delay to prevent simultaneous requests.
            time.sleep(random.uniform(0.1, 1.0))
            # Exponential backoff on retry attempts only.
            if attempt > 0:
                time.sleep(min(base_delay * (2 ** (attempt - 1)), max_delay))

            response = completion(
                model=model,
                messages=[{"role": "user", "content": prompt}],
                num_retries=2,  # Built-in retry mechanism of LiteLLM
            )
            return _extract_content(response)
        except (ConnectionError, TimeoutError) as e:
            # NOTE(review): litellm generally raises its own exception types
            # (e.g. litellm.exceptions.*) — confirm these builtins actually
            # cover the transient failures seen in production.
            last_exception = e
            logger.warning(
                "API call failed (attempt {}/{}) for model {}: {}. Retrying...",
                attempt + 1, max_attempts, model, str(e),
            )
        except Exception as e:
            logger.error("Unexpected error: {}", str(e))
            logger.error(traceback.format_exc())
            raise  # For other exceptions, we don't retry

    # The loop only exhausts via the transient-error branch, which always
    # records the exception — re-raise the last one seen.
    logger.error("All {} attempts failed. Last error: {}", max_attempts, str(last_exception))
    raise last_exception
# TODO: use DuckDuckGoSearchTool to find related information
# for explanation in case the LLM itself is not confident or doesn't know
#
# Check if we need to use Anthropic for local testing
use_anthropic = os.getenv("USE_ANTHROPIC", "false").lower() == "true"

if use_anthropic:
    # os.environ values must be str: assigning os.getenv(...) directly
    # raises an opaque TypeError when ANTHROPIC_API_KEY is unset.
    _anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
    if _anthropic_api_key is None:
        raise RuntimeError(
            "USE_ANTHROPIC is enabled but ANTHROPIC_API_KEY is not set"
        )
    os.environ["ANTHROPIC_API_KEY"] = _anthropic_api_key
    model = LiteLLMModel(model_id="claude-3-haiku-20240307")
    logger.info("Using Anthropic Claude model for local testing")
else:
    model = LiteLLMModel(model_id="gemini/gemini-2.0-flash")
    logger.info("Using Gemini model as default")
# Worker agent: finds and scrapes full lyrics from the web.
web_agent = CodeAgent(
    name="lyrics_search_agent",
    description="Browses the web to find original full lyrics and scrape them. Excels at building effective search queries",
    model=model,
    tools=[DuckDuckGoSearchTool(), VisitWebpageTool()],
    additional_authorized_imports=["numpy", "bs4"],
    verbosity_level=2,
    max_steps=22,
)

# Worker agent: interprets the lyrics once they have been retrieved.
analysis_agent = CodeAgent(
    name="lyrics_analysis_agent",
    description="You are a Song Analysis Expert with deep knowledge of music theory, lyrical interpretation, cultural contexts, and music history. Your role is to analyze song lyrics to uncover their deeper meaning, artistic significance, and historical context.",
    model=model,
    tools=[DuckDuckGoSearchTool(), VisitWebpageTool(), analyze_lyrics_tool],
    additional_authorized_imports=["numpy", "bs4"],
    verbosity_level=2,
    max_steps=50,
)
# When using the DuckDuckGoSearchTool, clearly indicate when information comes from external research versus your own knowledge base.

# Orchestrator: delegates lyric retrieval and analysis to the two worker
# agents above and produces the final answer.
manager_agent = CodeAgent(
    model=model,
    tools=[FinalAnswerTool()],
    name="manager_agent",
    description="Manages the search process and coordinates the search and analysis of song lyrics.",
    managed_agents=[web_agent, analysis_agent],
    additional_authorized_imports=["json"],
    planning_interval=5,  # re-plan every 5 steps
    verbosity_level=2,
    max_steps=15,
)

logger.info("Initializing Gradio UI and launching server")
# Local-only UI: bound to loopback, no public share link.
GradioUI(manager_agent).launch(
    debug=True, share=False, server_name="127.0.0.1", server_port=3000
)
# NOTE(review): launch() blocks, so this "success" message is only emitted
# after the server shuts down — confirm that is the intended timing.
logger.success("Server started successfully")