# LyricsAnalyzerAgent / config.py
"""
Configuration parameters for the Lyrics Analyzer Agent.
This module separates configuration from implementation,
making it easier to modify settings without changing code.
"""
import os
import yaml
from loguru import logger
# Logger configuration
def setup_logger():
    """Configure the loguru logger with custom formatting."""
    logger.remove()  # Remove default handlers
    logger.add(
        lambda msg: print(msg, end=""),
        level="INFO",
        format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{message}</cyan>"
    )
# API configuration
def load_api_keys():
    """Load API keys from environment variables."""
    # Gemini API is the default; os.environ cannot store None, so guard against a missing key.
    gemini_key = os.getenv("GEMINI_API_KEY")
    if gemini_key:
        os.environ["GEMINI_API_KEY"] = gemini_key
    else:
        logger.warning("GEMINI_API_KEY is not set.")
def get_model_id(is_test=True):
    """Get the appropriate model ID based on configuration."""
    if is_test:
        return "ollama/gemma3:4b"  # Using local Ollama with Gemma 3:4B instead of Claude
    else:
        return "gemini/gemini-2.0-flash"
def get_ollama_api_base():
    """Get the API base URL for Ollama."""
    return "http://localhost:11434"
# Load prompts from YAML
def load_prompt_templates():
    """Load prompt templates from the YAML file."""
    try:
        with open("prompts/prompts_hf.yaml", "r") as stream:
            return yaml.safe_load(stream)
    except (FileNotFoundError, yaml.YAMLError) as e:
        logger.error(f"Error loading prompts/prompts_hf.yaml: {e}")
        return {}  # Return an empty dict to avoid breaking the application
# Tool configuration
SEARCH_TOOL_CONFIG = {
"min_delay": 3.0,
"max_delay": 7.0
}
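# Illustrative sketch (not part of the original module): a search tool could throttle
# its requests with a random pause between these bounds, e.g.
#
#     delay = random.uniform(SEARCH_TOOL_CONFIG["min_delay"], SEARCH_TOOL_CONFIG["max_delay"])
#     time.sleep(delay)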
# Gradio UI configuration
def get_gradio_config(is_test=True):
    """Get the appropriate Gradio UI configuration based on the environment.

    Args:
        is_test: If True, use the test configuration (local development).
            If False, use the production configuration (HuggingFace).

    Returns:
        Dictionary with Gradio configuration parameters.
    """
    if is_test:
        # Configuration for local development/testing
        return {
            "debug": True,
            "share": False,
            "server_name": "127.0.0.1",
            "server_port": 3000
        }
    else:
        # Configuration for production (HuggingFace)
        return {
            "debug": True,
            "share": False
            # No server_name or server_port for HuggingFace deployment
        }
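
# Minimal usage sketch (an assumption, not part of the original file): exercises the
# helpers above from the command line; the real agent entry point lives elsewhere.
if __name__ == "__main__":
    setup_logger()
    load_api_keys()
    logger.info(f"Model ID (test): {get_model_id(is_test=True)}")
    logger.info(f"Model ID (prod): {get_model_id(is_test=False)}")
    logger.info(f"Ollama API base: {get_ollama_api_base()}")
    templates = load_prompt_templates() or {}
    logger.info(f"Loaded {len(templates)} prompt template(s)")
    logger.info(f"Gradio config (test): {get_gradio_config(is_test=True)}")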