File size: 14,044 Bytes
fb513c1 97e737a fb513c1 89e5d16 fb513c1 97e737a fb513c1 89e5d16 fb513c1 97e737a fb513c1 89e5d16 fb513c1 89e5d16 fb513c1 89e5d16 fb513c1 89e5d16 fb513c1 89e5d16 fb513c1 f5395f8 fb513c1 89e5d16 fb513c1 f5395f8 fb513c1 89e5d16 fb513c1 89e5d16 fb513c1 89e5d16 fb513c1 89e5d16 f5395f8 89e5d16 f5395f8 89e5d16 fb513c1 89e5d16 fb513c1 89e5d16 fb513c1 97e737a fb513c1 89e5d16 fb513c1 89e5d16 fb513c1 89e5d16 fb513c1 89e5d16 fb513c1 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 |
"""
Intelligent AI Agent using LlamaIndex with websearch capabilities
This module contains the agent class with advanced tools and reasoning.
"""
import os
import asyncio
import io
import contextlib
import ast
import traceback
from typing import Any, Dict, Tuple, List

# Load environment variables from a local .env file, if python-dotenv is installed.
# BUG FIX: the status prints contained mojibake ("β" / "β οΈ") where the
# ✅ / ⚠️ emoji had been corrupted, splitting one string across two lines.
try:
    from dotenv import load_dotenv

    load_dotenv()
    print("✅ .env file loaded successfully")
except ImportError:
    # Optional dependency: a missing python-dotenv must not be fatal.
    print("⚠️ python-dotenv not available, .env file not loaded")
except Exception as e:
    # A malformed .env file should not prevent the agent from starting either.
    print(f"⚠️ Error loading .env file: {e}")
# LlamaIndex imports are optional: when any of them is missing the module
# still loads and the agent degrades to a basic (tool-less) mode, signalled
# by LLAMA_INDEX_AVAILABLE.
try:
    from llama_index.core.agent.workflow import (
        ToolCall,
        ToolCallResult,
        FunctionAgent,
        AgentStream,
    )
    from llama_index.core.tools import FunctionTool
    from llama_index.tools.wikipedia import WikipediaToolSpec
    from llama_index.tools.tavily_research.base import TavilyToolSpec
    from llama_index.llms.bedrock_converse import BedrockConverse
except ImportError as e:
    print(f"LlamaIndex imports not available: {e}")
    LLAMA_INDEX_AVAILABLE = False
else:
    LLAMA_INDEX_AVAILABLE = True
class BasicAgent:
    """
    Advanced AI Agent using LlamaIndex with CodeAct capabilities and multiple tools.
    """

    def __init__(self):
        """Initialize the agent with LLM, tools, and code executor."""
        print("Initializing Advanced AI Agent with LlamaIndex...")

        # The Hugging Face token is optional; only warn when it is absent.
        self.hf_token = os.getenv("HUGGINGFACE_TOKEN")
        if not self.hf_token:
            print("Warning: HUGGINGFACE_TOKEN not found. Using default model.")

        # Build each subsystem in dependency order: the LLM first, then the
        # tools it will drive, and finally the agent wrapper around both.
        self._initialize_llm()
        self._initialize_tools()
        self._initialize_agent()
        print("Advanced AI Agent initialized successfully.")
def _initialize_llm(self):
"""Initialize the Hugging Face LLM."""
if not LLAMA_INDEX_AVAILABLE:
print("LlamaIndex not available, using basic mode")
self.llm = None
return
try:
#self.llm = OpenAI(model="gpt-4o", api_key=os.getenv("OPENAI_API_KEY"))
#self.llm = Ollama(model="llama3.1:latest", base_url="http://localhost:11434")
self.llm = BedrockConverse(
model="amazon.nova-pro-v1:0",
temperature=0.5,
aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
region_name=os.getenv("AWS_REGION"),
)
print("β
LLM initialized successfully")
except Exception as e:
print(f"Error initializing LLM: {e}")
# Fallback to a basic setup
self.llm = None
def _initialize_tools(self):
"""Initialize all available tools."""
self.tools = []
# Store basic math functions for fallback mode
self.math_functions = {
'add': lambda a, b: a + b,
'subtract': lambda a, b: a - b,
'multiply': lambda a, b: a * b,
'divide': lambda a, b: a / b if b != 0 else "Error: Division by zero",
'power': lambda a, b: a ** b,
'percentage': lambda v, p: (v * p) / 100,
}
if not LLAMA_INDEX_AVAILABLE:
print("Tools initialization skipped - LlamaIndex not available")
return
# Mathematical tools
def add_numbers(a: float, b: float) -> float:
"""Add two numbers together."""
return a + b
def subtract_numbers(a: float, b: float) -> float:
"""Subtract second number from first number."""
return a - b
def multiply_numbers(a: float, b: float) -> float:
"""Multiply two numbers."""
return a * b
def divide_numbers(a: float, b: float) -> float:
"""Divide first number by second number."""
if b == 0:
return "Error: Division by zero"
return a / b
def power_numbers(a: float, b: float) -> float:
"""Raise first number to the power of second number."""
return a ** b
def calculate_percentage(value: float, percentage: float) -> float:
"""Calculate percentage of a value."""
return (value * percentage) / 100
def get_modulus(a: float, b: float) -> float:
"""Get the modulus of two numbers."""
return a % b
# Create function tools
try:
math_tools = [
FunctionTool.from_defaults(fn=add_numbers, name="add_numbers", description="Add two numbers together"),
FunctionTool.from_defaults(fn=subtract_numbers, name="subtract_numbers", description="Subtract second number from first number"),
FunctionTool.from_defaults(fn=multiply_numbers, name="multiply_numbers", description="Multiply two numbers"),
FunctionTool.from_defaults(fn=divide_numbers, name="divide_numbers", description="Divide first number by second number"),
FunctionTool.from_defaults(fn=power_numbers, name="power_numbers", description="Raise first number to the power of second number"),
FunctionTool.from_defaults(fn=calculate_percentage, name="calculate_percentage", description="Calculate percentage of a value"),
FunctionTool.from_defaults(fn=get_modulus, name="get_modulus", description="Get the modulus of two numbers"),
]
self.tools.extend(math_tools)
print("β
Math tools initialized")
except Exception as e:
print(f"Warning: Could not initialize math tools: {e}")
# Initialize search tools
try:
# web search
search_spec = TavilyToolSpec(
api_key=os.getenv("TAVILY_API_KEY"),
)
search_tool = search_spec.to_tool_list()
self.tools.extend(search_tool)
print("β
DuckDuckGo search tool initialized")
except Exception as e:
print(f"Warning: Could not initialize DuckDuckGo tool: {e}")
try:
# Wikipedia search
wiki_spec = WikipediaToolSpec()
wiki_tools = FunctionTool.from_defaults(wiki_spec.wikipedia_search, name="wikipedia_search", description="Search Wikipedia for information")
self.tools.extend(wiki_tools)
print("β
Wikipedia tool initialized")
except Exception as e:
print(f"Warning: Could not initialize Wikipedia tool: {e}")
""" try:
# Web requests tool
requests_spec = RequestsToolSpec()
requests_tools = requests_spec.to_tool_list()
self.tools.extend(requests_tools)
print("β
Web requests tool initialized")
except Exception as e:
print(f"Warning: Could not initialize requests tool: {e}") """
print(f"β
Total {len(self.tools)} tools initialized")
def _initialize_agent(self):
"""Initialize the CodeAct Agent (deferred initialization)."""
if not self.llm:
print("Warning: No LLM available, using basic mode")
self.agent = None
self.context = None
return
# Store initialization parameters for deferred initialization
self._agent_params = {
#'code_execute_fn': self.code_executor.execute,
'llm': self.llm,
'tools': self.tools
}
self.agent = None
self.context = None
print("β
CodeAct Agent parameters prepared (deferred initialization)")
def _ensure_agent_initialized(self):
"""Ensure the CodeAct agent is initialized when needed."""
if self.agent is None and hasattr(self, '_agent_params'):
try:
# Reset any existing context to avoid conflicts
if hasattr(self, 'context') and self.context:
try:
# Clean up existing context if possible
self.context = None
except:
pass
# Create the CodeAct Agent without assuming event loop state
#self.agent = CodeActAgent(**self._agent_params)
# Enhanced prompt with specific formatting requirements for medium models
enhanced_prompt = f"""
You are an intelligent AI assistant equipped with powerful tools to help solve problems. You must think step-by-step and use the available tools when needed.
## AVAILABLE TOOLS:
You have access to the following tools - use them strategically:
### Mathematical Tools:
- add_numbers(a, b): Add two numbers together
- subtract_numbers(a, b): Subtract second number from first number
- multiply_numbers(a, b): Multiply two numbers
- divide_numbers(a, b): Divide first number by second number
- power_numbers(a, b): Raise first number to the power of second number
- calculate_percentage(value, percentage): Calculate percentage of a value
- get_modulus(a, b): Get the modulus of two numbers
### Research Tools:
- tavily_search(query): Search the web for current information
- wikipedia_search(query): Search Wikipedia for factual information
## INSTRUCTIONS:
1. **Read the question carefully** and identify what type of answer is needed
2. **Think step-by-step** - break down complex problems into smaller parts
3. **Use tools when necessary** - don't try to guess calculations or current information
4. **For mathematical problems**: Use the math tools for accuracy
5. **For factual questions**: Use search tools to get current/accurate information
6. **Show your reasoning** - explain your thought process before the final answer
## ANSWER FORMAT:
Always end your response with exactly this format:
FINAL ANSWER: [YOUR ANSWER]
## FORMATTING RULES FOR FINAL ANSWER:
- **Numbers**: Write plain numbers without commas, units, or symbols (unless specifically asked)
- **Text**: Use simple words, no articles (a, an, the), no abbreviations
- **Lists**: Comma-separated, following the above rules for each element
- **Calculations**: Show the result as a plain number
## EXAMPLES:
Question: "What is 25% of 200?"
Reasoning: I need to calculate 25% of 200 using the percentage tool.
[Use calculate_percentage(200, 25)]
FINAL ANSWER: 50
Question: "What is the capital of France?"
Reasoning: This is a factual question about geography.
[Use wikipedia_search("France capital")]
FINAL ANSWER: Paris
Remember: Think carefully, use tools when helpful, and always provide your final answer in the specified format.
"""
self.agent = FunctionAgent(
tools=self.tools,
llm=self.llm,
system_prompt=enhanced_prompt,
)
print("β
CodeAct Agent initialized (deferred)")
except Exception as e:
print(f"Error in deferred agent initialization: {e}")
print("Continuing with fallback mode...")
return False
return self.agent is not None
async def __call__(self, question: str) -> str:
"""
Main method that processes a question and returns an answer.
"""
print(f"Agent received question (first 100 chars): {question[:100]}...")
# Ensure agent is initialized (for deferred initialization)
self._ensure_agent_initialized()
if self.agent:
try:
# Use the CodeAct agent for advanced reasoning
response = await self._async_agent_run(question)
return response
except Exception as e:
print(f"Error with agent: {e}")
return f"FINAL ANSWER: Error processing question - {str(e)}"
else:
return "FINAL ANSWER: Agent not properly initialized"
async def _async_agent_run(self, question: str) -> str:
"""Run the agent asynchronously."""
try:
# Create a fresh context for this run to avoid loop conflicts
#context = Context(self.agent)
print("Agent running...")
print(self.agent)
handler = self.agent.run(question)
#return str(handler)
iterationsNumber = 0
async for event in handler.stream_events():
iterationsNumber += 1
# if isinstance(event, ToolCallResult):
# print(
# f"\n-----------\nCode execution result:\n{event.tool_output}"
# )
if isinstance(event, ToolCall):
print(f"\n-----------\nevent:\n{event}")
elif isinstance(event, AgentStream):
print(f"{event.delta}", end="", flush=True)
""" if iterationsNumber > 5:
print("Too many iterations, stopping...")
break """
response = await handler
print(f'response.response: {response.response.content}')
return response.response.content.split("FINAL ANSWER: ")[1]
except Exception as e:
print(f"Async agent error: {e}")
return f"FINAL ANSWER: Error in agent processing - {str(e)}"
|