stivenDR14 committed on
Commit
89e5d16
·
1 Parent(s): fb513c1

30% managed

Browse files
Files changed (2) hide show
  1. agent.py +76 -73
  2. app.py +6 -2
agent.py CHANGED
@@ -28,13 +28,17 @@ try:
28
  from llama_index.core.agent.workflow import (
29
  ToolCall,
30
  ToolCallResult,
 
31
  AgentStream,
32
  )
33
  from llama_index.llms.huggingface import HuggingFaceLLM
34
  from llama_index.core.tools import FunctionTool
35
  from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
36
  from llama_index.tools.wikipedia import WikipediaToolSpec
37
- from llama_index.tools.requests import RequestsToolSpec
 
 
 
38
  LLAMA_INDEX_AVAILABLE = True
39
  except ImportError as e:
40
  print(f"LlamaIndex imports not available: {e}")
@@ -105,7 +109,6 @@ class BasicAgent:
105
 
106
  # Get Hugging Face token
107
  self.hf_token = os.getenv("HUGGINGFACE_TOKEN")
108
- print(self.hf_token)
109
  if not self.hf_token:
110
  print("Warning: HUGGINGFACE_TOKEN not found. Using default model.")
111
 
@@ -116,7 +119,7 @@ class BasicAgent:
116
  self._initialize_tools()
117
 
118
  # Initialize code executor
119
- self._initialize_code_executor()
120
 
121
  # Initialize CodeAct Agent
122
  self._initialize_agent()
@@ -131,28 +134,17 @@ class BasicAgent:
131
  return
132
 
133
  try:
134
- # Using a capable model for reasoning and code generation
135
- # Note: For production, consider using models like meta-llama/Llama-2-7b-chat-hf or similar
136
- model_kwargs = {"temperature": 0.1, "max_length": 512}
137
- generate_kwargs = {"temperature": 0.1, "do_sample": True}
 
 
 
 
 
 
138
 
139
- if self.hf_token:
140
- # Use token if available
141
- self.llm = HuggingFaceLLM(
142
- model_name=MODEL,
143
- tokenizer_name=MODEL, # Explicitly use the same model for tokenizer
144
- model_kwargs=model_kwargs,
145
- generate_kwargs=generate_kwargs,
146
- tokenizer_kwargs={"token": self.hf_token},
147
- )
148
- else:
149
- # Try without token for public models
150
- self.llm = HuggingFaceLLM(
151
- model_name=MODEL,
152
- tokenizer_name=MODEL, # Explicitly use the same model for tokenizer
153
- model_kwargs=model_kwargs,
154
- generate_kwargs=generate_kwargs,
155
- )
156
  print("βœ… LLM initialized successfully")
157
  except Exception as e:
158
  print(f"Error initializing LLM: {e}")
@@ -203,16 +195,21 @@ class BasicAgent:
203
  def calculate_percentage(value: float, percentage: float) -> float:
204
  """Calculate percentage of a value."""
205
  return (value * percentage) / 100
 
 
 
 
206
 
207
  # Create function tools
208
  try:
209
  math_tools = [
210
- FunctionTool.from_defaults(fn=add_numbers),
211
- FunctionTool.from_defaults(fn=subtract_numbers),
212
- FunctionTool.from_defaults(fn=multiply_numbers),
213
- FunctionTool.from_defaults(fn=divide_numbers),
214
- FunctionTool.from_defaults(fn=power_numbers),
215
- FunctionTool.from_defaults(fn=calculate_percentage),
 
216
  ]
217
  self.tools.extend(math_tools)
218
  print("βœ… Math tools initialized")
@@ -221,31 +218,33 @@ class BasicAgent:
221
 
222
  # Initialize search tools
223
  try:
224
- # DuckDuckGo search
225
- ddg_spec = DuckDuckGoSearchToolSpec()
226
- ddg_tools = ddg_spec.to_tool_list()
227
- self.tools.extend(ddg_tools)
 
 
228
  print("βœ… DuckDuckGo search tool initialized")
229
  except Exception as e:
230
  print(f"Warning: Could not initialize DuckDuckGo tool: {e}")
231
 
232
- try:
233
  # Wikipedia search
234
  wiki_spec = WikipediaToolSpec()
235
- wiki_tools = wiki_spec.to_tool_list()
236
  self.tools.extend(wiki_tools)
237
  print("βœ… Wikipedia tool initialized")
238
  except Exception as e:
239
- print(f"Warning: Could not initialize Wikipedia tool: {e}")
240
 
241
- try:
242
  # Web requests tool
243
  requests_spec = RequestsToolSpec()
244
  requests_tools = requests_spec.to_tool_list()
245
  self.tools.extend(requests_tools)
246
  print("βœ… Web requests tool initialized")
247
  except Exception as e:
248
- print(f"Warning: Could not initialize requests tool: {e}")
249
 
250
  print(f"βœ… Total {len(self.tools)} tools initialized")
251
 
@@ -295,7 +294,7 @@ class BasicAgent:
295
 
296
  # Store initialization parameters for deferred initialization
297
  self._agent_params = {
298
- 'code_execute_fn': self.code_executor.execute,
299
  'llm': self.llm,
300
  'tools': self.tools
301
  }
@@ -316,7 +315,21 @@ class BasicAgent:
316
  pass
317
 
318
  # Create the CodeAct Agent without assuming event loop state
319
- self.agent = CodeActAgent(**self._agent_params)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
320
  print("βœ… CodeAct Agent initialized (deferred)")
321
 
322
  except Exception as e:
@@ -334,29 +347,11 @@ class BasicAgent:
334
  # Ensure agent is initialized (for deferred initialization)
335
  self._ensure_agent_initialized()
336
 
337
- # Enhanced prompt with specific formatting requirements
338
- enhanced_prompt = f"""
339
- You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
340
-
341
- YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
342
-
343
- Available tools and capabilities:
344
- - Mathematical calculations (addition, subtraction, multiplication, division, powers, percentages)
345
- - Web search using DuckDuckGo
346
- - Wikipedia search for factual information
347
- - Web requests for API calls
348
- - Code execution for complex calculations and data processing
349
- - Python libraries: math, datetime, json, re, numpy (if available), pandas (if available)
350
-
351
- Question: {question}
352
-
353
- Think step by step, use the available tools when necessary, and provide your final answer in the specified format.
354
- """
355
-
356
  if self.agent:
357
  try:
358
  # Use the CodeAct agent for advanced reasoning
359
- response = await self._async_agent_run(enhanced_prompt)
360
  return response
361
  except Exception as e:
362
  print(f"Error with CodeAct agent: {e}")
@@ -365,24 +360,32 @@ Think step by step, use the available tools when necessary, and provide your fin
365
  return "FINAL ANSWER: Agent not properly initialized"
366
 
367
 
368
- async def _async_agent_run(self, prompt: str) -> str:
369
  """Run the agent asynchronously."""
370
  try:
371
  # Create a fresh context for this run to avoid loop conflicts
372
- context = Context(self.agent)
373
- handler = self.agent.run(prompt, ctx=context)
374
-
 
 
 
375
  async for event in handler.stream_events():
376
- if isinstance(event, ToolCallResult):
377
- print(
378
- f"\n-----------\nCode execution result:\n{event.tool_output}"
379
- )
380
- elif isinstance(event, ToolCall):
381
- print(f"\n-----------\nParsed code:\n{event.tool_kwargs['code']}")
 
382
  elif isinstance(event, AgentStream):
383
  print(f"{event.delta}", end="", flush=True)
384
-
385
- return await handler
 
 
 
 
386
  except Exception as e:
387
  print(f"Async agent error: {e}")
388
  return f"FINAL ANSWER: Error in agent processing - {str(e)}"
 
28
  from llama_index.core.agent.workflow import (
29
  ToolCall,
30
  ToolCallResult,
31
+ FunctionAgent,
32
  AgentStream,
33
  )
34
  from llama_index.llms.huggingface import HuggingFaceLLM
35
  from llama_index.core.tools import FunctionTool
36
  from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
37
  from llama_index.tools.wikipedia import WikipediaToolSpec
38
+ from llama_index.tools.tavily_research.base import TavilyToolSpec
39
+ #from llama_index.llms.ollama import Ollama
40
+ from llama_index.llms.bedrock_converse import BedrockConverse
41
+ #from llama_index.llms.openai import OpenAI
42
  LLAMA_INDEX_AVAILABLE = True
43
  except ImportError as e:
44
  print(f"LlamaIndex imports not available: {e}")
 
109
 
110
  # Get Hugging Face token
111
  self.hf_token = os.getenv("HUGGINGFACE_TOKEN")
 
112
  if not self.hf_token:
113
  print("Warning: HUGGINGFACE_TOKEN not found. Using default model.")
114
 
 
119
  self._initialize_tools()
120
 
121
  # Initialize code executor
122
+ #self._initialize_code_executor()
123
 
124
  # Initialize CodeAct Agent
125
  self._initialize_agent()
 
134
  return
135
 
136
  try:
137
+ #self.llm = OpenAI(model="gpt-4o", api_key=os.getenv("OPENAI_API_KEY"))
138
+ #self.llm = Ollama(model="llama3.1:latest", base_url="http://localhost:11434")
139
+ self.llm = BedrockConverse(
140
+ model="amazon.nova-pro-v1:0",
141
+ temperature=0.5,
142
+ aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
143
+ aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
144
+ region_name=os.getenv("AWS_REGION"),
145
+
146
+ )
147
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
  print("βœ… LLM initialized successfully")
149
  except Exception as e:
150
  print(f"Error initializing LLM: {e}")
 
195
  def calculate_percentage(value: float, percentage: float) -> float:
196
  """Calculate percentage of a value."""
197
  return (value * percentage) / 100
198
+
199
+ def get_modulus(a: float, b: float) -> float:
200
+ """Get the modulus of two numbers."""
201
+ return a % b
202
 
203
  # Create function tools
204
  try:
205
  math_tools = [
206
+ FunctionTool.from_defaults(fn=add_numbers, name="add_numbers", description="Add two numbers together"),
207
+ FunctionTool.from_defaults(fn=subtract_numbers, name="subtract_numbers", description="Subtract second number from first number"),
208
+ FunctionTool.from_defaults(fn=multiply_numbers, name="multiply_numbers", description="Multiply two numbers"),
209
+ FunctionTool.from_defaults(fn=divide_numbers, name="divide_numbers", description="Divide first number by second number"),
210
+ FunctionTool.from_defaults(fn=power_numbers, name="power_numbers", description="Raise first number to the power of second number"),
211
+ FunctionTool.from_defaults(fn=calculate_percentage, name="calculate_percentage", description="Calculate percentage of a value"),
212
+ FunctionTool.from_defaults(fn=get_modulus, name="get_modulus", description="Get the modulus of two numbers"),
213
  ]
214
  self.tools.extend(math_tools)
215
  print("βœ… Math tools initialized")
 
218
 
219
  # Initialize search tools
220
  try:
221
+ # web search
222
+ search_spec = TavilyToolSpec(
223
+ api_key=os.getenv("TAVILY_API_KEY"),
224
+ )
225
+ search_tool = search_spec.to_tool_list()
226
+ self.tools.extend(search_tool)
227
  print("βœ… DuckDuckGo search tool initialized")
228
  except Exception as e:
229
  print(f"Warning: Could not initialize DuckDuckGo tool: {e}")
230
 
231
+ """ try:
232
  # Wikipedia search
233
  wiki_spec = WikipediaToolSpec()
234
+ wiki_tools = FunctionTool.from_defaults(wiki_spec.wikipedia_search, name="wikipedia_search", description="Search Wikipedia for information")
235
  self.tools.extend(wiki_tools)
236
  print("βœ… Wikipedia tool initialized")
237
  except Exception as e:
238
+ print(f"Warning: Could not initialize Wikipedia tool: {e}") """
239
 
240
+ """ try:
241
  # Web requests tool
242
  requests_spec = RequestsToolSpec()
243
  requests_tools = requests_spec.to_tool_list()
244
  self.tools.extend(requests_tools)
245
  print("βœ… Web requests tool initialized")
246
  except Exception as e:
247
+ print(f"Warning: Could not initialize requests tool: {e}") """
248
 
249
  print(f"βœ… Total {len(self.tools)} tools initialized")
250
 
 
294
 
295
  # Store initialization parameters for deferred initialization
296
  self._agent_params = {
297
+ #'code_execute_fn': self.code_executor.execute,
298
  'llm': self.llm,
299
  'tools': self.tools
300
  }
 
315
  pass
316
 
317
  # Create the CodeAct Agent without assuming event loop state
318
+ #self.agent = CodeActAgent(**self._agent_params)
319
+ # Enhanced prompt with specific formatting requirements
320
+ enhanced_prompt = f"""
321
+ You are a helpful assistant tasked with answering questions using a set of tools.
322
+ Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
323
+ FINAL ANSWER: [YOUR FINAL ANSWER].
324
+ YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
325
+ Your answer should only start with "FINAL ANSWER: ", then follows with the answer.
326
+ """
327
+
328
+ self.agent = FunctionAgent(
329
+ tools=self.tools,
330
+ llm=self.llm,
331
+ system_prompt=enhanced_prompt,
332
+ )
333
  print("βœ… CodeAct Agent initialized (deferred)")
334
 
335
  except Exception as e:
 
347
  # Ensure agent is initialized (for deferred initialization)
348
  self._ensure_agent_initialized()
349
 
350
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
351
  if self.agent:
352
  try:
353
  # Use the CodeAct agent for advanced reasoning
354
+ response = await self._async_agent_run(question)
355
  return response
356
  except Exception as e:
357
  print(f"Error with CodeAct agent: {e}")
 
360
  return "FINAL ANSWER: Agent not properly initialized"
361
 
362
 
363
+ async def _async_agent_run(self, question: str) -> str:
364
  """Run the agent asynchronously."""
365
  try:
366
  # Create a fresh context for this run to avoid loop conflicts
367
+ #context = Context(self.agent)
368
+ print("Agent running...")
369
+ print(self.agent)
370
+ handler = self.agent.run(question)
371
+ #return str(handler)
372
+ iterationsNumber = 0
373
  async for event in handler.stream_events():
374
+ iterationsNumber += 1
375
+ # if isinstance(event, ToolCallResult):
376
+ # print(
377
+ # f"\n-----------\nCode execution result:\n{event.tool_output}"
378
+ # )
379
+ if isinstance(event, ToolCall):
380
+ print(f"\n-----------\nevent:\n{event}")
381
  elif isinstance(event, AgentStream):
382
  print(f"{event.delta}", end="", flush=True)
383
+ """ if iterationsNumber > 5:
384
+ print("Too many iterations, stopping...")
385
+ break """
386
+ response = await handler
387
+ print(f'response.response: {response.response.content}')
388
+ return response.response.content.split("FINAL ANSWER: ")[1]
389
  except Exception as e:
390
  print(f"Async agent error: {e}")
391
  return f"FINAL ANSWER: Error in agent processing - {str(e)}"
app.py CHANGED
@@ -75,15 +75,19 @@ async def run_and_submit_all(profile: Optional[gr.OAuthProfile]):
75
  results_log = []
76
  answers_payload = []
77
  print(f"Running agent on {len(questions_data)} questions...")
78
- questions_data = questions_data[:1]
79
  for item in questions_data:
80
  task_id = item.get("task_id")
81
  question_text = item.get("question")
 
82
  if not task_id or question_text is None:
83
  print(f"Skipping item with missing task_id or question: {item}")
84
  continue
85
  try:
86
- submitted_answer = await agent(question_text)
 
 
 
87
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
88
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
89
  except Exception as e:
 
75
  results_log = []
76
  answers_payload = []
77
  print(f"Running agent on {len(questions_data)} questions...")
78
+ #questions_data = questions_data[:5]
79
  for item in questions_data:
80
  task_id = item.get("task_id")
81
  question_text = item.get("question")
82
+ print(f"Running agent on question: {item}")
83
  if not task_id or question_text is None:
84
  print(f"Skipping item with missing task_id or question: {item}")
85
  continue
86
  try:
87
+ if(item.get("file_name") != ""):
88
+ submitted_answer = "N.D."
89
+ else:
90
+ submitted_answer = await agent(question_text)
91
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
92
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
93
  except Exception as e: