Delete reasoning_system.py
Browse files- reasoning_system.py +0 -668
reasoning_system.py
DELETED
@@ -1,668 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Reasoning System for GAIA-Ready AI Agent
|
3 |
-
|
4 |
-
This module provides advanced reasoning capabilities for the AI agent,
|
5 |
-
implementing the ReAct approach (Reasoning + Acting) and supporting
|
6 |
-
the Think-Act-Observe workflow.
|
7 |
-
"""
|
8 |
-
|
9 |
-
import os
|
10 |
-
import json
|
11 |
-
from typing import List, Dict, Any, Optional, Union, Tuple
|
12 |
-
from datetime import datetime
|
13 |
-
import traceback
|
14 |
-
import re
|
15 |
-
|
16 |
-
try:
    from smolagents import Agent, InferenceClientModel, Tool
except ImportError:
    # Best-effort auto-install fallback. Invoke pip through the running
    # interpreter (python -m pip) so the package lands in the *active*
    # environment rather than whatever `pip` happens to be on PATH.
    import subprocess
    import sys
    subprocess.check_call([sys.executable, "-m", "pip", "install", "smolagents"])
    from smolagents import Agent, InferenceClientModel, Tool
|
22 |
-
|
23 |
-
|
24 |
-
class ReasoningSystem:
    """
    Advanced reasoning system implementing the ReAct approach
    (Reasoning + Acting) through a Think-Act-Observe workflow.

    Each phase renders a prompt template, sends it to the wrapped agent,
    records the exchange in the memory manager, and parses the agent's
    free-text reply back into structured fields.

    Collaborator contract (duck-typed, not enforced):
      agent          -- exposes ``chat(prompt) -> str`` and ``tools`` (an
                        iterable of objects with ``.name`` and ``.function``).
      memory_manager -- exposes ``add_to_short_term(dict)``,
                        ``add_to_long_term(dict)`` and
                        ``get_relevant_memories(query) -> list[dict]``.
    """

    def __init__(self, agent, memory_manager):
        self.agent = agent
        self.memory_manager = memory_manager
        # Informational iteration budget; execute_reasoning_cycle takes its
        # own max_iterations argument (default 5, kept in sync with this).
        self.max_reasoning_depth = 5
        self.reasoning_templates = self._load_reasoning_templates()

    # ------------------------------------------------------------------
    # Templates
    # ------------------------------------------------------------------

    def _load_reasoning_templates(self) -> Dict[str, str]:
        """Return the prompt templates for each stage of the workflow."""
        return {
            "think": """
# Task Analysis and Planning

## Task
{query}

## Relevant Context
{context}

## Analysis
Let me analyze this task step by step:
1. What is being asked?
2. What information do I need?
3. What challenges might I encounter?

## Plan
Based on my analysis, here's my plan:
1. [First step]
2. [Second step]
3. [Third step]
...

## Tools Needed
To accomplish this task, I'll need:
- [Tool 1]: For [purpose]
- [Tool 2]: For [purpose]
...

## Expected Outcome
If successful, I expect to:
[Description of expected outcome]
""",
            "act": """
# Action Execution

## Current Task
{query}

## Current Plan
{plan}

## Previous Results
{previous_results}

## Next Action
Based on my plan and previous results, I'll now:
1. Use the [tool name] tool
2. With parameters: [parameters]
3. Purpose: [why this action is needed]

## Execution
[Detailed description of how I'll execute this action]
""",
            "observe": """
# Result Analysis

## Current Task
{query}

## Action Taken
{action}

## Results Obtained
{results}

## Analysis
Let me analyze these results:
1. What did I learn?
2. Does this answer the original question?
3. Are there any inconsistencies or gaps?

## Next Steps
Based on my analysis:
- [Next step recommendation]
- [Alternative approach if needed]

## Progress Assessment
Task completion status: [percentage]%
[Explanation of current progress]
""",
        }

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _record_short_term(self, entry_type: str, content: str) -> None:
        """Store a timestamped entry of the given type in short-term memory."""
        self.memory_manager.add_to_short_term({
            "type": entry_type,
            "content": content,
            "timestamp": datetime.now().isoformat(),
        })

    def _find_tool(self, tool_name: str):
        """Return the agent tool whose name matches, or None if absent."""
        for tool in self.agent.tools:
            if tool.name == tool_name:
                return tool
        return None

    # ------------------------------------------------------------------
    # Think phase
    # ------------------------------------------------------------------

    def think(self, query: str) -> Dict[str, Any]:
        """
        Analyze the task and plan an approach (Think phase).

        Args:
            query: The user's query or task.

        Returns:
            Dict with keys ``raw_response``, ``analysis``, ``plan``,
            ``tools_needed`` and ``expected_outcome``. On any agent error
            a generic fallback plan is returned instead of raising.
        """
        relevant_memories = self.memory_manager.get_relevant_memories(query)

        # Format the retrieved memories into a bulleted context section.
        if relevant_memories:
            context = "\n".join(
                f"- [{memory.get('type', 'general').upper()}] "
                f"(Relevance: {memory.get('relevance_score', 0):.2f}): "
                f"{memory.get('content', '')}"
                for memory in relevant_memories
            )
        else:
            context = "No relevant prior knowledge found."

        thinking_prompt = self.reasoning_templates["think"].format(
            query=query, context=context
        )

        try:
            response = self.agent.chat(thinking_prompt)
            self._record_short_term("thinking", response)
            return {
                "raw_response": response,
                "analysis": self._extract_section(response, "Analysis"),
                "plan": self._extract_section(response, "Plan"),
                "tools_needed": self._extract_section(response, "Tools Needed"),
                "expected_outcome": self._extract_section(response, "Expected Outcome"),
            }
        except Exception as e:
            error_msg = f"Error during thinking phase: {str(e)}\n{traceback.format_exc()}"
            print(error_msg)
            self._record_short_term("error", error_msg)
            # Degrade gracefully: hand back a minimal generic plan so the
            # caller's loop can keep going.
            return {
                "raw_response": "Error occurred during thinking phase.",
                "analysis": "Could not analyze the task due to an error.",
                "plan": "1. Try a simpler approach\n2. Break down the task into smaller steps",
                "tools_needed": "web_search: To find basic information",
                "expected_outcome": "Partial answer to the query",
            }

    # ------------------------------------------------------------------
    # Act phase
    # ------------------------------------------------------------------

    def act(self, plan: Dict[str, Any], query: str,
            previous_results: str = "") -> Dict[str, Any]:
        """
        Execute one action based on the plan (Act phase).

        Args:
            plan: The plan dict produced by :meth:`think`.
            query: The original query.
            previous_results: Concatenated results from earlier actions.

        Returns:
            Dict with keys ``tool``, ``parameters``, ``result``, ``success``
            and ``error`` (plus ``fallback`` when the web_search fallback
            path was taken). Never raises; errors are reported in the dict.
        """
        action_prompt = self.reasoning_templates["act"].format(
            query=query,
            plan=plan.get("plan", "No plan available."),
            previous_results=previous_results if previous_results else "No previous results.",
        )

        try:
            action_response = self.agent.chat(action_prompt)
            self._record_short_term("action_planning", action_response)

            tool_info = self._extract_tool_info(action_response)
            if not tool_info:
                # The free-form reply didn't name a tool; ask again with a
                # strict TOOL:/PARAMETERS: answer format.
                direct_prompt = f"""
Based on the task "{query}" and the plan:
{plan.get('plan', 'No plan available.')}

Which specific tool should I use next and with what parameters?
Respond in this format:
TOOL: [tool name]
PARAMETERS: [parameter1=value1, parameter2=value2, ...]
"""
                tool_info = self._extract_tool_info(self.agent.chat(direct_prompt))

            if tool_info:
                return self._execute_tool(tool_info)
            return self._fallback_web_search(query)
        except Exception as e:
            error_msg = f"Error during action phase: {str(e)}\n{traceback.format_exc()}"
            print(error_msg)
            self._record_short_term("error", error_msg)
            return {
                "tool": "none",
                "parameters": "none",
                "result": f"Error during action planning: {str(e)}",
                "success": False,
                "error": str(e),
            }

    def _execute_tool(self, tool_info: Dict[str, Any]) -> Dict[str, Any]:
        """Run the selected tool and report the outcome as an action-result dict."""
        tool_name = tool_info["tool"]
        tool_params = tool_info["parameters"]

        matching_tool = self._find_tool(tool_name)
        if matching_tool is None:
            error_msg = f"Tool '{tool_name}' not found."
            print(error_msg)
            self._record_short_term("error", error_msg)
            return {
                "tool": tool_name,
                "parameters": tool_params,
                "result": f"Error: Tool '{tool_name}' not found.",
                "success": False,
                "error": "Tool not found",
            }

        try:
            # dict params are spread as keyword args; anything else is
            # handed over as a single positional argument.
            if isinstance(tool_params, dict):
                result = matching_tool.function(**tool_params)
            else:
                result = matching_tool.function(tool_params)

            self._record_short_term(
                "action_result",
                f"Tool: {tool_name}\nParameters: {tool_params}\nResult: {result}",
            )
            return {
                "tool": tool_name,
                "parameters": tool_params,
                "result": result,
                "success": True,
                "error": None,
            }
        except Exception as e:
            error_msg = f"Error executing tool {tool_name}: {str(e)}\n{traceback.format_exc()}"
            print(error_msg)
            self._record_short_term("error", error_msg)
            return {
                "tool": tool_name,
                "parameters": tool_params,
                "result": f"Error: {str(e)}",
                "success": False,
                "error": str(e),
            }

    def _fallback_web_search(self, query: str) -> Dict[str, Any]:
        """Last resort when no tool could be selected: try a raw web search."""
        error_msg = "Could not determine which tool to use."
        print(error_msg)
        self._record_short_term("error", error_msg)

        try:
            web_search_tool = self._find_tool("web_search")
            if web_search_tool:
                result = web_search_tool.function(query)
                return {
                    "tool": "web_search",
                    "parameters": query,
                    "result": result,
                    "success": True,
                    "error": None,
                    "fallback": True,
                }
            return {
                "tool": "none",
                "parameters": "none",
                "result": "Could not determine which tool to use and web_search fallback not available.",
                "success": False,
                "error": "No tool selected",
            }
        except Exception as e:
            return {
                "tool": "web_search",
                "parameters": query,
                "result": f"Error in fallback web search: {str(e)}",
                "success": False,
                "error": str(e),
                "fallback": True,
            }

    # ------------------------------------------------------------------
    # Observe phase
    # ------------------------------------------------------------------

    def observe(self, action_result: Dict[str, Any], plan: Dict[str, Any],
                query: str) -> Dict[str, Any]:
        """
        Analyze action results and decide on next steps (Observe phase).

        Args:
            action_result: Dict returned by :meth:`act`.
            plan: The plan dict from :meth:`think` (currently unused beyond
                  keeping a symmetric phase signature).
            query: The original query.

        Returns:
            Dict with ``raw_response``, ``analysis``, ``next_steps``,
            ``progress`` and ``continue`` (False once the observation
            contains a completion phrase). Never raises.
        """
        observation_prompt = self.reasoning_templates["observe"].format(
            query=query,
            action=f"Tool: {action_result.get('tool', 'none')}\nParameters: {action_result.get('parameters', 'none')}",
            results=action_result.get('result', 'No results.'),
        )

        try:
            observation_response = self.agent.chat(observation_prompt)
            self._record_short_term("observation", observation_response)

            analysis = self._extract_section(observation_response, "Analysis")
            next_steps = self._extract_section(observation_response, "Next Steps")
            progress = self._extract_section(observation_response, "Progress Assessment")

            # Heuristic completion check on the raw observation text.
            completion_phrases = (
                "task complete", "question answered", "fully answered",
                "100%", "task is complete", "fully resolved",
            )
            lowered = observation_response.lower()
            continue_execution = not any(p in lowered for p in completion_phrases)

            if not continue_execution:
                # Persist the concluding observation as a high-importance answer.
                self.memory_manager.add_to_long_term({
                    "type": "final_answer",
                    "query": query,
                    "content": observation_response,
                    "timestamp": datetime.now().isoformat(),
                    "importance": 0.8,  # High importance for final answers
                })

            return {
                "raw_response": observation_response,
                "analysis": analysis,
                "next_steps": next_steps,
                "progress": progress,
                "continue": continue_execution,
            }
        except Exception as e:
            error_msg = f"Error during observation phase: {str(e)}\n{traceback.format_exc()}"
            print(error_msg)
            self._record_short_term("error", error_msg)
            # Default observation: keep iterating rather than aborting.
            return {
                "raw_response": f"Error occurred during observation phase: {str(e)}",
                "analysis": "Could not analyze the results due to an error.",
                "next_steps": "Try a different approach or tool.",
                "progress": "Unknown due to error.",
                "continue": True,
            }

    # ------------------------------------------------------------------
    # Response parsing
    # ------------------------------------------------------------------

    def _extract_section(self, text: str, section_name: str) -> str:
        """Extract a named markdown-ish section from the agent's reply.

        Accepts ``# Heading``, ``**Heading**`` or bare ``Heading:`` styles;
        falls back to a looser pattern, then to a "not found" message.
        """
        pattern = rf"(?:^|\n)(?:#+\s*{re.escape(section_name)}:?|\*\*{re.escape(section_name)}:?\*\*|{re.escape(section_name)}:?)\s*(.*?)(?:\n(?:#+\s*|$)|\Z)"
        match = re.search(pattern, text, re.DOTALL | re.IGNORECASE)
        if match:
            return match.group(1).strip()

        # Lenient second attempt: stop at a blank line or a new capitalized line.
        pattern = rf"{re.escape(section_name)}:?\s*(.*?)(?:\n\n|\n[A-Z]|\Z)"
        match = re.search(pattern, text, re.DOTALL | re.IGNORECASE)
        if match:
            return match.group(1).strip()

        return f"No {section_name.lower()} found."

    def _extract_tool_info(self, text: str) -> Optional[Dict[str, Any]]:
        """Parse ``TOOL:`` / ``PARAMETERS:`` lines out of the agent's reply.

        Returns None when no tool is named. Parameters in ``key=value`` form
        become a dict; otherwise the raw string is kept as a single argument.
        """
        tool_match = re.search(r"(?:TOOL|Tool|tool):\s*(\w+)", text)
        if not tool_match:
            return None
        tool_name = tool_match.group(1).strip()

        params_match = re.search(
            r"(?:PARAMETERS|Parameters|parameters):\s*(.*?)(?:\n\n|\n[A-Z]|\Z)",
            text, re.DOTALL,
        )
        if not params_match:
            # No parameters found -> call the tool with no arguments.
            return {"tool": tool_name, "parameters": {}}

        params_text = params_match.group(1).strip()
        if "=" in params_text:
            params_dict = {
                key.strip(): value.strip()
                for key, value in re.findall(r"(\w+)\s*=\s*([^,\n]+)", params_text)
            }
            return {"tool": tool_name, "parameters": params_dict}
        # Single free-form string parameter.
        return {"tool": tool_name, "parameters": params_text}

    # ------------------------------------------------------------------
    # Full cycle
    # ------------------------------------------------------------------

    def execute_reasoning_cycle(self, query: str, max_iterations: int = 5) -> str:
        """
        Execute a complete Think-Act-Observe reasoning cycle.

        Args:
            query: The user's query or task.
            max_iterations: Maximum number of Think-Act-Observe iterations.

        Returns:
            Final answer string (a partial answer when the iteration
            budget is exhausted).
        """
        # BUG FIX: keep the pristine task text separately. The loop rewrites
        # ``query`` with progress context each iteration; previously those
        # wrappers nested recursively ("Original task: Original task: ...")
        # and long-term records stored the wrapped text instead of the task.
        original_query = query
        self._record_short_term("query", original_query)

        iteration = 0
        final_answer = None
        all_results: List[Dict[str, Any]] = []
        observation: Optional[Dict[str, Any]] = None  # guards max_iterations <= 0
        previous_results = ""

        while iteration < max_iterations:
            print(f"Iteration {iteration + 1}/{max_iterations}")

            print("Thinking...")
            plan = self.think(query)

            print("Acting...")
            # str() guards against tools returning non-string results.
            previous_results = "\n".join(str(r.get("result", "")) for r in all_results)
            action_result = self.act(plan, query, previous_results)
            all_results.append(action_result)

            print("Observing...")
            observation = self.observe(action_result, plan, query)

            if not observation["continue"]:
                # The observation signalled completion: synthesize the answer.
                final_answer_prompt = f"""
TASK: {original_query}

REASONING PROCESS:
{plan.get('raw_response', 'No thinking process available.')}

ACTIONS TAKEN:
{', '.join(f"{r.get('tool', 'unknown')}({r.get('parameters', '')})" for r in all_results)}

RESULTS:
{previous_results}
{action_result.get('result', '')}

OBSERVATION:
{observation.get('raw_response', 'No observation available.')}

Based on all the above, provide a comprehensive final answer to the original task.
"""
                final_answer = self.agent.chat(final_answer_prompt)
                self.memory_manager.add_to_long_term({
                    "type": "final_answer",
                    "query": original_query,
                    "content": final_answer,
                    "timestamp": datetime.now().isoformat(),
                    "importance": 0.9,  # Very high importance
                })
                break

            # Carry the observation forward, always anchored on the
            # unmodified original task.
            query = f"""
Original task: {original_query}

Progress so far:
{observation.get('raw_response', 'No observation available.')}

Please continue solving this task.
"""
            iteration += 1

        if final_answer is None:
            # Budget exhausted (or max_iterations <= 0): return a partial
            # answer built from the last observation, if any.
            last_observation = (observation or {}).get(
                "raw_response", "No final observation available."
            )
            final_answer = f"""
I've spent {max_iterations} iterations trying to solve this task.
Here's my best answer based on what I've learned:

{last_observation}

Note: This answer may be incomplete as I reached the maximum number of iterations.
"""
            self.memory_manager.add_to_long_term({
                "type": "partial_answer",
                "query": original_query,
                "content": final_answer,
                "timestamp": datetime.now().isoformat(),
                "importance": 0.6,  # Medium importance for partial answers
            })

        return final_answer
|
633 |
-
|
634 |
-
|
635 |
-
# Example usage
|
636 |
-
if __name__ == "__main__":
    # Self-contained demo: exercise the reasoning loop with mock
    # collaborators so no model backend, network access or working
    # smolagents install is required. The previous version re-imported
    # smolagents and called Tool(name=..., description=..., function=...),
    # but smolagents' Tool is an abstract base configured by subclassing
    # (or the @tool decorator), not by constructor kwargs -- so the demo
    # crashed before running. Local mocks mirror the attributes
    # ReasoningSystem actually reads (.name / .function).

    class MockTool:
        """Minimal stand-in exposing the .name/.function attributes used by ReasoningSystem."""

        def __init__(self, name, description, function):
            self.name = name
            self.description = description
            self.function = function

    class MockAgent:
        """Fake agent: canned chat replies plus two callable tools."""

        def __init__(self):
            self.tools = [
                MockTool(
                    name="web_search",
                    description="Search the web",
                    function=lambda x: f"Search results for: {x}",
                ),
                MockTool(
                    name="calculator",
                    description="Calculate",
                    # NOTE(review): eval() is tolerable only in this offline
                    # demo -- never evaluate untrusted input this way.
                    function=lambda x: f"Result: {eval(x)}",
                ),
            ]

        def chat(self, message):
            return f"Response to: {message[:50]}..."

    class MockMemoryManager:
        """Fake memory manager that just logs what would be stored."""

        def add_to_short_term(self, item):
            print(f"Added to short-term: {item['type']}")

        def add_to_long_term(self, item):
            print(f"Added to long-term: {item['type']}")

        def get_relevant_memories(self, query):
            return []

    # Wire the mocks together and run one full reasoning cycle.
    agent = MockAgent()
    memory_manager = MockMemoryManager()
    reasoning = ReasoningSystem(agent, memory_manager)

    result = reasoning.execute_reasoning_cycle("What is 2+2?")
    print(f"\nFinal result: {result}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|