Freddolin committed on
Commit af82781 · verified · 1 Parent(s): a3f556b

Update agent.py

Files changed (1)
  1. agent.py +35 -14
agent.py CHANGED
@@ -63,7 +63,7 @@ class GaiaAgent:
 
 **Thought:** First, carefully consider the task. What information do you need to answer the question? Do you need to use a tool?
 **Tool:** If you need to search, use the search_tavily tool. The format is: <TOOL_CODE>search_tavily("your search query")</TOOL_CODE>
-**Observation:** After a tool call, you will receive an observation (the tool's output).
+**Observation:** After a tool call, you will receive an observation (the tool's output). This is factual information.
 **Answer:** Once you have gathered all necessary information, provide your final, concise answer directly.
 
 Your available tools:
@@ -82,11 +82,11 @@ class GaiaAgent:
         """
 
         max_iterations = 3
-        current_response = ""
+        current_response_history = ""  # New variable to build up the history
 
         for i in range(max_iterations):
             # Add "Thought:" here to encourage the model to start its reasoning process
-            full_prompt = prompt + current_response + "\n\nThought:"
+            full_prompt = prompt + current_response_history + "\n\nThought:"
 
             print(f"[{i+1}/{max_iterations}] Generating response with prompt length: {len(full_prompt)}")
 
@@ -97,9 +97,10 @@ class GaiaAgent:
                 pad_token_id=self.tokenizer.eos_token_id,
                 do_sample=True,
                 top_k=50, top_p=0.95,
-                temperature=0.7  # Adjust the temperature to 0.7
+                temperature=0.7
             )[0]['generated_text']
 
+            # Extract only the new part of the text (the model's response after the last "Thought:")
             new_content = generated_text[len(full_prompt):].strip()
             print(f"DEBUG - Full generated_text: \n---START---\n{generated_text}\n---END---")
             print(f"DEBUG - Extracted new_content: '{new_content}'")
@@ -108,12 +109,26 @@ class GaiaAgent:
             if "Answer:" in new_content:
                 final_answer = new_content.split("Answer:", 1)[1].strip()
                 print(f"Final answer from model:\n{final_answer}")
-                return final_answer
+                return final_answer  # Return the final answer
+
             elif "<TOOL_CODE>" in new_content and "</TOOL_CODE>" in new_content:
-                start_index = new_content.find("<TOOL_CODE>") + len("<TOOL_CODE>")
-                end_index = new_content.find("</TOOL_CODE>")
-                tool_call_str = new_content[start_index:end_index].strip()
+                # The model generated a tool call.
+                # We don't want to include the model's own "Observation:" or "Tool:" text in the history
+                # before the tool has actually been run. We take only the tool_code string itself.
+
+                tool_call_start = new_content.find("<TOOL_CODE>")
+                tool_call_end = new_content.find("</TOOL_CODE>") + len("</TOOL_CODE>")
 
+                # Try to extract the thought that led to the tool call
+                thought_part = ""
+                if "Thought:" in new_content[:tool_call_start]:
+                    thought_part = new_content.split("Thought:", 1)[1].split("Tool:", 1)[0].strip()
+                elif tool_call_start > 0:  # If there is text before the tool code
+                    thought_part = new_content[:tool_call_start].strip()
+
+                tool_code_section = new_content[tool_call_start:tool_call_end]
+                tool_call_str = tool_code_section.replace("<TOOL_CODE>", "").replace("</TOOL_CODE>", "").strip()
+
                 print(f"Tool call detected: {tool_call_str}")
 
                 try:
@@ -121,18 +136,24 @@ class GaiaAgent:
                         query = tool_call_str[len("search_tavily("):-1].strip().strip('"').strip("'")
                         tool_output = search_tavily(query)
                         print(f"Tool result: {tool_output[:200]}...")
-                        current_response += f"\n\nObservation: {tool_output}\n"
+
+                        # Add the thought, the tool call, and the ACTUAL observation to the history
+                        current_response_history += f"\n\nThought: {thought_part}\nTool: {tool_code_section}\nObservation: {tool_output}\n"
                     else:
                         tool_output = f"Unknown tool: {tool_call_str}"
                         print(f"Error: {tool_output}")
-                        current_response += f"\n\nObservation: {tool_output}\n"
+                        current_response_history += f"\n\nThought: {thought_part}\nTool: {tool_code_section}\nObservation: {tool_output}\n"
                 except Exception as tool_e:
                     tool_output = f"Error running tool {tool_call_str}: {tool_e}"
                     print(f"Error: {tool_output}")
-                    current_response += f"\n\nObservation: {tool_output}\n"
+                    current_response_history += f"\n\nThought: {thought_part}\nTool: {tool_code_section}\nObservation: {tool_output}\n"
             else:
-                # If the model gives neither an answer nor a tool call but generates something else
-                current_response += f"\n\n{new_content}\n"
+                # The model generated neither a tool call nor a final answer.
+                # Append what it actually generated to the history so it can continue its reasoning.
+                current_response_history += f"\n\nThought: {new_content}\n"
                 print(f"Model generated non-tool/non-answer content. Appending: {new_content[:100]}...")
 
-        return "Agent could not complete the task within the allowed iterations. Latest response: " + new_content.strip() if new_content else "Agent could not complete the task within the allowed iterations. No meaningful content generated."
+        # If max_iterations is reached without a final answer
+        return "Agent could not complete the task within the allowed iterations. Latest relevant content: " + \
+               (current_response_history[-500:] if current_response_history else "No meaningful content generated.")
+
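For reference, a minimal sketch of what the renamed current_response_history accumulates after one successful search_tavily iteration under the new code. The thought, query, and observation strings below are hypothetical examples, not output from the commit; only the Thought/Tool/Observation format follows the f-string the diff appends after a tool run.

    # Hypothetical illustration (not part of the commit) of the history format
    # that the updated loop appends after a tool has actually been executed.
    prompt = "...system prompt with tool instructions..."  # stands in for the agent's real prompt
    current_response_history = (
        "\n\nThought: I need to find the capital of Australia."
        '\nTool: <TOOL_CODE>search_tavily("capital of Australia")</TOOL_CODE>'
        "\nObservation: Canberra is the capital city of Australia ...\n"
    )
    # The next iteration re-prompts with the accumulated history plus a fresh "Thought:" cue:
    full_prompt = prompt + current_response_history + "\n\nThought:"
    print(full_prompt)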