Freddolin committed on
Commit 5441081 · verified · 1 Parent(s): b44b8a4

Update agent.py

Files changed (1)
  1. agent.py +34 -25
agent.py CHANGED
@@ -59,22 +59,25 @@ class GaiaAgent:
59    Your task is to carefully and accurately answer questions by using the search tool when necessary.
60    Always provide a complete and correct answer based on the information you find.
61
62    Your available tools:
63    1. search_tavily(query: str): Searches on Tavily and returns relevant results.
64  - Use this tool to find information on the internet that you don't know or need to verify.
65  -
66  - To use a tool, write it in the following exact format:
67  - <TOOL_CODE>
68  - tool_name("your search query")
69  - </TOOL_CODE>
70  - Example:
71  - If you need to know the capital of France:
72  - <TOOL_CODE>
73  - search_tavily("capital of France")
74  - </TOOL_CODE>
75  -
76  - When you have found all the necessary information and are ready to answer the task, provide your final answer.
77  -
78    Task: {task_description}
79    """
80
@@ -82,25 +85,31 @@ class GaiaAgent:
82    current_response = ""
83
84    for i in range(max_iterations):
85  - full_prompt = prompt + current_response + "\n\nWhat is the next step or your final answer?"
86
87    print(f"[{i+1}/{max_iterations}] Generating response with prompt length: {len(full_prompt)}")
88
89    generated_text = self.text_generator(
90    full_prompt,
91  - max_new_tokens=1024, # Keep 1024, or increase if needed
92    num_return_sequences=1,
93    pad_token_id=self.tokenizer.eos_token_id,
94    do_sample=True,
95    top_k=50, top_p=0.95,
96  - temperature=0.8 # Keep 0.8, or adjust if needed
97    )[0]['generated_text']
98
99    new_content = generated_text[len(full_prompt):].strip()
100   print(f"DEBUG - Full generated_text: \n---START---\n{generated_text}\n---END---")
101   print(f"DEBUG - Extracted new_content: '{new_content}'")
102
103 - if "<TOOL_CODE>" in new_content and "</TOOL_CODE>" in new_content:
104   start_index = new_content.find("<TOOL_CODE>") + len("<TOOL_CODE>")
105   end_index = new_content.find("</TOOL_CODE>")
106   tool_call_str = new_content[start_index:end_index].strip()
@@ -112,18 +121,18 @@ class GaiaAgent:
112   query = tool_call_str[len("search_tavily("):-1].strip().strip('"').strip("'")
113   tool_output = search_tavily(query)
114   print(f"Tool result: {tool_output[:200]}...")
115 - current_response += f"\n\nTool Result from {tool_call_str}:\n{tool_output}\n"
116   else:
117   tool_output = f"Unknown tool: {tool_call_str}"
118   print(f"Error: {tool_output}")
119 - current_response += f"\n\n{tool_output}\n"
120   except Exception as tool_e:
121   tool_output = f"Error running tool {tool_call_str}: {tool_e}"
122   print(f"Error: {tool_output}")
123 - current_response += f"\n\n{tool_output}\n"
124   else:
125 - final_answer = new_content
126 - print(f"Final answer from model:\n{final_answer}")
127 - return final_answer.strip()
128
129 - return "Agent could not complete the task within the allowed iterations. Latest response: " + new_content.strip()
 
59    Your task is to carefully and accurately answer questions by using the search tool when necessary.
60    Always provide a complete and correct answer based on the information you find.
61
62  + You must follow a Thought, Tool, Observation, Answer (TTOA) pattern.
63  +
64  + **Thought:** First, carefully consider the task. What information do you need to answer the question? Do you need to use a tool?
65  + **Tool:** If you need to search, use the search_tavily tool. The format is: <TOOL_CODE>search_tavily("your search query")</TOOL_CODE>
66  + **Observation:** After a tool call, you will receive an observation (the tool's output).
67  + **Answer:** Once you have gathered all necessary information, provide your final, concise answer directly.
68  +
69    Your available tools:
70    1. search_tavily(query: str): Searches on Tavily and returns relevant results.
71  +
72  + Example Interaction:
73  + Task: What is the capital of France?
74  + Thought: I need to find the capital of France. I should use the search_tavily tool.
75  + Tool: <TOOL_CODE>search_tavily("capital of France")</TOOL_CODE>
76  + Observation: The capital of France is Paris.
77  + Answer: The capital of France is Paris.
78  +
79  + Now, let's start.
80  +
81    Task: {task_description}
82    """
83
85    current_response = ""
86
87    for i in range(max_iterations):
88  + # Add "Thought:" here to encourage the model to start its reasoning process
89  + full_prompt = prompt + current_response + "\n\nThought:"
90
91    print(f"[{i+1}/{max_iterations}] Generating response with prompt length: {len(full_prompt)}")
92
93    generated_text = self.text_generator(
94    full_prompt,
95  + max_new_tokens=1024, # Keep 1024, or increase to 2048
96    num_return_sequences=1,
97    pad_token_id=self.tokenizer.eos_token_id,
98    do_sample=True,
99    top_k=50, top_p=0.95,
100 + temperature=0.7 # Adjust the temperature to 0.7
101   )[0]['generated_text']
102
103   new_content = generated_text[len(full_prompt):].strip()
104   print(f"DEBUG - Full generated_text: \n---START---\n{generated_text}\n---END---")
105   print(f"DEBUG - Extracted new_content: '{new_content}'")
106
107 + # Check whether the model produced an 'Answer:' line
108 + if "Answer:" in new_content:
109 + final_answer = new_content.split("Answer:", 1)[1].strip()
110 + print(f"Final answer from model:\n{final_answer}")
111 + return final_answer
112 + elif "<TOOL_CODE>" in new_content and "</TOOL_CODE>" in new_content:
113   start_index = new_content.find("<TOOL_CODE>") + len("<TOOL_CODE>")
114   end_index = new_content.find("</TOOL_CODE>")
115   tool_call_str = new_content[start_index:end_index].strip()
121   query = tool_call_str[len("search_tavily("):-1].strip().strip('"').strip("'")
122   tool_output = search_tavily(query)
123   print(f"Tool result: {tool_output[:200]}...")
124 + current_response += f"\n\nObservation: {tool_output}\n"
125   else:
126   tool_output = f"Unknown tool: {tool_call_str}"
127   print(f"Error: {tool_output}")
128 + current_response += f"\n\nObservation: {tool_output}\n"
129   except Exception as tool_e:
130   tool_output = f"Error running tool {tool_call_str}: {tool_e}"
131   print(f"Error: {tool_output}")
132 + current_response += f"\n\nObservation: {tool_output}\n"
133   else:
134 + # If the model produces neither an answer nor a tool call, append whatever it generated
135 + current_response += f"\n\n{new_content}\n"
136 + print(f"Model generated non-tool/non-answer content. Appending: {new_content[:100]}...")
137
138 + return "Agent could not complete the task within the allowed iterations. Latest response: " + new_content.strip() if new_content else "Agent could not complete the task within the allowed iterations. No meaningful content generated."