ZeroTimo commited on
Commit
895b1b5
·
verified ·
1 Parent(s): 6a9f59c

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +33 -18
agent.py CHANGED
@@ -127,8 +127,6 @@ gemini_llm = ChatGoogleGenerativeAI(
127
  model="gemini-2.0-flash",
128
  temperature=0,
129
  max_output_tokens=2048,
130
- ).bind_tools(
131
- [web_search, wiki_search, parse_csv, parse_excel, python_repl]
132
  )
133
 
134
  # ---------------------------------------------------------------------
@@ -136,33 +134,50 @@ gemini_llm = ChatGoogleGenerativeAI(
136
  # ---------------------------------------------------------------------
137
  SYSTEM_PROMPT = SystemMessage(
138
  content=(
139
- "You are a helpful assistant with access to Python tools.\n"
140
- " Think step by step.\n"
141
- " Call a tool when needed reply in this JSON format:\n"
142
- " {\"tool\": \"<tool_name>\", \"tool_input\": { ... }}\n"
143
- " When you have the answer, reply with the answer **only** "
144
- " no prefix, no explanations.\n"
145
- "Answer format rules:\n"
146
- " Single number no separators / units unless required.\n"
147
- " Single string → no articles/abbrev.\n"
148
- " List → comma + single space separated, keep required order.\n"
 
 
149
  )
150
  )
151
 
152
  # ---------------------------------------------------------------------
153
  # 7) LangGraph – Planner + Tools + Router
154
  # ---------------------------------------------------------------------
 
 
 
 
 
 
 
155
  def planner(state: MessagesState):
156
- """LLM-Planner – entscheidet, ob Tool nötig oder Final Answer erreicht."""
157
  msgs = state["messages"]
158
  if msgs[0].type != "system":
159
  msgs = [SYSTEM_PROMPT] + msgs
 
160
  resp = with_backoff(lambda: gemini_llm.invoke(msgs))
161
- finished = (
162
- not getattr(resp, "tool_calls", None) # keine Toolaufrufe
163
- and "\n" not in resp.content # heuristik: kurze Endantwort
164
- )
165
- return {"messages": [resp], "should_end": finished}
 
 
 
 
 
 
 
 
166
 
167
def route(state):
    """Return the next graph node: 'END' when the planner finished, else 'tools'."""
    done = state["should_end"]
    return "END" if done else "tools"
 
127
  model="gemini-2.0-flash",
128
  temperature=0,
129
  max_output_tokens=2048,
 
 
130
  )
131
 
132
  # ---------------------------------------------------------------------
 
134
  # ---------------------------------------------------------------------
135
# System instructions for the planner LLM: defines the textual tool-call
# protocol ("Tool: ... / Input: ...") and the strict answer-formatting rules.
_SYSTEM_PROMPT_TEXT = (
    "You are a helpful assistant with access to several tools.\n"
    "You can think step by step and use tools to find answers.\n\n"
    "When you want to use a tool, write it like this:\n"
    "Tool: <tool_name>\n"
    "Input: <input for the tool>\n\n"
    "Wait for the tool result before continuing.\n"
    "When you know the final answer, reply with the answer **only**.\n"
    "Don't include any prefix, explanation or formatting around the answer.\n"
    "Answer formatting:\n"
    "- For numbers: no units unless requested\n"
    "- For strings: no articles or abbreviations\n"
    "- For lists: comma + space separated, correct order\n"
)

SYSTEM_PROMPT = SystemMessage(content=_SYSTEM_PROMPT_TEXT)
151
 
152
  # ---------------------------------------------------------------------
153
  # 7) LangGraph – Planner + Tools + Router
154
  # ---------------------------------------------------------------------
155
+ def extract_tool_call(text: str) -> tuple[str, str] | None:
156
+ """Parse Gemini output like: 'Tool: xyz\nInput: abc'."""
157
+ match = re.search(r"Tool:\s*(\w+)\s*Input:\s*(.+)", text, re.DOTALL)
158
+ if match:
159
+ return match.group(1).strip(), match.group(2).strip()
160
+ return None
161
+
162
def planner(state: MessagesState):
    """LLM planner node: ask Gemini for the next step, run a tool, or finish.

    Ensures the system prompt heads the conversation, invokes the model
    (with retry/backoff), and — when the reply matches the textual
    'Tool: .../Input: ...' protocol — executes that tool inline and appends
    its result as a new message so the next planner turn can use it.

    Returns a dict with the updated ``messages`` list and a ``should_end``
    flag consumed by ``route``.
    """
    msgs = state["messages"]
    # Also guard the empty-history case: msgs[0] would raise IndexError.
    if not msgs or msgs[0].type != "system":
        msgs = [SYSTEM_PROMPT] + msgs

    resp = with_backoff(lambda: gemini_llm.invoke(msgs))
    content = resp.content.strip()

    parsed = extract_tool_call(content)
    if parsed:
        tool_name, tool_input = parsed
        tool = {t.name: t for t in TOOLS}.get(tool_name)
        if tool:
            result = tool.invoke(tool_input)
            new_msg = HumanMessage(content=f"Tool result:\n{result}")
            return {"messages": msgs + [resp, new_msg], "should_end": False}
        # Unknown tool name: fall through. The multi-line content keeps
        # should_end False below, so the loop gives the model another turn.

    # Simple heuristic: a single-line reply is treated as the final answer.
    finished = "\n" not in content
    return {"messages": msgs + [resp], "should_end": finished}
181
 
182
def route(state):
    """Router edge: stop the graph once the planner flagged completion."""
    if state["should_end"]:
        return "END"
    return "tools"