mnab committed on
Commit
1bfafd9
·
verified ·
1 Parent(s): 38f51a6

Upload agent.py

Browse files
Files changed (1) hide show
  1. agent.py +62 -12
agent.py CHANGED
@@ -59,6 +59,54 @@ def save_and_read_file(content: str, filename: Optional[str] = None) -> str:
59
  return f"File saved to {filepath}. You can read this file to process its contents."
60
 
61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  @tool
63
  def download_file_from_url(url: str, filename: Optional[str] = None) -> str:
64
  """
@@ -209,25 +257,27 @@ search_tool = DuckDuckGoSearchResults()
209
  # temperature=0.2,
210
  # max_tokens=4096,
211
  # )
212
- llm = ChatHuggingFace(
213
- llm=HuggingFaceEndpoint(
214
- repo_id="Qwen/Qwen3-4B",
215
- # repo_id="meta-llama/Llama-3-70B-Instruct",
216
- temperature=0,
217
- huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
218
- ),
219
- verbose=True,
220
- )
221
- # llm = ChatGoogleGenerativeAI(
222
- # model="gemini-2.0-flash-exp", google_api_key=os.environ["GOOGLE_API_KEY"]
223
  # )
 
 
 
224
  tools = [
225
  analyze_csv_file,
226
  analyze_excel_file,
227
  extract_text_from_image,
228
  download_file_from_url,
229
  save_and_read_file,
230
- # search_tool,
 
 
231
  ]
232
  # Bind the tools to the LLM
233
  model_with_tools = llm.bind_tools(tools)
 
59
  return f"File saved to {filepath}. You can read this file to process its contents."
60
 
61
 
62
+ @tool
63
+ def wiki_search(query: str) -> str:
64
+ """Search Wikipedia for a query and return maximum 2 results.
65
+
66
+ Args:
67
+ query: The search query."""
68
+ search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
69
+ formatted_search_docs = "\n\n---\n\n".join(
70
+ [
71
+ f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
72
+ for doc in search_docs
73
+ ]
74
+ )
75
+ return {"wiki_results": formatted_search_docs}
76
+
77
+
78
+ @tool
79
+ def web_search(query: str) -> str:
80
+ """Search Tavily for a query and return maximum 3 results.
81
+
82
+ Args:
83
+ query: The search query."""
84
+ search_docs = TavilySearchResults(max_results=3).invoke(query=query)
85
+ formatted_search_docs = "\n\n---\n\n".join(
86
+ [
87
+ f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
88
+ for doc in search_docs
89
+ ]
90
+ )
91
+ return {"web_results": formatted_search_docs}
92
+
93
+
94
+ @tool
95
+ def arvix_search(query: str) -> str:
96
+ """Search Arxiv for a query and return maximum 3 result.
97
+
98
+ Args:
99
+ query: The search query."""
100
+ search_docs = ArxivLoader(query=query, load_max_docs=3).load()
101
+ formatted_search_docs = "\n\n---\n\n".join(
102
+ [
103
+ f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
104
+ for doc in search_docs
105
+ ]
106
+ )
107
+ return {"arvix_results": formatted_search_docs}
108
+
109
+
110
  @tool
111
  def download_file_from_url(url: str, filename: Optional[str] = None) -> str:
112
  """
 
257
  # temperature=0.2,
258
  # max_tokens=4096,
259
  # )
260
+ # llm = ChatHuggingFace(
261
+ # llm=HuggingFaceEndpoint(
262
+ # repo_id="Qwen/Qwen3-4B",
263
+ # # repo_id="meta-llama/Llama-3-70B-Instruct",
264
+ # temperature=0,
265
+ # huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
266
+ # ),
267
+ # verbose=True,
 
 
 
268
  # )
269
+ llm = ChatGoogleGenerativeAI(
270
+ model="gemini-2.0-flash-exp", google_api_key=os.environ["GOOGLE_API_KEY"]
271
+ )
272
  tools = [
273
  analyze_csv_file,
274
  analyze_excel_file,
275
  extract_text_from_image,
276
  download_file_from_url,
277
  save_and_read_file,
278
+ web_search,
279
+ wiki_search,
280
+ arvix_search,
281
  ]
282
  # Bind the tools to the LLM
283
  model_with_tools = llm.bind_tools(tools)