Edit tools

Commit c657a71 · Parent(s): 1ba738b

Files changed:
- app.py +68 -121
- downloaded_files/image.png +0 -0
- tools/answer_excel.py +24 -0
- tools/answer_question.py +38 -0
- tools/answer_question_from_file.py +59 -0
- tools/audio_tool.py +52 -0
- tools/chess_tool.py +33 -0
- tools/code_exec.py +84 -0
- tools/code_gen.py +42 -0
- tools/download_file.py +83 -0
- tools/reverse_string.py +8 -0
- tools/web_search.py +52 -0
- tools/wikipedia.py +50 -0
- tools/youtube_transcript.py +29 -0
app.py
CHANGED

@@ -16,12 +16,26 @@ from io import BytesIO
 import PyPDF2
 import base64
 from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_openai import AzureChatOpenAI
 from langchain_core.tools import tool
 from dotenv import load_dotenv
 import time
 from langchain_community.tools import DuckDuckGoSearchRun
 from langchain_community.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
 from langchain_community.tools import BraveSearch
+from tools.answer_question_from_file import AnswerQuestionFromFileTool
+from tools.answer_question import AnswerQuestionTool
+from tools.download_file import DownloadFile
+from tools.reverse_string import ReverseString
+from tools.web_search import WebSearchTool
+from tools.wikipedia import WikipediaTool
+from tools.youtube_transcript import YoutubeTranscriptTool
+from tools.code_exec import PythonExecutionTool
+from tools.code_gen import CodeGenTool
+from tools.answer_excel import AnswerExcelTool
+from contextlib import redirect_stdout
+from tools.chess_tool import ChessTool
+from tools.audio_tool import AudioTool
 
 load_dotenv(".env", override=True)
 BRAVE_API_KEY = os.getenv("BRAVE_API")
@@ -32,6 +46,8 @@ class State(TypedDict):
     parsed_file: Optional[str]
     messages: Annotated[list[AnyMessage], add_messages]
     parsed_file_message: dict
+    question: str
+    response: str
 
 # (Keep Constants as is)
 # --- Constants ---
@@ -44,9 +60,10 @@ class BasicAgent:
         # tools initialization
         #internet_search = DuckDuckGoSearchRun()
 
-        tools = [
+        tools = [CodeGenTool(), PythonExecutionTool(temp_dir="./"), YoutubeTranscriptTool(),
+                 AnswerQuestionFromFileTool(), AnswerQuestionTool(), DownloadFile(),
+                 ReverseString(), WebSearchTool(), WikipediaTool(), AnswerExcelTool(), ChessTool(), AudioTool()]
 
-        #llm = ChatOllama(model="llama3.2", temperature=0)
         llm = ChatGoogleGenerativeAI(
             model="gemini-2.0-flash",
             temperature=0)
@@ -56,6 +73,7 @@ class BasicAgent:
 
         builder.add_node("assistant", self.assistant)
         builder.add_node("tools", ToolNode(tools))
+        builder.add_node("final_answer", BasicAgent.final_answer)
         #builder.add_node("download_file", BasicAgent.download_file_node)
         #builder.add_node("parse_img", BasicAgent.parse_image)
         #builder.add_node("parse_pdf", BasicAgent.parse_pdf)
@@ -68,8 +86,17 @@ class BasicAgent:
         #builder.add_edge("parse_img", "assistant")
         #builder.add_edge("parse_pdf", "assistant")
         #builder.add_edge("parse_audio", "assistant")
-        builder.add_conditional_edges(
+        builder.add_conditional_edges(
+            "assistant",
+            tools_condition,
+            path_map={
+                "tools": "tools",
+                "__end__": "final_answer"
+            }
+        )
+
         builder.add_edge("tools", "assistant")
+        builder.add_edge("final_answer", END)
 
         self.react_graph = builder.compile()
 
@@ -78,26 +105,17 @@ class BasicAgent:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
 
         messages = [HumanMessage(question)]
-        messages = self.react_graph.invoke({"messages": messages, "file_path": file_name})
+        messages = self.react_graph.invoke({"messages": messages, "file_path": file_name, "question": question})
 
-
-
+        with open(f'messages_{file_name}.txt', 'w', encoding='utf-8') as out:
+            with redirect_stdout(out):
+                for m in messages['messages']:
+                    m.pretty_print()
 
-
-        final_answer = messages["messages"][-1].content
+        final_answer = messages["messages"][-1].content.strip()
         print(f"Final answer is {final_answer}")
         return final_answer
 
-    def search_tool(query: str):
-        """
-        This function looks for the provided query online and gives you information about it.
-        """
-
-        search_tool = BraveSearch.from_api_key(api_key=BRAVE_API_KEY, search_kwargs={"count": 3})
-        res = search_tool.run(query)
-
-        return res
-
 
     def assistant(self, state: State):
         if state["file_path"]:
@@ -108,10 +126,22 @@ class BasicAgent:
             file_name = None
 
         prompt = f"""
-        You are a
-
-        If you
-
+        You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
+        YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
+        If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
+
+        You should read the prompt thoroughly. For example, if they ask you for athletes with the least number of athletes, you must be careful to what they ask (in case of tie, the country which is the first in alphabetical order.)
+
+        You MUST ALWAYS PICK WIKIPEDIA TOOL BEFORE WEB SEARCH.
+
+        If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
+
+        YOU SHOULD **NEVER** MAKE ANY ASSUMPTION AND USE THE TOOLS PROVIDED!
+
+        You are given this file: {file_name} with the extension: {file_extension}.
+        If a file is provided, the FIRST thing you MUST do is call the download_file tool!!
+        The format must be {DEFAULT_API_URL}/files/{file_name}
+        DO NOT PASS THE EXTENSION!!
         """
 
         sys_msg = SystemMessage(content=prompt)
@@ -119,119 +149,34 @@ class BasicAgent:
             time.sleep(5)
         return {"messages": [self.llm_with_tools.invoke([sys_msg] + state["messages"])]}
 
-    def
-        """
-
-
-        return state["file_path"] != ""
+    def final_answer(state: State):
+        system_prompt = f"""
+        You will be given an answer and a question. You MUST remove EVERYTHING not needed from the answer and answer the question exactly.
+        That is if you are being asked the number of something, you must not return the thought process, but just the number X.
 
-
-
+        You must be VERY CAREFUL!! Of what the question asks.
+        For example if they ask you to give the full name of a city without abbreviations you should stick to it (for example, St. Petersburg should be Saint Petersburg).
         """
-        This tool downloads a file (image, pdf, etc.) given the name of the file. The url for the request will be composed in the function so ONLY the name of the file should be passed in.
-
-        You may have to download a file in 2 different scenarios:
-        - A file given already as part of the task. In this case the format of the url must be: {DEFAULT_API_URL}/files/{file_name} THE EXTENSION OF THE FILE MUST NOT(!!) BE INCLUDED!
-        - A url retrieved from the internet in the format https://some_url. In that case, you simply need to provide the url of the file that needs to be retrieved.
 
-
-
+        human_prompt = f"""
+        Question: {state['question']}
 
-
-        A tuple made of:
-        1) The file in bytes
-        2) The file in Base64 encoding
-        3) The result of the call
+        Answer: {state['messages'][-1]}
         """
-        #task_id = file_.split(".")[0]
-        #print("Downloading the file")
-
-        response = requests.get(file_url)
-        if response.status_code == 200:
-            msg = "File downloaded successfully!!"
-            print(msg)
-            file = response.content
-            b64_file = base64.b64encode(file).decode("utf-8")
-        else:
-            msg = "There was an error downloading the file."
-            print(msg)
-            file = None
-            b64_file = None
-
-        return {
-            "bytes": file,
-            "base64": b64_file,
-            "status": response.status_code,
-        }
-
-    def determine_file_type(state: State) -> ["pdf", "img", "audio", "end"]:
-        if state["file"] is None:
-            return "end"
-
-        file_extension = state["file_path"].split(".")[1]
-        if file_extension in ["png", "jpg"]:
-            return "img"
-        elif file_extension == "pdf":
-            return "pdf"
-        elif file_extension in ["mp3", "wav"]:
-            return "audio"
-
-        return "end"
-
-    def answer_question_tool_from_file(question: str, encoded_file: str, file_extension: str) -> str:
-        """
-        This tool allows you to answer a question taking into account information that were provided inside a file.
 
-
-        The question that needs to be answered.
-        The file from which you want to get some information.
-        The file extension of the file that is being processed.
-        """
+        human_msg = HumanMessage(content=human_prompt)
 
-
-            message = {"type": "image_url", "image_url": f"data:image/png;base64,{encoded_file}"}
-        elif file_extension == "pdf":
-            message = {"type": "image_url", # Assuming the LLM accepts PDF under this key, you might need to verify this
-                       "image_url": f"data:application/pdf;base64,{encoded_file}"
-                       }
-        elif file_extension in ["mp3", "wav"]:
-            message = {"type": "media", "data": encoded_file, # Use base64 string directly
-                       "mime_type": "audio/mpeg",
-                       }
-        else:
-            message = {"type": "text", "text": "The file is not supported."}
+        sys_msg = SystemMessage(content=system_prompt)
 
-
-            content=[
-                {"type": "text", "text": question},
-                message,
-            ]
-        )
+        time.sleep(1)
 
         llm = ChatGoogleGenerativeAI(
             model="gemini-2.0-flash",
             temperature=0)
 
-        response = llm.invoke([
-
-        return response
-
-
-    def revert_string(input_str: str) -> str:
-        """
-        This function inverts the order of the characters within a sentence. It is particularly useful if you can't understand the content
-        in any language.
-
-        Args:
-            input_str: the string to invert
-
-        Returns:
-            The inverted string
-
-        """
-
-        return input_str[::-1]
+        response = llm.invoke([sys_msg, human_msg])
 
+        return {"messages": state["messages"] + [response]}
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
@@ -289,6 +234,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
     print(f"Running agent on {len(questions_data)} questions...")
     for item in questions_data:
         task_id = item.get("task_id")
+        #if task_id != "99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3":
+        #    continue
        question_text = item.get("question")
        file_name = item.get("file_name")
        if not task_id or question_text is None:
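
For reference, a minimal, self-contained sketch of the routing pattern this diff wires up: the assistant either calls tools or, once it stops emitting tool calls, is diverted to a final_answer cleanup node. This sketch is not part of the commit; fake_assistant, reverse_string_tool, and the trivial final_answer below are illustrative stand-ins for the real LLM-backed node and tool list.

from typing import Annotated, TypedDict
from langchain_core.messages import AIMessage, AnyMessage, HumanMessage
from langchain_core.tools import tool
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition

class State(TypedDict):
    messages: Annotated[list[AnyMessage], add_messages]

@tool
def reverse_string_tool(text: str) -> str:
    """Reverse a string."""
    return text[::-1]

def fake_assistant(state: State):
    # A real assistant would call llm_with_tools.invoke(...); here we just answer.
    return {"messages": [AIMessage(content="FINAL ANSWER: done")]}

def final_answer(state: State):
    # Stand-in for the LLM-based cleanup node added in this commit.
    return {"messages": [AIMessage(content=state["messages"][-1].content.strip())]}

builder = StateGraph(State)
builder.add_node("assistant", fake_assistant)
builder.add_node("tools", ToolNode([reverse_string_tool]))
builder.add_node("final_answer", final_answer)
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    tools_condition,
    path_map={"tools": "tools", "__end__": "final_answer"},
)
builder.add_edge("tools", "assistant")
builder.add_edge("final_answer", END)
graph = builder.compile()

print(graph.invoke({"messages": [HumanMessage("hi")]})["messages"][-1].content)

Because tools_condition returns "__end__" when the last AI message carries no tool calls, remapping that key in path_map is what routes the finished answer through the cleanup node instead of ending the graph immediately.
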
downloaded_files/image.png
ADDED
(binary file)
tools/answer_excel.py
ADDED
@@ -0,0 +1,24 @@
from langchain_core.tools.base import BaseTool
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
import pandas as pd
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.agents.agent_types import AgentType

class AnswerExcelTool(BaseTool):
    name: str = "answer_excel_tool"
    description: str = "Given the path to an Excel file and a query, this tool tries to get an answer by querying the Excel file."

    def _run(self, query: str, file_path: str) -> str:
        df = pd.read_excel(file_path)

        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)

        agent_executor = create_pandas_dataframe_agent(
            llm,
            df,
            agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
            verbose=True,
            allow_dangerous_code=True  # IMPORTANT: understand the risks of executing generated code
        )

        return agent_executor.invoke(query)["output"]
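
A hypothetical usage sketch, not part of the commit: multi-argument BaseTool subclasses like this one are invoked with a dict of arguments. The file path and query below are placeholders, and Gemini credentials are assumed to be configured in .env.

from tools.answer_excel import AnswerExcelTool

excel_tool = AnswerExcelTool()
answer = excel_tool.invoke({
    "query": "What is the total of the Sales column?",      # illustrative query
    "file_path": "./downloaded_files/report.xlsx",          # hypothetical local path
})
print(answer)
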
tools/answer_question.py
ADDED
@@ -0,0 +1,38 @@
from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic import PrivateAttr
from langchain_core.tools.base import BaseTool
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage

class AnswerQuestionTool(BaseTool):
    name: str = "answer_question_tool"
    description: str = "Use this tool to answer any elementary question that you can solve without needing access to any external tool. Simply provide the question in input, reporting the whole question including the desired output format."
    _llm = PrivateAttr()
    _system_prompt = PrivateAttr()

    def __init__(self):
        super().__init__()
        self._llm = ChatGoogleGenerativeAI(
            model="gemini-2.0-flash",
            temperature=0)

        self._system_prompt = SystemMessage("""You are a helpful assistant.
        You will be given a question and you will have to answer that question.
        You MUST NOT apologise, explain your reasoning nor anything else.
        You MUST answer the question and provide the answer in the REQUIRED FORMAT.
        YOU MUST ABSOLUTELY NOT MAKE ANY KIND OF ASSUMPTION!! If you don't know an answer, say you don't know!
        If the format is incorrect, the answer is considered wrong.

        If you are given a list or a transcript and you need to do something on a list of objects, think thoroughly on how you should return the output!
        """)

    def _run(self, question: str) -> str:
        human_message = HumanMessage(
            content=[
                {"type": "text", "text": question},
            ]
        )

        response = self._llm.invoke([self._system_prompt, human_message])

        return response
tools/answer_question_from_file.py
ADDED
@@ -0,0 +1,59 @@
from langchain_core.tools.base import BaseTool
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic import PrivateAttr
import os
from dotenv import load_dotenv
import whisper

load_dotenv(".env", override=True)

AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY")
OPENAI_API_VERSION = os.getenv("OPENAI_API_VERSION_GEN", "2023-12-01-preview")  # Default API version
# AZURE_OPENAI_DEPLOYMENT_NAME will be used as the 'model' for API calls
AZURE_OPENAI_DEPLOYMENT_NAME = "gpt-4.1"


class AnswerQuestionFromFileTool(BaseTool):
    name: str = "answer_question_from_file_tool"
    description: str = """
    This tool allows you to answer a question taking into account information that was provided inside a file.
    You must provide the file in b64 when processing here.

    Args:
        The question that needs to be answered.
        The file from which you want to get some information.
        The file extension of the file that is being processed.
    """
    _llm = PrivateAttr()

    def __init__(self):
        super().__init__()
        self._llm = ChatGoogleGenerativeAI(
            model="gemini-2.0-flash",
            temperature=0)

    def _run(self, question: str, encoded_file: str, file_extension: str) -> str:
        if file_extension in ["png", "jpg"]:
            message = {"type": "image_url", "image_url": f"data:image/png;base64,{encoded_file}"}
        elif file_extension == "pdf":
            message = {"type": "image_url",  # Assuming the LLM accepts PDF under this key, you might need to verify this
                       "image_url": f"data:application/pdf;base64,{encoded_file}"
                       }
        else:
            message = {"type": "text", "text": "The file is not supported."}

        message_local = HumanMessage(
            content=[
                {"type": "text", "text": question + "\nLet's think step by step."},
                message,
            ]
        )

        response = self._llm.invoke([message_local])

        return response
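
For clarity, a small sketch (not in the commit) of the multimodal message shape the tool builds: one text part plus one image part whose image_url is a base64 data URI. The local PNG path is a placeholder; nothing here calls the LLM.

import base64
from langchain_core.messages import HumanMessage

with open("downloaded_files/image.png", "rb") as f:   # hypothetical file written by download_file_tool
    encoded_file = base64.b64encode(f.read()).decode("utf-8")

msg = HumanMessage(content=[
    {"type": "text", "text": "What is shown in this image?\nLet's think step by step."},
    {"type": "image_url", "image_url": f"data:image/png;base64,{encoded_file}"},
])
# AnswerQuestionFromFileTool builds an equivalent message and sends it to Gemini.
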
tools/audio_tool.py
ADDED
@@ -0,0 +1,52 @@
from langchain_core.tools.base import BaseTool
import whisper
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage
from pathlib import Path
import os
from transformers import pipeline
import torch

class AudioTool(BaseTool):
    name: str = "answer_question_audio_tool"
    description: str = "This tool replies to a query based on the audio at the given path of a locally stored file. This tool does NOT download the file from the web. Run the download_file_tool first."

    def _run(self, query: str, file_path: str) -> str:
        try:
            pipe = pipeline(
                task="automatic-speech-recognition",
                model="openai/whisper-base",
                torch_dtype=torch.float32,
                device=0
            )
            result = pipe(str(Path("./") / Path(file_path)))
        except Exception as e:
            print("Exception", e)
            return f"Could not transcribe the audio file: {e}"

        print(result["text"])

        human_message = HumanMessage([{"type": "text", "text": query},
                                      {"type": "text", "text": f"\n\nTranscript: {result['text']}"}])

        system_message = SystemMessage("""You are a helpful assistant. Whenever you receive a transcript of an audio recording along with a user's query:

        1. Carefully read the query multiple times to ensure you fully grasp what is being asked.

        2. Start your response by listing, in clear bullet points, each precise requirement implied by the user's instructions (e.g., which portions of the transcript to use, what to include or exclude, and any specific formatting).

        3. After restating the requirements, fulfill the request exactly as specified. Follow all content and formatting rules without deviation (for instance, "list only names," "omit quantities," "use comma-separated values," "alphabetize," etc.).

        4. Ensure that your final answer adheres strictly to the user's criteria and contains nothing beyond what was requested.

        Always prioritize accuracy and strict adherence to the user's stated needs before providing the answer.""")

        llm = ChatGoogleGenerativeAI(
            model="gemini-2.0-flash",
            temperature=0)

        response = llm.invoke([system_message, human_message])

        return response
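
A standalone sketch (not in the commit) of just the transcription step. The committed tool passes device=0, which assumes a CUDA GPU is available; device=-1 runs the same whisper-base pipeline on CPU. The audio path is a placeholder and mp3 decoding requires ffmpeg on the system.

import torch
from transformers import pipeline

asr = pipeline(
    task="automatic-speech-recognition",
    model="openai/whisper-base",
    torch_dtype=torch.float32,
    device=-1,  # CPU; use 0 for the first GPU
)
result = asr("downloaded_files/audio.mp3")  # hypothetical path written by download_file_tool
print(result["text"])
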
tools/chess_tool.py
ADDED
@@ -0,0 +1,33 @@
from langchain_core.tools.base import BaseTool
from chessimg2pos import predict_fen
from stockfish import Stockfish
import chess

class ChessTool(BaseTool):
    name: str = "chess_tool"
    description: str = "Given the path of an image, this tool returns the best next move that can be made on the chessboard. You must give ONLY the PATH of the image here! Pass in input b or w as color_turn based on whose turn it is. Use w if unspecified."

    def _run(self, img_path: str, color_turn: str) -> str:
        # Get the FEN string for the board shown in the image
        fen = predict_fen(img_path)

        if color_turn == "b":
            ranks = fen.split('/')
            rotated_matrix = []
            for old_row in reversed(ranks):
                rotated_matrix.append(list(reversed(old_row)))
            final_fen = "/".join(["".join(row) for row in rotated_matrix])
            for length in reversed(range(2, 9)):
                final_fen = final_fen.replace(length * "1", str(length))
        else:
            final_fen = fen

        fen = f"{final_fen} {color_turn} - - 0 1"

        #fen = f"3r2k1/pp3pp1/4b2p/7Q/3n4/PqBBR2P/5PP1/6K1 {color_turn} - - 0 1"  # hard-coded test position

        stockfish = Stockfish(path="C:/Users/FORMAGGA/Documents/personal/stockfish-windows-x86-64-avx2/stockfish/stockfish-windows-x86-64-avx2.exe")

        stockfish.set_fen_position(fen)

        return stockfish.get_best_move()
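
The rank/file flip above is the subtle part, so here is a self-contained worked example (not in the commit): reversing the rank order and each rank's characters rotates the board 180 degrees, and the final loop re-compresses runs of "1" placeholders that a square-by-square FEN predictor may emit.

def rotate_board_fen(board_fen: str) -> str:
    ranks = board_fen.split("/")
    rotated = ["".join(reversed(rank)) for rank in reversed(ranks)]
    flipped = "/".join(rotated)
    for length in reversed(range(2, 9)):
        flipped = flipped.replace(length * "1", str(length))
    return flipped

# Rotating this position moves the a1 rook to h8, the e1 king to d8, and the d2 king to e7:
print(rotate_board_fen("8/8/8/8/8/8/3k4/R3K3"))  # -> 3K3R/4k3/8/8/8/8/8/8
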
tools/code_exec.py
ADDED
@@ -0,0 +1,84 @@
from langchain_core.tools.base import BaseTool, ToolException
from typing import Optional
import subprocess
import tempfile
import os
from pydantic import PrivateAttr

class PythonExecutionTool(BaseTool):
    """
    A LangChain "tool" that takes a string of Python code,
    writes it to a temporary .py file, executes it in a fresh
    Python subprocess, captures stdout/stderr, and returns the result.
    """

    name: str = "python_execution"
    description: str = (
        "Executes a string of Python code in an isolated subprocess. "
        "Returns stdout on success, or stderr (with exit code) on failure."
    )
    _python_executable: str = PrivateAttr()
    _timeout: int = PrivateAttr()
    _temp_dir: str = PrivateAttr()

    def __init__(
        self,
        python_executable: str = "python",
        timeout: int = 5,
        *,
        temp_dir: Optional[str] = None
    ):
        """
        :param python_executable: Path to the Python interpreter to invoke.
        :param timeout: Maximum seconds to allow the code to run.
        :param temp_dir: Optional directory in which to create the temp file.
        """
        super().__init__()
        self._python_executable = python_executable
        self._timeout = timeout
        self._temp_dir = temp_dir

    def _run(self, code: str) -> str:
        """
        Synchronously execute the provided Python code.
        :param code: The complete Python source to run.
        :return: Captured stdout if exit code is 0; otherwise stderr + exit code.
        :raises ToolException: On internal error (e.g. unable to write temp file).
        """
        # 1. Write code to a temporary file on disk to avoid shell-quoting issues.
        try:
            with tempfile.NamedTemporaryFile(
                suffix=".py", delete=False, dir=self._temp_dir, mode="w", encoding="utf-8"
            ) as tmp:
                tmp.write(code)
                tmp_path = tmp.name
        except Exception as e:
            raise ToolException(f"Failed to write temp file: {e!r}")

        # 2. Invoke a fresh Python process on that file, capturing stdout & stderr.
        try:
            result = subprocess.run(
                [self._python_executable, "-u", tmp_path],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                timeout=self._timeout,
            )
        except subprocess.TimeoutExpired:
            return f"⚠️ Execution timed out after {self._timeout} seconds."
        except Exception as e:
            raise ToolException(f"Failed to launch subprocess: {e!r}")
        finally:
            # 3. Clean up the temp file no matter what
            try:
                os.remove(tmp_path)
            except OSError:
                pass

        # 4. Process the result
        if result.returncode != 0:
            return (
                f"❌ Process exited with code {result.returncode}.\n"
                f"stderr:\n{result.stderr}"
            )
        return result.stdout
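
A short usage sketch (not in the commit) showing the contract described above: stdout is returned on success, stderr plus the exit code on failure.

from tools.code_exec import PythonExecutionTool

py = PythonExecutionTool(timeout=5, temp_dir="./")

print(py.invoke("print(sum(range(10)))"))     # -> "45\n"
print(py.invoke("raise ValueError('boom')"))  # -> "❌ Process exited with code 1." plus the traceback from stderr
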
tools/code_gen.py
ADDED
@@ -0,0 +1,42 @@
from langchain_core.tools.base import BaseTool
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import AzureChatOpenAI
import os
from dotenv import load_dotenv

load_dotenv(".env", override=True)

AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT_GEN")
AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY_GEN")
OPENAI_API_VERSION = os.getenv("OPENAI_API_VERSION_GEN", "2023-12-01-preview")  # Default API version
# AZURE_OPENAI_DEPLOYMENT_NAME will be used as the 'model' for API calls
AZURE_OPENAI_DEPLOYMENT_NAME = "gpt-4.1"

class CodeGenTool(BaseTool):
    name: str = "code_generator_tool"
    description: str = "Given the instructions provided, it generates Python code as text. It's important that the instructions provide: which args must be provided in input, the content of the function and what is the desired output."

    def _run(self, function_description: str, input: str, output: str) -> str:
        if not function_description:
            return "You need to pass in a function description. Retry providing the right parameters."

        system = SystemMessage("""You are an expert software engineer, your goal is to generate a piece of code.
        YOU MUST GENERATE A **PYTHON** FUNCTION.
        You will be given a description of what the function needs to do, for example "Generate a function that retrieves a web page from the internet".
        Then you will be given information about what the input parameters are and the output.

        In the output code you must list the imports as well.
        It's VERY IMPORTANT that you stick to the constraints given for input and output.
        If you believe there is a better way to do things, IGNORE THIS IDEA and stick to what is given in input.
        """)

        human = HumanMessage(f"Description of the function:\n{function_description}\n\nInput parameters:\n{input}\n\nOutput result:\n{output}\n\n")

        llm = ChatGoogleGenerativeAI(
            model="gemini-2.0-flash",
            temperature=0.5)

        response = llm.invoke([system, human])

        return response
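
A hypothetical usage sketch, not part of the commit; the argument values are illustrative and Gemini credentials are assumed to be configured.

from tools.code_gen import CodeGenTool

generated = CodeGenTool().invoke({
    "function_description": "Parse a CSV file and return the sum of the 'amount' column.",
    "input": "path: str, the location of the CSV file",
    "output": "float, the column total",
})
print(generated)  # LLM response containing the generated Python function
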
tools/download_file.py
ADDED
@@ -0,0 +1,83 @@
from langchain_core.tools.base import BaseTool
import requests
import base64
import pandas as pd
import os
import tempfile
import whisper

class DownloadFile(BaseTool):
    name: str = "download_file_tool"
    description: str = """
    This tool downloads a file (image, pdf, python code, excel, etc.) given the name of the file. The url for the request will be composed in the function so ONLY the name of the file should be passed in.

    You may have to download a file in 2 different scenarios:
    - A file given already as part of the task. In this case the format of the url must be: {DEFAULT_API_URL}/files/{file_name} THE EXTENSION OF THE FILE MUST NOT(!!) BE INCLUDED!
    - A url retrieved from the internet in the format https://some_url. In that case, you simply need to provide the url of the file that needs to be retrieved.

    Args:
        file_name: the name of the file to be retrieved DEFAULT_API_URL/files/task_id
        file_extension: the extension of the file, without the dot. So for example "pdf", "img", "py", "xlsx", etc.

    Output:
        IF the file is a document, image or audio:
            A dict made of:
            1) The file in bytes
            2) The file in Base64 encoding
            3) The result of the call

        IF the file is a piece of code:
            A dict made of:
            The text of the file

        IF the file is an excel:
            A dict made of:
            The path of a pandas-readable file
    """

    def _run(self, file_url: str, file_extension: str) -> dict:
        response = requests.get(file_url)
        if response.status_code == 200:
            msg = "File downloaded successfully!!"
            if file_extension in ["png", "jpg", "pdf"]:
                file = response.content
                b64_file = base64.b64encode(file).decode("utf-8")
                with open("downloaded_files/image.png", "wb") as f:
                    f.write(file)

                return {
                    "bytes": file,
                    "base64": b64_file,
                    "status": response.status_code,
                    "path": "downloaded_files/image.png"
                }
            elif file_extension in ["mp3", "wav"]:
                res = response.content
                with open("downloaded_files/audio.mp3", mode="wb") as f:
                    f.write(res)

                return {"transcript": "./downloaded_files/audio.mp3"}

            elif file_extension == "py":
                return {"text": response.text}
            elif file_extension == "xlsx":
                file_name = file_url.split("/")[-1]

                with open(f"./downloaded_files/{file_name}", "wb") as f:
                    f.write(response.content)

                return {"dataframe_path": f"./downloaded_files/{file_name}"}
            else:
                return {"error_msg": "The file extension is not valid."}
        else:
            msg = "There was an error downloading the file."
            file = None
            b64_file = None

            return {"error_msg": msg}
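
A hypothetical usage sketch, not part of the commit. DEFAULT_API_URL and the task id below are placeholders, and the tool expects a ./downloaded_files directory to already exist.

from tools.download_file import DownloadFile

DEFAULT_API_URL = "https://example.com/api"   # placeholder for the course API base URL
task_id = "some-task-id"                      # placeholder task identifier (no extension)

result = DownloadFile().invoke({
    "file_url": f"{DEFAULT_API_URL}/files/{task_id}",
    "file_extension": "xlsx",
})
# On success this returns {"dataframe_path": "./downloaded_files/<file name>"};
# on a non-200 response it returns {"error_msg": ...}.
print(result)
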
tools/reverse_string.py
ADDED
@@ -0,0 +1,8 @@
from langchain_core.tools.base import BaseTool

class ReverseString(BaseTool):
    name: str = "reverse_string_tool"
    description: str = ("This tool inverts the order of the characters within a sentence. It is particularly useful if you can't understand the content in any language.")

    def _run(self, string: str) -> str:
        return string[::-1]
tools/web_search.py
ADDED
@@ -0,0 +1,52 @@
from langchain_core.tools.base import BaseTool
from dotenv import load_dotenv
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_community.tools import TavilySearchResults, DuckDuckGoSearchResults
from langchain_tavily import TavilySearch
import os
from pydantic import PrivateAttr
from langchain_community.document_loaders import WebBaseLoader
import json
import requests

load_dotenv(".env", override=True)


class WebSearchTool(BaseTool):
    name: str = "web_search_tool"
    description: str = "Perform a web search and extract concise factual answers. Use for online facts not in GAIA/Wikipedia—e.g. sports stats, Olympic participation, published papers, museum specimen locations, competition winners, and other up-to-date info."
    #_search: BraveSearch = PrivateAttr()
    _search: DuckDuckGoSearchResults = PrivateAttr()

    def __init__(self):
        super().__init__()
        #wrapper = DuckDuckGoSearchAPIWrapper(region="en", max_results=2)
        #self._search = DuckDuckGoSearchResults(api_wrapper=wrapper, output_format="json")

        self._search = TavilySearch(max_results=2)

    def _run_old(self, query: str) -> str:
        json_str = self._search.run(query)  # list[Document]
        docs = json.loads(json_str)
        urls = [doc["link"] for doc in docs]

        pages = [requests.get(url) for url in urls]

        res = "\n\n---\n\n".join(
            page.text for page in pages
        )

        try:
            with open("./web_search.txt", "wt", encoding="utf-8") as f:
                f.write(str(res))
        except Exception as e:
            print(e)

        return res

    def _run(self, query: str) -> str:
        # import pdb;pdb.set_trace()
        search_results = []
        search_results.append(self._search.invoke(query))
        # print(f"Search results: {search_results} \n type: {type(search_results)}")
        return str(search_results)
tools/wikipedia.py
ADDED
@@ -0,0 +1,50 @@
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from pydantic import PrivateAttr
from langchain_core.tools.base import BaseTool
from langchain_community.document_loaders import WikipediaLoader
import requests
from bs4 import BeautifulSoup


class WikipediaTool(BaseTool):
    name: str = "wikipedia_tool"
    description: str = "Search Wikipedia for a given query, retrieving the corresponding page's HTML content."
    #_wikipedia = PrivateAttr()

    def __init__(self):
        super().__init__()
        #self._wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(lang="en", doc_content_chars_max=100000, top_k_results=1))

    def _run(self, query: str):
        loader = WikipediaLoader(
            query=query,
            lang="en",
            load_max_docs=1,  # number of pages to fetch
            load_all_available_meta=True
        )
        docs = loader.load()
        url = docs[0].metadata.get("source")

        resp = requests.get(url)
        resp.raise_for_status()

        html_text = resp.text

        soup = BeautifulSoup(html_text, 'html.parser')
        page_content = soup.find("div", class_="mw-parser-output")
        if not page_content:
            return ""
        try:
            # Decompose non relevant tags
            to_decompose = []
            for tag in page_content.find_all(["style", "sup", "script", "noscript", "img", "link", "figure"]):
                to_decompose.append(tag)

            for tag in to_decompose:
                tag.decompose()
        except Exception as e:
            print(e)

        return str(page_content)
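
A small self-contained sketch (not in the commit) of the cleanup step on a toy HTML snippet: BeautifulSoup drops styling, citation superscripts, and figures before the page content is returned.

from bs4 import BeautifulSoup

html = """
<div class="mw-parser-output">
  <p>Example paragraph<sup>[1]</sup></p>
  <style>.x{}</style>
  <figure><img src="x.png"></figure>
</div>
"""
content = BeautifulSoup(html, "html.parser").find("div", class_="mw-parser-output")
for tag in content.find_all(["style", "sup", "script", "noscript", "img", "link", "figure"]):
    tag.decompose()
print(content.get_text(strip=True))  # -> "Example paragraph"
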
tools/youtube_transcript.py
ADDED
@@ -0,0 +1,29 @@
from langchain_core.tools.base import BaseTool, ToolException
import requests
from youtube_transcript_api import YouTubeTranscriptApi
import re

class YoutubeTranscriptTool(BaseTool):
    name: str = "youtube_transcript_tool"
    description: str = "This tool can be used to retrieve the transcript of a youtube video given the FULL youtube link. You must pass the full youtube link!"

    def _run(self, youtube_link: str) -> str:
        """
        Fetch transcript for a YouTube video URL.
        Args:
            youtube_link: The full URL of the YouTube video.
        Returns:
            The transcript as a single string.
        """
        re_match = re.search(r"watch\?v=([^&]+)", youtube_link)
        if not re_match:
            raise ValueError(f"Invalid YouTube URL: {youtube_link}")
        video_id = re_match.group(1)
        ytt_api = YouTubeTranscriptApi()
        fetched_transcript = ytt_api.fetch(video_id)

        transcript = []
        for snippet in fetched_transcript:
            transcript.append(snippet.text)

        return "\n".join(transcript)
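
A sketch (not from the commit) of the ID extraction the tool relies on: only full "watch?v=..." URLs match, so short youtu.be links would need an additional pattern.

import re

def extract_video_id(url: str) -> str:
    m = re.search(r"watch\?v=([^&]+)", url)
    if not m:
        raise ValueError(f"Invalid YouTube URL: {url}")
    return m.group(1)

print(extract_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ&t=10s"))  # -> dQw4w9WgXcQ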