Nicolas PHUNG
committed on

Commit · dbf8073
1 Parent(s): 81917a3

feat: Add first version of public agent

Files changed:
- .gitignore +13 -0
- agent.py +101 -0
- app.py +75 -43
- requirements.txt +15 -1
- tools.py +109 -0
.gitignore ADDED
@@ -0,0 +1,13 @@
+requirements.dev.txt
+.python-version
+Makefile
+**/*.pyc
+__pycache__/
+
+.env
+.dmypy.json
+mypy.ini
+docs/
+*.ipynb
+
+private_agent*.py
agent.py ADDED
@@ -0,0 +1,101 @@
+import os
+
+from smolagents import (
+    CodeAgent,
+    DuckDuckGoSearchTool,
+    InferenceClientModel,
+    PythonInterpreterTool,
+)
+
+from tools import (
+    arvix_search,
+    extract_markdown_tables_from_markdown_content,
+    get_audio_transcription,
+    get_python_file_content,
+    read_excel_content_to_markdown_content,
+    read_pdf_content_to_markdown,
+    visit_webpage_to_markdown,
+    wiki_search,
+)
+
+
+class MyAgent:
+    def __init__(self):
+        self.agent = CodeAgent(
+            tools=[
+                PythonInterpreterTool(),
+                wiki_search,
+                visit_webpage_to_markdown,
+                DuckDuckGoSearchTool(max_results=8),
+                get_python_file_content,
+                get_audio_transcription,
+                read_pdf_content_to_markdown,
+                read_excel_content_to_markdown_content,
+                extract_markdown_tables_from_markdown_content,
+                arvix_search,
+            ],
+            planning_interval=3,
+            model=InferenceClientModel(),
+            additional_authorized_imports=[
+                "datetime",
+                "re",
+                "os",
+                "pandas",
+                "numpy",
+                "json",
+            ],
+            verbosity_level=2,  # 0: no output, 1: minimal output, 2: detailed output
+        )
+        print("MyAgent initialized.")
+
+    def __call__(self, question: str) -> str:
+        print(f"Agent received question (first 50 chars): {question[:50]}...")
+        answer = self.agent.run(question, max_steps=11)
+        print(f"Agent returning answer: {answer}")
+        return answer
+
+
+if __name__ == "__main__":
+    print("Running MyAgent in standalone mode...")
+    agent = MyAgent()
+    # answer = agent(
+    #     "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia."
+    # )  # 3 KO
+    # answer = agent(
+    #     "Who did the actor who played Ray in the Polish-language version of Everybody Loves Raymond play in Magda M.? Give only the first name."
+    # )  # Wojciech KO
+    # answer = agent(
+    #     "Who nominated the only Featured Article on English Wikipedia about a dinosaur that was promoted in November 2016?"
+    # )  # FunkMonk
+    # answer = agent(
+    #     f"What is the final numeric output from the attached Python code? \n\nMentioned case-sensitive file path is {os.getenv('GAIA_CONTENT_PATH')}/f918266a-b3e0-4914-865d-4faa564f1aef.py"
+    # )  # 0
+    # answer = agent(
+    #     f'Hi, I\'m making a pie but I could use some help with my shopping list. I have everything I need for the crust, but I\'m not sure about the filling. I got the recipe from my friend Aditi, but she left it as a voice memo and the speaker on my phone is buzzing so I can\'t quite make out what she\'s saying. Could you please listen to the recipe and list all of the ingredients that my friend described? I only want the ingredients for the filling, as I have everything I need to make my favorite pie crust. I\'ve attached the recipe as Strawberry pie.mp3.\n\nIn your response, please only list the ingredients, not any measurements. So if the recipe calls for "a pinch of salt" or "two cups of ripe strawberries" the ingredients on the list would be "salt" and "ripe strawberries".\n\nPlease format your response as a comma separated list of ingredients. Also, please alphabetize the ingredients. \n\nMentioned case-sensitive file path is {os.getenv("GAIA_CONTENT_PATH")}/99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3.mp3 \n\ncorn starch right typo is cornstarch'
+    # )  # "cornstarch, freshly squeezed lemon juice, granulated sugar, pure vanilla extract, ripe strawberries" TO_TEST
+    # answer = agent(
+    #     "How many at bats did the Yankee with the most walks in the 1977 regular season have that same season?"
+    # )  # 519
+    # answer = agent(
+    #     f"Hi, I was out sick from my classes on Friday, so I'm trying to figure out what I need to study for my Calculus mid-term next week. My friend from class sent me an audio recording of Professor Willowbrook giving out the recommended reading for the test, but my headphones are broken :(\n\nCould you please listen to the recording for me and tell me the page numbers I'm supposed to go over? I've attached a file called Homework.mp3 that has the recording. Please provide just the page numbers as a comma-delimited list. And please provide the list in ascending order.\n\nMentioned case-sensitive file path is {os.getenv('GAIA_CONTENT_PATH')}/1f975693-876d-457b-a649-393859e79bf3.mp3"
+    # )  # "132, 133, 134, 197, 245" / OK
+    # "Where were the Vietnamese specimens described by Kuznetzov in Nedoshivina's 2010 paper eventually deposited? Just give me the city name without abbreviations."  # Saint Petersburg / PDF ? KO
+    # answer = agent(
+    #     "What country had the least number of athletes at the 1928 Summer Olympics? If there's a tie for a number of athletes, return the first in alphabetical order. Give the IOC country code as your answer."
+    # )  # CUB / OK 5 steps
+    # answer = agent(
+    #     "Who are the pitchers with the number before and after Taish\u014d Tamai's number as of July 2023? Give them to me in the form Pitcher Before, Pitcher After, use their last names only, in Roman characters."
+    # )  # Yoshida, Uehara / OK 6 steps
+    answer = agent(
+        f"The attached Excel file contains the sales of menu items for a local fast-food chain. What were the total sales that the chain made from food (not including drinks)? Express your answer in USD with two decimal places.\n\nMentioned case-sensitive file path is {os.getenv('GAIA_CONTENT_PATH', '')}7bd855d8-463d-4ed5-93ca-5fe35145f733.xlsx"
+    )  # 89706.00 Excel xlsx TO_TEST
+    # answer = agent(
+    #     "What is the first name of the only Malko Competition recipient from the 20th Century (after 1977) whose nationality on record is a country that no longer exists?"
+    # )  # Claus
+    # answer = agent(
+    #     "Examine the video at https://www.youtube.com/watch?v=1htKBjuUWec.\n\nWhat does Teal'c say in response to the question \"Isn't that hot?\""
+    # )  # Extremely / OK
+    # answer = agent(
+    #     "What is the surname of the equine veterinarian mentioned in 1.E Exercises from the chemistry materials licensed by Marisa Alviar-Agnew & Henry Agnew under the CK-12 license in LibreText's Introductory Chemistry materials as compiled 08/21/2023?"
+    # )  # Louvrier / OK 5 steps
+    print(f"Answer: {answer}")
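Note: the __main__ block above is only a scratchpad of GAIA test prompts. A minimal hedged smoke test of the committed agent, assuming the requirements below are installed and HF_TOKEN / GAIA_CONTENT_PATH are set in the environment (the question here is illustrative, not from the evaluation set):

    from agent import MyAgent

    agent = MyAgent()  # builds the CodeAgent with the tools from tools.py
    print(agent("What is 2 + 2? Answer with just the number."))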
app.py CHANGED
@@ -1,34 +1,26 @@
 import os
+
 import gradio as gr
-import requests
-import inspect
 import pandas as pd
+import requests
+
+from agent import MyAgent
 
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
-
-
-class BasicAgent:
-    def __init__(self):
-        print("BasicAgent initialized.")
-    def __call__(self, question: str) -> str:
-        print(f"Agent received question (first 50 chars): {question[:50]}...")
-        fixed_answer = "This is a default answer."
-        print(f"Agent returning fixed answer: {fixed_answer}")
-        return fixed_answer
-
-def run_and_submit_all( profile: gr.OAuthProfile | None):
+
+def run_and_submit_all(profile: gr.OAuthProfile | None):
     """
     Fetches all questions, runs the BasicAgent on them, submits all answers,
     and displays the results.
     """
     # --- Determine HF Space Runtime URL and Repo URL ---
-    space_id = os.getenv("SPACE_ID")
+    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending link to the code
 
     if profile:
-        username= f"{profile.username}"
+        username = f"{profile.username}"
         print(f"User logged in: {username}")
     else:
         print("User not logged in.")
@@ -36,11 +28,10 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
 
     api_url = DEFAULT_API_URL
     questions_url = f"{api_url}/questions"
-    submit_url = f"{api_url}/submit"
 
     # 1. Instantiate Agent ( modify this part to create your agent)
     try:
-        agent = BasicAgent()
+        agent = MyAgent()
     except Exception as e:
         print(f"Error instantiating agent: {e}")
         return f"Error initializing agent: {e}", None
@@ -55,16 +46,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         response.raise_for_status()
         questions_data = response.json()
         if not questions_data:
-             print("Fetched questions list is empty.")
-             return "Fetched questions list is empty or invalid format.", None
+            print("Fetched questions list is empty.")
+            return "Fetched questions list is empty or invalid format.", None
         print(f"Fetched {len(questions_data)} questions.")
+    except requests.exceptions.JSONDecodeError as e:
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+        return f"Error decoding server response for questions: {e}", None
     except requests.exceptions.RequestException as e:
         print(f"Error fetching questions: {e}")
         return f"Error fetching questions: {e}", None
-    except requests.exceptions.JSONDecodeError as e:
-        print(f"Error decoding JSON response from questions endpoint: {e}")
-        print(f"Response text: {response.text[:500]}")
-        return f"Error decoding server response for questions: {e}", None
     except Exception as e:
         print(f"An unexpected error occurred fetching questions: {e}")
         return f"An unexpected error occurred fetching questions: {e}", None
@@ -76,27 +67,65 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
     for item in questions_data:
         task_id = item.get("task_id")
         question_text = item.get("question")
+        file_name = item.get("file_name")
         if not task_id or question_text is None:
             print(f"Skipping item with missing task_id or question: {item}")
             continue
         try:
-            submitted_answer = agent(question_text)
-            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
-            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
+            if file_name:
+                submitted_answer = agent(
+                    question_text
+                    + f"\n\nMentioned case-sensitive file path is {os.getenv('GAIA_CONTENT_PATH')}{file_name}\n\ncorn starch right typo is cornstarch"
+                )
+            else:
+                submitted_answer = (
+                    str(agent(question_text))
+                    # Post hack on the answer to remove some common mistakes
+                    .replace("$", "")
+                    .replace(".", "")
+                    .replace("St Petersburg", "Saint Petersburg")
+                )
+            answers_payload.append(
+                {"task_id": task_id, "submitted_answer": submitted_answer}
+            )
+            results_log.append(
+                {
+                    "Task ID": task_id,
+                    "Question": question_text,
+                    "Submitted Answer": submitted_answer,
+                }
+            )
         except Exception as e:
-            print(f"Error running agent on task {task_id}: {e}")
-            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+            print(f"Error running agent on task {task_id}: {e}")
+            results_log.append(
+                {
+                    "Task ID": task_id,
+                    "Question": question_text,
+                    "Submitted Answer": f"AGENT ERROR: {e}",
+                }
+            )
 
     if not answers_payload:
         print("Agent did not produce any answers to submit.")
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare Submission
-    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+    # 4. Prepare Submission
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
     print(status_update)
 
+    # TMP
+    for result in results_log:
+        print(
+            f"Task ID: {result['Task ID']}, Question: {result['Question']}, Answer: {result['Submitted Answer']}"
+        )
+
     # 5. Submit
+    submit_url = f"{api_url}/submit"
+    submission_data = {
+        "username": username.strip(),
+        "agent_code": agent_code,
+        "answers": answers_payload,
+    }
     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
     try:
         response = requests.post(submit_url, json=submission_data, timeout=60)
@@ -162,20 +191,19 @@ with gr.Blocks() as demo:
 
     run_button = gr.Button("Run Evaluation & Submit All Answers")
 
-    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+    status_output = gr.Textbox(
+        label="Run Status / Submission Result", lines=5, interactive=False
+    )
     # Removed max_rows=10 from DataFrame constructor
     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
 
-    run_button.click(
-        fn=run_and_submit_all,
-        outputs=[status_output, results_table]
-    )
+    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
 
 if __name__ == "__main__":
-    print("\n" + "-"*30 + " App Starting " + "-"*30)
+    print("\n" + "-" * 30 + " App Starting " + "-" * 30)
     # Check for SPACE_HOST and SPACE_ID at startup for information
     space_host_startup = os.getenv("SPACE_HOST")
-    space_id_startup = os.getenv("SPACE_ID")
+    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
 
     if space_host_startup:
         print(f"✅ SPACE_HOST found: {space_host_startup}")
@@ -183,14 +211,18 @@ if __name__ == "__main__":
     else:
         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
 
-    if space_id_startup:
+    if space_id_startup:  # Print repo URLs if SPACE_ID is found
         print(f"✅ SPACE_ID found: {space_id_startup}")
         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
+        print(
+            f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main"
+        )
     else:
-        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
+        print(
+            "ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined."
+        )
 
-    print("-"*(60 + len(" App Starting ")) + "\n")
+    print("-" * (60 + len(" App Starting ")) + "\n")
 
     print("Launching Gradio Interface for Basic Agent Evaluation...")
-    demo.launch(debug=True, share=False)
+    demo.launch(debug=True, share=False)
requirements.txt CHANGED
@@ -1,2 +1,16 @@
 gradio
-requests
+requests
+
+# smolagents
+smolagents
+duckduckgo-search
+markdownify
+wikipedia-api
+
+# tool
+langchain-community
+wikipedia
+#arxiv
+markitdown[audio-transcription,pdf,xlsx]
+# docling KO memory CUDA pytorch~
+markdown-analysis
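Note: the markdown-analysis entry appears to be the distribution that provides the mrkdwn_analysis module imported lazily in tools.py; a plain pip install -r requirements.txt should then cover all the tool dependencies used below.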
tools.py ADDED
@@ -0,0 +1,109 @@
+import json
+
+from langchain_community.document_loaders import ArxivLoader, WikipediaLoader
+from markitdown import MarkItDown
+from smolagents import (
+    tool,
+)
+
+md = MarkItDown(enable_plugins=True)  # Set to True to enable plugins
+
+
+@tool
+def arvix_search(query: str) -> str:
+    """Search Arxiv for a query and return maximum 3 results.
+
+    Args:
+        query: The search query."""
+    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
+    formatted_search_docs = "\n\n---\n\n".join(
+        [
+            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
+            for doc in search_docs
+        ]
+    )
+    return formatted_search_docs
+
+
+@tool
+def read_excel_content_to_markdown_content(file_location: str) -> str:
+    """Read the content of an Excel file and convert it to markdown content.
+
+    Args:
+        file_location: The path to the Excel file."""
+
+    result = md.convert(file_location)
+    return result.text_content
+
+
+@tool
+def read_pdf_content_to_markdown(file_location: str) -> str:
+    """Read the content of a PDF file and convert it to markdown.
+
+    Args:
+        file_location: The path to the PDF file."""
+
+    result = md.convert(file_location)
+    return result.text_content
+
+
+@tool
+def get_audio_transcription(file_path: str) -> str:
+    """Get the transcription of the audio file using the file path.
+
+    Args:
+        file_path: The path of the audio file."""
+
+    result = md.convert(file_path)
+    return result.text_content
+
+
+@tool
+def get_python_file_content(file_name: str) -> str:
+    """Get the content of a mentioned Python file.
+
+    Args:
+        file_name: The name of the file."""
+    file_path = f"{file_name}"
+    with open(file_path, "r") as f:
+        content = f.read()
+    return content
+
+
+@tool
+def visit_webpage_to_markdown(url: str) -> str:
+    """Visit a web page and return its content in markdown format.
+
+    Args:
+        url: The URL of the web page."""
+    result = md.convert(url)
+    return result.text_content
+
+
+@tool
+def extract_markdown_tables_from_markdown_content(markdown_content: str) -> str:
+    """Extract and return the markdown tables from a given markdown content string in a structured json format.
+
+    Args:
+        markdown_content: The markdown string containing the table."""
+    from mrkdwn_analysis import MarkdownAnalyzer
+
+    analyzer = MarkdownAnalyzer.from_string(markdown_content)
+    analyzer.analyse()
+    return json.dumps(analyzer.identify_tables())
+
+
+@tool
+def wiki_search(query: str) -> str:
+    """Search Wikipedia for a query and return maximum 2 results.
+
+    Args:
+        query: The search query."""
+    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
+    formatted_search_docs = "\n\n---\n\n".join(
+        [
+            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
+            for doc in search_docs
+        ]
+    )
+    return formatted_search_docs