Commit a12a1d9
Parent(s): 1771a85
updated tools

Files changed:
- app.py (+240 -167)
- requirements.txt (+2 -1)
app.py
CHANGED
@@ -16,6 +16,9 @@ from langchain_openai import ChatOpenAI
import fitz
import yt_dlp
import re

## # Load environment variables from .env file
# --- Constants ---
@@ -32,16 +35,48 @@ OPENAI_MODEL = os.getenv ('OPENAI_MODEL')
# --- TOOL 1: Web Search Tool (DuckDuckGo) ---

@tool
- def
- """
    url = f"https://api.duckduckgo.com/?q={query}&format=json&no_html=1"
    try:
-         resp = requests.get(url, timeout=
        resp.raise_for_status()
        data = resp.json()
        for key in ["AbstractText", "Answer", "Definition"]:
            if data.get(key):
-
        return "no_answer"
    except Exception:
        return "error"
@@ -58,7 +93,7 @@ def get_weather(city: str) -> str:
    api_key = os.environ.get("WEATHER_API_KEY")
    url = f"https://api.openweathermap.org/data/2.5/weather?q={city}&appid={WEATHER_API_KEY}&units=metric"
    try:
-         resp = requests.get(url, timeout=
        resp.raise_for_status()
        data = resp.json()
        return str(round(data["main"]["temp"]))
@@ -119,95 +154,52 @@ def get_date(input: str) -> str:

# --- TOOL 6: Wikipedia Summary Tool ---
@tool
- def
    """
-     Answer questions
    """
-
-
-
-     "
-
-
-
-
-     params = {
-         "action": "query",
-         "list": "search",
-         "srsearch": query,
-         "format": "json"
-     }
-     try:
-         resp = requests.get(search_url, params=params, timeout=15)
-         resp.raise_for_status()
-         results = resp.json().get("query", {}).get("search", [])
-         if not results:
-             return "no_answer"
-         page_title = results[0]["title"]
-         page_url = f"https://en.wikipedia.org/wiki/{page_title.replace(' ', '_')}"
-     except Exception:
-         return "error: Could not search Wikipedia"
-
-     # Step 2: Fetch the Wikipedia page and extract tables/lists
-     try:
-         page_resp = requests.get(page_url, timeout=20)
-         page_resp.raise_for_status()
-         soup = BeautifulSoup(page_resp.text, "html.parser")
-         output = f"Source: {page_url}\n"
-
-         # Extract all tables with relevant columns
-         tables = soup.find_all("table", {"class": ["wikitable", "sortable"]})
-         found_table = False
-         for table in tables:
-             table_str = str(table)
-             if any(word in table_str.lower() for word in ["winner", "name", "year", "nationality", "country", "recipient", "team"]):
-                 try:
-                     df = pd.read_html(table_str)[0]
-                     output += "\n--- Extracted Table ---\n"
-                     output += df.to_csv(index=False)
-                     found_table = True
-                 except Exception:
-                     continue
-
-         # If no relevant table, extract lists
-         if not found_table:
-             lists = soup.find_all(['ul', 'ol'])
-             for lst in lists:
-                 items = lst.find_all('li')
-                 if len(items) > 2:
-                     output += "\n--- Extracted List ---\n"
-                     for item in items:
-                         text = item.get_text(separator=" ", strip=True)
-                         output += f"{text}\n"
-                     break
-
-         # Fallback: return the first paragraph if nothing else
-         if not found_table and "--- Extracted List ---" not in output:
-             first_p = soup.find("p")
-             output += first_p.get_text(strip=True)[:500] if first_p else "no_answer"
-
-         # Limit output length for LLM context
-         return output[:3500]
-     except Exception as e:
-         return f"error: {e}"
-
-     # Otherwise, just return the summary as before
-     url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{query.replace(' ', '_')}"
    try:
-         resp = requests.get(
        resp.raise_for_status()
-
-
    except Exception:
-         return "error"

# --- TOOL 7: Dictionary Tool ---
@tool
def dictionary_lookup(word: str) -> str:
-     """Get the definition of an English word."""
    url = f"https://api.dictionaryapi.dev/api/v2/entries/en/{word}"
    try:
-         resp = requests.get(url, timeout=
        resp.raise_for_status()
        data = resp.json()
        return data[0]["meanings"][0]["definitions"][0]["definition"]
@@ -227,7 +219,7 @@ def currency_convert(args: str) -> str:
    from_currency = parts[1]
    to_currency = parts[3]
    url = f"https://api.exchangerate.host/convert?from={from_currency}&to={to_currency}&amount={amount}"
-     resp = requests.get(url, timeout=
    resp.raise_for_status()
    data = resp.json()
    return str(round(data["result"], 2))
@@ -242,7 +234,7 @@ def image_caption(image_url: str) -> str:
    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
    payload = {"inputs": image_url}
    try:
-         resp = requests.post(api_url, headers=headers, json=payload, timeout=
        resp.raise_for_status()
        data = resp.json()
        return data[0]["generated_text"] if isinstance(data, list) else data.get("generated_text", "no_caption")
@@ -257,7 +249,7 @@ def ocr_image(image_url: str) -> str:
    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
    payload = {"inputs": {"image": image_url, "question": "What text is in the image?"}}
    try:
-         resp = requests.post(api_url, headers=headers, json=payload, timeout=
        resp.raise_for_status()
        data = resp.json()
        return data.get("answer", "no_text_found")
@@ -272,7 +264,7 @@ def classify_image(image_url: str) -> str:
    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
    payload = {"inputs": image_url}
    try:
-         resp = requests.post(api_url, headers=headers, json=payload, timeout=
        resp.raise_for_status()
        data = resp.json()
        return data[0]["label"] if isinstance(data, list) else data.get("label", "no_label")
@@ -281,29 +273,28 @@ def classify_image(image_url: str) -> str:

# --- TOOL 12: Web Scraping Tool ---
@tool
- def
    """
-     Scrape the main textual content from a given website URL and
-     Input: A valid URL (e.g., 'https://en.wikipedia.org/wiki/Python_(programming_language)')
    """
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (compatible; WebScrapeTool/1.0)"
        }
-         resp = requests.get(url, headers=headers, timeout=
        resp.raise_for_status()
        soup = BeautifulSoup(resp.text, "html.parser")
        # Try to extract main content from common tags
        paragraphs = soup.find_all("p")
        text = " ".join(p.get_text() for p in paragraphs)
        # Limit to first 2000 characters for brevity
-         return text[:
    except Exception as e:
        return f"error: {e}"

# --- TOOL 13: Audio to Text Transcription Tool ---
@tool
- def
    """
    Transcribe speech from an audio file URL to text using Hugging Face's Whisper model.
    Input: A direct link to an audio file (e.g., .mp3, .wav).
@@ -313,7 +304,7 @@ def audio_to_text(audio_url: str) -> str:
    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
    try:
        # Download the audio file
-         audio_resp = requests.get(audio_url, timeout=
        audio_resp.raise_for_status()
        audio_bytes = audio_resp.content
        # Encode audio as base64 for API
@@ -322,7 +313,7 @@ def audio_to_text(audio_url: str) -> str:
            "inputs": audio_b64,
            "parameters": {"return_timestamps": False}
        }
-         resp = requests.post(api_url, headers=headers, json=payload, timeout=
        resp.raise_for_status()
        data = resp.json()
        return data.get("text", "no_answer")
@@ -349,8 +340,8 @@ def python_executor(code: str) -> str:
@tool
def python_excel_audio_video_attached_file_tool(input_str: str) -> str:
    """
-     Processes an input attachment (audio, image, video, Excel, or Python .py file) and returns extracted text
-     This function accepts a JSON string 'input_str' with keys: 'file_bytes' (base64), and 'filename'. So input the file and filename as json strings.
    """
    import pandas as pd

@@ -384,7 +375,7 @@ def python_excel_audio_video_attached_file_tool(input_str: str) -> str:
        headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
        files = {"file": (filename, file_bytes)}
        try:
-             resp = requests.post(api_url, headers=headers, files=files, timeout=
            resp.raise_for_status()
            data = resp.json()
            transcript = data.get("text", "")
@@ -473,10 +464,9 @@ def python_excel_audio_video_attached_file_tool(input_str: str) -> str:
@tool
def search_and_extract_research_paper_info(query: str) -> str:
    """
-     Searches for research papers using the Semantic Scholar API
-     and extracts the title, authors, abstract, and main sections.
    Input: A search query (e.g., topic, paper title, or keywords).
-     Output: A summary with title, authors, abstract, and main sections
    """
    try:
        # Search for papers using Semantic Scholar API
@@ -486,7 +476,7 @@ def search_and_extract_research_paper_info(query: str) -> str:
            "limit": 1,
            "fields": "title,authors,abstract,url,openAccessPdf"
        }
-         resp = requests.get(search_url, params=params, timeout=
        resp.raise_for_status()
        data = resp.json()
        if not data.get("data"):
@@ -495,12 +485,13 @@ def search_and_extract_research_paper_info(query: str) -> str:
        title = paper.get("title", "")
        authors = ", ".join([a["name"] for a in paper.get("authors", [])])
        abstract = paper.get("abstract", "")
        pdf_url = paper.get("openAccessPdf", {}).get("url")
        if not pdf_url:
-             return f"Paper found: {title}\nAuthors: {authors}\nAbstract: {abstract}\n(No open access PDF available.)"

        # Download the PDF
-         pdf_resp = requests.get(pdf_url, timeout=
        pdf_resp.raise_for_status()
        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_pdf:
            tmp_pdf.write(pdf_resp.content)
@@ -513,7 +504,7 @@ def search_and_extract_research_paper_info(query: str) -> str:
        for page in doc:
            full_text += page.get_text("text") + "\n"

-         #
        lines = full_text.splitlines()
        main_sections = ""
        in_main = False
@@ -522,14 +513,15 @@ def search_and_extract_research_paper_info(query: str) -> str:
                in_main = True
            if in_main:
                main_sections += line.strip() + " "
-                 if len(main_sections) >
                    break

        summary = (
            f"Title: {title}\n"
            f"Authors: {authors}\n"
            f"Abstract: {abstract}\n"
-             f"
        )
        return summary if summary.strip() else "No information extracted."
    except Exception as e:
@@ -540,9 +532,7 @@ def search_and_extract_research_paper_info(query: str) -> str:
@tool
def sports_awards_historicalfacts_tool(query: str) -> str:
    """
-     For questions about
-     extracts all tables and lists from the most relevant page, and returns them as CSV or plain text.
-     This gives the LLM enough context to answer complex queries about people, years, nationalities, etc.
    """

    # Step 1: Search Wikipedia for the most relevant page
@@ -554,7 +544,7 @@ def sports_awards_historicalfacts_tool(query: str) -> str:
        "format": "json"
    }
    try:
-         resp = requests.get(search_url, params=params, timeout=
        resp.raise_for_status()
        results = resp.json().get("query", {}).get("search", [])
        if not results:
@@ -566,7 +556,7 @@ def sports_awards_historicalfacts_tool(query: str) -> str:

    # Step 2: Fetch the Wikipedia page and extract tables and lists
    try:
-         page_resp = requests.get(page_url, timeout=
        page_resp.raise_for_status()
        soup = BeautifulSoup(page_resp.text, "html.parser")
        output = f"Source: {page_url}\n"
@@ -609,32 +599,55 @@ def sports_awards_historicalfacts_tool(query: str) -> str:

# --- TOOL 18: YouTube Transcript Tool ---
@tool
- def
    """
-     Given a URL
    """
    api_url = "https://api-inference.huggingface.co/models/openai/whisper-large-v3"
    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}

    try:
-         # Download audio from YouTube
        with tempfile.TemporaryDirectory() as tmpdir:
-
-
-
-
-
-
-
-
-             '
-             '
-             '
-
-
-
-
-

            # Read audio bytes
            with open(audio_path, "rb") as f:
@@ -653,31 +666,93 @@ def audio_video_url_transcript_tool(youtube_url: str) -> str:
    except Exception as e:
        return f"error: {e}"

##-- Tool Discovery ---
# Use @tool for each function.
# Use get_all_tools() to auto-discover all decorated tools.
# tools_list = get_all_tools()
tools_list = [
    python_excel_audio_video_attached_file_tool,
-
-     sports_awards_historicalfacts_tool,
    search_and_extract_research_paper_info,
    python_executor,
-     get_weather,
-     calculator,
-     convert_units,
-     get_time,
-     get_date,
    dictionary_lookup,
-     currency_convert,
-     image_caption,
-     ocr_image,
-     classify_image,
-
-
-
    # sports_awards_historicalfacts_tool,
-
]

tool_descriptions = "\n".join(f"- {tool.name}: {tool.description}" for tool in tools_list)
@@ -688,11 +763,20 @@ tool_descriptions = "\n".join(f"- {tool.name}: {tool.description}" for tool in t
# --- System Prompt for the Agent ---

system_prompt = f"""
- You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: [YOUR FINAL ANSWER].
- YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings
-
-
-

For each question, follow this format:

@@ -704,10 +788,6 @@ Observation: the result of the action
... (repeat Thought/Action/Action Input/Observation as needed)
Final Answer: the answer to the original question, as concise as possible (number, short string, or comma-separated list, no extra explanation).

- Rules for YOUR FINAL ANSWER:
- - If the questions is about 'how many' or 'how many times' or 'how many years' or 'how many people' or 'how many items' or 'how many albums' or 'how many songs' (basically needing quantitative reply), then YOUR FINAL ANSWER should just be a number.
- - If the question is about 'what', 'who', 'where', 'when', 'which', 'why', 'how' or similar, then YOUR FINAL ANSWER should be a short string or a comma separated list of strings.
-
**Examples:**
Q: Which country had the least number of athletes at the 1928 Summer Olympics?
Final Answer: Luxembourg
@@ -718,17 +798,10 @@ Final Answer: 5
Q: List the top 3 programming languages.
Final Answer: Python, JavaScript, Java

-
-
-
- If there is a file (image, audio, or video) attached to the question, you should use the process_attachment tool to process it and follow the instructions below:
- - For audio or video attachments, the process_attachment tool will transcribe the audio and return the transcript, which you can use to answer the question.
- - For image attachments, the process_attachment tool will return a base64 encoded string of the image. You can use this encoded information to provide answer.
-
- If the question is related to sports, awards, historical facts or similar topic that can be answered from wikipedia, you should use the 'sports_awards_historicalfacts_tool' or if the question is similar or related that can be searched in wikipedia, use the more specific tool 'wikipedia_summary' to fetch relevant page information and answer from it.
- In general, you must use tools only if needed for the question and only if the question can be answered by one of the provided tools. Otherwise provide the answer based on your knowledge. You must not use multiple tools in a single call. Don't hallucinate.

- If after 12 iterations also a tool usage is not useful then try to answer directly based on your knowledge. If you cannot answer

"""
# If your final answer is something like 'there were 5 studio albums published between 2000 and 2009' then modify YOUR FINAL ANSWER as: '5'
@@ -844,7 +917,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
-         response = requests.get(questions_url, timeout=
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
@@ -894,7 +967,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
-         response = requests.post(submit_url, json=submission_data, timeout=
        response.raise_for_status()
        result_data = response.json()
        final_status = (
|
|
import fitz
import yt_dlp
import re
+ import subprocess
+ from PIL import Image
+ from transformers import pipeline

## # Load environment variables from .env file
# --- Constants ---
# --- TOOL 1: Web Search Tool (DuckDuckGo) ---

@tool
+ def current_events_news_search_tool(query: str) -> str:
+     """
+     General web search tool for current events, news, or trending topics not yet on Wikipedia.
+     Use this tool only if the Wikipedia tool is not suitable.
+     Returns relevant context and source URL if available.
+     """
    url = f"https://api.duckduckgo.com/?q={query}&format=json&no_html=1"
    try:
+         resp = requests.get(url, timeout=120)
        resp.raise_for_status()
        data = resp.json()
+         # Check main answer fields
        for key in ["AbstractText", "Answer", "Definition"]:
            if data.get(key):
+                 answer = data[key].strip()
+                 break
+         else:
+             answer = None
+
+         # Check for related topics if no direct answer
+         if not answer:
+             related = data.get("RelatedTopics")
+             if related and isinstance(related, list) and related:
+                 first_topic = related[0]
+                 if isinstance(first_topic, dict) and first_topic.get("Text"):
+                     answer = first_topic["Text"].strip()
+
+         # Check for results if still no answer
+         if not answer:
+             results = data.get("Results")
+             if results and isinstance(results, list) and results:
+                 first_result = results[0]
+                 if isinstance(first_result, dict) and first_result.get("Text"):
+                     answer = first_result["Text"].strip()
+
+         # Try to include a source URL if available
+         source_url = data.get("AbstractURL") or data.get("Redirect") or ""
+         if answer:
+             result = answer
+             if source_url:
+                 result += f"\nSource: {source_url}"
+             return result.strip()
        return "no_answer"
    except Exception:
        return "error"
|
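For reference, a hand-written sketch (not part of the commit) of the DuckDuckGo Instant Answer payload that current_events_news_search_tool consumes above; it uses only the keys the function actually reads, and the values are invented:

sample = {
    "AbstractText": "Paris is the capital and most populous city of France.",
    "Answer": "",
    "Definition": "",
    "RelatedTopics": [{"Text": "Paris - capital of France"}],
    "Results": [],
    "AbstractURL": "https://duckduckgo.com/Paris",
    "Redirect": "",
}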
|
    api_key = os.environ.get("WEATHER_API_KEY")
    url = f"https://api.openweathermap.org/data/2.5/weather?q={city}&appid={WEATHER_API_KEY}&units=metric"
    try:
+         resp = requests.get(url, timeout=120)
        resp.raise_for_status()
        data = resp.json()
        return str(round(data["main"]["temp"]))
|
|

# --- TOOL 6: Wikipedia Summary Tool ---
@tool
+ def wikipedia_and_generalknowledge_search(query: str) -> str:
    """
+     Answer questions related to general knowledge, world information, facts, sports, olympics, history, etc. from Wikipedia by scraping the text and returns text as context for LLM to use.
    """
+     # Step 1: Search Wikipedia for the most relevant page
+     search_url = "https://en.wikipedia.org/w/api.php"
+     params = {
+         "action": "query",
+         "list": "search",
+         "srsearch": query,
+         "format": "json"
+     }
    try:
+         resp = requests.get(search_url, params=params, timeout=150)
        resp.raise_for_status()
+         results = resp.json().get("query", {}).get("search", [])
+         if not results:
+             return "no_answer"
+         page_title = results[0]["title"]
+         page_url = f"https://en.wikipedia.org/wiki/{page_title.replace(' ', '_')}"
    except Exception:
+         return "error: Could not search Wikipedia"
+
+     # Step 2: Fetch the Wikipedia page and extract main text
+     try:
+         page_resp = requests.get(page_url, timeout=120)
+         page_resp.raise_for_status()
+         soup = BeautifulSoup(page_resp.text, "html.parser")
+         output = f"Source: {page_url}\n"
+
+         # Extract main text from all paragraphs
+         paragraphs = soup.find_all("p")
+         text = " ".join(p.get_text(separator=" ", strip=True) for p in paragraphs)
+         # Limit to first 3000 characters for brevity
+         output += text[:3000] if text else "No textual content found."
+         return output
+     except Exception as e:
+         return f"error: {e}"

# --- TOOL 7: Dictionary Tool ---
@tool
def dictionary_lookup(word: str) -> str:
+     """Get the definition of an English word using dictionary."""
    url = f"https://api.dictionaryapi.dev/api/v2/entries/en/{word}"
    try:
+         resp = requests.get(url, timeout=120)
        resp.raise_for_status()
        data = resp.json()
        return data[0]["meanings"][0]["definitions"][0]["definition"]
|
|
    from_currency = parts[1]
    to_currency = parts[3]
    url = f"https://api.exchangerate.host/convert?from={from_currency}&to={to_currency}&amount={amount}"
+     resp = requests.get(url, timeout=120)
    resp.raise_for_status()
    data = resp.json()
    return str(round(data["result"], 2))
|
|
    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
    payload = {"inputs": image_url}
    try:
+         resp = requests.post(api_url, headers=headers, json=payload, timeout=120)
        resp.raise_for_status()
        data = resp.json()
        return data[0]["generated_text"] if isinstance(data, list) else data.get("generated_text", "no_caption")
|
|
    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
    payload = {"inputs": {"image": image_url, "question": "What text is in the image?"}}
    try:
+         resp = requests.post(api_url, headers=headers, json=payload, timeout=120)
        resp.raise_for_status()
        data = resp.json()
        return data.get("answer", "no_text_found")
|
|
    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
    payload = {"inputs": image_url}
    try:
+         resp = requests.post(api_url, headers=headers, json=payload, timeout=120)
        resp.raise_for_status()
        data = resp.json()
        return data[0]["label"] if isinstance(data, list) else data.get("label", "no_label")
|
|

# --- TOOL 12: Web Scraping Tool ---
@tool
+ def URL_scrape_tool(url: str) -> str:
    """
+     Scrape the main textual content from a given website URL and returns the text - to be used as context by model.
    """
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (compatible; WebScrapeTool/1.0)"
        }
+         resp = requests.get(url, headers=headers, timeout=120)
        resp.raise_for_status()
        soup = BeautifulSoup(resp.text, "html.parser")
        # Try to extract main content from common tags
        paragraphs = soup.find_all("p")
        text = " ".join(p.get_text() for p in paragraphs)
        # Limit to first 2000 characters for brevity
+         return text[:4000] if text else "No textual content found."
    except Exception as e:
        return f"error: {e}"

# --- TOOL 13: Audio to Text Transcription Tool ---
@tool
+ def audio_url_to_text(audio_url: str) -> str:
    """
    Transcribe speech from an audio file URL to text using Hugging Face's Whisper model.
    Input: A direct link to an audio file (e.g., .mp3, .wav).
|
|
    headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
    try:
        # Download the audio file
+         audio_resp = requests.get(audio_url, timeout=120)
        audio_resp.raise_for_status()
        audio_bytes = audio_resp.content
        # Encode audio as base64 for API
|
|
            "inputs": audio_b64,
            "parameters": {"return_timestamps": False}
        }
+         resp = requests.post(api_url, headers=headers, json=payload, timeout=120)
        resp.raise_for_status()
        data = resp.json()
        return data.get("text", "no_answer")
|
|
@tool
def python_excel_audio_video_attached_file_tool(input_str: str) -> str:
    """
+     Processes an input attachment (audio, image, video, Excel, or Python .py file) and returns extracted info (text, encoded information, metadata, etc.) to be used by LLM.
+     This function accepts a JSON string 'input_str' with keys: 'file_bytes' (base64), and 'filename'. So input the file and filename as json strings.
    """
    import pandas as pd

|
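A minimal usage sketch (not part of the commit) of how a caller could build input_str for python_excel_audio_video_attached_file_tool, following the docstring above; the local file name report.xlsx is hypothetical:

import base64, json

with open("report.xlsx", "rb") as f:  # hypothetical attachment on disk
    encoded = base64.b64encode(f.read()).decode("utf-8")
input_str = json.dumps({"file_bytes": encoded, "filename": "report.xlsx"})
# The agent would then pass input_str to the tool.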
375 |
headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
|
376 |
files = {"file": (filename, file_bytes)}
|
377 |
try:
|
378 |
+
resp = requests.post(api_url, headers=headers, files=files, timeout=120)
|
379 |
resp.raise_for_status()
|
380 |
data = resp.json()
|
381 |
transcript = data.get("text", "")
|
|
|
@tool
def search_and_extract_research_paper_info(query: str) -> str:
    """
+     Searches for research and online papers/journals using the Semantic Scholar API.
    Input: A search query (e.g., topic, paper title, or keywords).
+     Output: A summary with title, authors, abstract, and a longer excerpt from the main sections of the top result.
    """
    try:
        # Search for papers using Semantic Scholar API
|
476 |
"limit": 1,
|
477 |
"fields": "title,authors,abstract,url,openAccessPdf"
|
478 |
}
|
479 |
+
resp = requests.get(search_url, params=params, timeout=120)
|
480 |
resp.raise_for_status()
|
481 |
data = resp.json()
|
482 |
if not data.get("data"):
|
|
|
        title = paper.get("title", "")
        authors = ", ".join([a["name"] for a in paper.get("authors", [])])
        abstract = paper.get("abstract", "")
+         paper_url = paper.get("url", "")
        pdf_url = paper.get("openAccessPdf", {}).get("url")
        if not pdf_url:
+             return f"Paper found: {title}\nAuthors: {authors}\nAbstract: {abstract}\nURL: {paper_url}\n(No open access PDF available.)"

        # Download the PDF
+         pdf_resp = requests.get(pdf_url, timeout=120)
        pdf_resp.raise_for_status()
        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_pdf:
            tmp_pdf.write(pdf_resp.content)
|
504 |
for page in doc:
|
505 |
full_text += page.get_text("text") + "\n"
|
506 |
|
507 |
+
# Extract a longer excerpt from the main sections (e.g., Introduction + first 2000 chars)
|
508 |
lines = full_text.splitlines()
|
509 |
main_sections = ""
|
510 |
in_main = False
|
|
|
513 |
in_main = True
|
514 |
if in_main:
|
515 |
main_sections += line.strip() + " "
|
516 |
+
if len(main_sections) > 2000:
|
517 |
break
|
518 |
|
519 |
summary = (
|
520 |
f"Title: {title}\n"
|
521 |
f"Authors: {authors}\n"
|
522 |
f"Abstract: {abstract}\n"
|
523 |
+
f"URL: {paper_url}\n"
|
524 |
+
f"Main Sections (excerpt): {main_sections.strip() if main_sections else full_text[:2000]}"
|
525 |
)
|
526 |
return summary if summary.strip() else "No information extracted."
|
527 |
except Exception as e:
|
|
|
532 |
@tool
|
533 |
def sports_awards_historicalfacts_tool(query: str) -> str:
|
534 |
"""
|
535 |
+
For questions about sports, awards, competitions, historical facts, or generic wikipedia available data, this tool fetches relevant context from Wikipedia.
|
|
|
|
|
536 |
"""
|
537 |
|
538 |
# Step 1: Search Wikipedia for the most relevant page
|
|
|
544 |
"format": "json"
|
545 |
}
|
546 |
try:
|
547 |
+
resp = requests.get(search_url, params=params, timeout=150)
|
548 |
resp.raise_for_status()
|
549 |
results = resp.json().get("query", {}).get("search", [])
|
550 |
if not results:
|
|
|
556 |
|
557 |
# Step 2: Fetch the Wikipedia page and extract tables and lists
|
558 |
try:
|
559 |
+
page_resp = requests.get(page_url, timeout=150)
|
560 |
page_resp.raise_for_status()
|
561 |
soup = BeautifulSoup(page_resp.text, "html.parser")
|
562 |
output = f"Source: {page_url}\n"
|
|
|
599 |
|
600 |
# --- TOOL 18: YouTube Transcript Tool ---
|
601 |
@tool
|
602 |
+
def video_url_to_transcript_tool(media_url: str) -> str:
|
603 |
"""
|
604 |
+
Given a URL to a video or audio file (YouTube, direct .mp4/.mp3/.wav, etc.), download the audio and return a transcript.
|
605 |
"""
|
606 |
api_url = "https://api-inference.huggingface.co/models/openai/whisper-large-v3"
|
607 |
headers = {"Authorization": f"Bearer {HF_ACCESS_KEY}"}
|
608 |
|
609 |
try:
|
|
|
610 |
with tempfile.TemporaryDirectory() as tmpdir:
|
611 |
+
audio_path = None
|
612 |
+
|
613 |
+
# Check if it's a YouTube URL
|
614 |
+
if "youtube.com" in media_url or "youtu.be" in media_url:
|
615 |
+
ydl_opts = {
|
616 |
+
'format': 'bestaudio/best',
|
617 |
+
'outtmpl': f'{tmpdir}/audio.%(ext)s',
|
618 |
+
'quiet': True,
|
619 |
+
'noplaylist': True,
|
620 |
+
'extractaudio': True,
|
621 |
+
'audioformat': 'wav',
|
622 |
+
'postprocessors': [{
|
623 |
+
'key': 'FFmpegExtractAudio',
|
624 |
+
'preferredcodec': 'wav',
|
625 |
+
'preferredquality': '192',
|
626 |
+
}],
|
627 |
+
}
|
628 |
+
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
|
629 |
+
info = ydl.extract_info(media_url, download=True)
|
630 |
+
audio_path = ydl.prepare_filename(info).rsplit('.', 1)[0] + '.wav'
|
631 |
+
else:
|
632 |
+
# Download direct media file
|
633 |
+
resp = requests.get(media_url, timeout=120)
|
634 |
+
resp.raise_for_status()
|
635 |
+
# Guess extension
|
636 |
+
ext = media_url.split('?')[0].split('.')[-1].lower()
|
637 |
+
if ext not in ["mp3", "wav", "m4a", "mp4"]:
|
638 |
+
ext = "mp3"
|
639 |
+
file_path = os.path.join(tmpdir, f"audio.{ext}")
|
640 |
+
with open(file_path, "wb") as f:
|
641 |
+
f.write(resp.content)
|
642 |
+
# If video, extract audio using ffmpeg
|
643 |
+
if ext in ["mp4", "mkv", "webm"]:
|
644 |
+
audio_path = os.path.join(tmpdir, "audio.wav")
|
645 |
+
import subprocess
|
646 |
+
subprocess.run([
|
647 |
+
"ffmpeg", "-i", file_path, "-vn", "-acodec", "pcm_s16le", "-ar", "16000", "-ac", "1", audio_path
|
648 |
+
], check=True)
|
649 |
+
else:
|
650 |
+
audio_path = file_path
|
651 |
|
652 |
# Read audio bytes
|
653 |
with open(audio_path, "rb") as f:
|
|
|
666 |
except Exception as e:
|
667 |
return f"error: {e}"
|
668 |
|
669 |
+
|
670 |
+
# --- TOOL 19: Audio to Text Transcription Tool ---
|
671 |
+
@tool
|
672 |
+
def max_object_in_video(video_url: str, object_label: str = "bird") -> str:
|
673 |
+
"""
|
674 |
+
Given a video URL and an object label, extracts frames and uses an object detection model to count the specified object in each frame.
|
675 |
+
Returns the maximum number of objects detected in any single frame.
|
676 |
+
Example: max_object_in_video("https://...", "car") -> "Maximum car count in a frame: 4"
|
677 |
+
"""
|
678 |
+
|
679 |
+
# Download video
|
680 |
+
try:
|
681 |
+
resp = requests.get(video_url, timeout=120)
|
682 |
+
resp.raise_for_status()
|
683 |
+
with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_video:
|
684 |
+
tmp_video.write(resp.content)
|
685 |
+
tmp_video.flush()
|
686 |
+
video_path = tmp_video.name
|
687 |
+
except Exception as e:
|
688 |
+
return f"error: Could not download video: {e}"
|
689 |
+
|
690 |
+
# Extract frames every 2 seconds (adjust as needed)
|
691 |
+
frames_dir = tempfile.mkdtemp()
|
692 |
+
frame_pattern = os.path.join(frames_dir, "frame_%04d.jpg")
|
693 |
+
try:
|
694 |
+
subprocess.run([
|
695 |
+
"ffmpeg", "-i", video_path, "-vf", "fps=0.5", frame_pattern
|
696 |
+
], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
|
697 |
+
except Exception as e:
|
698 |
+
return f"error: Could not extract frames: {e}"
|
699 |
+
|
700 |
+
# Load object detection pipeline
|
701 |
+
try:
|
702 |
+
detector = pipeline("object-detection", model="facebook/detr-resnet-50")
|
703 |
+
except Exception as e:
|
704 |
+
return f"error: Could not load detection model: {e}"
|
705 |
+
|
706 |
+
max_count = 0
|
707 |
+
for fname in sorted(os.listdir(frames_dir)):
|
708 |
+
fpath = os.path.join(frames_dir, fname)
|
709 |
+
try:
|
710 |
+
image = Image.open(fpath)
|
711 |
+
results = detector(image)
|
712 |
+
count = sum(1 for obj in results if obj['label'].lower() == object_label.lower() and obj['score'] > 0.5)
|
713 |
+
if count > max_count:
|
714 |
+
max_count = count
|
715 |
+
except Exception:
|
716 |
+
continue
|
717 |
+
|
718 |
+
# Clean up
|
719 |
+
try:
|
720 |
+
os.remove(video_path)
|
721 |
+
for fname in os.listdir(frames_dir):
|
722 |
+
os.remove(os.path.join(frames_dir, fname))
|
723 |
+
os.rmdir(frames_dir)
|
724 |
+
except Exception:
|
725 |
+
pass
|
726 |
+
|
727 |
+
return f"Maximum {object_label} count in a single frame: {max_count}"
|
728 |
+
|
729 |
+
|
730 |
##-- Tool Discovery ---
|
731 |
# Use @tool for each function.
|
732 |
# Use get_all_tools() to auto-discover all decorated tools.
|
733 |
# tools_list = get_all_tools()
|
734 |
tools_list = [
|
735 |
python_excel_audio_video_attached_file_tool,
|
736 |
+
wikipedia_and_generalknowledge_search,
|
737 |
+
# sports_awards_historicalfacts_tool,
|
738 |
search_and_extract_research_paper_info,
|
739 |
python_executor,
|
740 |
+
# get_weather,
|
741 |
+
# calculator,
|
742 |
+
# convert_units,
|
743 |
+
# get_time,
|
744 |
+
# get_date,
|
745 |
dictionary_lookup,
|
746 |
+
# currency_convert,
|
747 |
+
# image_caption,
|
748 |
+
# ocr_image,
|
749 |
+
# classify_image,
|
750 |
+
current_events_news_search_tool,
|
751 |
+
URL_scrape_tool,
|
752 |
+
audio_url_to_text,
|
753 |
# sports_awards_historicalfacts_tool,
|
754 |
+
video_url_to_transcript_tool,
|
755 |
+
max_object_in_video,
|
756 |
]
|
757 |
|
758 |
tool_descriptions = "\n".join(f"- {tool.name}: {tool.description}" for tool in tools_list)
|
|
|
763 |
# --- System Prompt for the Agent ---
|
764 |
|
765 |
system_prompt = f"""
|
766 |
+
You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: [YOUR FINAL ANSWER].
|
767 |
+
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
|
768 |
+
First check if you can answer the question by yourself without the need for a tool. If you can answer without hallucination then answer the question directly. If you cannot, use the tools provided to answer the question.
|
769 |
+
|
770 |
+
You also have access to a set of tools, which you can use to answer the question, if you can't answer the question directly. The available tools are:
|
771 |
+
{tool_descriptions}
|
772 |
+
|
773 |
+
If there is a file (image, audio, or video) attached to the question, you should use the process_attachment tool to process it and follow the instructions below:
|
774 |
+
- For audio or video attachments, the process_attachment tool will transcribe the audio and return the transcript, which you can use to answer the question.
|
775 |
+
- For image attachments, the process_attachment tool will return a base64 encoded string of the image. You can use this encoded information to provide answer.
|
776 |
+
|
777 |
+
If the question is related to sports, awards, historical facts or similar topic that can be answered from wikipedia, you should use the 'wikipedia_and_generalknowledge_search', if the question is based on current events or news kind then you can utilize the tool 'current_events_news_search_tool' to fetch relevant page information and answer from it.
|
778 |
+
You must not use multiple tools in a single call. Don't hallucinate.
|
779 |
+
|
780 |
|
781 |
For each question, follow this format:
|
782 |
|
|
|
788 |
... (repeat Thought/Action/Action Input/Observation as needed)
|
789 |
Final Answer: the answer to the original question, as concise as possible (number, short string, or comma-separated list, no extra explanation).
|
790 |
|
|
|
|
|
|
|
|
|
791 |
**Examples:**
|
792 |
Q: Which country had the least number of athletes at the 1928 Summer Olympics?
|
793 |
Final Answer: Luxembourg
|
|
|
798 |
Q: List the top 3 programming languages.
|
799 |
Final Answer: Python, JavaScript, Java
|
800 |
|
+ Q: What is the maximum number of birds in the video https://www.youtube.com/watch?v=example?
+ Final Answer: 12

+ If after 12 iterations also a tool usage is not useful then try to answer directly based on your knowledge. If you cannot answer then just say "no_answer" as YOUR FINAL ANSWER.

"""
# If your final answer is something like 'there were 5 studio albums published between 2000 and 2009' then modify YOUR FINAL ANSWER as: '5'
|
|
    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
+         response = requests.get(questions_url, timeout=120)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
|
|
    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
+         response = requests.post(submit_url, json=submission_data, timeout=120)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
requirements.txt
CHANGED
@@ -12,4 +12,5 @@ beautifulsoup4
mimetype
PyMuPDF
yt_dlp
- pandas
+ pandas
+ pillow