Update app.py
app.py
CHANGED
@@ -10,7 +10,10 @@ from collections import Counter
 import re
 from io import BytesIO
 from youtube_transcript_api import YouTubeTranscriptApi
-
+from langchain_community.tools.tavily_search import TavilySearchResults
+from langchain_community.document_loaders import WikipediaLoader
+from langchain_community.utilities import WikipediaAPIWrapper
+from langchain_community.document_loaders import ArxivLoader
 
 # (Keep Constants as is)
 # --- Constants ---
@@ -21,17 +24,68 @@ load_dotenv()
 
 
 
-import wikipedia
 
 @tool
-
-
-
-
-
-
-
-
+def wiki_search(query: str) -> str:
+    """Search Wikipedia for a query and return up to 2 results."""
+    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
+
+    formatted_search_docs = "\n\n---\n\n".join(
+        [
+            f'<Document source="{doc.metadata.get("source", "Wikipedia")}" page="{doc.metadata.get("page", "")}">\n{doc.page_content}\n</Document>'
+            for doc in search_docs
+        ]
+    )
+    return formatted_search_docs
+
+
+@tool
+def wikidata_query(query: str) -> str:
+    """
+    Run a SPARQL query on Wikidata and return results.
+    """
+    endpoint_url = "https://query.wikidata.org/sparql"
+    headers = {
+        "Accept": "application/sparql-results+json"
+    }
+    response = requests.get(endpoint_url, headers=headers, params={"query": query})
+    data = response.json()
+    return json.dumps(data, indent=2)
+
+
+@tool
+def web_search(query: str) -> str:
+    """Search Tavily for a query and return up to 3 results."""
+    tavily_key = os.getenv("TAVILY_API_KEY")
+
+    if not tavily_key:
+        return "Error: Tavily API key not set."
+
+    search_tool = TavilySearchResults(tavily_api_key=tavily_key, max_results=3)
+    search_docs = search_tool.invoke(query)  # tool input is the query string itself
+
+    formatted_search_docs = "\n\n---\n\n".join(
+        [
+            f'<Document source="{doc["url"]}">\n{doc["content"]}\n</Document>'  # Tavily returns plain dicts
+            for doc in search_docs
+        ])
+
+    return formatted_search_docs
+
+
+@tool
+def arxiv_search(query: str) -> str:
+    """Search arXiv for a query and return a maximum of 3 results.
+
+    Args:
+        query: The search query."""
+    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
+    formatted_search_docs = "\n\n---\n\n".join(
+        [
+            f'<Document source="{doc.metadata.get("source", doc.metadata.get("Title", ""))}">\n{doc.page_content[:1000]}\n</Document>'
+            for doc in search_docs
+        ])
+    return formatted_search_docs
 
 
 
@@ -173,7 +227,7 @@ class BasicAgent:
         )
 
         search_tool = DuckDuckGoSearchTool()
-        wiki_search_tool =
+        wiki_search_tool = wiki_search  # pass the tool object; @tool already wraps the function
         str_reverse_tool = StringReverseTool()
         keywords_extract_tool = KeywordsExtractorTool()
         speech_to_text_tool = SpeechToTextTool()
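The @tool decorator turns each of these functions into a LangChain StructuredTool, so they are passed around as objects (as in the wiki_search_tool assignment above) and executed via .invoke() rather than called directly. A minimal smoke test, as a sketch that assumes app.py exposes these names and that TAVILY_API_KEY is set in the environment:

# Hypothetical smoke test; assumes the tools are importable from app.py
# and that TAVILY_API_KEY is set. StructuredTool instances run via .invoke().
from app import wiki_search, web_search, arxiv_search

print(wiki_search.invoke({"query": "Alan Turing"})[:300])
print(web_search.invoke({"query": "James Webb Space Telescope"})[:300])
print(arxiv_search.invoke({"query": "retrieval augmented generation"})[:300])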
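wikidata_query expects a complete SPARQL string and returns the endpoint's JSON response verbatim; note that its body relies on requests and json, which app.py presumably imports above the shown hunks. A small hypothetical query that fetches the English label of Douglas Adams (Q42):

# Hypothetical SPARQL example for wikidata_query.
sparql = """
SELECT ?itemLabel WHERE {
  BIND(wd:Q42 AS ?item)
  SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
}
"""
print(wikidata_query.invoke({"query": sparql}))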