Update app.py
app.py
CHANGED
@@ -25,68 +25,29 @@ load_dotenv()
 
 
 
-
-
-    """Search Wikipedia for a query and return up to 2 results."""
-    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
-
-    formatted_search_docs = "\n\n---\n\n".join(
-        [
-            f'<Document source="{doc.metadata.get("source", "Wikipedia")}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
-            for doc in search_docs
-        ]
-    )
-    return formatted_search_docs
-
-
-@tool
-def wikidata_query(query: str) -> str:
-    """
-    Run a SPARQL query on Wikidata and return results.
-    """
-    endpoint_url = "https://query.wikidata.org/sparql"
-    headers = {
-        "Accept": "application/sparql-results+json"
-    }
-    response = requests.get(endpoint_url, headers=headers, params={"query": query})
-    data = response.json()
-    return json.dumps(data, indent=2)
-
+from smolagents import Tool
+from langchain_community.document_loaders import WikipediaLoader
 
-
-
-    "
-
-
-
-
+class WikiSearchTool(Tool):
+    name = "wiki_search"
+    description = "Search Wikipedia for a query and return up to 2 results."
+    inputs = {
+        "query": {"type": "string", "description": "The search term for Wikipedia."}
+    }
+    output_type = "string"
 
-
-
-
-    formatted_search_docs = "\n\n---\n\n".join(
-        [
-            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
-            for doc in search_docs
-        ])
-
-    return formatted_search_docs
+    def forward(self, query: str) -> str:
+        search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
 
+        formatted_search_docs = "\n\n---\n\n".join(
+            [
+                f'<Document source="{doc.metadata.get("source", "Wikipedia")}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
+                for doc in search_docs
+            ]
+        )
+        return formatted_search_docs
 
-@tool
-def arxiv_search(query: str) -> str:
-    """Search Arxiv for a query and return maximum 3 result.
 
-    Args:
-        query: The search query."""
-    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
-    formatted_search_docs = "\n\n---\n\n".join(
-        [
-            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
-            for doc in search_docs
-        ])
-    return formatted_search_docs
-
 
 
 class StringReverseTool(Tool):
@@ -227,7 +188,7 @@ class BasicAgent:
         )
 
         search_tool = DuckDuckGoSearchTool()
-        wiki_search_tool =
+        wiki_search_tool = WikiSearchTool()
         str_reverse_tool = StringReverseTool()
         keywords_extract_tool = KeywordsExtractorTool()
         speech_to_text_tool = SpeechToTextTool()
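The commit swaps the old module-level @tool functions (the orphaned Wikipedia search body, wikidata_query, and arxiv_search) for a single smolagents Tool subclass, WikiSearchTool, which wraps WikipediaLoader and returns the results as <Document>-tagged text. A minimal sketch of exercising the new tool on its own (not part of the commit; it assumes the class definition added above is in scope) could look like:

# Sketch only: call the new tool directly, outside BasicAgent.
# Assumes the WikiSearchTool class exactly as added in the diff above.
wiki = WikiSearchTool()

# forward() is the method smolagents invokes when an agent uses the tool;
# calling it directly returns the joined <Document> blocks as one string.
print(wiki.forward(query="Alan Turing")[:500])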