Create search_utils.py
search_utils.py +18 -0
search_utils.py
ADDED
@@ -0,0 +1,18 @@
+# search_utils.py — Simple web search function (DuckDuckGo-based fallback)
+import re
+import requests
+
+def web_search(query, num_results=3):
+    try:
+        url = "https://duckduckgo.com/html/"  # requests URL-encodes the query via params
+        headers = {"User-Agent": "Mozilla/5.0"}
+        resp = requests.get(url, params={"q": query}, headers=headers, timeout=10)
+        if resp.status_code != 200:
+            return "No web results found."
+
+        # Simple extract (DuckDuckGo HTML fallback): pull result titles out of the raw HTML
+        results = re.findall(r'<a rel="nofollow" class="result__a" href="(.*?)">(.*?)</a>', resp.text)
+        summaries = [re.sub(r'<[^<]+?>', '', title) for _, title in results]
+        return " | ".join(summaries[:num_results])
+    except Exception as e:
+        return f"Web search error: {str(e)}"
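For reference, a minimal usage sketch (not part of this commit; it assumes search_utils.py is importable from the caller's working directory and that outbound network access is available):

from search_utils import web_search

# Prints up to three result titles joined by " | ", or a fallback/error string.
print(web_search("hugging face spaces", num_results=3))

The function always returns a string, so callers can drop the result straight into a prompt or log without extra error handling. The regex-based extraction is intentionally simple, though: if DuckDuckGo changes its result markup, the function will quietly return an empty string rather than fail.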