broadfield-dev committed
Commit f46bc9b · verified · 1 Parent(s): 8440e4d

Create websearch_logic.py

Files changed (1)
  1. websearch_logic.py +281 -0
websearch_logic.py ADDED
@@ -0,0 +1,281 @@
import requests
from bs4 import BeautifulSoup, Comment
import logging
import os
import re
from duckduckgo_search import DDGS
from googlesearch import search as google_search_lib

logger = logging.getLogger(__name__)

DEFAULT_TIMEOUT = 10
MAX_CONTENT_LENGTH_PER_URL = 3000
MAX_TOTAL_SCRAPED_CONTENT = 9000
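
# Third-party dependencies assumed from the imports above (likely PyPI names):
#   requests, beautifulsoup4, duckduckgo-search, googlesearch-python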
# scrape_url function remains the same (as the last version with the BeautifulSoup Comment fix)
def scrape_url(url_to_scrape, query_filter=None):
    try:
        logger.debug(f"SCRAPER_MODULE: Scraping URL: {url_to_scrape}")
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'en-US,en;q=0.9',
            'Referer': 'https://www.google.com/'
        }
        response = requests.get(url_to_scrape, headers=headers, timeout=DEFAULT_TIMEOUT, allow_redirects=True)
        response.raise_for_status()

        content_type = response.headers.get('content-type', '').lower()
        if 'html' not in content_type:
            logger.info(f"SCRAPER_MODULE: Skipping non-HTML: {url_to_scrape} (type: {content_type})")
            return {"url": url_to_scrape, "title": url_to_scrape, "error": f"Non-HTML: {content_type}"}

        soup = BeautifulSoup(response.content, 'html.parser')

        # Drop scripts, page chrome, and common ad/widget containers before extracting text.
        for element_type in ["script", "style", "nav", "footer", "aside", "form", "iframe", "noscript",
                             "header", "menu", "button", "figure", "figcaption", "link", "meta",
                             ".sidebar", ".ad", ".advertisement", ".popup", ".modal", ".share", ".social",
                             ".related-posts", ".comments-area", ".site-footer", ".site-header",
                             ".widget", ".cookie-banner", ".gdpr", "dialog"]:
            for element in soup.select(element_type):
                element.decompose()
        # Strip HTML comments (uses the Comment class imported from bs4).
        for comment_node in soup.find_all(string=lambda text: isinstance(text, Comment)):
            comment_node.extract()

        # Prefer an obvious main-content container; fall back to <body> or the whole document.
        main_content_selectors = ['main', 'article', '.main-content', '.entry-content', '.post-content',
                                  '.td-post-content', '.page-content', 'div[role="main"]',
                                  'div[class*="content"]', 'div[class*="article"]', 'div[class*="post"]',
                                  'div[id*="content"]', 'div[id*="main"]']
        content_area = next((soup.select_one(s) for s in main_content_selectors if soup.select_one(s)), soup.body or soup)

        text_parts = []
        if content_area:
            tags_to_check = ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'td', 'th', 'pre']
            for element in content_area.find_all(tags_to_check):
                text = element.get_text(separator=' ', strip=True)
                if text and len(text) > 10:
                    text_parts.append(text)
            # If tag-level extraction found too little, fall back to the area's full text.
            if not text_parts or len(" ".join(text_parts)) < 200:
                all_text_from_area = content_area.get_text(separator='\n', strip=True)
                all_text_from_area = re.sub(r'\n\s*\n+', '\n\n', all_text_from_area)
                if all_text_from_area and len(all_text_from_area) > len(" ".join(text_parts)):
                    text_parts = [line for line in all_text_from_area.split('\n\n') if line.strip() and len(line.strip()) > 10]

        full_text = "\n\n".join(dict.fromkeys(text_parts))  # de-duplicate while preserving order
        if not full_text and hasattr(soup, 'body') and soup.body:
            full_text = soup.body.get_text(separator='\n', strip=True)
        elif not full_text:
            full_text = "Content could not be extracted."

        full_text = re.sub(r'\s{3,}', ' ', full_text)
        full_text = re.sub(r'(\n\s*){3,}', '\n\n', full_text)

        title_tag = soup.find('title')
        title = title_tag.string.strip() if title_tag and title_tag.string else url_to_scrape
        if len(full_text) > MAX_CONTENT_LENGTH_PER_URL:
            full_text = full_text[:MAX_CONTENT_LENGTH_PER_URL] + "..."

        logger.debug(f"SCRAPER_MODULE: Scraped {len(full_text)} chars from {url_to_scrape}. Title: {title}")
        return {"url": url_to_scrape, "title": title.strip(), "content": full_text.strip()}
    except requests.exceptions.HTTPError as e:
        logger.error(f"SCRAPER_MODULE: HTTP error {url_to_scrape}: {e.response.status_code}")
        return {"url": url_to_scrape, "title": url_to_scrape, "error": f"HTTP error: {e.response.status_code}"}
    except requests.exceptions.Timeout:
        logger.error(f"SCRAPER_MODULE: Timeout {url_to_scrape}")
        return {"url": url_to_scrape, "title": url_to_scrape, "error": "Timeout"}
    except requests.exceptions.RequestException as e:
        logger.error(f"SCRAPER_MODULE: Request failed {url_to_scrape}: {e}")
        return {"url": url_to_scrape, "title": url_to_scrape, "error": f"Request failed: {e}"}
    except Exception as e:
        logger.error(f"SCRAPER_MODULE: Scraping error {url_to_scrape}: {e}", exc_info=True)
        return {"url": url_to_scrape, "title": url_to_scrape, "error": f"Internal scraping error: {e}"}
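
# Shape of the dict scrape_url() returns (values below are illustrative):
#   success: {"url": "https://example.com/post", "title": "Example post", "content": "First paragraph ..."}
#   failure: {"url": "https://example.com/post", "title": "https://example.com/post", "error": "Timeout"}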
# search_and_scrape_duckduckgo remains the same as the last version.
def search_and_scrape_duckduckgo(search_query, num_results=3):
    scraped_data_all_urls = []
    total_content_collected_length = 0
    try:
        logger.info(f"SCRAPER_MODULE (DDG): Searching for: '{search_query}' (max {num_results} results)")
        search_results_urls_info = []
        with DDGS() as ddgs:
            # ddgs.text() may return a list or a generator depending on the library version,
            # so materialize it before slicing.
            ddg_search_results = list(ddgs.text(search_query, max_results=num_results + 2, region='wt-wt', safesearch='moderate'))
            if ddg_search_results:
                for result in ddg_search_results[:num_results]:
                    if result.get('href'):
                        search_results_urls_info.append({
                            "url": result['href'],
                            "title": result.get('title', 'N/A'),
                            "description": result.get('body', 'N/A')
                        })

        logger.info(f"SCRAPER_MODULE (DDG): Found {len(search_results_urls_info)} URLs: {[r['url'] for r in search_results_urls_info]}")
        if not search_results_urls_info:
            return [{"query": search_query, "engine": "DuckDuckGo", "error": "No search results."}]

        for res_info in search_results_urls_info:
            url_to_scrape = res_info["url"]
            if total_content_collected_length >= MAX_TOTAL_SCRAPED_CONTENT:
                break
            scraped_info = scrape_url(url_to_scrape)
            if scraped_info:
                # Fall back to the search result's title if scraping did not produce a usable one.
                if not scraped_info.get("title") or scraped_info.get("title") == url_to_scrape:
                    scraped_info["title"] = res_info.get("title", url_to_scrape)
                current_content = scraped_info.get("content", "")
                ddg_desc = res_info.get("description")
                # If the page yielded little or no content (or errored), lead with the search snippet.
                if ddg_desc and (not current_content or len(current_content) < 150 or scraped_info.get("error")):
                    scraped_info["content"] = f"Search result snippet: {ddg_desc}\n\n(Content from page below {'or error encountered' if scraped_info.get('error') else ''}):\n{current_content if current_content else 'No content extracted.'}"
                scraped_data_all_urls.append(scraped_info)
                if scraped_info.get("content") and not scraped_info.get("error"):
                    total_content_collected_length += len(scraped_info["content"])
            else:
                scraped_data_all_urls.append({"url": url_to_scrape, "title": res_info.get("title", url_to_scrape), "error": "Scraping function returned no data."})
        return scraped_data_all_urls
    except Exception as e:
        logger.error(f"SCRAPER_MODULE (DDG): Error for '{search_query}': {e}", exc_info=True)
        return [{"query": search_query, "engine": "DuckDuckGo", "error": f"DDG search/scrape failed: {str(e)}"}]
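
# Illustrative call (assumes network access and the duckduckgo_search package):
#   results = search_and_scrape_duckduckgo("python web scraping", num_results=3)
#   -> a list of per-URL dicts as produced by scrape_url(), or a single
#      {"query": ..., "engine": "DuckDuckGo", "error": ...} entry on failure.
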
def search_and_scrape_google(search_query, num_results=10):
    """
    Performs a Google search (via the googlesearch-python library) and scrapes the top results.
    """
    scraped_data_all_urls = []
    total_content_collected_length = 0
    try:
        logger.info(f"SCRAPER_MODULE (Google): Searching for: '{search_query}' (num_results={num_results})")
        search_results_info = []  # Will hold dicts built from SearchResult objects

        try:
            # The library sets its own User-Agent. advanced=True returns SearchResult objects
            # carrying url, title and description; sleep_interval adds a politeness delay
            # between the HTTP requests the library makes when it needs several result pages
            # (for a small num_results it typically makes a single request).
            # The query is passed positionally because the first parameter's name differs
            # between googlesearch packages ('term' in googlesearch-python, 'query' in older forks).
            results_iterable = google_search_lib(
                search_query,
                num_results=num_results,
                lang='en',
                advanced=True,
                sleep_interval=1.0
            )

            # Convert SearchResult objects to our dict format.
            for res_obj in results_iterable:
                if hasattr(res_obj, 'url'):  # SearchResult object (advanced=True)
                    search_results_info.append({
                        "url": res_obj.url,
                        "title": getattr(res_obj, 'title', 'N/A'),
                        "description": getattr(res_obj, 'description', 'N/A')
                    })
                elif isinstance(res_obj, str):  # Fallback if advanced=False or the library changes
                    search_results_info.append({"url": res_obj, "title": "N/A", "description": "N/A"})

            logger.info(f"SCRAPER_MODULE (Google): Found {len(search_results_info)} result objects: {[r['url'] for r in search_results_info]}")

        except Exception as e_search:
            logger.error(f"SCRAPER_MODULE (Google): Error during google_search_lib call for '{search_query}': {e_search}", exc_info=True)
            if "HTTP Error 429" in str(e_search):
                return [{"query": search_query, "engine": "Google", "error": "Google search blocked (HTTP 429). Try again later or use DuckDuckGo."}]
            return [{"query": search_query, "engine": "Google", "error": f"Google search library failed: {str(e_search)}"}]

        if not search_results_info:
            return [{"query": search_query, "engine": "Google", "error": "No search results retrieved."}]

        for res_info_item in search_results_info:
            url_to_scrape = res_info_item["url"]
            if total_content_collected_length >= MAX_TOTAL_SCRAPED_CONTENT:
                logger.info("SCRAPER_MODULE (Google): Reached max total content length. Stopping further scraping.")
                break

            scraped_info = scrape_url(url_to_scrape)
            if scraped_info:
                # Use Google's title/description if scraping failed to get them or the content is short.
                if not scraped_info.get("title") or scraped_info.get("title") == url_to_scrape:
                    scraped_info["title"] = res_info_item.get("title", url_to_scrape)

                current_content = scraped_info.get("content", "")
                google_desc = res_info_item.get("description")

                if google_desc and (not current_content or len(current_content) < 150 or scraped_info.get("error")):
                    scraped_info["content"] = f"Search result description: {google_desc}\n\n(Content from page below {'or error encountered' if scraped_info.get('error') else ''}):\n{current_content if current_content else 'No content extracted.'}"
                elif not current_content and google_desc:  # No content at all was scraped
                    scraped_info["content"] = f"Could not scrape full content. Search result description: {google_desc}"

                scraped_data_all_urls.append(scraped_info)
                if scraped_info.get("content") and not scraped_info.get("error"):
                    total_content_collected_length += len(scraped_info["content"])
            else:
                scraped_data_all_urls.append({"url": url_to_scrape, "title": res_info_item.get("title", "N/A"), "error": "Scraping function (scrape_url) returned no data."})

        return scraped_data_all_urls

    except Exception as e:
        logger.error(f"SCRAPER_MODULE (Google): General error during search_and_scrape_google for '{search_query}': {e}", exc_info=True)
        return [{"query": search_query, "engine": "Google", "error": f"Overall Google search or scraping process failed: {str(e)}"}]
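
# Illustrative failure shape when Google rate-limits the search (mirrors the handler above):
#   [{"query": "...", "engine": "Google",
#     "error": "Google search blocked (HTTP 429). Try again later or use DuckDuckGo."}]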

def search_and_scrape(search_query, num_results=10):
    """
    Default entry point. The search logic is identical to search_and_scrape_google,
    so this simply delegates to it.
    """
    return search_and_scrape_google(search_query, num_results=num_results)
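
# Example usage sketch (illustrative; requires network access and the third-party
# packages imported above). The query string below is just a placeholder.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    demo_query = "python web scraping best practices"  # placeholder query
    for item in search_and_scrape(demo_query, num_results=3):
        if item.get("error"):
            print(f"[ERROR] {item.get('url', item.get('query', 'n/a'))}: {item['error']}")
        else:
            print(f"{item['title']} ({item['url']})")
            print(item.get("content", "")[:300] + "...\n")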