PyQuarX committed on
Commit 1ab0ddc · verified · 1 Parent(s): 636189b

Update scraper.py

Files changed (1)
  1. scraper.py +11 -10
scraper.py CHANGED
@@ -1,25 +1,25 @@
 from selenium import webdriver
 from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.chrome.options import Options
+import time
 from bs4 import BeautifulSoup
-import os
-from shutil import which
-
 
 def scrape_website(website):
-    print("Launching chrome browser...")
+    print("Launching chromium browser...")
 
     chrome_driver_path = "/usr/lib/chromium/chromedriver"
-    options = webdriver.ChromeOptions()
+
+    options = Options()
     options.binary_location = "/usr/bin/chromium"
-    options.add_argument('--no-sandbox')
-    options.add_argument('--headless')
-    options.add_argument('--disable-dev-shm-usage')
+    options.add_argument("--headless")
+    options.add_argument("--no-sandbox")
+    options.add_argument("--disable-dev-shm-usage")
 
     driver = webdriver.Chrome(service=Service(chrome_driver_path), options=options)
 
-
     try:
         driver.get(website)
+        print("Page Loaded...")
         html = driver.page_source
         return html
     finally:
@@ -35,8 +35,9 @@ def clean_body_content(body_content):
     soup = BeautifulSoup(body_content, "html.parser")
     for script_or_style in soup(["script", "style"]):
         script_or_style.extract()
+
     cleaned_content = soup.get_text(separator="\n")
     return "\n".join(line.strip() for line in cleaned_content.splitlines() if line.strip())
 
 def split_dom_content(dom_content, max_length=60000):
-    return [dom_content[i:i + max_length] for i in range(0, len(dom_content), max_length)]
+    return [dom_content[i:i + max_length] for i in range(0, len(dom_content), max_length)]
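
For context, here is a minimal usage sketch (not part of this commit) chaining the three functions touched by this diff. The example URL, the `scraper` module name used for the import, and the direct hand-off of the full page source into clean_body_content are assumptions for illustration; the project may extract the page body in code outside this diff.

# Minimal usage sketch -- illustration only, not part of this commit.
# Assumes scraper.py is importable as `scraper` and that the full page
# source can be passed straight into clean_body_content.
from scraper import scrape_website, clean_body_content, split_dom_content

if __name__ == "__main__":
    url = "https://example.com"        # placeholder target
    html = scrape_website(url)         # headless Chromium fetch via the new Options() setup
    text = clean_body_content(html)    # drop <script>/<style> tags, strip blank lines
    chunks = split_dom_content(text)   # slice into 60,000-character chunks (the max_length default)
    print(f"Got {len(chunks)} chunk(s); first chunk starts with:\n{chunks[0][:200]}")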