import requests
import trafilatura
from newspaper import Article
from typing import Optional
from bs4 import BeautifulSoup

# Browser-like User-Agent so sites that reject the default python-requests
# UA still serve real HTML.
HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/115.0.0.0 Safari/537.36"
    )
}


def clean_text(text: str) -> str:
    """Normalize extracted article text.

    Flattens newlines, strips any residual HTML tags, and collapses runs of
    whitespace into single spaces.

    Args:
        text: Raw extracted text, possibly containing newlines/markup.

    Returns:
        A single-line, whitespace-normalized string.
    """
    cleaned = text.replace("\n", " ").strip()
    # Extractors occasionally leave markup behind; parse-and-take-text drops it.
    cleaned = BeautifulSoup(cleaned, "html.parser").text
    # split()/join collapses any remaining runs of whitespace.
    return " ".join(cleaned.split())


def scrape_url(url: str, timeout: int = 10, min_words: int = 100) -> Optional[str]:
    """Fetch *url* and return its main article text, or None on failure.

    Strategy: fetch the page ourselves and run trafilatura on the HTML; if
    that fails or yields fewer than *min_words* words, fall back to
    newspaper3k (which performs its own download and parse).

    Args:
        url: Page to scrape.
        timeout: Timeout in seconds for the initial HTTP fetch.
        min_words: Minimum word count for an extraction to be accepted
            (previously a hard-coded 100 in two places).

    Returns:
        Cleaned article text, or None if neither extractor produced enough
        content.
    """
    # Primary path: trafilatura on the raw HTML.
    try:
        response = requests.get(url, timeout=timeout, headers=HEADERS)
        if response.status_code == 200:
            extracted = trafilatura.extract(
                response.text, include_comments=False, include_tables=False
            )
            if extracted and len(extracted.split()) > min_words:
                return clean_text(extracted)
    except Exception as e:
        # Best-effort scraper: report and fall through to the fallback.
        print(f"⚠️ Trafilatura failed for {url}: {e}")

    # Fallback path: newspaper3k downloads and parses on its own.
    try:
        article = Article(url)
        article.download()
        article.parse()
        if article.text and len(article.text.split()) > min_words:
            return clean_text(article.text)
    except Exception as e:
        print(f"⚠️ Newspaper3k failed for {url}: {e}")

    return None