import gradio as gr
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
from fpdf import FPDF
import tempfile
import re
import time
import json
import logging
import asyncio
import aiohttp
from aiolimiter import AsyncLimiter
import sqlite3
from contextlib import contextmanager
from threading import local
import concurrent.futures
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Thread-local storage for database connections
thread_local = local()
# Rate limiter: 10 requests per second
rate_limiter = AsyncLimiter(10, 1)
@contextmanager
def get_db_connection():
    if not hasattr(thread_local, "connection"):
        thread_local.connection = sqlite3.connect('crawl_cache.db')
    try:
        yield thread_local.connection
    finally:
        pass  # We'll keep the connection open for reuse
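# Note: the context manager above hands out one long-lived sqlite3 connection per
# thread (via thread_local) instead of opening and closing a connection on every
# query; the empty `finally` is deliberate so the connection survives across calls.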
def init_db():
    with get_db_connection() as conn:
        c = conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS pages
                     (url TEXT PRIMARY KEY, content TEXT, depth INTEGER)''')
        c.execute('''CREATE INDEX IF NOT EXISTS idx_url ON pages(url)''')
        conn.commit()
init_db()
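# Cache inspection sketch (not used by the app itself): the crawl cache can be
# checked or cleared from a separate Python shell, assuming the same crawl_cache.db
# file in the working directory:
#   import sqlite3
#   conn = sqlite3.connect('crawl_cache.db')
#   print(conn.execute("SELECT url, depth FROM pages").fetchall())
#   conn.execute("DELETE FROM pages"); conn.commit()  # drop all cached pages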
def clean_text(text):
    text = ''.join(char for char in text if char.isprintable())
    text = re.sub(r'[^\x00-\x7F]+', ' ', text)
    return text
async def get_page_content(session, url):
    try:
        async with rate_limiter:
            async with session.get(url, timeout=30) as response:
                if response.status == 200:
                    html = await response.text()
                    soup = BeautifulSoup(html, 'html.parser')
                    content = []
                    main_content = soup.find('article') or soup.find('main') or soup
                    if main_content:
                        for tag in ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li']:
                            for element in main_content.find_all(tag):
                                text = clean_text(element.get_text(strip=True))
                                if text:
                                    content.append(text)
                    logger.info(f"Found {len(content)} content items for {url}")
                    return content
                else:
                    logger.error(f"Error fetching {url}: HTTP {response.status}")
                    return [f"Error fetching {url}: HTTP {response.status}"]
    except Exception as e:
        logger.error(f"Error processing {url}: {str(e)}")
        return [f"Error processing {url}: {str(e)}"]
async def get_links(session, url, base_url):
    try:
        async with rate_limiter:
            async with session.get(url, timeout=30) as response:
                if response.status == 200:
                    html = await response.text()
                    soup = BeautifulSoup(html, 'html.parser')
                    links = soup.find_all('a', href=True)
                    valid_links = []
                    for link in links:
                        full_url = urljoin(url, link['href'])
                        if full_url.startswith(base_url) and full_url != url:
                            valid_links.append(full_url)
                    return valid_links
                else:
                    logger.error(f"Error fetching links from {url}: HTTP {response.status}")
                    return []
    except Exception as e:
        logger.error(f"Error getting links from {url}: {str(e)}")
        return []
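# Note: get_links only keeps URLs that start with base_url, so the crawl stays on
# the same documentation tree and never follows external links; fragment or query
# variants of the same page are still treated as distinct URLs.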
async def crawl_pages(base_url, max_depth):
    visited = set()
    to_visit = [(base_url, 0)]
    all_pages = []
    async with aiohttp.ClientSession() as session:
        while to_visit:
            current_url, depth = to_visit.pop(0)
            if current_url in visited or depth > max_depth:
                continue
            visited.add(current_url)
            start_time = time.time()
            with get_db_connection() as conn:
                c = conn.cursor()
                c.execute("SELECT content FROM pages WHERE url = ?", (current_url,))
                result = c.fetchone()
            if result:
                content = json.loads(result[0])  # Cached pages are stored as JSON text
            else:
                content = await get_page_content(session, current_url)
                with get_db_connection() as conn:
                    c = conn.cursor()
                    # Store the content list as JSON rather than str()/eval()
                    c.execute("INSERT INTO pages VALUES (?, ?, ?)",
                              (current_url, json.dumps(content), depth))
                    conn.commit()
            all_pages.append((current_url, content))
            logger.info(f"Processed page: {current_url} at depth {depth} in {time.time() - start_time:.2f} seconds")
            if depth < max_depth:
                links = await get_links(session, current_url, base_url)
                for link in links:
                    if link not in visited:
                        to_visit.append((link, depth + 1))
    return all_pages
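# Note: because new links are appended to the end of to_visit and pages are popped
# from the front, crawl_pages does a breadth-first traversal: every depth-1 page is
# fetched before any depth-2 page. Pages already in the SQLite cache skip the
# network request entirely.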
def website_to_pdf(all_pages):
logger.info(f"Starting PDF generation for {len(all_pages)} pages")
pdf = FPDF()
pdf.set_auto_page_break(auto=True, margin=15)
pdf.add_page()
pdf.set_font("Arial", size=12)
for page_url, content in all_pages:
pdf.cell(0, 10, txt=page_url, ln=True)
pdf.ln(5)
for text in content:
try:
pdf.multi_cell(0, 10, txt=text[:200]) # Limit text length to avoid issues
except Exception as e:
logger.error(f"Error writing text to PDF: {str(e)}")
if pdf.get_y() > 250: # Add a new page if the current page is almost full
pdf.add_page()
with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp:
pdf_path = tmp.name
pdf.output(pdf_path)
logger.info(f"PDF saved to: {pdf_path}")
return pdf_path
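# Note: FPDF's built-in "Arial" font only handles latin-1 text, which is why
# clean_text() strips non-ASCII characters before anything reaches multi_cell();
# without that, pages containing e.g. curly quotes or emoji could raise encoding
# errors during PDF generation.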
async def process_url(url, depth):
    try:
        all_pages = await crawl_pages(url, depth)
        pdf_file = website_to_pdf(all_pages)
        return pdf_file
    except Exception as e:
        logger.error(f"Error in process_url: {str(e)}")
        # Raise a Gradio error instead of returning a string, since the output is a File
        raise gr.Error(f"An error occurred: {str(e)}")
def run_async(url, depth):
    return asyncio.run(process_url(url, depth))
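# Headless usage sketch (bypasses the Gradio UI; the URL and depth are just examples):
#   pdf_path = run_async("https://www.gradio.app/docs", 2)
#   print(f"PDF written to {pdf_path}")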
iface = gr.Interface(
    fn=run_async,
    inputs=[
        gr.Textbox(label="Enter website URL (e.g., https://www.gradio.app/docs)"),
        gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Crawl Depth")
    ],
    outputs=gr.File(label="Download PDF"),
    title="Website to PDF Converter",
    description="Enter a docs URL and a crawl depth to convert the documentation pages into a single PDF. Only crawl sites you have permission to scrape."
)
if __name__ == "__main__":
    iface.launch()