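"""Convert a website subdirectory into a single PDF.

Crawls every page under a given URL prefix with requests/BeautifulSoup,
then renders the collected pages into one PDF with pdfkit (a wrapper
around the wkhtmltopdf binary). Served as a Gradio interface.
"""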
import gradio as gr
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import pdfkit

def get_subdirectory_pages(url, base_url, visited=None):
    """Recursively collect all pages whose URL starts with base_url."""
    # Create the visited set per crawl; a mutable default (visited=set())
    # would persist across calls and make repeat conversions return [].
    if visited is None:
        visited = set()
    if url in visited:
        return []
    visited.add(url)
    pages = [url]
    try:
        response = requests.get(url, timeout=10)
        soup = BeautifulSoup(response.text, 'html.parser')
        for link in soup.find_all('a'):
            href = link.get('href')
            if href:
                full_url = urljoin(base_url, href)
                # Follow only links that stay inside the subdirectory.
                if full_url.startswith(base_url) and full_url not in visited:
                    pages.extend(get_subdirectory_pages(full_url, base_url, visited))
    except Exception as e:
        print(f"Error processing {url}: {e}")
    return pages

def website_to_pdf(url):
    # Normalize the input so the crawl treats it as a directory prefix.
    parsed_url = urlparse(url)
    base_url = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path.rstrip('/')}/"
    all_pages = get_subdirectory_pages(base_url, base_url)
    options = {
        'page-size': 'A4',
        'margin-top': '0.75in',
        'margin-right': '0.75in',
        'margin-bottom': '0.75in',
        'margin-left': '0.75in',
    }
    output_file = "subdirectory_documentation.pdf"
    # pdfkit.from_url accepts a list of URLs and renders them into one PDF.
    pdfkit.from_url(all_pages, output_file, options=options)
    return output_file

def process_url(url):
    try:
        return website_to_pdf(url)
    except Exception as e:
        # Raise a Gradio error so the failure shows in the UI; returning the
        # message as a plain string would be treated as a file path by gr.File.
        raise gr.Error(f"An error occurred: {e}")

iface = gr.Interface(
    fn=process_url,
    inputs=gr.Textbox(label="Enter website URL (e.g., https://www.gradio.app/docs)"),
    outputs=gr.File(label="Download PDF"),
    title="Website Subdirectory to PDF Converter",
    description="Enter a website URL to convert its subdirectories into a PDF.",
)

if __name__ == "__main__":
    iface.launch()
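
# Local usage sketch (assumes the wkhtmltopdf binary is available, e.g. via
# `apt-get install wkhtmltopdf`; on a Hugging Face Space, add it to
# packages.txt):
#   python app.py
#   # then open the printed local URL (default http://127.0.0.1:7860) and
#   # enter a docs URL such as https://www.gradio.app/docs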