import dash
from dash import dcc, html, Input, Output, State
import dash_bootstrap_components as dbc
from dash.exceptions import PreventUpdate
import base64
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
from fpdf import FPDF
import tempfile
import re
import json
import logging
import asyncio
import aiohttp
from aiolimiter import AsyncLimiter
import sqlite3
from contextlib import contextmanager
from threading import local
import time
import os

# Initialize Dash app
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server

# Logging setup
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Thread-local storage for database connections
thread_local = local()

# Rate limiter: 10 requests per second
rate_limiter = AsyncLimiter(10, 1)

@contextmanager
def get_db_connection():
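    # Reuse one SQLite connection per thread; sqlite3 connections should not be shared across threads.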
    if not hasattr(thread_local, "connection"):
        thread_local.connection = sqlite3.connect('crawl_cache.db')
    try:
        yield thread_local.connection
    finally:
        pass  # We'll keep the connection open for reuse

def init_db():
    with get_db_connection() as conn:
        c = conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS pages
                     (url TEXT PRIMARY KEY, content TEXT, depth INTEGER)''')
        c.execute('''CREATE INDEX IF NOT EXISTS idx_url ON pages(url)''')
        conn.commit()

init_db()

def clean_text(text):
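    # Strip non-printable and non-ASCII characters so the text renders safely with FPDF's latin-1 core fonts.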
    text = ''.join(char for char in text if char.isprintable())
    text = re.sub(r'[^\x00-\x7F]+', ' ', text)
    return text

async def get_page_content(session, url):
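    # Fetch the page and pull readable text from <article>/<main> (falling back to the whole document),
    # collecting paragraph, heading, and list-item text.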
    try:
        async with rate_limiter:
            async with session.get(url, timeout=30) as response:
                if response.status == 200:
                    text = await response.text()
                    soup = BeautifulSoup(text, 'html.parser')
                    content = []
                    main_content = soup.find('article') or soup.find('main') or soup
                    if main_content:
                        for tag in ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li']:
                            for element in main_content.find_all(tag):
                                text = clean_text(element.get_text(strip=True))
                                if text:
                                    content.append(text)
                    logger.info(f"Found {len(content)} content items for {url}")
                    return content
                else:
                    logger.error(f"Error fetching {url}: HTTP {response.status}")
                    return [f"Error fetching {url}: HTTP {response.status}"]
    except Exception as e:
        logger.error(f"Error processing {url}: {str(e)}")
        return [f"Error processing {url}: {str(e)}"]

async def get_links(session, url, base_url):
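    # Collect links on the page that stay within base_url (simple same-site filter).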
    try:
        async with rate_limiter:
            async with session.get(url, timeout=30) as response:
                if response.status == 200:
                    text = await response.text()
                    soup = BeautifulSoup(text, 'html.parser')
                    links = soup.find_all('a', href=True)
                    valid_links = []
                    for link in links:
                        full_url = urljoin(url, link['href'])
                        if full_url.startswith(base_url) and full_url != url:
                            valid_links.append(full_url)
                    return valid_links
                else:
                    logger.error(f"Error fetching links from {url}: HTTP {response.status}")
                    return []
    except Exception as e:
        logger.error(f"Error getting links from {url}: {str(e)}")
        return []

async def crawl_pages(base_url, max_depth):
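    # Breadth-first crawl from base_url, bounded by max_depth.
    # Fetched pages are cached in SQLite so repeated runs skip URLs that were already processed.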
    visited = set()
    to_visit = [(base_url, 0)]
    all_pages = []

    async with aiohttp.ClientSession() as session:
        while to_visit:
            current_url, depth = to_visit.pop(0)
            if current_url in visited or depth > max_depth:
                continue

            visited.add(current_url)
            start_time = time.time()

            with get_db_connection() as conn:
                c = conn.cursor()
                c.execute("SELECT content FROM pages WHERE url = ?", (current_url,))
                result = c.fetchone()

            if result:
                content = json.loads(result[0])  # Deserialize the cached content list (avoids eval on stored data)
            else:
                content = await get_page_content(session, current_url)
                with get_db_connection() as conn:
                    c = conn.cursor()
                    c.execute("INSERT INTO pages VALUES (?, ?, ?)", (current_url, json.dumps(content), depth))
                    conn.commit()

            all_pages.append((current_url, content))
            logger.info(f"Processed page: {current_url} at depth {depth} in {time.time() - start_time:.2f} seconds")

            if depth < max_depth:
                links = await get_links(session, current_url, base_url)
                for link in links:
                    if link not in visited:
                        to_visit.append((link, depth + 1))

    return all_pages

def website_to_pdf(all_pages):
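    # Render each crawled page as a URL header followed by its extracted text blocks.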
    logger.info(f"Starting PDF generation for {len(all_pages)} pages")
    
    pdf = FPDF()
    pdf.set_auto_page_break(auto=True, margin=15)
    pdf.add_page()
    pdf.set_font("Arial", size=12)

    for page_url, content in all_pages:
        pdf.cell(0, 10, txt=page_url, ln=True)
        pdf.ln(5)
        for text in content:
            try:
                pdf.multi_cell(0, 10, txt=text[:200])  # Truncate long blocks so a single cell stays manageable
            except Exception as e:
                logger.error(f"Error writing text to PDF: {str(e)}")
        if pdf.get_y() > 250:  # Add a new page if the current page is almost full
            pdf.add_page()

    with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp:
        pdf_path = tmp.name
        pdf.output(pdf_path)
        logger.info(f"PDF saved to: {pdf_path}")
    
    return pdf_path

async def process_url(url, depth):
    try:
        all_pages = await crawl_pages(url, depth)
        pdf_file = website_to_pdf(all_pages)
        return pdf_file
    except Exception as e:
        logger.error(f"Error in process_url: {str(e)}")
        return f"An error occurred: {str(e)}"

# App layout
app.layout = dbc.Container([
    dbc.Navbar(
        dbc.Container([
            html.A(
                dbc.Row([
                    dbc.Col(html.Img(src="/assets/logo.png", height="30px")),
                    dbc.Col(dbc.NavbarBrand("Website to PDF Converter", className="ms-2")),
                ],
                align="center",
                className="g-0",
                ),
                href="/",
                style={"textDecoration": "none"},
            )
        ]),
        color="#116F70",
        dark=True,
    ),
    
    dbc.Card(
        dbc.CardBody([
            html.H1("Website to PDF Converter", className="text-center mb-4"),
            html.P("Enter a documentation URL and a crawl depth to convert the site's pages into a single PDF. Only crawl sites you have permission to scrape.", className="text-center mb-4"),
            dbc.Input(id="url-input", type="text", placeholder="Enter website URL (e.g., https://www.gradio.app/docs)", className="mb-3"),
            dcc.Slider(id="depth-slider", min=1, max=10, step=1, value=3, marks={i: str(i) for i in range(1, 11)}, className="mb-3"),
            dbc.Button("Convert to PDF", id="submit-button", color="primary", className="mb-3 w-100"),
            dbc.Spinner(html.Div(id="output-area"), color="primary", type="grow"),
        ]),
        className="mt-4"
    )
], fluid=True)

@app.callback(
    Output("output-area", "children"),
    Input("submit-button", "n_clicks"),
    State("url-input", "value"),
    State("depth-slider", "value"),
    prevent_initial_call=True
)
def update_output(n_clicks, url, depth):
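    # Runs the crawl synchronously (blocking this worker) and returns the PDF as a base64 data URI
    # so the browser can download it without a separate file route.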
    if not url:
        return "Please enter a valid URL."
    
    pdf_path = asyncio.run(process_url(url, depth))
    
    if pdf_path.startswith("An error occurred"):
        return pdf_path
    
    with open(pdf_path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode()
    
    os.unlink(pdf_path)  # Remove the temporary file
    
    return html.Div([
        html.H4("PDF Generated Successfully"),
        html.A(
            dbc.Button("Download PDF", color="success", className="mt-2"),
            href=f"data:application/pdf;base64,{encoded}",
            download="website_content.pdf"
        )
    ])

if __name__ == '__main__':
    app.run_server(debug=True, host='0.0.0.0', port=7860)