"""
HTML Downloader component for web crawler
"""

import time
import logging
import asyncio
import requests
from requests.exceptions import RequestException
from typing import Dict, Optional, List
import aiohttp
from aiohttp.client_exceptions import ClientError

from models import URL, Page, calculate_content_hash
from dns_resolver import DNSResolver
from robots import RobotsHandler
import config

# Configure logging
logging.basicConfig(
    level=getattr(logging, config.LOG_LEVEL),
    format=config.LOG_FORMAT
)
logger = logging.getLogger(__name__)


class HTMLDownloader:
    """
    HTML Downloader responsible for downloading web pages
    
    Features:
    - Respects robots.txt rules
    - Uses DNS caching for performance
    - Handles errors and retries
    - Supports both synchronous and asynchronous downloads
    """
    
    def __init__(self, 
                 dns_resolver: Optional[DNSResolver] = None,
                 robots_handler: Optional[RobotsHandler] = None,
                 user_agent: Optional[str] = None):
        """
        Initialize HTML Downloader
        
        Args:
            dns_resolver: DNS resolver for hostname resolution
            robots_handler: Handler for robots.txt
            user_agent: User agent to use for requests
        """
        self.dns_resolver = dns_resolver or DNSResolver()
        self.robots_handler = robots_handler or RobotsHandler()
        self.user_agent = user_agent or config.USER_AGENT
        
        # Create request session
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': self.user_agent,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'max-age=0'
        })
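
        # Optional retry policy -- a hedged sketch, not part of the original design.
        # It mounts requests' HTTPAdapter with urllib3's Retry so transient
        # connection errors and 5xx responses are retried with exponential backoff.
        # The retry count and status codes below are illustrative assumptions.
        try:
            from requests.adapters import HTTPAdapter
            from urllib3.util.retry import Retry

            retries = Retry(
                total=3,
                backoff_factor=0.5,
                status_forcelist=[500, 502, 503, 504],
            )
            adapter = HTTPAdapter(max_retries=retries)
            self.session.mount('http://', adapter)
            self.session.mount('https://', adapter)
        except ImportError:
            logger.debug("HTTPAdapter/Retry unavailable; continuing without retries")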
    
    def download(self, url_obj: URL) -> Optional[Page]:
        """
        Download an HTML page from a URL
        
        Args:
            url_obj: URL object to download
            
        Returns:
            Page object or None if download fails
        """
        url = url_obj.url
        try:
            # Check robots.txt first
            if config.ROBOTSTXT_OBEY:
                allowed, crawl_delay = self.robots_handler.can_fetch(url)
                if not allowed:
                    logger.info(f"URL not allowed by robots.txt: {url}")
                    url_obj.status = "robotstxt_excluded"
                    return None
                
                # Respect crawl delay if specified
                if crawl_delay and crawl_delay > 0:
                    time.sleep(crawl_delay)
            
            # Resolve DNS
            ip_address = self.dns_resolver.resolve(url)
            if not ip_address:
                logger.warning(f"Failed to resolve DNS for URL: {url}")
                url_obj.error = "DNS resolution failed"
                return None
            
            # Download page with specific headers
            start_time = time.time()
            response = self.session.get(
                url,
                timeout=config.CRAWL_TIMEOUT,
                allow_redirects=True,
                stream=True,  # Stream to avoid downloading large files fully
                headers={
                    'User-Agent': self.user_agent,
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9',
                    'Accept-Language': 'en-US,en;q=0.5',
                    'Accept-Encoding': 'gzip',  # Only accept gzip to avoid encoding issues
                    'Connection': 'keep-alive'
                }
            )
            
            # Log response details
            logger.debug(f"Response status code: {response.status_code}")
            logger.debug(f"Response headers: {dict(response.headers)}")
            
            # Check content type
            content_type = response.headers.get('Content-Type', '').lower()
            logger.debug(f"Content type for {url}: {content_type}")
            
            is_html = (
                '*/*' in config.ALLOWED_CONTENT_TYPES
                or any(allowed_type in content_type for allowed_type in config.ALLOWED_CONTENT_TYPES)
            )
            
            if not is_html:
                logger.info(f"Skipping non-HTML content ({content_type}): {url}")
                url_obj.error = f"Non-HTML content type: {content_type}"
                response.close()  # release the streamed connection back to the pool
                return None
            
            # Read content incrementally (with size limit)
            content = b""
            for chunk in response.iter_content(chunk_size=1024*1024):  # 1MB chunks
                content += chunk
                if len(content) > config.MAX_CONTENT_SIZE:
                    logger.info(f"Content exceeded max size during download: {url}")
                    url_obj.error = f"Content exceeded max size: {len(content)} bytes"
                    response.close()  # abandon the stream so the connection is released
                    return None
            
            # Log content details
            logger.debug(f"Downloaded content size: {len(content)} bytes")
            logger.debug(f"First 100 bytes (hex): {content[:100].hex()}")
            
            # Check for UTF-8 BOM
            if content.startswith(b'\xef\xbb\xbf'):
                content = content[3:]
                logger.debug("Removed UTF-8 BOM from content")
            
            # Try to detect encoding from response headers
            encoding = None
            if 'charset=' in content_type:
                # Strip any trailing parameters and surrounding quotes from the charset value
                encoding = content_type.split('charset=')[-1].split(';')[0].strip().strip('"\'')
                logger.debug(f"Found encoding in Content-Type header: {encoding}")
            
            # Try to detect encoding from content
            try:
                import chardet
                detected = chardet.detect(content)
                if detected['confidence'] > 0.8:  # Only use if confidence is high
                    encoding = detected['encoding']
                    logger.debug(f"Detected encoding using chardet: {encoding} (confidence: {detected['confidence']})")
            except ImportError:
                logger.debug("chardet not available for encoding detection")
            
            # Decode content with fallbacks
            html_content = None
            encodings_to_try = [
                encoding,
                'utf-8',
                'utf-8-sig',
                'iso-8859-1',
                'cp1252',
                'ascii'
            ]
            
            # Markers used to sanity-check that decoded text actually looks like HTML
            html_markers = ('<!DOCTYPE', '<html', '<head', '<body')
            
            for enc in encodings_to_try:
                if not enc:
                    continue
                try:
                    decoded = content.decode(enc)
                except (UnicodeDecodeError, LookupError):
                    logger.debug(f"Failed to decode content using {enc} encoding")
                    continue
                if any(marker in decoded[:1000] for marker in html_markers):
                    html_content = decoded
                    logger.debug(f"Successfully decoded content using {enc} encoding")
                    break
                logger.debug(f"Decoded with {enc} but content doesn't look like HTML")
            
            if html_content is None:
                logger.warning(f"Failed to decode content as HTML for URL: {url}")
                url_obj.error = "Failed to decode content as HTML"
                return None
            
            # Calculate hash for duplicate detection
            content_hash = calculate_content_hash(html_content)
            
            elapsed_time = time.time() - start_time
            
            # Create page object
            page = Page(
                url=url,
                status_code=response.status_code,
                content=html_content,
                content_type=content_type,
                content_length=len(content),
                content_hash=content_hash,
                headers={k.lower(): v for k, v in response.headers.items()},
                crawled_at=time.time(),
                redirect_url=response.url if response.url != url else None,
                elapsed_time=elapsed_time
            )
            
            logger.info(f"Downloaded {len(content)} bytes from {url} in {elapsed_time:.2f}s")
            return page
            
        except RequestException as e:
            logger.warning(f"Request error for URL {url}: {e}")
            url_obj.error = f"Request error: {str(e)}"
            return None
            
        except Exception as e:
            logger.error(f"Unexpected error downloading URL {url}: {e}")
            url_obj.error = f"Unexpected error: {str(e)}"
            return None
    
    async def download_async(self, url_obj: URL, session: Optional[aiohttp.ClientSession] = None) -> Optional[Page]:
        """
        Download an HTML page asynchronously
        
        Args:
            url_obj: URL object to download
            session: Optional aiohttp session to use
            
        Returns:
            Page object or None if download fails
        """
        url = url_obj.url
        own_session = False
        
        try:
            # Check robots.txt first (blocking call)
            if config.ROBOTSTXT_OBEY:
                allowed, crawl_delay = self.robots_handler.can_fetch(url)
                if not allowed:
                    logger.info(f"URL not allowed by robots.txt: {url}")
                    url_obj.status = "robotstxt_excluded"
                    return None
                
                # Respect crawl delay if specified
                if crawl_delay and crawl_delay > 0:
                    await asyncio.sleep(crawl_delay)
            
            # Resolve DNS (blocking call, but cached)
            ip_address = self.dns_resolver.resolve(url)
            if not ip_address:
                logger.warning(f"Failed to resolve DNS for URL: {url}")
                url_obj.error = "DNS resolution failed"
                return None
            
            # Create session if not provided
            if session is None:
                own_session = True
                session = aiohttp.ClientSession(headers={
                    'User-Agent': self.user_agent,
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9',
                    'Accept-Language': 'en-US,en;q=0.5',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Connection': 'keep-alive',
                    'Upgrade-Insecure-Requests': '1',
                    'Cache-Control': 'max-age=0'
                })
            
            # Download page
            start_time = time.time()
            async with session.get(
                url,
                timeout=aiohttp.ClientTimeout(total=config.CRAWL_TIMEOUT),
                allow_redirects=True
            ) as response:
                # Check content type
                content_type = response.headers.get('Content-Type', '').lower()
                is_html = (
                    '*/*' in config.ALLOWED_CONTENT_TYPES
                    or any(allowed_type in content_type for allowed_type in config.ALLOWED_CONTENT_TYPES)
                )
                
                if not is_html:
                    logger.info(f"Skipping non-HTML content ({content_type}): {url}")
                    url_obj.error = f"Non-HTML content type: {content_type}"
                    return None
                
                # Check declared content length, if present and well-formed
                try:
                    content_length = int(response.headers.get('Content-Length', 0))
                except ValueError:
                    content_length = 0
                if content_length > config.MAX_CONTENT_SIZE:
                    logger.info(f"Skipping large content ({content_length} bytes): {url}")
                    url_obj.error = f"Content too large: {content_length} bytes"
                    return None
                
                # Read content (with size limit)
                content = b""
                async for chunk in response.content.iter_chunked(1024*1024):  # 1MB chunks
                    content += chunk
                    if len(content) > config.MAX_CONTENT_SIZE:
                        logger.info(f"Content exceeded max size during download: {url}")
                        url_obj.error = f"Content exceeded max size: {len(content)} bytes"
                        return None
                
                # Decode content: try UTF-8 first; iso-8859-1 maps every byte value,
                # so it always succeeds and serves as the last-resort fallback
                try:
                    html_content = content.decode('utf-8')
                except UnicodeDecodeError:
                    html_content = content.decode('iso-8859-1')
                
                # Calculate hash for duplicate detection
                content_hash = calculate_content_hash(html_content)
                
                elapsed_time = time.time() - start_time
                
                # Create page object
                page = Page(
                    url=url,
                    status_code=response.status,
                    content=html_content,
                    content_type=content_type,
                    content_length=len(content),
                    content_hash=content_hash,
                    headers={k.lower(): v for k, v in response.headers.items()},
                    crawled_at=time.time(),
                    redirect_url=str(response.url) if str(response.url) != url else None,
                    elapsed_time=elapsed_time
                )
                
                logger.info(f"Downloaded {len(content)} bytes from {url} in {elapsed_time:.2f}s")
                return page
                
        except (ClientError, asyncio.TimeoutError) as e:
            logger.warning(f"Request error for URL {url}: {e}")
            url_obj.error = f"Request error: {str(e)}"
            return None
            
        except Exception as e:
            logger.error(f"Unexpected error downloading URL {url}: {e}")
            url_obj.error = f"Unexpected error: {str(e)}"
            return None
            
        finally:
            # Close session if we created it
            if own_session and session:
                await session.close()
    
    async def bulk_download(self, urls: List[URL], concurrency: int = 10) -> Dict[str, Optional[Page]]:
        """
        Download multiple URLs concurrently
        
        Args:
            urls: List of URL objects to download
            concurrency: Maximum number of concurrent downloads
            
        Returns:
            Dictionary mapping URL strings to Page objects
        """
        results = {}
        
        # Create a session to be shared across requests
        async with aiohttp.ClientSession(headers={
            'User-Agent': self.user_agent,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'max-age=0'
        }) as session:
            # Create a semaphore to limit concurrency
            semaphore = asyncio.Semaphore(concurrency)
            
            async def download_with_semaphore(url_obj):
                async with semaphore:
                    return await self.download_async(url_obj, session)
            
            # Create download tasks
            tasks = [download_with_semaphore(url_obj) for url_obj in urls]
            
            # Wait for all tasks to complete
            pages = await asyncio.gather(*tasks)
            
            # Map results
            for url_obj, page in zip(urls, pages):
                results[url_obj.url] = page
                
        return results
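

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). It assumes models.URL can be
# constructed from a plain URL string as URL(url=...); check models.py for the
# actual constructor before running.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    downloader = HTMLDownloader()

    # Synchronous download of a single page
    seed = URL(url="https://example.com/")
    page = downloader.download(seed)
    if page:
        print(f"Fetched {page.url}: {page.status_code}, {page.content_length} bytes")
    else:
        print(f"Download failed: {seed.error}")

    # Concurrent download of several pages sharing one aiohttp session
    seeds = [URL(url=u) for u in ("https://example.com/", "https://example.org/")]
    results = asyncio.run(downloader.bulk_download(seeds, concurrency=5))
    print(f"{sum(1 for p in results.values() if p)} of {len(seeds)} pages downloaded")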