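"""Advanced URL & Text Processing Toolkit.

Gradio app that validates and fetches URLs (with special handling for social
media, Google Drive and Google Calendar links), processes uploaded text files
and ZIP archives, cleans the extracted text, saves the combined results as
JSON, and can render that JSON as a QR code.
"""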
import json
import sys
import os
import re
import time
import logging
import mimetypes
import tempfile
from datetime import datetime
from pathlib import Path
from urllib.parse import urlparse
from typing import List, Dict, Tuple, Union, Optional
import requests
import validators
import gradio as gr
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from cleantext import clean
import qrcode
import zipfile

# Setup logging with detailed configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('app.log', encoding='utf-8')
    ])
logger = logging.getLogger(__name__)


class URLProcessor:
    def __init__(self):
        self.session = requests.Session()
        self.timeout = 10  # seconds
        self.session.headers.update({
            'User-Agent': UserAgent().random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',  # omit 'br' so responses stay decodable without a brotli package
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1'
        })

    def advanced_text_cleaning(self, text: str) -> str:
        """Robust text cleaning with version compatibility"""
        try:
            cleaned_text = clean(
                text,
                to_ascii=True,
                lower=True,
                no_line_breaks=True,
                no_urls=True,
                no_emails=True,
                no_phone_numbers=True,
                no_numbers=False,
                no_digits=False,
                no_currency_symbols=True,
                no_punct=False
            ).strip()
            return cleaned_text
        except Exception as e:
            logger.warning(f"Text cleaning error: {e}. Using fallback method.")
            text = re.sub(r'[\x00-\x1F\x7F-\x9F]', '', text)  # Remove control characters
            text = text.encode('ascii', 'ignore').decode('ascii')  # Remove non-ASCII characters
            text = re.sub(r'\s+', ' ', text)  # Normalize whitespace
            return text.strip()

    def validate_url(self, url: str) -> Dict:
        """Validate URL format and accessibility"""
        try:
            if not validators.url(url):
                return {'is_valid': False, 'message': 'Invalid URL format'}

            # Try with DNS resolution retry
            for attempt in range(3):  # Try up to 3 times
                try:
                    # Some sites block HEAD requests but allow GET
                    try:
                        response = self.session.head(url, timeout=self.timeout)
                        response.raise_for_status()
                    except Exception as e:
                        logger.warning(f"HEAD request failed for {url}, trying GET: {e}")
                        # Try with GET request if HEAD fails
                        response = self.session.get(url, timeout=self.timeout, stream=True)
                        response.raise_for_status()
                        # Close the connection to avoid downloading the entire content
                        response.close()
                    
                    return {'is_valid': True, 'message': 'URL is valid and accessible'}
                except requests.exceptions.ConnectionError as e:
                    if "NameResolutionError" in str(e) or "Failed to resolve" in str(e):
                        logger.warning(f"DNS resolution failed for {url}, attempt {attempt+1}/3")
                        time.sleep(1)  # Wait a bit before retrying
                        continue
                    else:
                        raise
                except Exception as e:
                    raise
            
            # If we get here, all attempts failed
            return {'is_valid': False, 'message': 'URL validation failed: DNS resolution failed after multiple attempts'}
                
        except Exception as e:
            logger.error(f"URL validation failed for {url}: {str(e)}")
            return {'is_valid': False, 'message': f'URL validation failed: {str(e)}'}

    def fetch_content(self, url: str) -> Optional[Dict]:
        """Universal content fetcher with special case handling"""
        try:
            logger.info(f"Fetching content from: {url}")
            
            # Google Drive document handling
            if 'drive.google.com' in url:
                return self._handle_google_drive(url)

            # Google Calendar ICS handling
            if 'calendar.google.com' in url and 'ical' in url:
                return self._handle_google_calendar(url)

            # Standard HTML processing
            result = self._fetch_html_content(url)
            
            # Log the result status
            if result:
                logger.info(f"Successfully extracted content from {url} ({len(result.get('content', ''))} chars)")
            else:
                logger.error(f"Failed to extract content from {url}")
                
            return result
        except Exception as e:
            logger.error(f"Content fetch failed for {url}: {e}")
            return None
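
    # NOTE: fetch_content() above calls _handle_google_drive and _handle_google_calendar,
    # but neither method is defined anywhere in this file. The two sketches below are
    # minimal, assumed implementations added so those calls do not raise AttributeError;
    # the Drive export-URL construction and the plain ICS fetch are assumptions, not the
    # original author's code.
    def _handle_google_drive(self, url: str) -> Optional[Dict]:
        """Minimal sketch: fetch a Google Drive file via its direct-download URL."""
        try:
            file_id = re.search(r'/file/d/([a-zA-Z0-9_-]+)', url)
            if not file_id:
                logger.error(f"Could not extract file ID from Google Drive URL: {url}")
                return None
            direct_url = f"https://drive.google.com/uc?export=download&id={file_id.group(1)}"
            response = self.session.get(direct_url, timeout=self.timeout)
            response.raise_for_status()
            return {
                'content': response.text,
                'content_type': response.headers.get('Content-Type', ''),
                'timestamp': datetime.now().isoformat(),
                'url': url
            }
        except Exception as e:
            logger.error(f"Google Drive processing failed for {url}: {e}")
            return None

    def _handle_google_calendar(self, url: str) -> Optional[Dict]:
        """Minimal sketch: fetch an exported Google Calendar ICS feed as plain text."""
        try:
            response = self.session.get(url, timeout=self.timeout)
            response.raise_for_status()
            return {
                'content': response.text,
                'content_type': 'text/calendar',
                'timestamp': datetime.now().isoformat(),
                'url': url
            }
        except Exception as e:
            logger.error(f"Google Calendar processing failed for {url}: {e}")
            return None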

    def _fetch_html_content(self, url: str) -> Optional[Dict]:
        """Standard HTML content processing"""
        try:
            # Try with a different user agent if it's a social media site.
            # Match on the hostname rather than a raw substring so that, e.g.,
            # 'xbox.com' is not mistaken for 'x.com'.
            hostname = urlparse(url).netloc.lower()
            if any(hostname == domain or hostname.endswith('.' + domain)
                   for domain in ['facebook.com', 'instagram.com', 'twitter.com', 'x.com', 'huggingface.co']):
                self.session.headers.update({
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
                    # Add cookie consent headers to bypass some login walls
                    'Cookie': 'c_user=0; xs=0; datr=0; locale=en_US; wd=1920x1080'
                })
                # For Facebook, try to access the mobile version which often has fewer restrictions
                if 'facebook.com' in url and 'm.facebook.com' not in url:
                    url = url.replace('www.facebook.com', 'm.facebook.com')
                    logger.info(f"Switched to mobile Facebook URL: {url}")
            
            response = self.session.get(url, timeout=self.timeout)
            response.raise_for_status()
            
            logger.info(f"Response status: {response.status_code}, Content-Type: {response.headers.get('Content-Type')}")
            
            # Save the raw HTML to the system temp directory for debugging if needed
            debug_path = os.path.join(tempfile.gettempdir(), f"debug_raw_{int(time.time())}.html")
            with open(debug_path, "w", encoding="utf-8") as f:
                f.write(response.text)
            logger.info(f"Saved raw HTML to {debug_path}")
            
            soup = BeautifulSoup(response.text, 'html.parser')

            # Remove unwanted elements
            for element in soup(['script', 'style', 'nav', 'footer', 'header', 'meta', 'link']):
                element.decompose()
                
            # Simulate "ESC key" by removing login walls and overlays common on social media sites
            login_wall_selectors = [
                '.login-wall', '.signup-wall', '.overlay', '.modal', 
                '[role="dialog"]', '[aria-modal="true"]', '.login-overlay', 
                '.signup-overlay', '.uiLayer', '.fb_overlay', '.ReactModalPortal',
                '[data-testid="login_dialog"]', '[data-testid="signup_dialog"]',
                '.login-signup-modal', '.onboarding-modal', '.signup-wrapper',
                '.login-wrapper', '.login-container', '.signup-container',
                '.login-modal', '.signup-modal', '.auth-modal', '.auth-wall'
            ]
            for selector in login_wall_selectors:
                for element in soup.select(selector):
                    logger.info(f"Removing login wall element: {selector}")
                    element.decompose()
                
            # Enhanced removal for social media sites
            if 'facebook.com' in url:
                # Facebook specific elements - simulating ESC key
                fb_selectors = [
                    '[data-testid="cookie-policy-manage-dialog"]', 
                    '[role="banner"]', '[role="complementary"]', 
                    '.login_form_container', '.login_form', '#login_form', 
                    '.uiLayer', '.pluginConnectButton', '.fbPageBanner', 
                    '._5hn6', '._67m7', '.nonLoggedInSignUp', 
                    '#headerArea', '.uiContextualLayer', '.uiContextualLayerPositioner'
                ]
                for selector in fb_selectors:
                    for element in soup.select(selector):
                        element.decompose()
                
                # Look for the main content in mobile version
                main_content = soup.select_one('#m_story_permalink_view') or soup.select_one('#mobile_injected_video_feed_pagelet')
                if main_content:
                    logger.info("Found Facebook mobile main content")
                    
            elif 'instagram.com' in url:
                # Instagram specific elements - simulating ESC key
                ig_selectors = [
                    '[role="presentation"]', '[role="banner"]', '[role="complementary"]', 
                    '.RnEpo', '._acb3', '._ab8w', '._abn5', '.x1n2onr6', 
                    '.x78zum5', '.x1q0g3np', '.xieb3on', '._a9-z', '._a9_1', 
                    '._aa4b', '.x1i10hfl', '.x9f619', '.xnz67gz', '.x78zum5',
                    '.x1q0g3np', '.x1gslohp', '.xieb3on', '.x1lcm9me'
                ]
                for selector in ig_selectors:
                    for element in soup.select(selector):
                        element.decompose()
                
                # Try to find the main content
                insta_content = soup.select_one('main article') or soup.select_one('._aagv') or soup.select_one('._ab1y')
                if insta_content:
                    logger.info("Found Instagram main content")
                    
            elif 'twitter.com' in url or 'x.com' in url:
                # X/Twitter already works well for public content, but clean up any remaining overlays
                x_selectors = [
                    '[data-testid="LoginForm"]', '[data-testid="SignupForm"]',
                    '[data-testid="sheetDialog"]', '[data-testid="mask"]',
                    '.r-zchlnj', '.r-1xcajam', '.r-1d2f490', '.r-1p0dtai', 
                    '.r-1pi2tsx', '.r-u8s1d', '.css-175oi2r', '.css-1dbjc4n', 
                    '.r-kemksi', '[data-testid="BottomBar"]'
                ]
                for selector in x_selectors:
                    for element in soup.select(selector):
                        element.decompose()
                        
            elif 'huggingface.co' in url:
                # Special handling for Hugging Face
                logger.info("Applying special handling for Hugging Face")
                # Try to find the main content
                hf_selectors = ['.prose', '.space-content', '.model-description', '.dataset-description', 'article', '.markdown']
                for selector in hf_selectors:
                    elements = soup.select(selector)
                    if elements:
                        logger.info(f"Found Hugging Face content with selector: {selector}")
                        break

            # Extract content using a general approach - try multiple strategies
            # Strategy 1: Look for semantic HTML5 elements
            main_content = None
            for selector in ['main', 'article', 'section', '.content', '.main', '.body', '.post', '.entry', '.page']:
                elements = soup.select(selector)
                if elements:
                    main_content = elements[0]
                    logger.info(f"Found content with selector: {selector}")
                    break
            
            # Strategy 2: If no semantic elements, try common class names
            if not main_content or not main_content.get_text(strip=True):
                for div in soup.find_all('div'):
                    class_name = div.get('class', [])
                    id_name = div.get('id', '')
                    if any(term in ' '.join(class_name).lower() for term in ['content', 'main', 'body', 'article', 'post']):
                        main_content = div
                        logger.info(f"Found content with div class: {class_name}")
                        break
                    if any(term in id_name.lower() for term in ['content', 'main', 'body', 'article', 'post']):
                        main_content = div
                        logger.info(f"Found content with div id: {id_name}")
                        break
            
            # Strategy 3: Fall back to body
            if not main_content or not main_content.get_text(strip=True):
                logger.info(f"No main content container found for {url}, using body")
                main_content = soup.body if soup.body else soup
                
            # Extract text with proper spacing
            text_content = main_content.get_text(separator='\n', strip=True)
            
            # Strategy 4: If content is too short, extract all visible text
            if len(text_content) < 100:
                logger.info(f"Content too short for {url} ({len(text_content)} chars), using all visible text")
                visible_text = []
                for element in soup.find_all(['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'span', 'div']):
                    if element.get_text(strip=True):
                        visible_text.append(element.get_text(strip=True))
                text_content = '\n'.join(visible_text)
                
            # Strategy 5: Last resort - get all text from the page
            if len(text_content) < 50:
                logger.info(f"Still insufficient content for {url} ({len(text_content)} chars), using entire page text")
                text_content = soup.get_text(separator='\n', strip=True)

            # Clean and structure content
            cleaned_content = self.advanced_text_cleaning(text_content)
            
            logger.info(f"Final content length: {len(cleaned_content)} chars")
            
            # If we still have no content, this is a failure
            if len(cleaned_content) < 20:
                logger.error(f"Failed to extract meaningful content from {url}")
                return None

            return {
                'content': cleaned_content,
                'content_type': response.headers.get('Content-Type', ''),
                'timestamp': datetime.now().isoformat(),
                'url': url  # Add the URL to the returned data for reference
            }
        except Exception as e:
            logger.error(f"HTML processing failed for {url}: {e}")
            return None


class FileProcessor:
    """Class to handle file processing"""

    def __init__(self, max_file_size: int = 2 * 1024 * 1024 * 1024):  # 2GB default
        self.max_file_size = max_file_size
        self.supported_text_extensions = {'.txt', '.md', '.csv', '.json', '.xml'}

    def is_text_file(self, filepath: str) -> bool:
        """Check if file is a text file"""
        try:
            mime_type, _ = mimetypes.guess_type(filepath)
            return (mime_type and mime_type.startswith('text/')) or \
                   (os.path.splitext(filepath)[1].lower() in self.supported_text_extensions)
        except Exception:
            return False

    def process_file(self, file) -> List[Dict]:
        """Process uploaded file with enhanced error handling"""
        if not file:
            return []
        dataset = []
        try:
            file_size = os.path.getsize(file.name)
            if file_size > self.max_file_size:
                logger.warning(f"File size ({file_size} bytes) exceeds maximum allowed size")
                return []
            with tempfile.TemporaryDirectory() as temp_dir:
                if zipfile.is_zipfile(file.name):
                    dataset.extend(self._process_zip_file(file.name, temp_dir))
                else:
                    dataset.extend(self._process_single_file(file))
        except Exception as e:
            logger.error(f"Error processing file: {str(e)}")
            return []
        return dataset

    def _process_zip_file(self, zip_path, temp_dir):
        """Extract a ZIP archive and process the text files it contains (including nested directories)."""
        result = []
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(temp_dir)
            # Walk the extracted tree so files inside subdirectories are not missed
            for root, _, files in os.walk(temp_dir):
                for name in files:
                    extracted_file_path = os.path.join(root, name)
                    # Skip anything that does not look like a text file
                    if not self.is_text_file(extracted_file_path):
                        continue
                    with open(extracted_file_path, 'r', encoding='utf-8', errors='ignore') as f:
                        result.append({
                            'source': 'file_from_zip',
                            'filename': name,
                            'content': f.read(),
                            'timestamp': datetime.now().isoformat()
                        })
        return result

    def _process_single_file(self, file) -> List[Dict]:
        try:
            file_stat = os.stat(file.name)

            # For very large files, read in chunks and summarize
            if file_stat.st_size > 100 * 1024 * 1024:  # 100MB
                logger.info(f"Processing large file: {file.name} ({file_stat.st_size} bytes)")

                # Read only the first and last 1 MB of extremely large files.
                # Read in binary and decode explicitly so the byte-offset seek is safe
                # even if it lands in the middle of a multi-byte character.
                chunk_size = 1 * 1024 * 1024
                with open(file.name, 'rb') as f:
                    content = f.read(chunk_size).decode('utf-8', errors='ignore')
                    content += "\n...[Content truncated due to large file size]...\n"

                    # Seek to the last 1 MB and read to the end
                    f.seek(max(0, file_stat.st_size - chunk_size))
                    content += f.read().decode('utf-8', errors='ignore')
            else:
                # Regular file processing
                with open(file.name, 'r', encoding='utf-8', errors='ignore') as f:
                    content = f.read()

            return [{
                'source': 'file',
                'filename': os.path.basename(file.name),
                'file_size': file_stat.st_size,
                'mime_type': mimetypes.guess_type(file.name)[0],
                'created': datetime.fromtimestamp(file_stat.st_ctime).isoformat(),
                'modified': datetime.fromtimestamp(file_stat.st_mtime).isoformat(),
                'content': content,
                'timestamp': datetime.now().isoformat()
            }]
        except Exception as e:
            logger.error(f"File processing error: {e}")
            return []

# Module-level orchestration: process URLs, files, text and scratchpad notes in one pass
def process_all_inputs(urls, file, text, notes):
    """Process all input types with progress tracking"""
    try:
        processor = URLProcessor()
        file_processor = FileProcessor()
        results = []

        # Process URLs
        if urls:
            url_list = re.split(r'[,\n]', urls)
            url_list = [url.strip() for url in url_list if url.strip()]

            for url in url_list:
                validation = processor.validate_url(url)
                if validation.get('is_valid'):
                    content = processor.fetch_content(url)
                    if content:
                        results.append({
                            'source': 'url',
                            'url': url,
                            'content': content,
                            'timestamp': datetime.now().isoformat()
                        })

        # Process files
        if file:
            results.extend(file_processor.process_file(file))

        # Process text input
        if text:
            cleaned_text = processor.advanced_text_cleaning(text)
            results.append({
                'source': 'direct_input',
                'content': cleaned_text,
                'timestamp': datetime.now().isoformat()
            })
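
        # Process scratchpad notes (assumption: the 'notes' argument is wired to the
        # Scratchpad tab but was previously ignored; treat it like direct text input)
        if notes:
            results.append({
                'source': 'scratchpad',
                'content': processor.advanced_text_cleaning(notes),
                'timestamp': datetime.now().isoformat()
            })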

        # Generate output
        if results:
            output_dir = Path('output') / datetime.now().strftime('%Y-%m-%d')
            output_dir.mkdir(parents=True, exist_ok=True)
            output_path = output_dir / f'processed_{int(time.time())}.json'

            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(results, f, ensure_ascii=False, indent=2)

            summary = f"Processed {len(results)} items successfully!"
            json_data = json.dumps(results, indent=2)  # Prepare JSON for QR code
            return str(output_path), summary, json_data  # Return JSON for editor
        else:
            return None, "No valid content to process.", ""

    except Exception as e:
        logger.error(f"Processing error: {e}")
        return None, f"Error: {str(e)}", ""

# Wrapper used by the Gradio "Generate QR Code" button
def generate_qr_code(json_data):
    """Generate a QR code from JSON data and return the image file path, or None if there is no data."""
    if not json_data:
        return None
    return generate_qr(json_data)

# Core QR generation helper
def generate_qr(json_data):
    """Generate QR code from JSON data and return the file path."""
    try:
        # Try first with automatic version selection
        qr = qrcode.QRCode(
            error_correction=qrcode.constants.ERROR_CORRECT_L,
            box_size=10,
            border=4,
        )
        qr.add_data(json_data)
        qr.make(fit=True)
        
        img = qr.make_image(fill_color="black", back_color="white")  # render from the populated QRCode instance
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        img.save(temp_file.name)
        return temp_file.name
    except Exception as e:
        # If the data is too large for a QR code
        logger.error(f"QR generation error: {e}")
        
        # Create a simple QR with error message
        qr = qrcode.QRCode(
            version=1,
            error_correction=qrcode.constants.ERROR_CORRECT_L,
            box_size=10,
            border=4,
        )
        qr.add_data("Error: Data too large for QR code")
        qr.make(fit=True)
        
        img = qr.make_image(fill_color="black", back_color="white")
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        img.save(temp_file.name)
        return temp_file.name

def create_interface():
    """Create a comprehensive Gradio interface with advanced features"""
    css = """
    .container { max-width: 1200px; margin: auto; }
    .warning { background-color: #fff3cd; color: #856404; }
    .error { background-color: #f8d7da; color: #721c24; }
    """

    with gr.Blocks(css=css, title="Advanced Text & URL Processor") as interface:
        gr.Markdown("# 🌐 Advanced URL & Text Processing Toolkit")

        with gr.Tab("URL Processing"):
            url_input = gr.Textbox(
                label="Enter URLs (comma or newline separated)",
                lines=5,
                placeholder="https://example1.com\nhttps://example2.com"
            )

        with gr.Tab("File Input"):
            file_input = gr.File(
                label="Upload text file or ZIP archive",
                file_types=[".txt", ".zip", ".md", ".csv", ".json", ".xml"]
            )

        with gr.Tab("Text Input"):
            text_input = gr.Textbox(
                label="Raw Text Input",
                lines=5,
                placeholder="Paste your text here..."
            )

        with gr.Tab("JSON Editor"):
            json_editor = gr.Textbox(
                label="JSON Editor",
                lines=20,
                placeholder="View and edit your JSON data here...",
                interactive=True,
                elem_id="json-editor"  # Optional: for custom styling
            )

        with gr.Tab("Scratchpad"):
            scratchpad = gr.Textbox(
                label="Scratchpad",
                lines=10,
                placeholder="Quick notes or text collections...",
                interactive=True
            )

        process_btn = gr.Button("Process Input", variant="primary")
        qr_btn = gr.Button("Generate QR Code", variant="secondary")

        output_text = gr.Textbox(label="Processing Results", interactive=False)
        output_file = gr.File(label="Processed Output")
        qr_output = gr.Image(label="QR Code", type="filepath")  # To display the generated QR code

        process_btn.click(
            process_all_inputs,
            inputs=[url_input, file_input, text_input, scratchpad],
            outputs=[output_file, output_text, json_editor]  # Update outputs to include JSON editor
        )

        qr_btn.click(
            generate_qr_code,
            inputs=json_editor,
            outputs=qr_output
        )

        gr.Markdown("""
        ### Usage Guidelines
        - **URL Processing**: Enter valid HTTP/HTTPS URLs
        - **File Input**: Upload text files or ZIP archives
        - **Text Input**: Direct text processing
        - **JSON Editor**: View and edit your JSON data
        - **Scratchpad**: Quick notes or text collections
        - Advanced cleaning and validation included
        """)
    return interface

def check_network_connectivity():
    """Check if the network is working properly by testing connection to common sites"""
    test_sites = ["https://www.google.com", "https://www.cloudflare.com", "https://www.amazon.com"]
    results = []
    
    for site in test_sites:
        try:
            response = requests.get(site, timeout=5)
            results.append({
                "site": site,
                "status": "OK" if response.status_code == 200 else f"Error: {response.status_code}",
                "response_time": response.elapsed.total_seconds()
            })
        except Exception as e:
            results.append({
                "site": site,
                "status": f"Error: {str(e)}",
                "response_time": None
            })
    
    # If all sites failed, there might be a network issue
    if all(result["status"].startswith("Error") for result in results):
        logger.error("Network connectivity issue detected. All test sites failed.")
        return False, results
    
    return True, results

# Application entry point
def main():
    # Configure system settings
    mimetypes.init()
    
    # Check network connectivity
    network_ok, network_results = check_network_connectivity()
    if not network_ok:
        logger.warning("Network connectivity issues detected. Some features may not work properly.")
        for result in network_results:
            logger.warning(f"Test site {result['site']}: {result['status']}")
    
    # Create and launch interface
    interface = create_interface()
    
    # Launch with proper configuration
    interface.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True,
        share=False,
        inbrowser=True,
        debug=True
    )

if __name__ == "__main__":
    main()