import json
import os
import re
import time
import logging
import mimetypes
import tempfile
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional
import requests
import validators
import gradio as gr
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from cleantext import clean
import qrcode
import zipfile

# Setup logging with detailed configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('app.log', encoding='utf-8')
    ])
logger = logging.getLogger(__name__)


class URLProcessor:
    def __init__(self):
        self.session = requests.Session()
        self.timeout = 10  # seconds
        self.session.headers.update({
            'User-Agent': UserAgent().random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1'
        })
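        # Note: UserAgent().random draws a browser UA string at startup;
        # older fake-useragent releases fetch their UA list online on first
        # use and can raise if that source is unreachable.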

    def advanced_text_cleaning(self, text: str) -> str:
        """Robust text cleaning with version compatibility"""
        try:
            cleaned_text = clean(
                text,
                to_ascii=True,
                lower=True,
                no_line_breaks=True,
                no_urls=True,
                no_emails=True,
                no_phone_numbers=True,
                no_numbers=False,
                no_digits=False,
                no_currency_symbols=True,
                no_punct=False
            ).strip()
            return cleaned_text
        except Exception as e:
            logger.warning(f"Text cleaning error: {e}. Using fallback method.")
            text = re.sub(r'[\x00-\x1F\x7F-\x9F]', '', text)  # Remove control characters
            text = text.encode('ascii', 'ignore').decode('ascii')  # Remove non-ASCII characters
            text = re.sub(r'\s+', ' ', text)  # Normalize whitespace
            return text.strip()
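
    # Illustrative example of the cleaning above (hypothetical input; exact
    # output depends on the installed clean-text version, which by default
    # replaces URLs, emails, and phone numbers with placeholder tokens):
    #
    #   URLProcessor().advanced_text_cleaning("Visit HTTPS://Example.com NOW!")
    #   -> "visit <url> now!"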

    def validate_url(self, url: str) -> Dict:
        """Validate URL format and accessibility"""
        try:
            if not validators.url(url):
                return {'is_valid': False, 'message': 'Invalid URL format'}

            # Some servers reject HEAD requests outright, so fall back to a
            # streamed GET before declaring the URL inaccessible.
            try:
                response = self.session.head(url, timeout=self.timeout)
                response.raise_for_status()
            except requests.RequestException:
                response = self.session.get(url, timeout=self.timeout, stream=True)
                response.raise_for_status()
                response.close()
            return {'is_valid': True, 'message': 'URL is valid and accessible'}
        except Exception as e:
            return {'is_valid': False, 'message': f'URL validation failed: {str(e)}'}

    def fetch_content(self, url: str) -> Optional[Dict]:
        """Universal content fetcher with special case handling"""
        try:
            # Google Drive document handling
            if 'drive.google.com' in url:
                return self._handle_google_drive(url)

            # Google Calendar ICS handling
            if 'calendar.google.com' in url and 'ical' in url:
                return self._handle_google_calendar(url)

            # Standard HTML processing
            return self._fetch_html_content(url)
        except Exception as e:
            logger.error(f"Content fetch failed: {e}")
            return None
    
    def _handle_google_drive(self, url: str) -> Optional[Dict]:
        """Process Google Drive file links"""
        try:
            file_id = re.search(r'/file/d/([a-zA-Z0-9_-]+)', url)
            if not file_id:
                logger.error(f"Invalid Google Drive URL: {url}")
                return None
    
            direct_url = f"https://drive.google.com/uc?export=download&id={file_id.group(1)}"
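            # Caveat: for files above Drive's virus-scan size threshold this
            # endpoint returns an HTML confirmation page instead of the raw
            # file, so 'content' below may be HTML rather than the file body.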
            response = self.session.get(direct_url, timeout=self.timeout)
            response.raise_for_status()
    
            return {
                'content': response.text,
                'content_type': response.headers.get('Content-Type', ''),
                'timestamp': datetime.now().isoformat()
            }
        except Exception as e:
            logger.error(f"Google Drive processing failed: {e}")
            return None
            
    def _handle_google_calendar(self, url: str) -> Optional[Dict]:
        """Process Google Calendar ICS feeds"""
        try:
            response = self.session.get(url, timeout=self.timeout)
            response.raise_for_status()
            return {
                'content': response.text,
                'content_type': 'text/calendar',
                'timestamp': datetime.now().isoformat()
            }
        except Exception as e:
            logger.error(f"Calendar fetch failed: {e}")
            return None

    def _fetch_html_content(self, url: str) -> Optional[Dict]:
        """Standard HTML content processing"""
        try:
            response = self.session.get(url, timeout=self.timeout)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')

            # Remove unwanted elements
            for element in soup(['script', 'style', 'nav', 'footer', 'header', 'meta', 'link']):
                element.decompose()
                
            # Remove login walls and overlays common on social media sites
            for element in soup.select('.login-wall, .signup-wall, .overlay, .modal, [role="dialog"], [aria-modal="true"]'):
                element.decompose()
                
            # Remove specific elements for known sites
            if 'facebook.com' in url:
                for element in soup.select('[data-testid="cookie-policy-manage-dialog"], [role="banner"], [role="complementary"]'):
                    element.decompose()
            elif 'instagram.com' in url or 'twitter.com' in url or 'x.com' in url:
                for element in soup.select('[role="presentation"], [role="banner"], [role="complementary"]'):
                    element.decompose()

            # Extract content using a general approach: first try common
            # main-content containers
            main_content = (
                soup.find('main')
                or soup.find('article')
                or soup.find('div', class_=lambda c: c and any(
                    term in c for term in ['content', 'main', 'body', 'post', 'feed', 'timeline']))
            )
            
            # If no specific container found, fall back to body
            if not main_content or not main_content.get_text(strip=True):
                logger.info(f"No main content container found for {url}, using body")
                main_content = soup.body if soup.body else soup
                
            # Extract text with proper spacing
            text_content = main_content.get_text(separator='\n', strip=True)
            
            # If content is too short, try a more aggressive approach to get all visible text
            if len(text_content) < 100:
                logger.info(f"Content too short for {url}, using all visible text")
                visible_text = []
                for element in soup.find_all(['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'span', 'div']):
                    if element.get_text(strip=True):
                        visible_text.append(element.get_text(strip=True))
                text_content = '\n'.join(visible_text)

            # Clean and structure content
            cleaned_content = self.advanced_text_cleaning(text_content)

            return {
                'content': cleaned_content,
                'content_type': response.headers.get('Content-Type', ''),
                'timestamp': datetime.now().isoformat(),
                'url': url  # Add the URL to the returned data for reference
            }
        except Exception as e:
            logger.error(f"HTML processing failed for {url}: {e}")
            return None
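
# Illustrative standalone use of URLProcessor (hypothetical URL; assumes
# network access and is separate from the Gradio flow below):
#
#   processor = URLProcessor()
#   result = processor.fetch_content("https://example.com")
#   if result:
#       print(result['content'][:200])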


class FileProcessor:
    """Class to handle file processing"""

    def __init__(self, max_file_size: int = 2 * 1024 * 1024 * 1024):  # 2GB default
        self.max_file_size = max_file_size
        self.supported_text_extensions = {'.txt', '.md', '.csv', '.json', '.xml'}

    def is_text_file(self, filepath: str) -> bool:
        """Check if file is a text file"""
        try:
            mime_type, _ = mimetypes.guess_type(filepath)
            return (mime_type and mime_type.startswith('text/')) or \
                   (os.path.splitext(filepath)[1].lower() in self.supported_text_extensions)
        except Exception:
            return False
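
    # Gradio file uploads arrive as objects exposing a .name attribute that
    # points at a temporary copy on disk; process_file and the helpers below
    # rely on that attribute rather than on raw path strings.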

    def process_file(self, file) -> List[Dict]:
        """Process uploaded file with enhanced error handling"""
        if not file:
            return []
        dataset = []
        try:
            file_size = os.path.getsize(file.name)
            if file_size > self.max_file_size:
                logger.warning(f"File size ({file_size} bytes) exceeds maximum allowed size")
                return []
            with tempfile.TemporaryDirectory() as temp_dir:
                if zipfile.is_zipfile(file.name):
                    dataset.extend(self._process_zip_file(file.name, temp_dir))
                else:
                    dataset.extend(self._process_single_file(file))
        except Exception as e:
            logger.error(f"Error processing file: {str(e)}")
            return []
        return dataset

    def _process_zip_file(self, zip_path, temp_dir):
        """Extract and process text files within a ZIP archive."""
        result = []
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(temp_dir)
            # Walk the extraction directory so files inside nested folders
            # are picked up too, and skip anything that is not a text file.
            for root, _, files in os.walk(temp_dir):
                for extracted_file in files:
                    extracted_file_path = os.path.join(root, extracted_file)
                    if not self.is_text_file(extracted_file_path):
                        continue
                    with open(extracted_file_path, 'r', encoding='utf-8', errors='ignore') as f:
                        result.append({
                            'source': 'file_from_zip',
                            'filename': extracted_file,
                            'content': f.read(),
                            'timestamp': datetime.now().isoformat()
                        })
        return result

    def _process_single_file(self, file) -> List[Dict]:
        try:
            file_stat = os.stat(file.name)

            # For very large files, keep only the first and last 1MB
            if file_stat.st_size > 100 * 1024 * 1024:  # 100MB
                logger.info(f"Processing large file: {file.name} ({file_stat.st_size} bytes)")

                # Read the chunks in binary mode: seeking a text-mode handle
                # to an arbitrary byte offset is unreliable in Python, so
                # decode after reading instead.
                chunk_size = 1 * 1024 * 1024  # 1MB
                with open(file.name, 'rb') as f:
                    head = f.read(chunk_size).decode('utf-8', errors='ignore')
                    f.seek(max(0, file_stat.st_size - chunk_size))
                    tail = f.read(chunk_size).decode('utf-8', errors='ignore')
                content = head + "\n...[Content truncated due to large file size]...\n" + tail
            else:
                # Regular file processing
                with open(file.name, 'r', encoding='utf-8', errors='ignore') as f:
                    content = f.read()

            return [{
                'source': 'file',
                'filename': os.path.basename(file.name),
                'file_size': file_stat.st_size,
                'mime_type': mimetypes.guess_type(file.name)[0],
                'created': datetime.fromtimestamp(file_stat.st_ctime).isoformat(),
                'modified': datetime.fromtimestamp(file_stat.st_mtime).isoformat(),
                'content': content,
                'timestamp': datetime.now().isoformat()
            }]
        except Exception as e:
            logger.error(f"File processing error: {e}")
            return []

# Top-level processing pipeline shared by the Gradio handlers below
def process_all_inputs(urls, file, text, notes):
    """Process all input types with progress tracking"""
    try:
        processor = URLProcessor()
        file_processor = FileProcessor()
        results = []

        # Process URLs
        if urls:
            url_list = re.split(r'[,\n]', urls)
            url_list = [url.strip() for url in url_list if url.strip()]

            for url in url_list:
                validation = processor.validate_url(url)
                if validation.get('is_valid'):
                    content = processor.fetch_content(url)
                    if content:
                        results.append({
                            'source': 'url',
                            'url': url,
                            'content': content,
                            'timestamp': datetime.now().isoformat()
                        })

        # Process files
        if file:
            results.extend(file_processor.process_file(file))

        # Process text input
        if text:
            cleaned_text = processor.advanced_text_cleaning(text)
            results.append({
                'source': 'direct_input',
                'content': cleaned_text,
                'timestamp': datetime.now().isoformat()
            })

        # Process scratchpad notes
        if notes:
            results.append({
                'source': 'notes',
                'content': notes,
                'timestamp': datetime.now().isoformat()
            })

        # Generate output
        if results:
            output_dir = Path('output') / datetime.now().strftime('%Y-%m-%d')
            output_dir.mkdir(parents=True, exist_ok=True)
            output_path = output_dir / f'processed_{int(time.time())}.json'

            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(results, f, ensure_ascii=False, indent=2)

            summary = f"Processed {len(results)} items successfully!"
            json_data = json.dumps(results, indent=2)  # Prepare JSON for QR code
            return str(output_path), summary, json_data  # Return JSON for editor
        else:
            return None, "No valid content to process.", ""

    except Exception as e:
        logger.error(f"Processing error: {e}")
        return None, f"Error: {str(e)}", ""

def generate_qr_code(json_data):
    """Generate a QR code for non-empty JSON data; returns None otherwise."""
    if json_data:
        return generate_qr(json_data)
    return None


def generate_qr(json_data):
    """Render JSON data as a QR code image and return the PNG file path."""
    try:
        # Try first with automatic version selection
        qr = qrcode.QRCode(
            error_correction=qrcode.constants.ERROR_CORRECT_L,
            box_size=10,
            border=4,
        )
        qr.add_data(json_data)
        qr.make(fit=True)
        
        img = qr.make_image(fill_color="black", back_color="white")
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        img.save(temp_file.name)
        return temp_file.name
    except Exception as e:
        # If the data is too large for a QR code
        logger.error(f"QR generation error: {e}")
        
        # Create a simple QR with error message
        qr = qrcode.QRCode(
            version=1,
            error_correction=qrcode.constants.ERROR_CORRECT_L,
            box_size=10,
            border=4,
        )
        qr.add_data("Error: Data too large for QR code")
        qr.make(fit=True)
        
        img = qr.make_image(fill_color="black", back_color="white")
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        img.save(temp_file.name)
        return temp_file.name
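
# Illustrative usage of generate_qr (hypothetical data; assumes the qrcode
# package is installed with Pillow support):
#
#   path = generate_qr('{"hello": "world"}')
#   print(path)  # e.g. '/tmp/tmpab12cd.png'
#
# QR capacity tops out near 3KB of binary data at the low error-correction
# level used here, which is why oversized JSON payloads fall through to the
# error-message QR in the except branch.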

def create_interface():
    """Create a comprehensive Gradio interface with advanced features"""
    css = """
    .container { max-width: 1200px; margin: auto; }
    .warning { background-color: #fff3cd; color: #856404; }
    .error { background-color: #f8d7da; color: #721c24; }
    """

    with gr.Blocks(css=css, title="Advanced Text & URL Processor") as interface:
        gr.Markdown("# 🌐 Advanced URL & Text Processing Toolkit")

        with gr.Tab("URL Processing"):
            url_input = gr.Textbox(
                label="Enter URLs (comma or newline separated)",
                lines=5,
                placeholder="https://example1.com\nhttps://example2.com"
            )

        with gr.Tab("File Input"):
            file_input = gr.File(
                label="Upload text file or ZIP archive",
                file_types=[".txt", ".zip", ".md", ".csv", ".json", ".xml"]
            )

        with gr.Tab("Text Input"):
            text_input = gr.Textbox(
                label="Raw Text Input",
                lines=5,
                placeholder="Paste your text here..."
            )

        with gr.Tab("JSON Editor"):
            json_editor = gr.Textbox(
                label="JSON Editor",
                lines=20,
                placeholder="View and edit your JSON data here...",
                interactive=True,
                elem_id="json-editor"  # Optional: for custom styling
            )

        with gr.Tab("Scratchpad"):
            scratchpad = gr.Textbox(
                label="Scratchpad",
                lines=10,
                placeholder="Quick notes or text collections...",
                interactive=True
            )

        process_btn = gr.Button("Process Input", variant="primary")
        qr_btn = gr.Button("Generate QR Code", variant="secondary")

        output_text = gr.Textbox(label="Processing Results", interactive=False)
        output_file = gr.File(label="Processed Output")
        qr_output = gr.Image(label="QR Code", type="filepath")  # To display the generated QR code

        process_btn.click(
            process_all_inputs,
            inputs=[url_input, file_input, text_input, scratchpad],
            outputs=[output_file, output_text, json_editor]  # Update outputs to include JSON editor
        )

        qr_btn.click(
            generate_qr_code,
            inputs=json_editor,
            outputs=qr_output
        )

        gr.Markdown("""
    ### Usage Guidelines
    - **URL Processing**: Enter valid HTTP/HTTPS URLs
    - **File Input**: Upload text files or ZIP archives
    - **Text Input**: Direct text processing
    - **JSON Editor**: View and edit your JSON data
    - **Scratchpad**: Quick notes or text collections
    - Advanced cleaning and validation included
    """)
    return interface

def main():
    # Configure system settings
    mimetypes.init()

    # Create and launch interface
    interface = create_interface()

    # Launch with proper configuration
    interface.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True,
        share=False,
        inbrowser=True,
        debug=True
    )

if __name__ == "__main__":
    main()