import json
import os
import re
import time
import logging
import mimetypes
import zipfile
import tempfile
from datetime import datetime
from typing import List, Dict, Optional, Union, Any
from pathlib import Path
import requests
import validators
import gradio as gr
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from cleantext import clean

# Setup logging with detailed configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('app.log', encoding='utf-8')
    ]
)
logger = logging.getLogger(__name__)

class URLProcessor:
    def __init__(self):
        self.session = requests.Session()
        self.timeout = 10  # seconds
        self.session.headers.update({
            'User-Agent': UserAgent().random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1'
        })

    def advanced_text_cleaning(self, text: str) -> str:
        """Robust text cleaning with version compatibility"""
        try:
            cleaned_text = clean(
                text,
                fix_unicode=True,
                to_ascii=True,
                lower=True,
                no_line_breaks=True,
                no_urls=True,
                no_emails=True,
                no_phone_numbers=True,
                no_numbers=False,
                no_digits=False,
                no_currency_symbols=True,
                no_punct=False
            ).strip()
            return cleaned_text
        except Exception as e:
            logger.warning(f"Text cleaning error: {e}. Using fallback method.")
            text = re.sub(r'[\x00-\x1F\x7F-\x9F]', '', text)  # Remove control characters
            text = text.encode('ascii', 'ignore').decode('ascii')  # Remove non-ASCII characters
            text = re.sub(r'\s+', ' ', text)  # Normalize whitespace
            return text.strip()
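
        # Illustrative behaviour (a sketch; the exact output depends on the
        # installed clean-text version and its placeholder defaults):
        #   advanced_text_cleaning("Visit https://x.io NOW!")
        #   -> roughly "visit <url> now!"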

    def validate_url(self, url: str) -> Dict:
        """Validate URL format and accessibility"""
        try:
            if not validators.url(url):
                return {'is_valid': False, 'message': 'Invalid URL format'}
            
            # Follow redirects so URLs behind http->https hops validate correctly
            response = self.session.head(url, timeout=self.timeout, allow_redirects=True)
            response.raise_for_status()
            return {'is_valid': True, 'message': 'URL is valid and accessible'}
        except Exception as e:
            return {'is_valid': False, 'message': f'URL validation failed: {str(e)}'}

    def fetch_content(self, url: str) -> Optional[Dict]:
        """Universal content fetcher with special case handling"""
        try:
            if 'drive.google.com' in url:
                return self._handle_google_drive(url)
            if 'calendar.google.com' in url and 'ical' in url:
                return self._handle_google_calendar(url)
            return self._fetch_html_content(url)
        except Exception as e:
            logger.error(f"Content fetch failed: {e}")
            return None

    def _handle_google_drive(self, url: str) -> Optional[Dict]:
        """Process Google Drive file links"""
        try:
            match = re.search(r'/file/d/([a-zA-Z0-9_-]+)', url)
            if not match:
                logger.error(f"Invalid Google Drive URL: {url}")
                return None
                
            direct_url = f"https://drive.google.com/uc?export=download&id={match.group(1)}"
            response = self.session.get(direct_url, timeout=self.timeout)
            response.raise_for_status()
            
            return {
                'content': response.text,
                'content_type': response.headers.get('Content-Type', ''),
                'timestamp': datetime.now().isoformat()
            }
        except Exception as e:
            logger.error(f"Google Drive processing failed: {e}")
            return None

    def _handle_google_calendar(self, url: str) -> Optional[Dict]:
        """Process Google Calendar ICS feeds"""
        try:
            response = self.session.get(url, timeout=self.timeout)
            response.raise_for_status()
            return {
                'content': response.text,
                'content_type': 'text/calendar',
                'timestamp': datetime.now().isoformat()
            }
        except Exception as e:
            logger.error(f"Calendar fetch failed: {e}")
            return None

    def _fetch_html_content(self, url: str) -> Optional[Dict]:
        """Standard HTML content processing"""
        try:
            response = self.session.get(url, timeout=self.timeout)
            response.raise_for_status()
            
            soup = BeautifulSoup(response.text, 'html.parser')
            
            for element in soup(['script', 'style', 'nav', 'footer', 'header', 'meta', 'link']):
                element.decompose()
                
            main_content = soup.find('main') or soup.find('article') or soup.body
            if main_content is None:
                logger.warning(f"No <main>, <article>, or <body> found for {url}")
                main_content = soup
            
            text_content = main_content.get_text(separator='\n', strip=True)
            cleaned_content = self.advanced_text_cleaning(text_content)
            
            return {
                'content': cleaned_content,
                'content_type': response.headers.get('Content-Type', ''),
                'timestamp': datetime.now().isoformat()
            }
        except Exception as e:
            logger.error(f"HTML processing failed: {e}")
            return None
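
# Illustrative use of URLProcessor (a sketch; the URL is a placeholder):
#   processor = URLProcessor()
#   if processor.validate_url("https://example.com")['is_valid']:
#       result = processor.fetch_content("https://example.com")
#       if result:
#           print(result['content'][:200])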
            
class FileProcessor:
    """Class to handle file processing"""
    
    def __init__(self, max_file_size: int = 2 * 1024 * 1024 * 1024):  # 2GB default
        self.max_file_size = max_file_size
        self.supported_text_extensions = {'.txt', '.md', '.csv', '.json', '.xml'}

    def is_text_file(self, filepath: str) -> bool:
        """Check if file is a text file"""
        try:
            mime_type, _ = mimetypes.guess_type(filepath)
            return (mime_type and mime_type.startswith('text/')) or \
                   (os.path.splitext(filepath)[1].lower() in self.supported_text_extensions)
        except Exception:
            return False

    def process_files(self, files: Union[List[gr.File], List[str]]) -> List[Dict]:
        """Process multiple uploaded files and return a single JSON extraction"""
        if not files:
            return []

        combined_data = []
        try:
            for file in files:
                # Uploaded files arrive as objects exposing a .name path
                # (Gradio's temp-file wrappers) or as plain string paths
                file_name = file.name if hasattr(file, 'name') else file
                file_size = os.path.getsize(file_name)
                if file_size > self.max_file_size:
                    logger.warning(f"File size ({file_size} bytes) exceeds maximum allowed size")
                    continue  # Skip this file

                if zipfile.is_zipfile(file_name):
                    combined_data.extend(self._process_zip_file(file_name))
                else:
                    combined_data.extend(self._process_single_file(file_name))

        except Exception as e:
            logger.error(f"Error processing files: {str(e)}")
            return []

        return combined_data

    def _process_zip_file(self, zip_path: str) -> List[Dict]:
        """Process ZIP file contents"""
        results = []
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            with tempfile.TemporaryDirectory() as temp_dir:
                zip_ref.extractall(temp_dir)
                for root, _, files in os.walk(temp_dir):
                    for filename in files:
                        filepath = os.path.join(root, filename)
                        if self.is_text_file(filepath):
                            try:
                                with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
                                    content = f.read()
                                if content.strip():
                                    results.append({
                                        "source": "file",
                                        "filename": filename,
                                        "content": content,
                                        "timestamp": datetime.now().isoformat()
                                    })
                            except Exception as e:
                                logger.error(f"Error reading file {filename}: {str(e)}")
        return results

    def _process_single_file(self, file_path: str) -> List[Dict]:
        try:
            file_stat = os.stat(file_path)
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()
            
            return [{
                'source': 'file',
                'filename': os.path.basename(file_path),
                'file_size': file_stat.st_size,
                'mime_type': mimetypes.guess_type(file_path)[0],
                'created': datetime.fromtimestamp(file_stat.st_ctime).isoformat(),
                'modified': datetime.fromtimestamp(file_stat.st_mtime).isoformat(),
                'content': content,
                'timestamp': datetime.now().isoformat()
            }]
        except Exception as e:
            logger.error(f"File processing error: {e}")
            return []
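
# Illustrative use of FileProcessor (a sketch; file names are placeholders):
#   fp = FileProcessor()
#   records = fp.process_files(["notes.txt", "archive.zip"])
#   print(f"Extracted {len(records)} text records")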

class Chatbot:
    """Simple chatbot that uses provided JSON data for responses."""
    
    def __init__(self):
        self.data = None

    def load_data(self, json_data: str):
        """Load JSON data into the chatbot."""
        try:
            self.data = json.loads(json_data)
            return "Data loaded successfully!"
        except json.JSONDecodeError:
            return "Invalid JSON data. Please check your input."

    def chat(self, user_input: str) -> str:
        """Generate a response based on user input and loaded data."""
        if not self.data:
            return "No data loaded. Please load your JSON data first."

        # Simple keyword-based response logic; accepts either a JSON object
        # or a list of objects (the processing pipeline emits a list)
        records = self.data if isinstance(self.data, list) else [self.data]
        for record in records:
            if not isinstance(record, dict):
                continue
            for key, value in record.items():
                if key.lower() in user_input.lower():
                    return f"{key}: {value}"

        return "I don't have information on that. Please ask about something else."

def create_interface():
    """Create a comprehensive Gradio interface with advanced features"""
    
    css = """
    body { background-color: #f0f4f8; font-family: 'Arial', sans-serif; }
    .container { max-width: 1200px; margin: auto; padding: 20px; border-radius: 8px; box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); }
    h1 { color: #333; }
    .tab { background-color: #ffffff; border-radius: 8px; padding: 20px; margin-bottom: 20px; }
    .button { background-color: #007bff; color: white; border: none; border-radius: 5px; padding: 10px 20px; cursor: pointer; }
    .button:hover { background-color: #0056b3; }
    .warning { background-color: #fff3cd; color: #856404; padding: 10px; border-radius: 5px; }
    .error { background-color: #f8d7da; color: #721c24; padding: 10px; border-radius: 5px; }
    """
    
    with gr.Blocks(css=css, title="Advanced Data Processing App") as interface:
        gr.Markdown("# 🌐 Advanced Data Processing Toolkit")
        
        with gr.Tab("URL Processing"):
            url_input = gr.Textbox(
                label="Enter URLs (comma or newline separated)", 
                lines=5,
                placeholder="https://example1.com\nhttps://example2.com"
            )
        
        with gr.Tab("File Input"):
            file_input = gr.File(
                label="Upload text files or ZIP archives",
                file_types=[".txt", ".zip", ".md", ".csv", ".json", ".xml"],
                file_count="multiple"  # allow multiple file uploads
            )

        with gr.Tab("Text Input"):
            text_input = gr.Textbox(
                label="Raw Text Input", 
                lines=5,
                placeholder="Paste your text here..."
            )
        
        with gr.Tab("Chat"):
            chat_input = gr.Textbox(
                label="Chat with your data",
                placeholder="Type your question here..."
            )
            json_input = gr.Textbox(
                label="Load JSON Data",
                placeholder="Paste your JSON data here...",
                lines=5
            )
            load_btn = gr.Button("Load Data", variant="primary")
            chat_output = gr.Textbox(label="Chatbot Response", interactive=False)

        process_btn = gr.Button("Process Input", variant="primary")
        
        output_text = gr.Textbox(label="Processing Results", interactive=False)
        output_file = gr.File(label="Processed Output")
        
        # Initialize chatbot
        chatbot = Chatbot()

        def process_all_inputs(urls, files, text):
            """Process all input types with progress tracking"""
            try:
                processor = URLProcessor()
                file_processor = FileProcessor()
                results = []
                
                # Process URLs
                if urls:
                    url_list = re.split(r'[,\n]', urls)
                    url_list = [url.strip() for url in url_list if url.strip()]
                    
                    for url in url_list:
                        validation = processor.validate_url(url)
                        if validation.get('is_valid'):
                            content = processor.fetch_content(url)
                            if content:
                                results.append({
                                    'source': 'url',
                                    'url': url,
                                    'content': content['content'],
                                    'content_type': content.get('content_type', ''),
                                    'timestamp': datetime.now().isoformat()
                                })
                
                # Process files
                if files:
                    combined_data = file_processor.process_files(files)
                    results.extend(combined_data)
        
                # Process text input
                if text:
                    cleaned_text = processor.advanced_text_cleaning(text)
                    results.append({
                        'source': 'direct_input',
                        'content': cleaned_text,
                        'timestamp': datetime.now().isoformat()
                    })
                
                # Generate output
                if results:
                    output_dir = Path('output') / datetime.now().strftime('%Y-%m-%d')
                    output_dir.mkdir(parents=True, exist_ok=True)
                    output_path = output_dir / f'processed_{int(time.time())}.json'
                    
                    with open(output_path, 'w', encoding='utf-8') as f:
                        json.dump(results, f, ensure_ascii=False, indent=2)
                    
                    summary = f"Processed {len(results)} items successfully!"
                    return str(output_path), summary
                else:
                    return None, "No valid content to process."
            
            except Exception as e:
                logger.error(f"Processing error: {e}")
                return None, f"Error: {str(e)}"

        def load_chat_data(json_data):
            """Load JSON data into the chatbot."""
            return chatbot.load_data(json_data)

        def chat_with_data(user_input):
            """Chat with the loaded data."""
            return chatbot.chat(user_input)

        process_btn.click(
            process_all_inputs, 
            inputs=[url_input, file_input, text_input], 
            outputs=[output_file, output_text]
        )

        load_btn.click(
            load_chat_data,
            inputs=json_input,
            outputs=chat_output
        )

        chat_input.submit(
            chat_with_data,
            inputs=chat_input,
            outputs=chat_output
        )
        
        gr.Markdown("""
        ### Usage Guidelines
        - **URL Processing**: Enter valid HTTP/HTTPS URLs
        - **File Input**: Upload multiple text files or ZIP archives
        - **Text Input**: Direct text processing
        - **Chat**: Load your JSON data and ask questions about it
        - Advanced cleaning and validation included
        """)
    
    return interface

def main():
    # Configure system settings
    mimetypes.init()
    
    # Create and launch interface
    interface = create_interface()
    
    # Launch with proper configuration
    interface.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True,  # Enable public sharing
        inbrowser=False,
        debug=False
    )

if __name__ == "__main__":
    main()