import json
import os
import re
import time
import logging
import mimetypes
import tempfile
import zipfile
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional

import requests
import validators
import gradio as gr
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from cleantext import clean
import qrcode

# Setup logging with detailed configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('app.log', encoding='utf-8')
    ])
logger = logging.getLogger(__name__)


class URLProcessor:
    def __init__(self):
        self.session = requests.Session()
        self.timeout = 10  # seconds
        self.session.headers.update({
            'User-Agent': UserAgent().random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1'
        })

    def advanced_text_cleaning(self, text: str) -> str:
        """Robust text cleaning with version compatibility."""
        try:
            cleaned_text = clean(
                text,
                to_ascii=True,
                lower=True,
                no_line_breaks=True,
                no_urls=True,
                no_emails=True,
                no_phone_numbers=True,
                no_numbers=False,
                no_digits=False,
                no_currency_symbols=True,
                no_punct=False
            ).strip()
            return cleaned_text
        except Exception as e:
            logger.warning(f"Text cleaning error: {e}. Using fallback method.")
            text = re.sub(r'[\x00-\x1F\x7F-\x9F]', '', text)  # Remove control characters
            text = text.encode('ascii', 'ignore').decode('ascii')  # Remove non-ASCII characters
            text = re.sub(r'\s+', ' ', text)  # Normalize whitespace
            return text.strip()

    def validate_url(self, url: str) -> Dict:
        """Validate URL format and accessibility."""
        try:
            if not validators.url(url):
                return {'is_valid': False, 'message': 'Invalid URL format'}
            response = self.session.head(url, timeout=self.timeout)
            response.raise_for_status()
            return {'is_valid': True, 'message': 'URL is valid and accessible'}
        except Exception as e:
            return {'is_valid': False, 'message': f'URL validation failed: {str(e)}'}

    def fetch_content(self, url: str) -> Optional[Dict]:
        """Universal content fetcher with special case handling."""
        try:
            # Google Drive document handling
            if 'drive.google.com' in url:
                return self._handle_google_drive(url)
            # Google Calendar ICS handling
            if 'calendar.google.com' in url and 'ical' in url:
                return self._handle_google_calendar(url)
            # Standard HTML processing
            return self._fetch_html_content(url)
        except Exception as e:
            logger.error(f"Content fetch failed: {e}")
            return None

    def _handle_google_drive(self, url: str) -> Optional[Dict]:
        """Process Google Drive file links."""
        try:
            file_id = re.search(r'/file/d/([a-zA-Z0-9_-]+)', url)
            if not file_id:
                logger.error(f"Invalid Google Drive URL: {url}")
                return None
            direct_url = f"https://drive.google.com/uc?export=download&id={file_id.group(1)}"
            response = self.session.get(direct_url, timeout=self.timeout)
            response.raise_for_status()
            return {
                'content': response.text,
                'content_type': response.headers.get('Content-Type', ''),
                'timestamp': datetime.now().isoformat()
            }
        except Exception as e:
            logger.error(f"Google Drive processing failed: {e}")
            return None

    def _handle_google_calendar(self, url: str) -> Optional[Dict]:
        """Process Google Calendar ICS feeds."""
        try:
            response = self.session.get(url, timeout=self.timeout)
            response.raise_for_status()
            return {
                'content': response.text,
                'content_type': 'text/calendar',
                'timestamp': datetime.now().isoformat()
            }
        except Exception as e:
            logger.error(f"Calendar fetch failed: {e}")
            return None

    def _fetch_html_content(self, url: str) -> Optional[Dict]:
        """Standard HTML content processing."""
        try:
            response = self.session.get(url, timeout=self.timeout)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')
            # Remove unwanted elements
            for element in soup(['script', 'style', 'nav', 'footer', 'header', 'meta', 'link']):
                element.decompose()
            # Extract main content, falling back to the whole document if no
            # <main>, <article>, or <body> element exists
            main_content = soup.find('main') or soup.find('article') or soup.body or soup
            # Clean and structure content
            text_content = main_content.get_text(separator='\n', strip=True)
            cleaned_content = self.advanced_text_cleaning(text_content)
            return {
                'content': cleaned_content,
                'content_type': response.headers.get('Content-Type', ''),
                'timestamp': datetime.now().isoformat()
            }
        except Exception as e:
            logger.error(f"HTML processing failed: {e}")
            return None
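

# A minimal sketch of using URLProcessor on its own, outside the Gradio app.
# The URL below is an illustrative placeholder, not part of the app; the
# function is defined for manual debugging and is never called by default.
def _demo_url_processor(url: str = "https://example.com") -> None:
    processor = URLProcessor()
    check = processor.validate_url(url)
    print(f"validate_url -> {check}")
    if check.get('is_valid'):
        result = processor.fetch_content(url)
        preview = result['content'][:200] if result else '<fetch failed>'
        print(f"fetch_content preview: {preview!r}")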


class FileProcessor:
    """Class to handle file processing."""

    def __init__(self, max_file_size: int = 2 * 1024 * 1024 * 1024):  # 2GB default
        self.max_file_size = max_file_size
        self.supported_text_extensions = {'.txt', '.md', '.csv', '.json', '.xml'}

    def is_text_file(self, filepath: str) -> bool:
        """Check if file is a text file."""
        try:
            mime_type, _ = mimetypes.guess_type(filepath)
            return (mime_type and mime_type.startswith('text/')) or \
                   (os.path.splitext(filepath)[1].lower() in self.supported_text_extensions)
        except Exception:
            return False

    def process_file(self, file) -> List[Dict]:
        """Process uploaded file with enhanced error handling."""
        if not file:
            return []
        dataset = []
        try:
            file_size = os.path.getsize(file.name)
            if file_size > self.max_file_size:
                logger.warning(f"File size ({file_size} bytes) exceeds maximum allowed size")
                return []
            with tempfile.TemporaryDirectory() as temp_dir:
                if zipfile.is_zipfile(file.name):
                    dataset.extend(self._process_zip_file(file.name, temp_dir))
                else:
                    dataset.extend(self._process_single_file(file))
        except Exception as e:
            logger.error(f"Error processing file: {str(e)}")
            return []
        return dataset

    def _process_zip_file(self, zip_path, temp_dir):
        """Extract and process files within a ZIP archive."""
        result = []
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(temp_dir)
        for extracted_file in os.listdir(temp_dir):
            extracted_file_path = os.path.join(temp_dir, extracted_file)
            if os.path.isfile(extracted_file_path):
                with open(extracted_file_path, 'r', encoding='utf-8', errors='ignore') as f:
                    result.append({
                        'source': 'file_from_zip',
                        'filename': extracted_file,
                        'content': f.read(),
                        'timestamp': datetime.now().isoformat()
                    })
        return result

    def _process_single_file(self, file) -> List[Dict]:
        try:
            file_stat = os.stat(file.name)
            # For very large files, read only the first and last 1MB
            if file_stat.st_size > 100 * 1024 * 1024:  # 100MB
                logger.info(f"Processing large file: {file.name} ({file_stat.st_size} bytes)")
                # Open in binary mode so seeking to an arbitrary byte offset
                # is well-defined (text-mode seek only accepts tell() cookies),
                # then decode leniently.
                with open(file.name, 'rb') as f:
                    content = f.read(1 * 1024 * 1024).decode('utf-8', errors='ignore')  # First 1MB
                    content += "\n...[Content truncated due to large file size]...\n"
                    f.seek(max(0, file_stat.st_size - 1 * 1024 * 1024))  # Seek to the last 1MB
                    content += f.read().decode('utf-8', errors='ignore')  # Last 1MB
            else:
                # Regular file processing
                with open(file.name, 'r', encoding='utf-8', errors='ignore') as f:
                    content = f.read()
            return [{
                'source': 'file',
                'filename': os.path.basename(file.name),
                'file_size': file_stat.st_size,
                'mime_type': mimetypes.guess_type(file.name)[0],
                'created': datetime.fromtimestamp(file_stat.st_ctime).isoformat(),
                'modified': datetime.fromtimestamp(file_stat.st_mtime).isoformat(),
                'content': content,
                'timestamp': datetime.now().isoformat()
            }]
        except Exception as e:
            logger.error(f"File processing error: {e}")
            return []
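

# A minimal sketch of exercising FileProcessor with a plain path. The class
# only reads the upload's `.name` attribute, so a SimpleNamespace can stand in
# for Gradio's file wrapper here (an assumption about that wrapper; the sample
# path and contents are illustrative only, and the function is never called by
# default).
def _demo_file_processor(path: str = "demo_sample.txt") -> None:
    from types import SimpleNamespace
    with open(path, 'w', encoding='utf-8') as f:
        f.write("hello from FileProcessor\n")
    records = FileProcessor().process_file(SimpleNamespace(name=path))
    for record in records:
        print(record['filename'], record['file_size'], 'bytes')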


def generate_qr(json_data):
    """Generate QR code from JSON data and return the file path."""
    try:
        # Try first with automatic version selection
        qr = qrcode.QRCode(
            error_correction=qrcode.constants.ERROR_CORRECT_L,
            box_size=10,
            border=4,
        )
        qr.add_data(json_data)
        qr.make(fit=True)
        img = qr.make_image(fill_color="black", back_color="white")
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        img.save(temp_file.name)
        return temp_file.name
    except Exception as e:
        # The data is too large for a QR code
        logger.error(f"QR generation error: {e}")
        # Create a simple QR with an error message instead
        qr = qrcode.QRCode(
            version=1,
            error_correction=qrcode.constants.ERROR_CORRECT_L,
            box_size=10,
            border=4,
        )
        qr.add_data("Error: Data too large for QR code")
        qr.make(fit=True)
        img = qr.make_image(fill_color="black", back_color="white")
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        img.save(temp_file.name)
        return temp_file.name
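

# A minimal sketch of generate_qr() in isolation. QR codes top out at roughly
# 3 KB of binary data, which is why the function above falls back to an
# error-message QR for oversized payloads. The payload here is illustrative;
# the function is never called by default.
def _demo_generate_qr() -> None:
    payload = json.dumps({'source': 'demo', 'content': 'hello world'})
    print(f"QR image written to: {generate_qr(payload)}")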


def create_interface():
    """Create a comprehensive Gradio interface with advanced features and styling."""
    css = """
    body {
        font-family: 'Inter', sans-serif;
        background: linear-gradient(to bottom, #08041C, #030712); /* Dark cosmic background */
        color: #ffffff;
    }
    .container {
        max-width: 1200px;
        margin: auto;
        background-color: rgba(255, 255, 255, 0.06);
        backdrop-filter: blur(12px);
        border: 1px solid rgba(255, 255, 255, 0.1);
        border-radius: 1rem;
        box-shadow: 0 4px 12px rgba(0, 0, 0, 0.25);
        padding: 2rem;
    }
    h1 {
        color: #00FF00;
        text-align: center;
        text-shadow: 0 0 10px rgba(0, 255, 0, 0.8);
    }
    h2, h3, h4 {
        color: #FF9900;
        text-shadow: 0 0 10px rgba(255, 153, 0, 0.8);
    }
    .tab {
        background-color: rgba(255, 255, 255, 0.06);
        backdrop-filter: blur(12px);
        border: 1px solid rgba(255, 255, 255, 0.1);
        border-radius: 0.75rem;
        margin-bottom: 1rem;
        padding: 1.5rem;
    }
    .tab:hover {
        background-color: rgba(255, 255, 255, 0.1);
    }
    .warning {
        background-color: #fff3cd;
        color: #856404;
        border-radius: 0.5rem;
        padding: 1rem;
        margin-bottom: 1rem;
    }
    .error {
        background-color: #f8d7da;
        color: #721c24;
        border-radius: 0.5rem;
        padding: 1rem;
        margin-bottom: 1rem;
    }
    input[type="text"], input[type="file"] {
        width: 100%;
        padding: 0.75rem;
        border-radius: 0.5rem;
        background-color: rgba(0, 0, 0, 0.2);
        color: #ffffff;
        border: 1px solid #4a5568;
        font-size: 1rem;
        transition: border-color 0.3s ease;
    }
    input[type="text"]:focus, input[type="file"]:focus {
        outline: none;
        border-color: #00FF00;
        box-shadow: 0 0 5px rgba(0, 255, 0, 0.7);
    }
    .btn-primary {
        padding: 0.75rem 1.5rem;
        border-radius: 1.5rem;
        font-weight: 600;
        cursor: pointer;
        transition: transform 0.2s ease, box-shadow 0.2s ease, background-image 0.3s;
        background-image: linear-gradient(to right, #00FF00, #00A300);
        color: #000000;
        border: none;
        box-shadow: 0 0 8px rgba(0, 255, 0, 0.5);
    }
    .btn-primary:hover {
        transform: scale(1.05);
        box-shadow: 0 0 12px rgba(0, 255, 0, 0.7);
        background-image: linear-gradient(to right, #00A300, #007D00);
    }
    .btn-secondary {
        padding: 0.75rem 1.5rem;
        border-radius: 1.5rem;
        font-weight: 600;
        cursor: pointer;
        transition: transform 0.2s ease, box-shadow 0.2s ease, background-image 0.3s;
        background-image: linear-gradient(to right, #FF9900, #FF6600);
        color: #000000;
        border: none;
        box-shadow: 0 0 8px rgba(255, 153, 0, 0.5);
    }
    .btn-secondary:hover {
        transform: scale(1.05);
        box-shadow: 0 0 12px rgba(255, 153, 0, 0.7);
        background-image: linear-gradient(to right, #FF6600, #CC4700);
    }
    textarea {
        width: 100%;
        padding: 0.75rem;
        border-radius: 0.5rem;
        background-color: rgba(0, 0, 0, 0.2);
        color: #ffffff;
        border: 1px solid #4a5568;
        font-size: 1rem;
        transition: border-color 0.3s ease;
        min-height: 8rem;
    }
    textarea:focus {
        outline: none;
        border-color: #00FF00;
        box-shadow: 0 0 5px rgba(0, 255, 0, 0.7);
    }
    .output-box {
        background-color: rgba(0, 0, 0, 0.2);
        border: 1px solid #4a5568;
        border-radius: 0.5rem;
        padding: 1rem;
        overflow-x: auto;
        color: #ffffff;
        font-size: 1rem;
        white-space: pre-wrap;
    }
    #json-editor {
        background-color: rgba(0, 0, 0, 0.2);
        color: #ffffff;
        border: 1px solid #4a5568;
        border-radius: 0.5rem;
        padding: 1rem;
        font-size: 1rem;
        min-height: 20rem;
    }
    #json-editor:focus {
        outline: none;
        border-color: #00FF00;
        box-shadow: 0 0 5px rgba(0, 255, 0, 0.7);
    }
    #url-input {
        background-color: rgba(0, 0, 0, 0.2);
        color: #ffffff;
        border: 1px solid #4a5568;
        border-radius: 0.5rem;
        padding: 1rem;
        font-size: 1rem;
        min-height: 8rem;
    }
    #url-input:focus {
        outline: none;
        border-color: #00FF00;
        box-shadow: 0 0 5px rgba(0, 255, 0, 0.7);
    }
    #output-text {
        background-color: rgba(0, 0, 0, 0.2);
        color: #ffffff;
        border: 1px solid #4a5568;
        border-radius: 0.5rem;
        padding: 1rem;
        font-size: 1rem;
    }
    """

    with gr.Blocks(css=css, title="Advanced Text & URL Processor") as interface:
        gr.Markdown("# 🌐 Advanced URL & Text Processing Toolkit")
        with gr.Tab("URL Processing"):
            url_input = gr.Textbox(
                label="Enter URLs (comma or newline separated)",
                lines=5,
                placeholder="https://example1.com\nhttps://example2.com",
                interactive=True,
                elem_id="url-input"
            )
        with gr.Tab("File Input"):
            file_input = gr.File(
                label="Upload text file or ZIP archive",
                file_types=[".txt", ".zip", ".md", ".csv", ".json", ".xml"]
            )
        with gr.Tab("Text Input"):
            text_input = gr.Textbox(
                label="Raw Text Input",
                lines=5,
                placeholder="Paste your text here...",
                interactive=True
            )
        with gr.Tab("JSON Editor"):
            json_editor = gr.Textbox(
                label="JSON Editor",
                lines=20,
                placeholder="View and edit your JSON data here...",
                interactive=True,
                elem_id="json-editor"
            )
        with gr.Tab("Scratchpad"):
            scratchpad = gr.Textbox(
                label="Scratchpad",
                lines=10,
                placeholder="Quick notes or text collections...",
                interactive=True
            )
        process_btn = gr.Button("Process Input", variant="primary")
        qr_btn = gr.Button("Generate QR Code", variant="secondary")
        output_text = gr.Textbox(label="Processing Results", interactive=False, elem_id="output-text")
        output_file = gr.File(label="Processed Output")
        qr_output = gr.Image(label="QR Code", type="filepath")  # Displays the generated QR code

        def process_all_inputs(urls, file, text, notes):
            """Process all input types with progress tracking."""
            try:
                processor = URLProcessor()
                file_processor = FileProcessor()
                results = []
                # Process URLs
                if urls:
                    url_list = re.split(r'[,\n]', urls)
                    url_list = [url.strip() for url in url_list if url.strip()]
                    for url in url_list:
                        validation = processor.validate_url(url)
                        if validation.get('is_valid'):
                            content = processor.fetch_content(url)
                            if content:
                                results.append({
                                    'source': 'url',
                                    'url': url,
                                    'content': content,
                                    'timestamp': datetime.now().isoformat()
                                })
                # Process files
                if file:
                    results.extend(file_processor.process_file(file))
                # Process text input
                if text:
                    cleaned_text = processor.advanced_text_cleaning(text)
                    results.append({
                        'source': 'direct_input',
                        'content': cleaned_text,
                        'timestamp': datetime.now().isoformat()
                    })
                # Generate output
                if results:
                    output_dir = Path('output') / datetime.now().strftime('%Y-%m-%d')
                    output_dir.mkdir(parents=True, exist_ok=True)
                    output_path = output_dir / f'processed_{int(time.time())}.json'
                    with open(output_path, 'w', encoding='utf-8') as f:
                        json.dump(results, f, ensure_ascii=False, indent=2)
                    summary = f"Processed {len(results)} items successfully!"
                    json_data = json.dumps(results, indent=2)  # Prepare JSON for the editor and QR code
                    return str(output_path), summary, json_data
                else:
                    return None, "No valid content to process.", ""
            except Exception as e:
                logger.error(f"Processing error: {e}")
                return None, f"Error: {str(e)}", ""

        def generate_qr_code(json_data):
            """Generate QR code from JSON data."""
            if not json_data:
                # Return None so the gr.Image component stays empty instead of
                # trying to load a message string as a file path
                return None
            return generate_qr(json_data)

        process_btn.click(
            process_all_inputs,
            inputs=[url_input, file_input, text_input, scratchpad],
            outputs=[output_file, output_text, json_editor]
        )
        qr_btn.click(generate_qr_code, inputs=[json_editor], outputs=[qr_output])

        gr.Markdown("""
        ### Usage Guidelines
        - **URL Processing**: Enter valid HTTP/HTTPS URLs, separated by commas or newlines.
        - **File Input**: Upload text files or ZIP archives containing text files.
        - **Text Input**: Paste text directly for processing.
        - **JSON Editor**: View the processed data in JSON format. This is automatically updated after processing.
        - **Scratchpad**: Use this area for temporary notes or text snippets.
        - Click "Process Input" to analyze the data. The results will be available for download and in the JSON Editor.
        - Click "Generate QR Code" to create a QR code from the JSON data.
        """)
    return interface
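

# A quick smoke-test sketch: build and launch the interface without blocking,
# then shut it down again. prevent_thread_lock and close() are standard
# gr.Blocks APIs; whether a non-blocking launch suits your environment is an
# assumption, and the function is never called by default.
def _demo_headless_launch() -> None:
    demo = create_interface()
    demo.launch(prevent_thread_lock=True, share=False)
    demo.close()  # Stop the local server once checks are done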


def main():
    # Configure system settings
    mimetypes.init()
    # Create and launch interface
    interface = create_interface()
    # Launch with proper configuration
    interface.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True,
        share=False,
        inbrowser=True,
        debug=True
    )


if __name__ == "__main__":
    main()