Update app2.py
app2.py CHANGED
@@ -1,26 +1,29 @@
 import json
 import os
 import re
+import time
 import logging
 import mimetypes
-import
-
-import zxing
-import io
+import concurrent.futures
+import string
 import zipfile
 import tempfile
 from datetime import datetime
 from typing import List, Dict, Optional, Union
 from pathlib import Path
+from urllib.parse import urlparse
+
 import requests
 import validators
 import gradio as gr
+from diskcache import Cache
 from bs4 import BeautifulSoup
 from fake_useragent import UserAgent
+from ratelimit import limits, sleep_and_retry
 from cleantext import clean
-import qrcode
-
-
+import qrcode
+
+# Setup logging
 logging.basicConfig(
     level=logging.INFO,
     format='%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s',
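Note: the new imports pull in `diskcache` and `ratelimit`, but nothing in the hunks below actually uses them yet. A minimal sketch of how they could be wired into the fetching path; the cache directory, call limits, and function name are assumptions, not taken from this commit:

```python
# Sketch only: combining the new diskcache/ratelimit imports.
# Names and limits below are illustrative, not from the diff.
import requests
from diskcache import Cache
from ratelimit import limits, sleep_and_retry

fetch_cache = Cache('output/fetch_cache')  # hypothetical cache directory

@sleep_and_retry
@limits(calls=10, period=60)  # at most 10 requests per minute, else sleep
def fetch_cached(session: requests.Session, url: str, timeout: int = 10) -> str:
    """Rate-limited fetch with a persistent on-disk cache."""
    if url in fetch_cache:
        return fetch_cache[url]
    response = session.get(url, timeout=timeout)
    response.raise_for_status()
    fetch_cache.set(url, response.text, expire=3600)  # cache for an hour
    return response.text
```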
@@ -37,276 +40,15 @@ Path('output/qr_codes').mkdir(parents=True, exist_ok=True)
 class URLProcessor:
     def __init__(self):
         self.session = requests.Session()
-        self.timeout = 10
-        self.
-        [ ~7 further __init__ lines are not recoverable from the diff view; the deleted methods below reference self.use_proxy, self.proxy_url, self.rate_limits, self.request_delay, self.max_retries, self.respect_robots and self.selenium_driver ]
-        # Update session headers with rotating user agents
-        self.update_user_agent()
-
-        if self.use_proxy and self.proxy_url:
-            self.session.proxies = {
-                'http': self.proxy_url,
-                'https': self.proxy_url
-            }
-
-    def update_user_agent(self):
-        """Rotate user agents to avoid detection"""
-        try:
-            self.session.headers.update({
-                'User-Agent': UserAgent().random,
-                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
-                'Accept-Language': 'en-US,en;q=0.5',
-                'Accept-Encoding': 'gzip, deflate, br',
-                'Connection': 'keep-alive',
-                'Upgrade-Insecure-Requests': '1',
-                'Cache-Control': 'max-age=0'
-            })
-        except Exception as e:
-            logger.warning(f"Failed to update user agent: {e}")
-            # Fallback to a common user agent
-            self.session.headers.update({
-                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
-            })
-
-    def get_selenium_driver(self):
-        """Initialize Selenium WebDriver for interactive sites"""
-        if self.selenium_driver is not None:
-            return self.selenium_driver
-
-        try:
-            from selenium import webdriver
-            from selenium.webdriver.chrome.service import Service
-            from selenium.webdriver.chrome.options import Options
-            from webdriver_manager.chrome import ChromeDriverManager
-
-            options = Options()
-            options.add_argument("--headless")
-            options.add_argument("--no-sandbox")
-            options.add_argument("--disable-dev-shm-usage")
-            options.add_argument(f"user-agent={self.session.headers['User-Agent']}")
-            options.add_argument("--disable-notifications")
-            options.add_argument("--disable-popup-blocking")
-            options.add_argument("--disable-extensions")
-
-            service = Service(ChromeDriverManager().install())
-            self.selenium_driver = webdriver.Chrome(service=service, options=options)
-            return self.selenium_driver
-        except Exception as e:
-            logger.error(f"Failed to initialize Selenium: {e}")
-            return None
-
-    def handle_rate_limits(self, domain):
-        """Smart rate limiting based on domain"""
-        from urllib.parse import urlparse
-        import time
-
-        # Extract domain from URL
-        parsed_domain = urlparse(domain).netloc
-
-        # Check if we've accessed this domain recently
-        current_time = time.time()
-        if parsed_domain in self.rate_limits:
-            last_access, count = self.rate_limits[parsed_domain]
-
-            # Different delay strategies for different domains
-            if "facebook" in parsed_domain or "instagram" in parsed_domain:
-                min_delay = 5.0  # Longer delay for social media sites
-            elif "gov" in parsed_domain:
-                min_delay = 2.0  # Be respectful with government sites
-            else:
-                min_delay = self.request_delay
-
-            # Exponential backoff if we're making many requests
-            if count > 10:
-                min_delay *= 2
-
-            # Wait if needed
-            elapsed = current_time - last_access
-            if elapsed < min_delay:
-                time.sleep(min_delay - elapsed)
-
-            # Update count
-            self.rate_limits[parsed_domain] = (time.time(), count + 1)
-        else:
-            # First time accessing this domain
-            self.rate_limits[parsed_domain] = (current_time, 1)
-
-    def handle_interactive_site(self, url):
-        """Handle sites that require interaction to bypass blocks"""
-        driver = self.get_selenium_driver()
-        if not driver:
-            return None
-
-        try:
-            driver.get(url)
-
-            # Wait for page to load
-            import time
-            time.sleep(3)
-
-            # Handle different types of sites
-            if "facebook.com" in url or "instagram.com" in url:
-                self._handle_social_media_site(driver)
-            elif "google.com" in url:
-                self._handle_google_site(driver)
-
-            # Get the page source after interaction
-            page_source = driver.page_source
-
-            return {
-                'content': page_source,
-                'content_type': 'text/html',
-                'url': url,
-                'title': driver.title
-            }
-        except Exception as e:
-            logger.error(f"Error handling interactive site {url}: {e}")
-            return None
-
-    def _handle_social_media_site(self, driver):
-        """Handle Facebook/Instagram login walls"""
-        from selenium.webdriver.common.by import By
-        from selenium.webdriver.common.keys import Keys
-        from selenium.webdriver.support.ui import WebDriverWait
-        from selenium.webdriver.support import expected_conditions as EC
-
-        try:
-            # Try to find and close login popups
-            close_buttons = driver.find_elements(By.XPATH, "//button[contains(@aria-label, 'Close')]")
-            if close_buttons:
-                close_buttons[0].click()
-                time.sleep(1)
-
-            # Press ESC key to dismiss popups
-            webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()
-            time.sleep(1)
-
-            # Scroll down to load more content
-            driver.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
-            time.sleep(2)
-        except Exception as e:
-            logger.warning(f"Error handling social media site: {e}")
-
-    def _handle_google_site(self, driver):
-        """Handle Google authentication and consent pages"""
-        from selenium.webdriver.common.by import By
-
-        try:
-            # Look for consent buttons
-            consent_buttons = driver.find_elements(By.XPATH, "//button[contains(text(), 'Accept all')]")
-            if consent_buttons:
-                consent_buttons[0].click()
-                time.sleep(1)
-
-            # Look for "I agree" buttons
-            agree_buttons = driver.find_elements(By.XPATH, "//button[contains(text(), 'I agree')]")
-            if agree_buttons:
-                agree_buttons[0].click()
-                time.sleep(1)
-        except Exception as e:
-            logger.warning(f"Error handling Google site: {e}")
-
-    def fetch_content(self, url: str) -> Optional[Dict]:
-        """Fetch content with smart handling for different sites"""
-        # Check if URL is allowed by robots.txt
-        if self.respect_robots and not self.check_robots_txt(url):
-            logger.warning(f"URL {url} is disallowed by robots.txt")
-            return None
-
-        # Apply rate limiting
-        self.handle_rate_limits(url)
-
-        # Rotate user agent occasionally
-        if random.random() < 0.3:  # 30% chance to rotate
-            self.update_user_agent()
-
-        # Determine if site needs special handling
-        needs_selenium = any(domain in url.lower() for domain in [
-            'facebook.com', 'instagram.com', 'linkedin.com',
-            'google.com/search', 'twitter.com', 'x.com'
-        ])
-
-        for attempt in range(self.max_retries):
-            try:
-                if needs_selenium:
-                    return self.handle_interactive_site(url)
-
-                # Try with cloudscraper first for sites with anti-bot measures
-                if any(domain in url.lower() for domain in ['cloudflare', '.gov']):
-                    import cloudscraper
-                    scraper = cloudscraper.create_scraper(
-                        browser={'browser': 'chrome', 'platform': 'darwin', 'mobile': False}
-                    )
-                    response = scraper.get(url, timeout=self.timeout)
-                else:
-                    # Standard request for most sites
-                    response = self.session.get(url, timeout=self.timeout)
-
-                response.raise_for_status()
-
-                return {
-                    'content': response.text,
-                    'content_type': response.headers.get('Content-Type', ''),
-                    'url': url,
-                    'status_code': response.status_code
-                }
-            except Exception as e:
-                logger.warning(f"Attempt {attempt + 1} failed for {url}: {e}")
-                if attempt < self.max_retries - 1:
-                    # Exponential backoff
-                    time.sleep(self.request_delay * (2 ** attempt))
-
-        logger.error(f"All attempts failed for {url}")
-        return None
-
-    def check_robots_txt(self, url: str) -> bool:
-        """Check if URL is allowed by robots.txt"""
-        if not self.respect_robots:
-            return True
-
-        try:
-            from urllib.parse import urlparse
-            from urllib.robotparser import RobotFileParser
-
-            parsed_url = urlparse(url)
-            robots_url = f"{parsed_url.scheme}://{parsed_url.netloc}/robots.txt"
-
-            rp = RobotFileParser()
-            rp.set_url(robots_url)
-            rp.read()
-
-            return rp.can_fetch(self.session.headers['User-Agent'], url)
-        except Exception as e:
-            logger.warning(f"Error checking robots.txt: {e}")
-            return True
-
-    def fetch_content(self, url: str) -> Optional[Dict]:
-        """Fetch content with built-in rate limiting and robots.txt checking"""
-        if not self.check_robots_txt(url):
-            logger.warning(f"URL {url} is disallowed by robots.txt")
-            return None
-
-        time.sleep(self.request_delay)  # Basic rate limiting
-
-        for attempt in range(self.max_retries):
-            try:
-                if 'drive.google.com' in url:
-                    return self._handle_google_drive(url)
-                if 'calendar.google.com' in url:
-                    return self._handle_google_calendar(url)
-                return self._fetch_html_content(url)
-            except Exception as e:
-                logger.error(f"Attempt {attempt + 1} failed: {e}")
-                if attempt < self.max_retries - 1:
-                    time.sleep(self.request_delay * (attempt + 1))
-
-        return None
+        self.timeout = 10  # seconds
+        self.session.headers.update({
+            'User-Agent': UserAgent().random,
+            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
+            'Accept-Language': 'en-US,en;q=0.5',
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Connection': 'keep-alive',
+            'Upgrade-Insecure-Requests': '1'
+        })
 
     def advanced_text_cleaning(self, text: str) -> str:
         """Robust text cleaning with version compatibility"""
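This hunk deletes the Selenium driver, the per-domain rate limiting, and both robots.txt-aware `fetch_content` variants (the old class even defined `fetch_content` twice, so the second silently shadowed the first); the new `__init__` keeps only a static header set with one random User-Agent chosen at startup. If robots.txt compliance is still wanted, the removed logic reduces to a small stdlib helper. A sketch mirroring the deleted `check_robots_txt`, preserving its fail-open behavior:

```python
from urllib.parse import urlparse
from urllib.robotparser import RobotFileParser

def can_fetch(url: str, user_agent: str) -> bool:
    """Minimal robots.txt check, mirroring the deleted check_robots_txt()."""
    parsed = urlparse(url)
    rp = RobotFileParser()
    rp.set_url(f"{parsed.scheme}://{parsed.netloc}/robots.txt")
    try:
        rp.read()  # network fetch of robots.txt
    except OSError:
        return True  # fail open, as the removed code did
    return rp.can_fetch(user_agent, url)
```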
@@ -328,9 +70,9 @@ class URLProcessor:
             return cleaned_text
         except Exception as e:
             logger.warning(f"Text cleaning error: {e}. Using fallback method.")
-            text = re.sub(r'[\x00-\x1F\x7F-\x9F]', '', text)
-            text = text.encode('ascii', 'ignore').decode('ascii')
-            text = re.sub(r'\s+', ' ', text)
+            text = re.sub(r'[\x00-\x1F\x7F-\x9F]', '', text)  # Remove control characters
+            text = text.encode('ascii', 'ignore').decode('ascii')  # Remove non-ASCII characters
+            text = re.sub(r'\s+', ' ', text)  # Normalize whitespace
             return text.strip()
 
     def validate_url(self, url: str) -> Dict:
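The fallback branch only gains inline comments here; the primary path above this hunk presumably calls `cleantext.clean`. A hedged sketch of what such a call typically looks like; these keyword arguments are assumptions, since the diff does not show them:

```python
from cleantext import clean

def basic_clean(text: str) -> str:
    # Assumed shape of the primary cleaning path; the actual arguments
    # are not visible in this diff.
    return clean(
        text,
        fix_unicode=True,    # repair mojibake
        to_ascii=True,       # transliterate to ASCII
        lower=False,         # keep original casing
        no_line_breaks=False,
    )
```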
@@ -338,7 +80,7 @@ class URLProcessor:
         try:
             if not validators.url(url):
                 return {'is_valid': False, 'message': 'Invalid URL format'}
-
+
             response = self.session.head(url, timeout=self.timeout)
             response.raise_for_status()
             return {'is_valid': True, 'message': 'URL is valid and accessible'}
@@ -348,10 +90,15 @@ class URLProcessor:
     def fetch_content(self, url: str) -> Optional[Dict]:
         """Universal content fetcher with special case handling"""
         try:
+            # Google Drive document handling
             if 'drive.google.com' in url:
                 return self._handle_google_drive(url)
+
+            # Google Calendar ICS handling
             if 'calendar.google.com' in url and 'ical' in url:
                 return self._handle_google_calendar(url)
+
+            # Standard HTML processing
             return self._fetch_html_content(url)
         except Exception as e:
             logger.error(f"Content fetch failed: {e}")
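The retained `fetch_content` dispatches purely on URL substrings. A usage sketch of the resulting contract (an ordinary page routes to `_fetch_html_content` and comes back as a dict):

```python
processor = URLProcessor()
result = processor.fetch_content("https://example.com/article")
if result:
    print(result['content_type'])       # e.g. "text/html; charset=utf-8"
    print(result['content'][:200])      # first 200 chars of cleaned text
```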
@@ -364,11 +111,11 @@ class URLProcessor:
         if not file_id:
             logger.error(f"Invalid Google Drive URL: {url}")
             return None
-
+
         direct_url = f"https://drive.google.com/uc?export=download&id={file_id.group(1)}"
         response = self.session.get(direct_url, timeout=self.timeout)
         response.raise_for_status()
-
+
         return {
             'content': response.text,
             'content_type': response.headers.get('Content-Type', ''),
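`file_id.group(1)` comes from a regex match made just above this hunk, which the diff does not show. A typical extraction pattern for Drive share links, offered as an assumption rather than the file's actual regex:

```python
import re

# Assumed shape of the file_id extraction; the real regex sits above this hunk.
url = "https://drive.google.com/file/d/1A2b3C4d5E/view?usp=sharing"
file_id = re.search(r'/file/d/([a-zA-Z0-9_-]+)', url)
if file_id:
    direct_url = f"https://drive.google.com/uc?export=download&id={file_id.group(1)}"
```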
@@ -397,13 +144,16 @@ class URLProcessor:
         try:
             response = self.session.get(url, timeout=self.timeout)
             response.raise_for_status()
-
+
             soup = BeautifulSoup(response.text, 'html.parser')
+
+            # Remove unwanted elements
             for element in soup(['script', 'style', 'nav', 'footer', 'header', 'meta', 'link']):
                 element.decompose()
-
+
+            # Extract main content
             main_content = soup.find('main') or soup.find('article') or soup.body
-
+
             if main_content is None:
                 logger.warning(f"No main content found for URL: {url}")
                 return {
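The HTML path strips non-content tags in place, then falls back through `<main>`, `<article>`, `<body>`. A self-contained demonstration of the same chain:

```python
from bs4 import BeautifulSoup

html = "<html><body><nav>menu</nav><article><p>Hello</p></article></body></html>"
soup = BeautifulSoup(html, 'html.parser')
# soup([...]) finds every tag in the list; decompose() removes it in place
for element in soup(['script', 'style', 'nav', 'footer', 'header', 'meta', 'link']):
    element.decompose()
main = soup.find('main') or soup.find('article') or soup.body
print(main.get_text(separator='\n', strip=True))  # -> "Hello"
```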
@@ -411,10 +161,11 @@ class URLProcessor:
                 'content_type': response.headers.get('Content-Type', ''),
                 'timestamp': datetime.now().isoformat()
             }
-
+
+            # Clean and structure content
             text_content = main_content.get_text(separator='\n', strip=True)
             cleaned_content = self.advanced_text_cleaning(text_content)
-
+
             return {
                 'content': cleaned_content,
                 'content_type': response.headers.get('Content-Type', ''),
@@ -426,11 +177,11 @@ class URLProcessor:
 
 class FileProcessor:
     """Class to handle file processing"""
-
+
     def __init__(self, max_file_size: int = 2 * 1024 * 1024 * 1024):  # 2GB default
         self.max_file_size = max_file_size
         self.supported_text_extensions = {'.txt', '.md', '.csv', '.json', '.xml'}
-
+
     def is_text_file(self, filepath: str) -> bool:
         """Check if file is a text file"""
         try:
@@ -481,7 +232,7 @@ class FileProcessor:
                     "source": "file",
                     "filename": filename,
                     "content": content,
-                    "timestamp": datetime.now
+                    "timestamp": datetime.now().isoformat()
                 })
             except Exception as e:
                 logger.error(f"Error reading file {filename}: {str(e)}")
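This one-line fix matters more than it looks: `datetime.now` without parentheses stores the bound method object, which `json.dumps` later rejects; calling it and taking `.isoformat()` stores a plain string. A two-line check:

```python
import json
from datetime import datetime

json.dumps({"t": datetime.now().isoformat()})  # fine: '{"t": "2024-..."}'
# json.dumps({"t": datetime.now}) would raise TypeError: not JSON serializable
```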
@@ -491,21 +242,25 @@ class FileProcessor:
         """Process a single file"""
         try:
             file_stat = os.stat(file.name)
-
+
+            # For very large files, read in chunks and summarize
             if file_stat.st_size > 100 * 1024 * 1024:  # 100MB
                 logger.info(f"Processing large file: {file.name} ({file_stat.st_size} bytes)")
-
+
+                # Read first and last 1MB for extremely large files
                 content = ""
                 with open(file.name, 'r', encoding='utf-8', errors='ignore') as f:
                     content = f.read(1 * 1024 * 1024)  # First 1MB
                     content += "\n...[Content truncated due to large file size]...\n"
-
+
+                    # Seek to the last 1MB
                     f.seek(max(0, file_stat.st_size - 1 * 1024 * 1024))
                     content += f.read()  # Last 1MB
             else:
+                # Regular file processing
                 with open(file.name, 'r', encoding='utf-8', errors='ignore') as f:
                     content = f.read()
-
+
             return [{
                 'source': 'file',
                 'filename': os.path.basename(file.name),
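The large-file branch samples the first and last 1 MB. One caveat worth noting: the file is opened in text mode, and `f.seek()` to an arbitrary offset of a text-mode file is only officially supported for offsets previously returned by `tell()`; a raw byte-based seek can also land mid-character in UTF-8. A hedged binary-mode alternative implementing the same head-plus-tail idea:

```python
import os

def head_and_tail(path: str, chunk: int = 1024 * 1024) -> str:
    """Read the first and last `chunk` bytes of a file (binary-safe).

    Intended for files much larger than `chunk`; for smaller files the
    head and tail would overlap.
    """
    size = os.path.getsize(path)
    with open(path, 'rb') as f:
        head = f.read(chunk)
        f.seek(max(0, size - chunk))  # byte offsets are well-defined in binary mode
        tail = f.read(chunk)
    marker = b"\n...[Content truncated due to large file size]...\n"
    return (head + marker + tail).decode('utf-8', errors='ignore')
```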
@@ -520,68 +275,55 @@ class FileProcessor:
             logger.error(f"File processing error: {e}")
             return []
 
-            [ ~49 deleted lines (the head of the old clean_json/generate_qr_code helpers) are not recoverable from the diff view ]
-                border=4,
-            )
-            json_str = json.dumps(cleaned_item, ensure_ascii=False)
-            qr.add_data(json_str)
-            qr.make(fit=True)
-
-            img = qrcode.make_image(fill_color="black", back_color="white")
-            output_path = output_dir / f'item_{idx}_qr_{int(time.time())}.png'
-            img.save(str(output_path))
-            paths.append(str(output_path))
-            return paths
-        else:
-            cleaned_item = clean_json(data)
+def clean_json(data: Union[str, Dict]) -> Optional[Dict]:
+    """Clean and validate JSON data"""
+    try:
+        # If it's a string, try to parse it
+        if isinstance(data, str):
+            # Remove any existing content and extra whitespace
+            data = data.strip()
+            data = json.loads(data)
+
+        # Convert to string and back to ensure proper JSON format
+        cleaned = json.loads(json.dumps(data))
+        return cleaned
+    except json.JSONDecodeError as e:
+        logger.error(f"JSON cleaning error: {e}")
+        return None
+    except Exception as e:
+        logger.error(f"Unexpected error while cleaning JSON: {e}")
+        return None
+
+def generate_qr_code(data: Union[str, Dict], combined: bool = True) -> List[str]:
+    """Generate QR code(s) from data"""
+    try:
+        output_dir = Path('output/qr_codes')
+        output_dir.mkdir(parents=True, exist_ok=True)
+
+        if combined:
+            # Generate single QR code for all data
+            cleaned_data = clean_json(data)
+            if cleaned_data:
+                qr = qrcode.QRCode(
+                    version=None,
+                    error_correction=qrcode.constants.ERROR_CORRECT_L,
+                    box_size=10,
+                    border=4,
+                )
+                json_str = json.dumps(cleaned_data, ensure_ascii=False)
+                qr.add_data(json_str)
+                qr.make(fit=True)
+
+                img = qr.make_image(fill_color="black", back_color="white")
+                output_path = output_dir / f'combined_qr_{int(time.time())}.png'
+                img.save(str(output_path))
+                return [str(output_path)]
+        else:
+            # Generate separate QR codes for each item
+            if isinstance(data, list):
+                paths = []
+                for idx, item in enumerate(data):
+                    cleaned_item = clean_json(item)
                     if cleaned_item:
                         qr = qrcode.QRCode(
                             version=None,
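The new module-level `generate_qr_code` serializes the entire payload into a single image when `combined=True` (note it also fixes the old `qrcode.make_image(...)` call, which should be `qr.make_image(...)` on the QRCode instance). Worth keeping in mind: a version-40 QR code at `ERROR_CORRECT_L` holds at most 2,953 bytes of binary data, so large scraped pages will not fit in one code. A size guard one might add before `qr.add_data`; the constant and helper are illustrative, not part of this commit:

```python
import json

MAX_QR_BYTES = 2953  # capacity of a version-40 QR code at ERROR_CORRECT_L

def fits_in_one_qr(data) -> bool:
    """Rough pre-check before qr.add_data(): byte count of the JSON payload."""
    payload = json.dumps(data, ensure_ascii=False).encode('utf-8')
    return len(payload) <= MAX_QR_BYTES
```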
@@ -592,377 +334,220 @@ class FileProcessor:
                         json_str = json.dumps(cleaned_item, ensure_ascii=False)
                         qr.add_data(json_str)
                         qr.make(fit=True)
-
-                        img =
-                        output_path = output_dir / f'
+
+                        img = qr.make_image(fill_color="black", back_color="white")
+                        output_path = output_dir / f'item_{idx}_qr_{int(time.time())}.png'
                         img.save(str(output_path))
-
-    [ ~14 deleted lines (the head of the removed decode_qr_code helper) are not recoverable from the diff view ]
-        # Convert to grayscale
-        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
-        # Initialize QRCode detector
-        detector = cv2.QRCodeDetector()
-
-        # Detect and decode
-        data, vertices, _ = detector.detectAndDecode(gray)
-
-        if vertices is not None and data:
-            # Check if this might be binary data (like a PDF)
-            if data.startswith("%PDF") or not all(ord(c) < 128 for c in data):
-                # This is likely binary data, encode as base64
-                try:
-                    # If it's already a string representation, convert to bytes first
-                    if isinstance(data, str):
-                        data_bytes = data.encode('latin-1')  # Use latin-1 to preserve byte values
-                    else:
-                        data_bytes = data
-
-                    # Encode as base64
-                    base64_data = base64.b64encode(data_bytes).decode('ascii')
-                    return f"base64:{base64_data}"
-                except Exception as e:
-                    logger.error(f"Error encoding binary data: {e}")
-
-            return data
-
-        logger.warning("No QR code found in image")
-        return None
-    except Exception as e:
-        logger.error(f"QR decoding error: {e}")
-        return None
+                        paths.append(str(output_path))
+                return paths
+            else:
+                # Single item, not combined
+                cleaned_item = clean_json(data)
+                if cleaned_item:
+                    qr = qrcode.QRCode(
+                        version=None,
+                        error_correction=qrcode.constants.ERROR_CORRECT_L,
+                        box_size=10,
+                        border=4,
+                    )
+                    json_str = json.dumps(cleaned_item, ensure_ascii=False)
+                    qr.add_data(json_str)
+                    qr.make(fit=True)
 
-
-
-
-
+                    img = qr.make_image(fill_color="black", back_color="white")
+                    output_path = output_dir / f'single_qr_{int(time.time())}.png'
+                    img.save(str(output_path))
+                    return [str(output_path)]
 
+        return []
+    except Exception as e:
+        logger.error(f"QR generation error: {e}")
+        return []
 
-def datachat_interface(mode: str, data_source: str, json_input: str, qr_image: str, query: str) -> str:
-    data = None
-    if data_source == "JSON Input":
-        data = json_input
-    elif data_source == "QR Code":
-        try:
-            decoded_data = decode_qr_code(qr_image)
-            # Handle base64 encoded data...
-        except Exception as e:
-            return f"Invalid QR code data provided: {e}"
-
-        # Handle base64 encoded data
-        if decoded_data and decoded_data.startswith("base64:"):
-            base64_part = decoded_data[7:]  # Remove the "base64:" prefix
-            try:
-                # For PDFs and other binary data, provide info about the content
-                binary_data = base64.b64decode(base64_part)
-                if binary_data.startswith(b"%PDF"):
-                    data = "The QR code contains a PDF document. Binary data cannot be processed directly."
-                else:
-                    # Try to decode as text as a fallback
-                    data = binary_data.decode('utf-8', errors='replace')
-            except Exception as e:
-                logger.error(f"Error processing base64 data: {e}")
-                data = "The QR code contains binary data that cannot be processed directly."
-        else:
-            data = decoded_data
-
-        if not data:
-            return "No QR code found in the provided image."
-    except Exception as e:
-        return f"Invalid QR code data provided: {e}"
-    else:
-        return "No valid data source selected."
-    if mode == "Trained with Data":
-        return datachat_trained(data, query)  # Ensure this function is defined
-    elif mode == "Chat about Data":
-        return datachat_simple(data, query)  # Ensure this function is defined
-    else:
-        return "Invalid mode selected."
 def create_interface():
     """Create a comprehensive Gradio interface with advanced features"""
+
     css = """
     .container { max-width: 1200px; margin: auto; }
     .warning { background-color: #fff3cd; color: #856404; padding: 10px; border-radius: 4px; }
     .error { background-color: #f8d7da; color: #721c24; padding: 10px; border-radius: 4px; }
     .success { background-color: #d4edda; color: #155724; padding: 10px; border-radius: 4px; }
     """
-
-    with gr.Blocks(css=css, title="Advanced Data Processor & QR
+
+    with gr.Blocks(css=css, title="Advanced Data Processor & QR Generator") as interface:
         gr.Markdown("# Advanced Data Processing & QR Code Generator")
-
-        [ ~12 deleted lines (the head of the removed URL-input tab, whose url_input, depth_slider and respect_robots controls are referenced below) are not recoverable from the diff view ]
-        <div class="warning">
-        ⚠️ <strong>Warning:</strong> Higher depth values (>2) may significantly increase processing time and resource usage.
-        </div>
-        """)
-
-        # URL processor instance
-        url_processor = URLProcessor()
-
-        def process_url(url, depth, respect_robots):
-            url_processor.respect_robots = respect_robots
-            results = []
-            try:
-                # Validate URL
-                validation = url_processor.validate_url(url)
-                if not validation['is_valid']:
-                    return {"error": validation['message']}
-
-                # Process with depth
-                processed_urls = set()
-                urls_to_process = [(url, 0)]  # (url, current_depth)
-
-                while urls_to_process:
-                    current_url, current_depth = urls_to_process.pop(0)
-
-                    if current_url in processed_urls:
-                        continue
-
-                    processed_urls.add(current_url)
-                    content = url_processor.fetch_content(current_url)
-
-                    if content:
-                        results.append({
-                            "url": current_url,
-                            "content": content.get('content', ''),
-                            "content_type": content.get('content_type', ''),
-                            "timestamp": datetime.now().isoformat()
-                        })
-
-                        # If we haven't reached max depth, extract and queue more URLs
-                        if current_depth < depth:
-                            soup = BeautifulSoup(content.get('content', ''), 'html.parser')
-                            for link in soup.find_all('a', href=True):
-                                next_url = link['href']
-                                if next_url.startswith('/'):
-                                    # Convert relative URL to absolute
-                                    from urllib.parse import urlparse, urljoin
-                                    parsed_url = urlparse(current_url)
-                                    base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
-                                    next_url = urljoin(base_url, next_url)
-
-                                if validators.url(next_url) and next_url not in processed_urls:
-                                    urls_to_process.append((next_url, current_depth + 1))
-
-                return results
-            except Exception as e:
-                logger.error(f"URL processing error: {e}")
-                return {"error": str(e)}
-
-        def create_download_zip(results):
-            if not results or (isinstance(results, dict) and 'error' in results):
-                return None
-
-            try:
-                # Create a temporary zip file
-                with tempfile.NamedTemporaryFile(suffix='.zip', delete=False) as tmp:
-                    with zipfile.ZipFile(tmp.name, 'w') as zipf:
-                        # Add JSON data
-                        zipf.writestr('extracted_data.json', json.dumps(results, indent=2))
-
-                        # Add individual text files for each URL
-                        for idx, item in enumerate(results):
-                            if 'content' in item:
-                                zipf.writestr(f'content_{idx}_{int(time.time())}.txt', item['content'])
-
-                    return tmp.name
-            except Exception as e:
-                logger.error(f"Error creating ZIP file: {e}")
-                return None
-
-        extract_btn.click(process_url, [url_input, depth_slider, respect_robots], url_output)
-        download_btn.click(create_download_zip, [url_output], download_output)
-
-        # ZIP File Extractor Tab
-        with gr.Tab("ZIP File Extractor"):
-            zip_file_input = gr.File(label="Upload ZIP File")
-            extract_zip_btn = gr.Button("Extract and Process")
-            zip_output = gr.JSON(label="Extracted Data")
-            zip_qr_btn = gr.Button("Generate QR Code")
-            zip_qr_output = gr.Image(label="QR Code")
-
-        file_processor = FileProcessor()
-
-        def process_zip_file(file):
-            if not file:
-                return {"error": "No file uploaded"}
-
-            try:
-                results = file_processor.process_file(file)
-                return results
-            except Exception as e:
-                logger.error(f"ZIP processing error: {e}")
-                return {"error": str(e)}
-
-        def generate_zip_qr(data):
-            if not data or (isinstance(data, dict) and 'error' in data):
-                return None
-
-            try:
-                return file_processor.generate_qr_code(data, combined=True)[0]
-            except Exception as e:
-                logger.error(f"QR generation error: {e}")
-                return None
-
-        extract_zip_btn.click(process_zip_file, [zip_file_input], zip_output)
-        zip_qr_btn.click(generate_zip_qr, [zip_output], zip_qr_output)
-
-        # Raw Text to JSON Tab
-        with gr.Tab("Text to JSON"):
-            text_input = gr.Textbox(lines=10, label="Raw Text Input")
-            json_structure = gr.Dropdown(
-                choices=["Simple", "Structured", "Key-Value Pairs"],
-                label="JSON Structure",
-                value="Simple"
+
+        with gr.Tab("URL Processing"):
+            url_input = gr.Textbox(
+                label="Enter URLs (comma or newline separated)",
+                lines=5,
+                placeholder="https://example1.com\nhttps://example2.com",
+                value=""
+            )
+
+        with gr.Tab("File Input"):
+            file_input = gr.File(
+                label="Upload text file or ZIP archive",
+                file_types=[".txt", ".zip", ".md", ".csv", ".json", ".xml"]
             )
-
-            [ ~71 deleted lines (the remainder of the removed Text-to-JSON tab) are not recoverable from the diff view ]
+
+        with gr.Tab("Notepad"):
+            text_input = gr.TextArea(
+                label="JSON Data Input",
+                lines=15,
+                placeholder="Paste your JSON data here...",
+                value=""
+            )
+
+        with gr.Row():
+            example_btn = gr.Button("Load Example JSON", variant="secondary")
+            clear_btn = gr.Button("Clear Input", variant="secondary")
+
+        with gr.Row():
+            combine_data = gr.Checkbox(
+                label="Combine all data into single QR code",
+                value=True,
+                info="Generate one QR code for all data, or separate QR codes for each item"
+            )
+            process_btn = gr.Button("Process & Generate QR", variant="primary", scale=2)
+
+        output_json = gr.JSON(label="Processed JSON Data")
+        output_gallery = gr.Gallery(label="Generated QR Codes", columns=2, height=400)
+        output_text = gr.Textbox(label="Processing Status", interactive=False)
+
+        def load_example():
+            example_json = {
+                "type": "product_catalog",
+                "items": [
+                    {
+                        "id": "123",
+                        "name": "Test Product",
+                        "description": "This is a test product description",
+                        "price": 29.99,
+                        "category": "electronics",
+                        "tags": ["test", "sample", "demo"]
+                    },
+                    {
+                        "id": "456",
+                        "name": "Another Product",
+                        "description": "Another test product description",
+                        "price": 49.99,
+                        "category": "accessories",
+                        "tags": ["sample", "test"]
+                    }
+                ],
+                "metadata": {
+                    "timestamp": datetime.now().isoformat(),
+                    "version": "1.0",
+                    "source": "example"
+                }
+            }
+            return json.dumps(example_json, indent=2)
+
+        def clear_input():
+            return ""
+
+        def process_all_inputs(urls, file, text, combine):
+            """Process all input types and generate QR codes"""
+            try:
+                results = []
+
+                # Process text input first (since it's direct JSON)
+                if text and text.strip():
+                    try:
+                        # Try to parse as JSON
+                        json_data = json.loads(text)
+                        if isinstance(json_data, list):
+                            results.extend(json_data)
+                        else:
+                            results.append(json_data)
+                    except json.JSONDecodeError as e:
+                        return None, [], f"❌ Invalid JSON format: {str(e)}"
+
+                # Process URLs if provided
+                if urls and urls.strip():
+                    processor = URLProcessor()
+                    url_list = re.split(r'[,\n]', urls)
+                    url_list = [url.strip() for url in url_list if url.strip()]
+
+                    for url in url_list:
+                        validation = processor.validate_url(url)
+                        if validation.get('is_valid'):
+                            content = processor.fetch_content(url)
+                            if content:
+                                results.append({
+                                    'source': 'url',
+                                    'url': url,
+                                    'content': content,
+                                    'timestamp': datetime.now().isoformat()
+                                })
+
+                # Process files if provided
+                if file:
+                    file_processor = FileProcessor()
+                    file_results = file_processor.process_file(file)
+                    if file_results:
+                        results.extend(file_results)
+
+                # Generate QR codes
+                if results:
+                    qr_paths = generate_qr_code(results, combined=combine)
+                    if qr_paths:
+                        return (
+                            results,
+                            [str(path) for path in qr_paths],
+                            f"✅ Successfully processed {len(results)} items and generated {len(qr_paths)} QR code(s)!"
+                        )
                     else:
-
-            [ ~16 deleted lines are not recoverable from the diff view ]
-        with gr.Tab("DataChat"):
-            mode = gr.Radio(["Trained with Data", "Chat about Data"], label="Mode")
-            data_source = gr.Radio(["JSON Input", "QR Code"], label="Data Source")
-            json_input = gr.Textbox(lines=8, label="JSON Data")
-            qr_image = gr.Image(label="QR Code Image", type="filepath")
-            query = gr.Textbox(label="Query")
-
-            submit_btn = gr.Button("Submit")
-            output = gr.Textbox(label="Response")
-
-            submit_btn.click(datachat_interface, [mode, data_source, json_input, qr_image, query], output)
-
-        # QR Generator Tab (existing)
-        with gr.Tab("QR Generator"):
-            qr_input = gr.Textbox(lines=8, label="Input JSON for QR")
-            generate_btn = gr.Button("Generate QR")
-            qr_output = gr.Image(label="Generated QR Code")
-
-            def generate_qr(json_data):
-                data = file_processor.clean_json(json_data)
-                if data:
-                    return file_processor.generate_qr_code(data)
-                return None
+                        return None, [], "❌ Failed to generate QR codes. Please check the input data."
+                else:
+                    return None, [], "⚠️ No valid content to process. Please provide some input data."
+
+            except Exception as e:
+                logger.error(f"Processing error: {e}")
+                return None, [], f"❌ Error: {str(e)}"
+
+        # Set up event handlers
+        example_btn.click(load_example, outputs=[text_input])
+        clear_btn.click(clear_input, outputs=[text_input])
+        process_btn.click(
+            process_all_inputs,
+            inputs=[url_input, file_input, text_input, combine_data],
+            outputs=[output_json, output_gallery, output_text]
+        )
 
-
+        gr.Markdown("""
+        ### Features
+        - **URL Processing**: Extract content from websites
+        - **File Processing**: Handle text files and archives
+        - **Notepad**: Direct JSON data input/manipulation
+        - **JSON Cleaning**: Automatic JSON validation and formatting
+        - **QR Generation**: Generate QR codes with embedded JSON data
+        - **Flexible Output**: Choose between combined or separate QR codes
+
+        ### Usage Tips
+        1. Use the **Notepad** tab for direct JSON input
+        2. Click "Load Example JSON" to see a sample format
+        3. Choose whether to combine all data into a single QR code
+        4. The generated QR codes will contain the complete JSON data
+        """)
 
     return interface
 
 def main():
+    # Configure system settings
     mimetypes.init()
+
+    # Create output directories
     Path('output/qr_codes').mkdir(parents=True, exist_ok=True)
+
+    # Create and launch interface
    interface = create_interface()
+
+    # Launch with proper configuration
     interface.launch(
         server_name="0.0.0.0",
-        server_port=
+        server_port=8000,
         show_error=True,
         share=False,
         inbrowser=True,
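The wiring convention used throughout this hunk: a click handler returns one value per declared output component, in order. `process_all_inputs` returns `(results, qr_paths, status)` to match `outputs=[output_json, output_gallery, output_text]`, and `gr.Gallery` accepts a list of image paths. A minimal standalone model of the same pattern:

```python
import gradio as gr

# Minimal model of the wiring above: the handler returns one value per
# declared output component, in declaration order.
def handler(text: str):
    data = {"echo": text}
    return data, f"processed {len(text)} chars"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    btn = gr.Button("Run")
    out_json = gr.JSON(label="Data")
    out_status = gr.Textbox(label="Status")
    btn.click(handler, inputs=[inp], outputs=[out_json, out_status])

# demo.launch()
```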
@@ -970,5 +555,4 @@ def main():
     )
 
 if __name__ == "__main__":
-    main()
-
+    main()
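`main()` now hard-codes `server_port=8000`, and `inbrowser=True` is a no-op on a headless host. Gradio's own default is 7860, which is also the port a Hugging Face Space normally expects unless the port is overridden in the Space configuration. A more portable variant, offered as a sketch rather than the committed code, defers to the environment, which Gradio itself honors via `GRADIO_SERVER_PORT`:

```python
import os

def launch_port(default: int = 8000) -> int:
    # Gradio reads GRADIO_SERVER_PORT on its own; resolving it explicitly
    # keeps the fallback (this commit's 8000) visible in one place.
    return int(os.environ.get("GRADIO_SERVER_PORT", default))

# interface.launch(server_name="0.0.0.0", server_port=launch_port(), show_error=True)
```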