Update app.py
app.py
CHANGED
@@ -2,8 +2,8 @@
 Advanced URL & Text Processing Suite - Main Application
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-A sophisticated Gradio interface
-
+A sophisticated Gradio interface with URL processing, file manipulation, QR operations,
+and advanced data chat capabilities.
 """

 import gradio as gr
@@ -11,14 +11,13 @@ import logging
 import json
 import os
 import sys
+import zipfile
+import pandas as pd
+import numpy as np
 from datetime import datetime
 from pathlib import Path
 from typing import Dict, List, Optional, Union, Any, Tuple

-from url_processor import URLProcessor
-from file_processor import FileProcessor
-from qr_processor import QRProcessor
-
 # Configure logging
 logging.basicConfig(
     level=logging.INFO,
@@ -35,391 +34,243 @@ THEME = gr.themes.Soft(
     spacing_size=gr.themes.sizes.spacing_md,
     radius_size=gr.themes.sizes.radius_md,
     text_size=gr.themes.sizes.text_md,
-).set(
-    body_background_fill="*background_fill_secondary",
-    button_primary_background_fill="*primary_500",
-    button_primary_background_fill_hover="*primary_600",
-    button_primary_text_color="white",
-    button_primary_border_color="*primary_500",
-    button_secondary_background_fill="*secondary_500",
-    button_secondary_background_fill_hover="*secondary_600",
-    button_secondary_text_color="white",
-    button_secondary_border_color="*secondary_500",
 )

-
-CUSTOM_CSS = """
-.container {
-    max-width: 1200px !important;
-    margin: auto !important;
-    padding: 2rem !important;
-}
-
-.header {
-    text-align: center;
-    margin-bottom: 2rem;
-    padding: 1rem;
-    background: linear-gradient(135deg, #6366f1 0%, #2563eb 100%);
-    border-radius: 1rem;
-    color: white;
-}
-
-.status-bar {
-    background: #f8fafc;
-    border-radius: 0.5rem;
-    padding: 0.5rem;
-    margin: 1rem 0;
-    border: 1px solid #e2e8f0;
-}
-
-.success-message {
-    color: #059669;
-    background: #ecfdf5;
-    border: 1px solid #059669;
-    border-radius: 0.5rem;
-    padding: 1rem;
-    margin: 1rem 0;
-}
-
-.error-message {
-    color: #dc2626;
-    background: #fef2f2;
-    border: 1px solid #dc2626;
-    border-radius: 0.5rem;
-    padding: 1rem;
-    margin: 1rem 0;
-}
-
-.tab-nav {
-    background: #f8fafc;
-    border-radius: 0.5rem;
-    padding: 0.5rem;
-    margin-bottom: 1rem;
-}
-
-.tab-nav button {
-    transition: all 0.3s ease;
-}
-
-.tab-nav button:hover {
-    transform: translateY(-1px);
-}
-
-.json-editor {
-    font-family: 'JetBrains Mono', monospace;
-    background: #1e293b;
-    color: #e2e8f0;
-    border-radius: 0.5rem;
-    padding: 1rem;
-}
-
-.file-upload {
-    border: 2px dashed #e2e8f0;
-    border-radius: 1rem;
-    padding: 2rem;
-    text-align: center;
-    transition: all 0.3s ease;
-}
-
-.file-upload:hover {
-    border-color: #6366f1;
-    background: #f8fafc;
-}
-
-@keyframes pulse {
-    0% { transform: scale(1); }
-    50% { transform: scale(1.05); }
-    100% { transform: scale(1); }
-}
-
-.processing {
-    animation: pulse 2s infinite;
-}
-"""
-
-class AdvancedInterface:
-    """Advanced Gradio interface with sophisticated processing capabilities"""
-
+class DataChatProcessor:
     def __init__(self):
-
-        self.
-        self.file_processor = FileProcessor()
-        self.qr_processor = QRProcessor()
-        self.processing_history: List[Dict] = []
+        self.trained_data = {}
+        self.current_dataset = None

-    def
-        """Process URLs with advanced error handling and result formatting"""
+    def process_zip_file(self, file_obj, mode):
         try:
-
-
+            if not file_obj:
+                return "Please upload a ZIP file", []

-
-
+            # Extract ZIP contents
+            with zipfile.ZipFile(file_obj.name, 'r') as zip_ref:
+                temp_dir = Path('temp_data')
+                temp_dir.mkdir(exist_ok=True)
+                zip_ref.extractall(temp_dir)

-            # Process
-
+            # Process based on mode
+            if mode == "TrainedOnData":
+                return self._train_on_data(temp_dir)
+            else:  # TalkAboutData
+                return self._analyze_data(temp_dir)
+
+        except Exception as e:
+            logger.error(f"Error processing ZIP file: {e}")
+            return f"Error: {str(e)}", []
+
+    def _train_on_data(self, data_dir):
+        try:
+            datasets = []
+            for file in data_dir.glob('**/*.csv'):
+                df = pd.read_csv(file)
+                datasets.append({
+                    'name': file.name,
+                    'data': df,
+                    'summary': {
+                        'rows': len(df),
+                        'columns': len(df.columns),
+                        'dtypes': df.dtypes.astype(str).to_dict()
+                    }
+                })

-
-
-
+            self.trained_data = {
+                'datasets': datasets,
+                'timestamp': datetime.now().isoformat()
+            }

-
-
-
-
-
-                'success_count': len([r for r in results if 'error' not in r]),
-                'results': results
-            })
+            summary = f"Trained on {len(datasets)} datasets"
+            messages = [
+                {"role": "assistant", "content": "Training completed successfully."},
+                {"role": "assistant", "content": summary}
+            ]

-            return
+            return summary, messages

         except Exception as e:
-            logger.error(f"
-            return
-
-    def
-        """Process uploaded file with comprehensive error handling"""
+            logger.error(f"Error training on data: {e}")
+            return f"Error during training: {str(e)}", []
+
+    def _analyze_data(self, data_dir):
         try:
-
-
+            analyses = []
+            for file in data_dir.glob('**/*.csv'):
+                df = pd.read_csv(file)
+                analyses.append({
+                    'file': file.name,
+                    'shape': df.shape,
+                    'dtypes': df.dtypes.astype(str).to_dict()
+                })

-
+            self.current_dataset = {
+                'analyses': analyses,
+                'timestamp': datetime.now().isoformat()
+            }

-
-
-
-
+            summary = f"Analyzed {len(analyses)} files"
+            messages = [
+                {"role": "assistant", "content": "Analysis completed successfully."},
+                {"role": "assistant", "content": summary}
+            ]

-
-            self.processing_history.append({
-                'timestamp': datetime.now().isoformat(),
-                'type': 'file_processing',
-                'filename': file.name,
-                'results': results
-            })
-
-            return (
-                json.dumps(results, indent=2),
-                f"✅ Successfully processed {file.name}",
-                output_path
-            )
+            return summary, messages

         except Exception as e:
-            logger.error(f"
-            return
-
-    def
-
+            logger.error(f"Error analyzing data: {e}")
+            return f"Error during analysis: {str(e)}", []
+
+    def chat(self, message, history, mode):
+        if not message:
+            return "", history
+
+        history.append({"role": "user", "content": message})
+
         try:
-            if
-
-
-
-
-
-
-
-
-
-                'timestamp': datetime.now().isoformat(),
-                'type': 'qr_generation',
-                'data_length': len(data),
-                'output_path': result['output_path']
-            })
+            if mode == "TrainedOnData":
+                if not self.trained_data:
+                    response = "Please upload and train on data first."
+                else:
+                    response = self._generate_trained_response(message)
+            else:
+                if not self.current_dataset:
+                    response = "Please upload data for analysis first."
+                else:
+                    response = self._generate_analysis_response(message)

-
+            history.append({"role": "assistant", "content": response})
+            return "", history

         except Exception as e:
-            logger.error(f"
-
-
-
-
-
-            error_count = len(results) - success_count
+            logger.error(f"Error in chat: {e}")
+            history.append({"role": "assistant", "content": f"Error: {str(e)}"})
+            return "", history
+
+    def _generate_trained_response(self, message):
+        datasets = self.trained_data['datasets']

-
-            "
-            f"Total URLs: {len(results)}",
-            f"Successful: {success_count}",
-            f"Failed: {error_count}",
-            "",
-            "🔍 Details:"
-        ]
+        if "how many" in message.lower():
+            return f"There are {len(datasets)} datasets."

-
-
-
-
-
+        if "summary" in message.lower():
+            summaries = []
+            for ds in datasets:
+                summaries.append(
+                    f"Dataset '{ds['name']}': {ds['summary']['rows']} rows, "
+                    f"{ds['summary']['columns']} columns"
+                )
+            return "\n".join(summaries)

-            return
+        return "I can help you analyze the trained datasets. Ask about number of datasets or summaries."
+
+    def _generate_analysis_response(self, message):
+        analyses = self.current_dataset['analyses']
+
+        if "how many" in message.lower():
+            return f"There are {len(analyses)} files."
+
+        if "summary" in message.lower():
+            summaries = []
+            for analysis in analyses:
+                summaries.append(
+                    f"File '{analysis['file']}': {analysis['shape'][0]} rows, "
+                    f"{analysis['shape'][1]} columns"
+                )
+            return "\n".join(summaries)
+
+        return "I can help you explore the current dataset. Ask about file count or summaries."

-
-
+def create_interface():
+    data_chat = DataChatProcessor()
+
+    with gr.Blocks(theme=THEME) as interface:
+        gr.Markdown(
+            """
+            # 🌐 Advanced Data Processing & Analysis Suite
+            Enterprise-grade toolkit for data processing, analysis, and interactive chat capabilities.
+            """
+        )

-        with gr.
-
-
-
-
-
-
-
-            """
-        )
-
-        # Main tabs
-        with gr.Tabs(elem_classes="tab-nav") as tabs:
-            # URL Processing Tab
-            with gr.Tab("🔗 URL Processing", id=1):
-                with gr.Row():
-                    with gr.Column(scale=2):
-                        url_input = gr.Textbox(
-                            label="URLs",
-                            placeholder="Enter URLs (one per line)",
-                            lines=5
-                        )
-                        mode = gr.Radio(
-                            choices=["basic", "interactive", "deep"],
-                            value="basic",
-                            label="Processing Mode"
-                        )
-                        process_btn = gr.Button("🚀 Process URLs", variant="primary")
-
-                    with gr.Column(scale=1):
-                        gr.Markdown(
-                            """
-                            ### 📝 Instructions
-                            1. Enter URLs (one per line)
-                            2. Select processing mode:
-                               - Basic: Simple content fetch
-                               - Interactive: Handle dynamic content
-                               - Deep: Follow links and analyze deeply
-                            3. Click Process to start
-                            """
-                        )
+        with gr.Tab("💬 DataChat"):
+            with gr.Row():
+                # Left column for file upload and mode selection
+                with gr.Column(scale=1):
+                    data_file = gr.File(
+                        label="Upload ZIP File",
+                        file_types=[".zip"]
+                    )

-
-
-
+                    mode = gr.Radio(
+                        choices=["TrainedOnData", "TalkAboutData"],
+                        value="TrainedOnData",
+                        label="Chat Mode"
+                    )
+
+                    process_btn = gr.Button("Process Data", variant="primary")

-
-
-
-
-                            interactive=False,
-                            lines=10
-                        )
+                    status_output = gr.Textbox(
+                        label="Status",
+                        interactive=False
+                    )

-            #
-            with gr.
-
-
-
-
-
-
-                        process_file_btn = gr.Button("📥 Process File", variant="primary")
-
-                    with gr.Column(scale=1):
-                        gr.Markdown(
-                            """
-                            ### 📝 Supported Formats
-                            - Text files (.txt)
-                            - PDF documents (.pdf)
-                            - Archives (.zip, .tar.gz)
-                            - Images (.jpg, .png)
-                            - And more...
-                            """
-                        )
+                # Right column for chat interface
+                with gr.Column(scale=2):
+                    chatbot = gr.Chatbot(
+                        label="Chat History",
+                        height=400,
+                        show_label=True,
+                        type="messages"  # Specify OpenAI-style message format
+                    )

-
-
-
+                    msg = gr.Textbox(
+                        label="Your Message",
+                        placeholder="Ask questions about your data...",
+                        lines=2
+                    )

-                with gr.Accordion("Results", open=False):
-                    file_json_output = gr.JSON(label="Detailed Results")
-
-            # QR Code Tab
-            with gr.Tab("📱 QR Code", id=3):
                     with gr.Row():
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                            """
-                        )
-
-        # Event handlers
-        process_btn.click(
-            fn=self.process_urls,
-            inputs=[url_input, mode],
-            outputs=[json_output, status_output, summary_output]
-        )
-
-        clear_btn.click(
-            fn=lambda: ("", "", ""),
-            inputs=[],
-            outputs=[url_input, status_output, summary_output]
-        )
-
-        process_file_btn.click(
-            fn=self.process_file,
-            inputs=[file_input],
-            outputs=[file_json_output, file_status, file_output]
-        )
-
-        generate_qr_btn.click(
-            fn=self.generate_qr,
-            inputs=[qr_input, qr_size],
-            outputs=[qr_output, qr_status]
-        )
+                        submit_btn = gr.Button("Send", variant="primary")
+                        clear_btn = gr.Button("Clear Chat", variant="secondary")
+
+            # Event handlers
+            process_btn.click(
+                fn=data_chat.process_zip_file,
+                inputs=[data_file, mode],
+                outputs=[status_output, chatbot]
+            )
+
+            submit_btn.click(
+                fn=data_chat.chat,
+                inputs=[msg, chatbot, mode],
+                outputs=[msg, chatbot]
+            )
+
+            msg.submit(
+                fn=data_chat.chat,
+                inputs=[msg, chatbot, mode],
+                outputs=[msg, chatbot]
+            )
+
+            clear_btn.click(
+                fn=lambda: ([], "Chat cleared"),
+                outputs=[chatbot, status_output]
+            )

     return interface

 def main():
-    """Main entry point"""
     try:
-
-
-
-
-
-
-
-
-
-            debug=True,
-            enable_queue=True,
-            max_threads=40,
-            auth=None,  # Add authentication if needed
-            ssl_keyfile=None,  # Add SSL if needed
-            ssl_certfile=None
-        )
+        interface = create_interface()
+        if interface:
+            interface.launch(
+                server_name="0.0.0.0",
+                server_port=8000
+            )
+        else:
+            logger.error("Failed to create interface")
+            sys.exit(1)
     except Exception as e:
         logger.error(f"Application startup error: {e}", exc_info=True)
         sys.exit(1)
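The new DataChat flow added in this commit can be exercised without launching the Gradio UI. Below is a minimal sketch (not part of the commit) assuming the Space's `app.py` is importable as `app` with gradio and pandas installed; the `demo/` paths and the `SimpleNamespace` stand-in for the uploaded file are illustrative only, relying on the fact that `process_zip_file` reads just `file_obj.name`.

```python
import zipfile
from pathlib import Path
from types import SimpleNamespace

import pandas as pd

from app import DataChatProcessor  # assumes this Space's app.py is on the import path

# Build a small ZIP with one CSV, mirroring the upload format the DataChat tab expects.
Path("demo").mkdir(exist_ok=True)
pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}).to_csv("demo/sample.csv", index=False)
with zipfile.ZipFile("demo/sample.zip", "w") as zf:
    zf.write("demo/sample.csv", arcname="sample.csv")

processor = DataChatProcessor()
upload = SimpleNamespace(name="demo/sample.zip")  # stand-in for the gr.File upload object

# "TrainedOnData" mode: extract the ZIP, load every CSV, and store per-dataset summaries.
status, messages = processor.process_zip_file(upload, "TrainedOnData")
print(status)  # e.g. "Trained on 1 datasets"

# Chat history uses the same OpenAI-style dicts as gr.Chatbot(type="messages").
_, history = processor.chat("Give me a summary of the data", messages, "TrainedOnData")
print(history[-1]["content"])  # dataset name with row/column counts
```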