kgauvin603 committed on
Commit
07dafd9
·
verified ·
1 Parent(s): 9ea277f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -5
app.py CHANGED
@@ -247,9 +247,36 @@ def process_awr(awr_text, threshold, performance_test_mode, exadata_model, rack_
247
 
248
  # === Gradio UI ===
249
  with gr.Blocks() as demo:
 
250
  with gr.Tab("Manual AWR Analysis"):
251
  gr.Markdown("# 🧠 Multi-Agent Oracle AWR Analyzer (Production Edition)")
252
- awr_text = gr.Textbox(label="Paste AWR Report", lines=30)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
253
  threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars)")
254
  performance_test_mode = gr.Checkbox(label="Performance Test Mode")
255
  exadata_model = gr.Dropdown(choices=list(exadata_specs.keys()), label="Exadata Model", visible=False)
@@ -266,9 +293,11 @@ with gr.Blocks() as demo:
266
  rating = gr.Textbox(label="Rater", lines=3)
267
  retry_status = gr.Textbox(label="Retry Status")
268
 
269
- analyze_btn.click(process_awr,
270
- inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size, llm_selector],
271
- outputs=[output, health, rating, retry_status])
 
 
272
 
273
  with gr.Tab("Compare AWRs from OCI"):
274
  upload_file = gr.File(label="Upload AWR Report", file_types=[".html", ".txt"])
@@ -281,9 +310,14 @@ with gr.Blocks() as demo:
281
 
282
  llm_compare = gr.Dropdown(choices=list(supported_llms.keys()), value="gpt-4-turbo", label="LLM Model for Comparison")
283
  compare_output = gr.Textbox(label="Comparison Output", lines=20)
284
- gr.Button("Compare Selected AWRs").click(fn=compare_awrs, inputs=[file_multiselect, llm_compare], outputs=compare_output)
 
 
 
 
285
 
286
  if __name__ == "__main__":
287
  demo.launch(debug=True)
288
 
 
289
 
 
247
 
248
  # === Gradio UI ===
249
  with gr.Blocks() as demo:
250
+
251
  with gr.Tab("Manual AWR Analysis"):
252
  gr.Markdown("# 🧠 Multi-Agent Oracle AWR Analyzer (Production Edition)")
253
+
254
+ # NEW: File upload for AWR
255
+ awr_file = gr.File(label="Upload AWR Report (.html or .txt)", file_types=[".html", ".txt"])
256
+ awr_text = gr.Textbox(label="AWR Report (pasted or loaded)", lines=30)
257
+
258
+ def awr_file_to_text(file_obj):
259
+ if not file_obj:
260
+ return ""
261
+ # Gradio File is a dict-like with 'name' and 'path'
262
+ filename = file_obj.name if hasattr(file_obj, "name") else str(file_obj)
263
+ # Support io.BytesIO for Gradio's in-memory files, fallback to path
264
+ try:
265
+ # Gradio may provide either path or file
266
+ content = file_obj.read() if hasattr(file_obj, "read") else open(file_obj, "rb").read()
267
+ except Exception:
268
+ with open(file_obj, "rb") as f:
269
+ content = f.read()
270
+ # Decode as text (may need to try 'latin-1' for Oracle HTML)
271
+ try:
272
+ text = content.decode()
273
+ except Exception:
274
+ text = content.decode("latin-1")
275
+ return clean_awr_content(text)
276
+
277
+ awr_file.upload(awr_file_to_text, inputs=awr_file, outputs=awr_text)
278
+
279
+ # Everything below is the same as before
280
  threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars)")
281
  performance_test_mode = gr.Checkbox(label="Performance Test Mode")
282
  exadata_model = gr.Dropdown(choices=list(exadata_specs.keys()), label="Exadata Model", visible=False)
 
293
  rating = gr.Textbox(label="Rater", lines=3)
294
  retry_status = gr.Textbox(label="Retry Status")
295
 
296
+ analyze_btn.click(
297
+ process_awr,
298
+ inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size, llm_selector],
299
+ outputs=[output, health, rating, retry_status]
300
+ )
301
 
302
  with gr.Tab("Compare AWRs from OCI"):
303
  upload_file = gr.File(label="Upload AWR Report", file_types=[".html", ".txt"])
 
310
 
311
  llm_compare = gr.Dropdown(choices=list(supported_llms.keys()), value="gpt-4-turbo", label="LLM Model for Comparison")
312
  compare_output = gr.Textbox(label="Comparison Output", lines=20)
313
+ gr.Button("Compare Selected AWRs").click(
314
+ fn=compare_awrs,
315
+ inputs=[file_multiselect, llm_compare],
316
+ outputs=compare_output
317
+ )
318
 
319
  if __name__ == "__main__":
320
  demo.launch(debug=True)
321
 
322
+
323