Update app.py
app.py CHANGED
@@ -4,7 +4,6 @@ import oci
 import re
 import gradio as gr
 import openai
-import oci
 from datetime import datetime
 from bs4 import BeautifulSoup
 from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
@@ -14,7 +13,6 @@ from reportlab.lib.enums import TA_CENTER
 from reportlab.lib import colors
 import tempfile
 
-
 # --- API Keys ---
 openai_api_key = os.environ.get("OPENAI_API_KEY")
 if not openai_api_key:
@@ -26,8 +24,6 @@ openrouter_key = os.environ.get("OPENROUTER")
 openrouter = openai.OpenAI(api_key=openrouter_key, base_url="https://openrouter.ai/api/v1")
 
 # --- OCI Object Storage: Explicit Fixed Configuration ---
-
-# === OCI Object Storage Setup ===
 oci_config = {
     "user": os.environ.get("OCI_USER"),
     "tenancy": os.environ.get("OCI_TENANCY"),
@@ -35,7 +31,6 @@ oci_config = {
     "region": os.environ.get("OCI_REGION"),
     "key_content": os.environ.get("OCI_PRIVATE_KEY")
 }
-
 namespace = os.environ.get("OCI_NAMESPACE")
 bucket_name = os.environ.get("OCI_BUCKET_NAME")
 
@@ -44,23 +39,6 @@ try:
 except Exception as e:
     print("Failed to initialize OCI Object Storage client:", e)
 
-"""
-# --- OCI Object Storage Setup ---
-oci_config = {
-    "user": os.environ.get("OCI_USER"),
-    "tenancy": os.environ.get("OCI_TENANCY"),
-    "fingerprint": os.environ.get("OCI_FINGERPRINT"),
-    "region": os.environ.get("OCI_REGION"),
-    "key_content": os.environ.get("OCI_PRIVATE_KEY")
-}
-namespace = os.environ.get("OCI_NAMESPACE")
-bucket_name = os.environ.get("OCI_BUCKET_NAME")
-os.environ["OCI_BUCKET_NAME"] = "OracleTANGO"
-try:
-    object_storage = oci.object_storage.ObjectStorageClient(oci_config)
-except Exception as e:
-    print("Failed to initialize OCI Object Storage client:", e)
-"""
 # --- Exadata Specs ---
 exadata_specs = {
     "X7": {"Quarter Rack": {"max_iops": 350000, "max_throughput": 25}, "Half Rack": {"max_iops": 700000, "max_throughput": 50}, "Full Rack": {"max_iops": 1400000, "max_throughput": 100}},
@@ -85,6 +63,21 @@ def clean_awr_content(content):
         return soup.get_text()
     return content
 
+def awr_file_to_text(file_obj):
+    if not file_obj:
+        return ""
+    filename = file_obj.name if hasattr(file_obj, "name") else str(file_obj)
+    try:
+        content = file_obj.read() if hasattr(file_obj, "read") else open(file_obj, "rb").read()
+    except Exception:
+        with open(file_obj, "rb") as f:
+            content = f.read()
+    try:
+        text = content.decode()
+    except Exception:
+        text = content.decode("latin-1")
+    return clean_awr_content(text)
+
 def upload_awr_file(file_obj):
     filename = os.path.basename(file_obj)
     with open(file_obj, "rb") as f:
@@ -106,62 +99,29 @@ def get_awr_file_text(filename):
         return clean_awr_content(raw)
     except Exception as e:
         return f"Error loading file: {str(e)}"
-
+
 def generate_pdf(analysis_text, health_text, rating_text, retry_status_text):
-    # Temporary file path
     temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
     pdf_path = temp_file.name
-
-    # PDF setup
     doc = SimpleDocTemplate(pdf_path, pagesize=letter)
     styles = getSampleStyleSheet()
     elements = []
-
-    # Header style
-    header_style = ParagraphStyle(
-        name="HeaderStyle",
-        fontSize=16,
-        alignment=TA_CENTER,
-        textColor=colors.darkblue,
-        spaceAfter=14
-    )
-
-    # Section title style
-    section_style = ParagraphStyle(
-        name="SectionHeader",
-        fontSize=14,
-        textColor=colors.darkred,
-        spaceAfter=8
-    )
-
-    # Body text style
-    body_style = ParagraphStyle(
-        name="BodyStyle",
-        fontSize=10,
-        leading=14,
-        spaceAfter=10
-    )
-
-    # Title
+    header_style = ParagraphStyle(name="HeaderStyle", fontSize=16, alignment=TA_CENTER, textColor=colors.darkblue, spaceAfter=14)
+    section_style = ParagraphStyle(name="SectionHeader", fontSize=14, textColor=colors.darkred, spaceAfter=8)
+    body_style = ParagraphStyle(name="BodyStyle", fontSize=10, leading=14, spaceAfter=10)
     elements.append(Paragraph("Oracle AWR Analyzer Report", header_style))
     elements.append(Spacer(1, 12))
-
-    # Sections
     sections = [
         ("AWR Analysis", analysis_text),
         ("Health Agent Findings", health_text),
         ("Rater Output", rating_text),
         ("Retry Status", retry_status_text)
     ]
-
     for title, content in sections:
         elements.append(Paragraph(title, section_style))
         elements.append(Paragraph(content.replace("\n", "<br/>"), body_style))
         elements.append(Spacer(1, 12))
-
-    # Build PDF
     doc.build(elements)
-
     return pdf_path
 
 def compare_awrs(file_list, llm_model):
@@ -189,13 +149,15 @@ AWR Reports:
     )
     return response.choices[0].message.content.strip()
 
+def toggle_visibility(mode):
+    return gr.update(visible=mode), gr.update(visible=mode)
+
 # === AGENTS ===
 class CriticalAnalyzerAgent:
     def analyze(self, content, performance_test_mode, exadata_model, rack_size, llm_model):
         cleaned_content = clean_awr_content(content)
         if len(cleaned_content) > 128000:
             cleaned_content = cleaned_content[:128000] + "\n\n[TRUNCATED]..."
-
         prompt = f"""You are an expert Oracle DBA performance analyst specialized in AWR + Exadata.
 Please perform advanced analysis on the following report:
 ======== AWR REPORT START ========
@@ -233,7 +195,6 @@ class HealthAgent:
         cleaned_content = clean_awr_content(content)
         if len(cleaned_content) > 128000:
             cleaned_content = cleaned_content[:128000] + "\n\n[TRUNCATED]..."
-
         prompt = f"""You are the Oracle AWR Health Analysis Agent.
 Your primary responsibility is to detect and report ANY and ALL database health risks, alerts, warnings, or failures in the AWR report.
 You MUST:
@@ -275,83 +236,55 @@ def process_awr(awr_text, threshold, performance_test_mode, exadata_model, rack_
     analyzer = CriticalAnalyzerAgent()
     health = HealthAgent()
     rater = RaterAgent()
-
     if not awr_text.strip():
         return "No AWR text provided", "", "", ""
-
     analysis = analyzer.analyze(awr_text, performance_test_mode, exadata_model, rack_size, llm_model)
     health_status = health.check_health(awr_text, llm_model)
    rating_text = rater.rate(analysis)
-
     stars = 0
     match = re.search(r"(\d+)", rating_text)
     if match:
         stars = int(match.group(1))
-
     retry_status = "✅ Accepted"
     if stars < threshold:
         analysis = analyzer.analyze(awr_text, performance_test_mode, exadata_model, rack_size, llm_model)
         rating_text = rater.rate(analysis)
         retry_status = "✅ Retry Occurred"
-
     return analysis, health_status, rating_text, retry_status
 
 # === Gradio UI ===
-with gr.
-    gr.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            process_awr,
-            inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size, llm_selector],
-            outputs=[output, health, rating, retry_status]
-        )
-
-        # PDF Export
-        pdf_button = gr.Button("📄 Generate PDF")
-        pdf_file = gr.File(label="Download PDF", type="file")
-        pdf_button.click(
-            fn=generate_pdf,
-            inputs=[output, health, rating, retry_status],
-            outputs=pdf_file
-        )
-
+with gr.Blocks() as demo:
+    with gr.Tab("Manual AWR Analysis"):
+        gr.Markdown("# Multi-Agent Oracle AWR Analyzer (Version 3.1)")
+        awr_file = gr.File(label="Upload AWR Report (.html or .txt)", file_types=[".html", ".txt"])
+        awr_text = gr.Textbox(label="AWR Report (pasted or loaded)", lines=30)
+        awr_file.upload(awr_file_to_text, inputs=awr_file, outputs=awr_text)
+        threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars)")
+        performance_test_mode = gr.Checkbox(label="Performance Test Mode")
+        exadata_model = gr.Dropdown(choices=list(exadata_specs.keys()), label="Exadata Model", visible=False)
+        rack_size = gr.Dropdown(choices=["Quarter Rack", "Half Rack", "Full Rack"], label="Rack Size", visible=False)
+        llm_selector = gr.Dropdown(choices=list(supported_llms.keys()), value="gpt-4.1", label="LLM Model")
+        performance_test_mode.change(toggle_visibility, inputs=performance_test_mode, outputs=[exadata_model, rack_size])
+        analyze_btn = gr.Button("Analyze AWR Report")
+        output = gr.Textbox(label="AWR Analysis", lines=20)
+        health = gr.Textbox(label="Health Agent Findings", lines=10)
+        rating = gr.Textbox(label="Rater", lines=3)
+        retry_status = gr.Textbox(label="Retry Status")
+        analyze_btn.click(process_awr, inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size, llm_selector], outputs=[output, health, rating, retry_status])
+        pdf_button = gr.Button("📄 Generate PDF")
+        pdf_file = gr.File(label="Download PDF", type="file")
+        pdf_button.click(fn=generate_pdf, inputs=[output, health, rating, retry_status], outputs=pdf_file)
 
     with gr.Tab("Compare AWRs from OCI"):
         upload_file = gr.File(label="Upload AWR Report", file_types=[".html", ".txt"])
         upload_status = gr.Textbox(label="Upload Status")
         upload_file.upload(fn=upload_awr_file, inputs=upload_file, outputs=upload_status)
-
         refresh_button = gr.Button("🔄 Refresh File List")
         file_multiselect = gr.Dropdown(choices=[], label="Select AWR Files", multiselect=True)
         refresh_button.click(fn=lambda: gr.update(choices=list_awr_files()), outputs=file_multiselect)
-
-        llm_compare = gr.Dropdown(choices=list(supported_llms.keys()), value="gpt-4-turbo", label="LLM Model for Comparison")
+        llm_compare = gr.Dropdown(choices=list(supported_llms.keys()), value="gpt-4.1", label="LLM Model for Comparison")
         compare_output = gr.Textbox(label="Comparison Output", lines=20)
-        gr.Button("Compare Selected AWRs").click(
-            fn=compare_awrs,
-            inputs=[file_multiselect, llm_compare],
-            outputs=compare_output
-        )
+        gr.Button("Compare Selected AWRs").click(fn=compare_awrs, inputs=[file_multiselect, llm_compare], outputs=compare_output)
 
 if __name__ == "__main__":
     demo.launch(debug=True)
-
-
-
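
A quick way to exercise the new awr_file_to_text helper outside the Space is a minimal sketch along these lines; it assumes app.py imports cleanly (its API keys and OCI settings are read from environment variables at import time), and the HTML payload below is a made-up stand-in for a real AWR report:

# Minimal sketch: drive the new upload-to-text path without the Gradio UI.
# Assumes app.py imports cleanly; the payload is a made-up stand-in for an AWR report.
from io import BytesIO

from app import awr_file_to_text

fake_upload = BytesIO(b"<html><body>WORKLOAD REPOSITORY report for DB TEST</body></html>")
print(awr_file_to_text(fake_upload))  # file-like objects and plain path strings are both accepted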
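
The analyze-rate-retry pipeline behind the "Analyze AWR Report" button can be smoke-tested the same way; the positional arguments below mirror the order wired into analyze_btn.click() and the UI defaults (threshold 3, Performance Test Mode off, gpt-4.1), and the report text is only a placeholder:

# Hypothetical end-to-end smoke test of process_awr() and generate_pdf().
# Assumes valid OpenAI/OpenRouter keys; with Performance Test Mode off, the Exadata
# model and rack size are passed as None here.
from app import process_awr, generate_pdf

awr_text = "WORKLOAD REPOSITORY report ..."  # placeholder; paste a real AWR report here
analysis, health, rating, retry = process_awr(awr_text, 3, False, None, None, "gpt-4.1")
print(rating, retry)
print("PDF written to:", generate_pdf(analysis, health, rating, retry))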