kgauvin603 committed
Commit ea53a0c · verified · 1 Parent(s): 901e23f

Update app.py

Files changed (1)
  1. app.py +129 -24
app.py CHANGED
@@ -1,3 +1,4 @@
+
 # === Imports ===
 import os
 import re
@@ -17,16 +18,7 @@ client = openai.OpenAI(api_key=openai_api_key)
 openrouter_key = os.environ.get("OPENROUTER")
 openrouter = openai.OpenAI(api_key=openrouter_key, base_url="https://openrouter.ai/api/v1")
 
-# --- OCI Object Storage Config ---
-#oci_config = {
-#    "user": os.environ.get("OCI_USER"),
-#    "tenancy": os.environ.get("OCI_TENANCY"),
-#    "fingerprint": os.environ.get("OCI_FINGERPRINT"),
-#    "region": os.environ.get("OCI_REGION"),
-#    "key_content": os.environ.get("OCI_PRIVATE_KEY"),
-#}
-
-# === OCI Object Storage Setup ===
+# --- OCI Object Storage Setup ---
 oci_config = {
     "user": os.environ.get("OCI_USER"),
     "tenancy": os.environ.get("OCI_TENANCY"),
@@ -43,11 +35,6 @@ try:
 except Exception as e:
     print("Failed to initialize OCI Object Storage client:", e)
 
-
-namespace = os.environ.get("OCI_NAMESPACE")
-bucket_name = os.environ.get("OCI_BUCKET_NAME")
-object_storage = oci.object_storage.ObjectStorageClient(oci_config)
-
 # --- Exadata Specs ---
 exadata_specs = {
     "X7": {"Quarter Rack": {"max_iops": 350000, "max_throughput": 25}, "Half Rack": {"max_iops": 700000, "max_throughput": 50}, "Full Rack": {"max_iops": 1400000, "max_throughput": 100}},
@@ -75,7 +62,7 @@ def upload_awr_file(file_obj):
     filename = os.path.basename(file_obj.name)
     content = file_obj.read()
     object_storage.put_object(namespace, bucket_name, filename, content)
-    return f"\u2705 Uploaded {filename}"
+    return f"✅ Uploaded {filename}"
 
 def list_awr_files():
     try:
@@ -99,27 +86,145 @@ def compare_awrs(file_list, llm_model):
     for fname in file_list:
         content = get_awr_file_text(fname)
         combined_text += f"\n=== AWR: {fname} ===\n{content[:3000]}...\n"
-    prompt = f"""
-    You are a senior Oracle performance engineer. You will compare multiple AWR reports and highlight:
+    prompt = f"""You are a senior Oracle performance engineer. You will compare multiple AWR reports and highlight:
 - Key differences in workload or system behavior
 - Major trends or anomalies
 - Which report shows better performance and why
 - Exadata-specific metrics like Smart Scan, Flash I/O
 - Suggestions to unify or improve system behavior
+
 AWR Reports:
 {combined_text}
 """
     response = client.chat.completions.create(
         model=llm_model,
-        messages=[{"role": "system", "content": "You are a comparative AWR analysis expert."},
-                  {"role": "user", "content": prompt}]
+        messages=[
+            {"role": "system", "content": "You are a comparative AWR analysis expert."},
+            {"role": "user", "content": prompt}
+        ]
     )
     return response.choices[0].message.content.strip()
 
+# === AGENTS ===
+class CriticalAnalyzerAgent:
+    def analyze(self, content, performance_test_mode, exadata_model, rack_size, llm_model):
+        cleaned_content = clean_awr_content(content)
+        if len(cleaned_content) > 128000:
+            cleaned_content = cleaned_content[:128000] + "\n\n[TRUNCATED]..."
+
+        prompt = f"""You are an expert Oracle DBA performance analyst specialized in AWR + Exadata.
+
+Please perform advanced analysis on the following report:
+
+======== AWR REPORT START ========
+{cleaned_content}
+======== AWR REPORT END ========
+
+Required Output:
+- Performance Summary (with metric values)
+- Detailed Bottlenecks + Risks (quantified)
+- Forecast + Predictions
+- Monitoring Recommendations
+- Exadata Statistics (IO, Flash Cache, Smart Scan)
+- Recommended Next Steps to Bridge Gaps
+"""
+        if performance_test_mode and exadata_model and rack_size:
+            specs = exadata_specs.get(exadata_model, {}).get(rack_size, {})
+            if specs:
+                prompt += f"""
+
+This was a PERFORMANCE TEST on Oracle Exadata {exadata_model} {rack_size}.
+Theoretical Max:
+- IOPS: {specs['max_iops']}
+- Throughput: {specs['max_throughput']} GB/s
+
+Compare observed vs theoretical. Recommend actions to close the performance gap.
+"""
+        response = client.chat.completions.create(
+            model=llm_model,
+            messages=[
+                {"role": "system", "content": "You are an expert Oracle DBA."},
+                {"role": "user", "content": prompt}
+            ]
+        )
+        return response.choices[0].message.content.strip()
+
+class HealthAgent:
+    def check_health(self, content, llm_model):
+        cleaned_content = clean_awr_content(content)
+        if len(cleaned_content) > 128000:
+            cleaned_content = cleaned_content[:128000] + "\n\n[TRUNCATED]..."
+
+        prompt = f"""You are the Oracle AWR Health Analysis Agent.
+
+Your primary responsibility is to detect and report ANY and ALL database health risks, alerts, warnings, or failures in the AWR report.
+
+You MUST:
+- Identify all issues marked as CRITICAL, WARNING, ALERT, FAILED, OFFLINE, CONFINED, DROPPED, or ERROR.
+- Never omit or generalize. If something appears important, call it out.
+- Classify each issue into: 🚨 CRITICAL / ⚠️ WARNING / ✅ INFO
+- For CRITICAL and WARNING, provide suggested actions or considerations.
+- Always confirm at the end if no CRITICAL or WARNING issues were found.
+
+Special Attention Areas:
+- Flash Cache or Flash Disk Failures
+- I/O Subsystem stalls or errors
+- ASM/Grid Disk issues
+- Smart Scan failures
+- Redo Log issues
+- RAC Interconnect issues
+
+AWR CONTENT:
+{cleaned_content}
+"""
+        response = client.chat.completions.create(
+            model=llm_model,
+            messages=[
+                {"role": "system", "content": "You are the strict Oracle AWR Health Analysis Agent."},
+                {"role": "user", "content": prompt}
+            ]
+        )
+        return response.choices[0].message.content.strip()
+
+class RaterAgent:
+    def rate(self, content):
+        prompt = f"Rate the following analysis from 1-5 stars and explain:\n\n{content}"
+        response = openrouter.chat.completions.create(
+            model="mistralai/Mixtral-8x7B-Instruct",
+            messages=[{"role": "user", "content": prompt}]
+        )
+        return response.choices[0].message.content.strip()
+
+# === MAIN AWR PROCESS ===
+def process_awr(awr_text, threshold, performance_test_mode, exadata_model, rack_size, llm_model):
+    analyzer = CriticalAnalyzerAgent()
+    health = HealthAgent()
+    rater = RaterAgent()
+
+    if not awr_text.strip():
+        return "No AWR text provided", "", "", ""
+
+    analysis = analyzer.analyze(awr_text, performance_test_mode, exadata_model, rack_size, llm_model)
+    health_status = health.check_health(awr_text, llm_model)
+    rating_text = rater.rate(analysis)
+
+    stars = 0
+    match = re.search(r"(\d+)", rating_text)
+    if match:
+        stars = int(match.group(1))
+
+    retry_status = "✅ Accepted"
+    if stars < threshold:
+        analysis = analyzer.analyze(awr_text, performance_test_mode, exadata_model, rack_size, llm_model)
+        rating_text = rater.rate(analysis)
+        retry_status = "✅ Retry Occurred"
+
+    return analysis, health_status, rating_text, retry_status
+
 # === Gradio UI ===
 with gr.Blocks() as demo:
     with gr.Tab("Manual AWR Analysis"):
-        gr.Markdown("# \U0001f9e0 Multi-Agent Oracle AWR Analyzer (Production Edition)")
+        gr.Markdown("# 🧠 Multi-Agent Oracle AWR Analyzer (Production Edition)")
         awr_text = gr.Textbox(label="Paste AWR Report", lines=30)
         threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars)")
         performance_test_mode = gr.Checkbox(label="Performance Test Mode")
@@ -137,7 +242,6 @@ with gr.Blocks() as demo:
         rating = gr.Textbox(label="Rater", lines=3)
         retry_status = gr.Textbox(label="Retry Status")
 
-        from your_existing_code import process_awr  # Replace with actual import or include function here
        analyze_btn.click(process_awr,
                          inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size, llm_selector],
                          outputs=[output, health, rating, retry_status])
@@ -147,7 +251,7 @@ with gr.Blocks() as demo:
         upload_status = gr.Textbox(label="Upload Status")
         upload_file.upload(fn=upload_awr_file, inputs=upload_file, outputs=upload_status)
 
-        refresh_button = gr.Button("\U0001f503 Refresh File List")
+        refresh_button = gr.Button("🔃 Refresh File List")
         file_multiselect = gr.Dropdown(choices=[], label="Select AWR Files", multiselect=True)
         refresh_button.click(fn=lambda: gr.update(choices=list_awr_files()), outputs=file_multiselect)
 
@@ -157,4 +261,5 @@ with gr.Blocks() as demo:
 
 if __name__ == "__main__":
     demo.launch(debug=True)
-#
+
+
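The added process_awr pipeline chains the three new agents (CriticalAnalyzerAgent → HealthAgent → RaterAgent) and retries the analysis once when the parsed star rating falls below the UI threshold. A minimal sketch of exercising that flow outside the Gradio UI follows; it assumes app.py sits in the same directory, that the OpenAI/OpenRouter and OCI environment variables it reads at import time are set, that clean_awr_content is defined elsewhere in app.py, and that the model name and sample AWR excerpt below are placeholders, not values from this commit.

# Hypothetical smoke test for the new multi-agent flow in app.py (illustrative only).
# Assumes app.py is importable and its environment variables are configured;
# importing does not launch the Gradio demo because demo.launch() is guarded
# by the if __name__ == "__main__" block.
from app import process_awr

if __name__ == "__main__":
    # A trimmed AWR excerpt stands in for a full report here.
    sample_awr = "WORKLOAD REPOSITORY report for DB: PROD1 ... Top 10 Foreground Events ..."

    analysis, health_status, rating_text, retry_status = process_awr(
        awr_text=sample_awr,
        threshold=3,                 # matches the Gradio slider default
        performance_test_mode=True,  # triggers the Exadata theoretical-max comparison
        exadata_model="X7",          # key from exadata_specs
        rack_size="Quarter Rack",    # key from exadata_specs[exadata_model]
        llm_model="gpt-4o",          # placeholder; use whichever model llm_selector offers
    )

    print("Retry status:", retry_status)
    print("Rating:", rating_text)
    print(analysis[:500])
    print(health_status[:500])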