kgauvin603 committed on
Commit
746d46c
·
verified ·
1 Parent(s): f0845ba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -78
app.py CHANGED
@@ -8,10 +8,21 @@ from bs4 import BeautifulSoup
8
 
9
  # --- API Keys ---
10
  openai_api_key = os.environ.get("OPENAI_API_KEY")
 
 
11
  if not openai_api_key:
12
  raise ValueError("OPENAI_API_KEY environment variable is not set.")
 
 
13
 
14
  client = openai.OpenAI(api_key=openai_api_key)
 
 
 
 
 
 
 
15
 
16
  # --- Exadata Specs ---
17
  exadata_specs = {
@@ -42,98 +53,76 @@ def clean_awr_content(content):
42
  cleaned = "\n".join([line.strip() for line in text.splitlines() if line.strip()])
43
  return cleaned
44
 
 
 
 
 
45
 
46
- # --- Agent Classes ---
47
-
48
- SYSTEM_PROMPT = """
49
- You are an expert Oracle Exadata and RAC performance consultant.
50
- Prioritize CRITICAL SYSTEM HEALTH issues first. Provide DBA-level observations and recommendations.
51
- """
52
-
53
- class HealthRiskAgent:
54
- def __init__(self, model="gpt-4o"):
55
  self.model = model
56
 
57
- def analyze(self, data):
58
- prompt = f"""
59
- ======== BEGIN DATA ========
60
- {data}
61
- ======== END DATA ========
62
-
63
- Identify CRITICAL SYSTEM HEALTH issues (Flash Cache degraded, Confined Disks, Redo Stress, RAC GC waits, IO Errors).
64
- If issues found, output "⚠️ CRITICAL ALERTS DETECTED" + Explanation + DBA Actions.
65
- If clean, output "✅ None Detected".
66
- """
67
- response = client.chat.completions.create(
68
- model=self.model,
69
- messages=[
70
- {"role": "system", "content": SYSTEM_PROMPT},
71
- {"role": "user", "content": prompt}
72
- ]
73
- )
74
- return response.choices[0].message.content.strip()
75
-
76
 
77
- class PerformanceAnalyzerAgent:
78
- def __init__(self, model="gpt-4o"):
79
- self.model = model
80
-
81
- def analyze(self, data, exadata_model=None, rack_size=None):
82
  prompt = f"""
83
- ======== BEGIN DATA ========
84
- {data}
85
- ======== END DATA ========
 
86
 
87
  Please provide:
88
  - Performance Summary
89
- - Detailed Bottleneck Analysis
90
- - Forecast / Predictions
91
- - Monitoring Suggestions
92
- - Exadata Stats Summary
93
- - Recommended Next Steps
94
-
95
- If this is a performance test:
96
- - Compare observed vs theoretical for Exadata
97
- - Recommend gap-closing actions.
98
  """
99
- if exadata_model and rack_size:
 
100
  specs = exadata_specs.get(exadata_model, {}).get(rack_size, {})
101
  if specs:
102
  prompt += f"""
103
- Theoretical Max for Oracle Exadata {exadata_model} {rack_size}:
 
104
  - Max IOPS: {specs['max_iops']}
105
  - Max Throughput: {specs['max_throughput']} GB/s
 
106
  """
 
107
  response = client.chat.completions.create(
108
  model=self.model,
109
  messages=[
110
- {"role": "system", "content": SYSTEM_PROMPT},
111
  {"role": "user", "content": prompt}
112
  ]
113
  )
114
  return response.choices[0].message.content.strip()
115
 
 
 
 
116
 
117
- class AWRAgentCoordinator:
118
- def __init__(self):
119
- self.health_agent = HealthRiskAgent()
120
- self.performance_agent = PerformanceAnalyzerAgent()
121
-
122
- def analyze(self, awr_data, exadata_model=None, rack_size=None):
123
- # Run both agents
124
- health_result = self.health_agent.analyze(awr_data)
125
- perf_result = self.performance_agent.analyze(awr_data, exadata_model, rack_size)
126
-
127
- return health_result, perf_result
128
-
129
 
130
  # --- Gradio UI ---
131
- agent = AWRAgentCoordinator()
 
132
 
133
  with gr.Blocks() as demo:
134
- gr.Markdown("# 📊 Exadata + RAC AWR Analyzer (Multi-Agent View)")
135
 
136
- awr_text = gr.Textbox(label="Paste AWR Report (HTML or TXT)", lines=30, placeholder="Paste AWR report here...")
 
137
  performance_test_mode = gr.Checkbox(label="Performance Test Mode")
138
  exadata_model = gr.Dropdown(choices=["X7", "X8", "X9", "X10", "X11M"], label="Exadata Model", visible=False)
139
  rack_size = gr.Dropdown(choices=["Quarter Rack", "Half Rack", "Full Rack"], label="Rack Size", visible=False)
@@ -143,26 +132,37 @@ with gr.Blocks() as demo:
143
 
144
  performance_test_mode.change(toggle_visibility, inputs=performance_test_mode, outputs=[exadata_model, rack_size])
145
 
146
- analyze_btn = gr.Button("Analyze AWR Report")
147
-
148
- with gr.Row():
149
- health_output = gr.Textbox(label="Health Risk Agent (Critical Alerts + Actions)", lines=20)
150
- performance_output = gr.Textbox(label="Performance Analyzer Agent (Full Analysis)", lines=20)
151
 
152
- def run_analysis(awr_text, performance_test_mode, exadata_model, rack_size):
153
  if not awr_text.strip():
154
- return " Please paste the AWR report first.", ""
 
 
 
 
 
 
 
 
 
 
 
 
155
 
156
- cleaned = clean_awr_content(awr_text)
 
157
 
158
- if performance_test_mode:
159
- health, perf = agent.analyze(cleaned, exadata_model, rack_size)
160
  else:
161
- health, perf = agent.analyze(cleaned)
 
162
 
163
- return health, perf
164
 
165
- analyze_btn.click(run_analysis, inputs=[awr_text, performance_test_mode, exadata_model, rack_size],
166
- outputs=[health_output, performance_output])
167
 
168
- demo.launch(debug=True)
 
8
 
9
# --- API Keys ---
# Both keys are required: OPENAI_API_KEY for the analyzer, OPENROUTER for the rater.
openai_api_key = os.environ.get("OPENAI_API_KEY")
openrouter_key = os.environ.get("OPENROUTER")

if not openai_api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set.")
if not openrouter_key:
    raise ValueError("OPENROUTER environment variable is not set.")

# Two clients: the default OpenAI endpoint for analysis, and OpenRouter
# (OpenAI-compatible API) for the independent rating model.
client = openai.OpenAI(api_key=openai_api_key)
openai_rater = openai.OpenAI(api_key=openrouter_key, base_url="https://openrouter.ai/api/v1")

# --- Logger ---
log_filename = "rating_log.txt"
# EAFP: exclusive-create ("x") writes the header exactly once and avoids the
# check-then-create race of `os.path.exists` + open("w") when two workers start.
try:
    with open(log_filename, "x", encoding="utf-8") as log_file:
        log_file.write("=== Rating Log Initialized ===\n")
except FileExistsError:
    pass  # log already initialized by a previous run
26
 
27
  # --- Exadata Specs ---
28
  exadata_specs = {
 
53
  cleaned = "\n".join([line.strip() for line in text.splitlines() if line.strip()])
54
  return cleaned
55
 
56
def truncate_awr_content(content, max_chars=90000):
    """Return *content* unchanged when it fits, else cut it to *max_chars*
    characters and append an explicit truncation marker (keeps the prompt
    within the model's context budget)."""
    if len(content) <= max_chars:
        return content
    return content[:max_chars] + "\n\n[TRUNCATED]"
60
 
61
# --- AWR Analyzer ---
class AWRAnalyzer:
    """Agent that cleans, truncates, and submits a pasted AWR report to the
    OpenAI chat API and returns the model's DBA-level analysis."""

    def __init__(self, model="gpt-4-turbo"):
        # Chat model used for the analysis request.
        self.model = model

    def analyze(self, content, performance_test_mode, exadata_model, rack_size):
        """Build the analysis prompt (optionally with Exadata theoretical-max
        specs when performance-test mode is on) and return the model's reply."""
        report_text = truncate_awr_content(clean_awr_content(content))

        prompt = f"""
You are an expert Oracle Database performance analyst with deep knowledge of AWR reports, Oracle RAC internals, and Exadata architecture (Smart Scan, Flash Cache, IORM, RDMA, Storage Indexes).
======== AWR REPORT START ========
{report_text}
======== AWR REPORT END ========

Please provide:
- Performance Summary
- Detailed Analysis of Bottlenecks and/or Degradation Risks
- Performance Forecast and Predictions
- Specific Recommendations for Monitoring
- Exadata Statistics Performance Summary
- Recommended Next Steps to Bridge Performance Gap
"""

        # Only add the theoretical-max comparison when the user ticked
        # performance-test mode AND picked both a model and a rack size.
        if performance_test_mode and exadata_model and rack_size:
            specs = exadata_specs.get(exadata_model, {}).get(rack_size, {})
            if specs:
                prompt += f"""
This was a PERFORMANCE TEST on Oracle Exadata {exadata_model} {rack_size}.
Theoretical Max:
- Max IOPS: {specs['max_iops']}
- Max Throughput: {specs['max_throughput']} GB/s
Show actual vs theoretical and generate Recommended Next Steps to Bridge Performance Gap.
"""

        reply = client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": "You are an expert Oracle Database performance analyst."},
                {"role": "user", "content": prompt},
            ],
        )
        return reply.choices[0].message.content.strip()
104
 
105
class Rater:
    """Agent that asks a second (OpenRouter-hosted) model to rate an answer
    from 1 to 5 stars with a short explanation."""

    def __init__(self, model="mistral/ministral-8b"):
        # Rating model routed through the OpenRouter endpoint.
        self.model = model

    def rate(self, question, final_answer):
        """Return the rater model's star rating + explanation for *final_answer*.

        NOTE(review): *question* is accepted but never included in the prompt —
        confirm whether the rater should see it.
        """
        rating_prompt = f"Rate this answer 1-5 stars with explanation:\n\n{final_answer}"
        reply = openai_rater.chat.completions.create(
            model=self.model,
            messages=[{"role": "user", "content": rating_prompt}],
        )
        return reply.choices[0].message.content.strip()
 
 
 
 
 
116
 
117
  # --- Gradio UI ---
118
+ analyzer_agent = AWRAnalyzer()
119
+ rater_agent = Rater()
120
 
121
  with gr.Blocks() as demo:
122
+ gr.Markdown("## 📊 Oracle AWR Analyzer (with Truncation + Multi-Agent View)")
123
 
124
+ awr_text = gr.Textbox(label="Paste AWR Report (HTML or TXT)", lines=30)
125
+ threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars)")
126
  performance_test_mode = gr.Checkbox(label="Performance Test Mode")
127
  exadata_model = gr.Dropdown(choices=["X7", "X8", "X9", "X10", "X11M"], label="Exadata Model", visible=False)
128
  rack_size = gr.Dropdown(choices=["Quarter Rack", "Half Rack", "Full Rack"], label="Rack Size", visible=False)
 
132
 
133
  performance_test_mode.change(toggle_visibility, inputs=performance_test_mode, outputs=[exadata_model, rack_size])
134
 
135
+ analyze_btn = gr.Button("Analyze AWR")
136
+ output = gr.Textbox(label="AWR Analysis Result", lines=15)
137
+ rating = gr.Textbox(label="Rater Rating + Explanation", lines=4)
138
+ retry_status = gr.Textbox(label="Retry Status")
 
139
 
140
+ def process(awr_text, threshold, performance_test_mode, exadata_model, rack_size):
141
  if not awr_text.strip():
142
+ return "No AWR report provided.", "", ""
143
+
144
+ answer = analyzer_agent.analyze(awr_text, performance_test_mode, exadata_model, rack_size)
145
+ rating_text = rater_agent.rate("AWR Analysis", answer)
146
+
147
+ stars = 0
148
+ match = re.search(r"(\d+)", rating_text)
149
+ if match:
150
+ stars = int(match.group(1))
151
+
152
+ if stars < threshold:
153
+ retry_answer = analyzer_agent.analyze(awr_text, performance_test_mode, exadata_model, rack_size)
154
+ retry_rating = rater_agent.rate("AWR Analysis Retry", retry_answer)
155
 
156
+ with open(log_filename, "a", encoding="utf-8") as log_file:
157
+ log_file.write(f"\n---\n{datetime.now()} RETRY\nOriginal: {answer}\nRating: {rating_text}\nRetry: {retry_answer}\nRetry Rating: {retry_rating}\n")
158
 
159
+ return retry_answer, retry_rating, "✅ Retry Occurred"
 
160
  else:
161
+ with open(log_filename, "a", encoding="utf-8") as log_file:
162
+ log_file.write(f"\n---\n{datetime.now()} SUCCESS\nAnswer: {answer}\nRating: {rating_text}\n")
163
 
164
+ return answer, rating_text, "✅ Accepted on first try"
165
 
166
+ analyze_btn.click(process, inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size], outputs=[output, rating, retry_status])
 
167
 
168
+ demo.launch(debug=True)