kgauvin603 committed on
Commit
4723629
·
verified ·
1 Parent(s): 746d46c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +90 -80
app.py CHANGED
@@ -53,39 +53,37 @@ def clean_awr_content(content):
53
  cleaned = "\n".join([line.strip() for line in text.splitlines() if line.strip()])
54
  return cleaned
55
 
56
- def truncate_awr_content(content, max_chars=90000):
57
- if len(content) > max_chars:
58
- return content[:max_chars] + "\n\n[TRUNCATED]"
59
- return content
60
-
61
  # --- AWR Analyzer ---
62
- class AWRAnalyzer:
63
- def __init__(self, model="gpt-4-turbo"):
64
- self.model = model
 
 
65
 
66
- def analyze(self, content, performance_test_mode, exadata_model, rack_size):
67
- cleaned_content = clean_awr_content(content)
68
- final_content = truncate_awr_content(cleaned_content)
 
69
 
70
- prompt = f"""
71
- You are an expert Oracle Database performance analyst with deep knowledge of AWR reports, Oracle RAC internals, and Exadata architecture (Smart Scan, Flash Cache, IORM, RDMA, Storage Indexes).
72
  ======== AWR REPORT START ========
73
- {final_content}
74
  ======== AWR REPORT END ========
75
 
76
- Please provide:
77
- - Performance Summary
78
- - Detailed Analysis of Bottlenecks and/or Degradation Risks
79
- - Performance Forecast and Predictions
80
- - Specific Recommendations for Monitoring
81
- - Exadata Statistics Performance Summary
82
- - Recommended Next Steps to Bridge Performance Gap
 
83
  """
84
 
85
- if performance_test_mode and exadata_model and rack_size:
86
- specs = exadata_specs.get(exadata_model, {}).get(rack_size, {})
87
- if specs:
88
- prompt += f"""
 
89
  This was a PERFORMANCE TEST on Oracle Exadata {exadata_model} {rack_size}.
90
  Theoretical Max:
91
  - Max IOPS: {specs['max_iops']}
@@ -93,76 +91,88 @@ Theoretical Max:
93
  Show actual vs theoretical and generate Recommended Next Steps to Bridge Performance Gap.
94
  """
95
 
96
- response = client.chat.completions.create(
97
- model=self.model,
98
- messages=[
99
- {"role": "system", "content": "You are an expert Oracle Database performance analyst."},
100
- {"role": "user", "content": prompt}
101
- ]
102
- )
103
- return response.choices[0].message.content.strip()
104
-
105
- class Rater:
106
- def __init__(self, model="mistral/ministral-8b"):
107
- self.model = model
108
-
109
- def rate(self, question, final_answer):
110
- prompt = f"Rate this answer 1-5 stars with explanation:\n\n{final_answer}"
111
- response = openai_rater.chat.completions.create(
112
- model=self.model,
113
- messages=[{"role": "user", "content": prompt}]
114
- )
115
- return response.choices[0].message.content.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
- # --- Gradio UI ---
118
- analyzer_agent = AWRAnalyzer()
119
- rater_agent = Rater()
120
 
 
121
  with gr.Blocks() as demo:
122
 - gr.Markdown("## 📊 Oracle AWR Analyzer (with Truncation + Multi-Agent View)")
123
 
124
- awr_text = gr.Textbox(label="Paste AWR Report (HTML or TXT)", lines=30)
125
- threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars)")
126
  performance_test_mode = gr.Checkbox(label="Performance Test Mode")
127
  exadata_model = gr.Dropdown(choices=["X7", "X8", "X9", "X10", "X11M"], label="Exadata Model", visible=False)
128
  rack_size = gr.Dropdown(choices=["Quarter Rack", "Half Rack", "Full Rack"], label="Rack Size", visible=False)
129
 
 
 
 
 
 
 
 
130
  def toggle_visibility(mode):
131
  return gr.update(visible=mode), gr.update(visible=mode)
132
 
133
  performance_test_mode.change(toggle_visibility, inputs=performance_test_mode, outputs=[exadata_model, rack_size])
134
 
135
  analyze_btn = gr.Button("Analyze AWR")
136
- output = gr.Textbox(label="AWR Analysis Result", lines=15)
137
  rating = gr.Textbox(label="Rater Rating + Explanation", lines=4)
138
  retry_status = gr.Textbox(label="Retry Status")
139
 
140
- def process(awr_text, threshold, performance_test_mode, exadata_model, rack_size):
141
- if not awr_text.strip():
142
- return "No AWR report provided.", "", ""
143
-
144
- answer = analyzer_agent.analyze(awr_text, performance_test_mode, exadata_model, rack_size)
145
- rating_text = rater_agent.rate("AWR Analysis", answer)
146
-
147
- stars = 0
148
- match = re.search(r"(\d+)", rating_text)
149
- if match:
150
- stars = int(match.group(1))
151
-
152
- if stars < threshold:
153
- retry_answer = analyzer_agent.analyze(awr_text, performance_test_mode, exadata_model, rack_size)
154
- retry_rating = rater_agent.rate("AWR Analysis Retry", retry_answer)
155
-
156
- with open(log_filename, "a", encoding="utf-8") as log_file:
157
- log_file.write(f"\n---\n{datetime.now()} RETRY\nOriginal: {answer}\nRating: {rating_text}\nRetry: {retry_answer}\nRetry Rating: {retry_rating}\n")
158
-
159
 - return retry_answer, retry_rating, "✅ Retry Occurred"
160
- else:
161
- with open(log_filename, "a", encoding="utf-8") as log_file:
162
- log_file.write(f"\n---\n{datetime.now()} SUCCESS\nAnswer: {answer}\nRating: {rating_text}\n")
163
-
164
 - return answer, rating_text, "✅ Accepted on first try"
165
-
166
- analyze_btn.click(process, inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size], outputs=[output, rating, retry_status])
167
 
168
- demo.launch(debug=True)
 
53
  cleaned = "\n".join([line.strip() for line in text.splitlines() if line.strip()])
54
  return cleaned
55
 
 
 
 
 
 
56
  # --- AWR Analyzer ---
57
+ def analyze_awr(content, performance_test_mode, exadata_model, rack_size, selected_model_key):
58
+ cleaned_content = clean_awr_content(content)
59
+ max_chars = 128000
60
+ if len(cleaned_content) > max_chars:
61
+ cleaned_content = cleaned_content[:max_chars] + "\n\n[TRUNCATED]..."
62
 
63
+ # Build prompt
64
+ prompt = f"""
65
+ You are an expert Oracle Database performance analyst with deep knowledge of AWR reports, Oracle RAC internals, and Exadata architecture (Smart Scan, Flash Cache, IORM, RDMA, Storage Indexes).
66
+ You must produce highly detailed diagnostic insights based on the AWR report provided below. Use numbers and thresholds whenever possible and explain why each observation matters.
67
 
 
 
68
  ======== AWR REPORT START ========
69
+ {cleaned_content}
70
  ======== AWR REPORT END ========
71
 
72
+ Please provide the following sections:
73
+
74
+ - **Performance Summary**
75
+ - **Detailed Analysis of Bottlenecks and/or Degradation Risks**
76
+ - **Performance Forecast and Predictions**
77
+ - **Specific Recommendations for Monitoring**
78
+ - **Exadata Statistics Performance Summary**
79
+ - **Recommended Next Steps to Bridge Performance Gap**
80
  """
81
 
82
+ # Add Exadata comparison if performance test mode
83
+ if performance_test_mode and exadata_model and rack_size:
84
+ specs = exadata_specs.get(exadata_model, {}).get(rack_size, {})
85
+ if specs:
86
+ prompt += f"""
87
  This was a PERFORMANCE TEST on Oracle Exadata {exadata_model} {rack_size}.
88
  Theoretical Max:
89
  - Max IOPS: {specs['max_iops']}
 
91
  Show actual vs theoretical and generate Recommended Next Steps to Bridge Performance Gap.
92
  """
93
 
94
+ # Select model
95
+ model_choices = {
96
+ "GPT-4o (Balanced - Recommended for most tasks)": "gpt-4o",
97
+ "Claude 3 Opus (Best for Deep Diagnostic Analysis)": "claude-3-opus-20240229",
98
+ "GPT-4-turbo (Budget-Friendly, Good Quality)": "gpt-4-turbo",
99
+ "Claude 3 Sonnet (Good Balance, Lower Cost)": "claude-3-sonnet-20240229"
100
+ }
101
+
102
+ MODEL = model_choices.get(selected_model_key, "gpt-4o") # Fallback to gpt-4o if invalid
103
+
104
+ response = client.chat.completions.create(
105
+ model=MODEL,
106
+ messages=[
107
+ {"role": "system", "content": "You are an expert Oracle Database performance analyst."},
108
+ {"role": "user", "content": prompt}
109
+ ]
110
+ )
111
+ return response.choices[0].message.content.strip()
112
+
113
+ # --- Rater ---
114
+ def rate_answer_rater(question, final_answer):
115
+ prompt = f"Rate this answer 1-5 stars with explanation:\n\n{final_answer}"
116
+ response = openai_rater.chat.completions.create(
117
+ model="mistral/ministral-8b",
118
+ messages=[{"role": "user", "content": prompt}]
119
+ )
120
+ return response.choices[0].message.content.strip()
121
+
122
+ # --- Main Logic ---
123
+ def process_awr(awr_text, correctness_threshold, performance_test_mode, exadata_model, rack_size, selected_model_key):
124
+ if not awr_text.strip():
125
+ return "No AWR report provided.", "", ""
126
+
127
+ answer = analyze_awr(awr_text, performance_test_mode, exadata_model, rack_size, selected_model_key)
128
+ rating_text = rate_answer_rater("AWR Analysis", answer)
129
+
130
+ stars = 0
131
+ match = re.search(r"(\d+)", rating_text)
132
+ if match:
133
+ stars = int(match.group(1))
134
+
135
+ if stars < correctness_threshold:
136
+ answer_retry = analyze_awr(awr_text, performance_test_mode, exadata_model, rack_size, selected_model_key)
137
+ rating_text_retry = rate_answer_rater("AWR Analysis (Retry)", answer_retry)
138
+
139
+ with open(log_filename, "a", encoding="utf-8") as log_file:
140
+ log_file.write(f"\n---\n{datetime.now()} RETRY\nOriginal: {answer}\nRating: {rating_text}\nRetry: {answer_retry}\nRetry Rating: {rating_text_retry}\n")
141
+
142
 + return answer_retry, rating_text_retry, "✅ Retry Occurred (rating below threshold)"
143
+ else:
144
+ with open(log_filename, "a", encoding="utf-8") as log_file:
145
+ log_file.write(f"\n---\n{datetime.now()} SUCCESS\nAnswer: {answer}\nRating: {rating_text}\n")
146
 
147
 + return answer, rating_text, "✅ Accepted on first try"
 
 
148
 
149
+ # --- Gradio UI ---
150
  with gr.Blocks() as demo:
151
 + gr.Markdown("## 📊 Oracle AWR Analyzer (AI + Rating + Retry + Exadata Gap Analysis + Model Selection)")
152
 
153
+ awr_text = gr.Textbox(label="Paste AWR Report (HTML or TXT)", lines=30, placeholder="Paste full AWR here...")
154
+ threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars for Retry)")
155
  performance_test_mode = gr.Checkbox(label="Performance Test Mode")
156
  exadata_model = gr.Dropdown(choices=["X7", "X8", "X9", "X10", "X11M"], label="Exadata Model", visible=False)
157
  rack_size = gr.Dropdown(choices=["Quarter Rack", "Half Rack", "Full Rack"], label="Rack Size", visible=False)
158
 
159
+ model_selector = gr.Dropdown(
160
+ choices=["GPT-4o (Balanced - Recommended for most tasks)", "Claude 3 Opus (Best for Deep Diagnostic Analysis)",
161
+ "GPT-4-turbo (Budget-Friendly, Good Quality)", "Claude 3 Sonnet (Good Balance, Lower Cost)"],
162
+ label="Choose AI Model for Analysis",
163
+ value="GPT-4o (Balanced - Recommended for most tasks)"
164
+ )
165
+
166
  def toggle_visibility(mode):
167
  return gr.update(visible=mode), gr.update(visible=mode)
168
 
169
  performance_test_mode.change(toggle_visibility, inputs=performance_test_mode, outputs=[exadata_model, rack_size])
170
 
171
  analyze_btn = gr.Button("Analyze AWR")
172
+ output = gr.Textbox(label="AWR Analysis Result", lines=20)
173
  rating = gr.Textbox(label="Rater Rating + Explanation", lines=4)
174
  retry_status = gr.Textbox(label="Retry Status")
175
 
176
+ analyze_btn.click(process_awr, inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size, model_selector], outputs=[output, rating, retry_status])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
177
 
178
+ demo.launch(debug=True)