Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -53,39 +53,37 @@ def clean_awr_content(content):
|
|
53 |
cleaned = "\n".join([line.strip() for line in text.splitlines() if line.strip()])
|
54 |
return cleaned
|
55 |
|
56 |
-
def truncate_awr_content(content, max_chars=90000):
    """Cap *content* at *max_chars* characters, appending a truncation marker.

    Content that already fits within the limit is returned unchanged.
    """
    if len(content) <= max_chars:
        return content
    return f"{content[:max_chars]}\n\n[TRUNCATED]"
|
60 |
-
|
61 |
# --- AWR Analyzer ---
|
62 |
-
|
63 |
-
|
64 |
-
|
|
|
|
|
65 |
|
66 |
-
|
67 |
-
|
68 |
-
|
|
|
69 |
|
70 |
-
prompt = f"""
|
71 |
-
You are an expert Oracle Database performance analyst with deep knowledge of AWR reports, Oracle RAC internals, and Exadata architecture (Smart Scan, Flash Cache, IORM, RDMA, Storage Indexes).
|
72 |
======== AWR REPORT START ========
|
73 |
-
{
|
74 |
======== AWR REPORT END ========
|
75 |
|
76 |
-
Please provide:
|
77 |
-
|
78 |
-
-
|
79 |
-
-
|
80 |
-
-
|
81 |
-
-
|
82 |
-
-
|
|
|
83 |
"""
|
84 |
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
|
|
89 |
This was a PERFORMANCE TEST on Oracle Exadata {exadata_model} {rack_size}.
|
90 |
Theoretical Max:
|
91 |
- Max IOPS: {specs['max_iops']}
|
@@ -93,76 +91,88 @@ Theoretical Max:
|
|
93 |
Show actual vs theoretical and generate Recommended Next Steps to Bridge Performance Gap.
|
94 |
"""
|
95 |
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
116 |
|
117 |
-
|
118 |
-
analyzer_agent = AWRAnalyzer()
|
119 |
-
rater_agent = Rater()
|
120 |
|
|
|
121 |
with gr.Blocks() as demo:
|
122 |
-
gr.Markdown("## π Oracle AWR Analyzer (
|
123 |
|
124 |
-
awr_text = gr.Textbox(label="Paste AWR Report (HTML or TXT)", lines=30)
|
125 |
-
threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars)")
|
126 |
performance_test_mode = gr.Checkbox(label="Performance Test Mode")
|
127 |
exadata_model = gr.Dropdown(choices=["X7", "X8", "X9", "X10", "X11M"], label="Exadata Model", visible=False)
|
128 |
rack_size = gr.Dropdown(choices=["Quarter Rack", "Half Rack", "Full Rack"], label="Rack Size", visible=False)
|
129 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
130 |
def toggle_visibility(mode):
|
131 |
return gr.update(visible=mode), gr.update(visible=mode)
|
132 |
|
133 |
performance_test_mode.change(toggle_visibility, inputs=performance_test_mode, outputs=[exadata_model, rack_size])
|
134 |
|
135 |
analyze_btn = gr.Button("Analyze AWR")
|
136 |
-
output = gr.Textbox(label="AWR Analysis Result", lines=
|
137 |
rating = gr.Textbox(label="Rater Rating + Explanation", lines=4)
|
138 |
retry_status = gr.Textbox(label="Retry Status")
|
139 |
|
140 |
-
|
141 |
-
if not awr_text.strip():
|
142 |
-
return "No AWR report provided.", "", ""
|
143 |
-
|
144 |
-
answer = analyzer_agent.analyze(awr_text, performance_test_mode, exadata_model, rack_size)
|
145 |
-
rating_text = rater_agent.rate("AWR Analysis", answer)
|
146 |
-
|
147 |
-
stars = 0
|
148 |
-
match = re.search(r"(\d+)", rating_text)
|
149 |
-
if match:
|
150 |
-
stars = int(match.group(1))
|
151 |
-
|
152 |
-
if stars < threshold:
|
153 |
-
retry_answer = analyzer_agent.analyze(awr_text, performance_test_mode, exadata_model, rack_size)
|
154 |
-
retry_rating = rater_agent.rate("AWR Analysis Retry", retry_answer)
|
155 |
-
|
156 |
-
with open(log_filename, "a", encoding="utf-8") as log_file:
|
157 |
-
log_file.write(f"\n---\n{datetime.now()} RETRY\nOriginal: {answer}\nRating: {rating_text}\nRetry: {retry_answer}\nRetry Rating: {retry_rating}\n")
|
158 |
-
|
159 |
-
return retry_answer, retry_rating, "β
Retry Occurred"
|
160 |
-
else:
|
161 |
-
with open(log_filename, "a", encoding="utf-8") as log_file:
|
162 |
-
log_file.write(f"\n---\n{datetime.now()} SUCCESS\nAnswer: {answer}\nRating: {rating_text}\n")
|
163 |
-
|
164 |
-
return answer, rating_text, "β
Accepted on first try"
|
165 |
-
|
166 |
-
analyze_btn.click(process, inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size], outputs=[output, rating, retry_status])
|
167 |
|
168 |
-
|
|
|
53 |
cleaned = "\n".join([line.strip() for line in text.splitlines() if line.strip()])
|
54 |
return cleaned
|
55 |
|
|
|
|
|
|
|
|
|
|
|
56 |
# --- AWR Analyzer ---
|
57 |
+
def analyze_awr(content, performance_test_mode, exadata_model, rack_size, selected_model_key):
    """Run an LLM analysis of an AWR report and return the model's answer text.

    Parameters
    ----------
    content : str
        Raw AWR report (HTML or plain text).
    performance_test_mode : bool
        When True (and a model/rack are chosen), append an Exadata
        theoretical-maximum comparison section to the prompt.
    exadata_model : str | None
        Exadata hardware generation key into ``exadata_specs`` (e.g. "X10").
    rack_size : str | None
        Rack-size key into ``exadata_specs[exadata_model]``.
    selected_model_key : str
        Human-readable model label from the UI dropdown; mapped to an API
        model id, falling back to "gpt-4o" for unknown labels.

    Returns
    -------
    str
        The stripped text of the first chat-completion choice.
    """
    cleaned_content = clean_awr_content(content)

    # Keep the prompt within the model context window; mark the cut so the
    # LLM knows the report is incomplete rather than short.
    max_chars = 128000
    if len(cleaned_content) > max_chars:
        cleaned_content = cleaned_content[:max_chars] + "\n\n[TRUNCATED]..."

    # Build prompt
    prompt = f"""
You are an expert Oracle Database performance analyst with deep knowledge of AWR reports, Oracle RAC internals, and Exadata architecture (Smart Scan, Flash Cache, IORM, RDMA, Storage Indexes).
You must produce highly detailed diagnostic insights based on the AWR report provided below. Use numbers and thresholds whenever possible and explain why each observation matters.

======== AWR REPORT START ========
{cleaned_content}
======== AWR REPORT END ========

Please provide the following sections:

- **Performance Summary**
- **Detailed Analysis of Bottlenecks and/or Degradation Risks**
- **Performance Forecast and Predictions**
- **Specific Recommendations for Monitoring**
- **Exadata Statistics Performance Summary**
- **Recommended Next Steps to Bridge Performance Gap**
"""

    # Add Exadata comparison if performance test mode.
    if performance_test_mode and exadata_model and rack_size:
        specs = exadata_specs.get(exadata_model, {}).get(rack_size, {})
        if specs:
            # Generalized: list every field present in the spec entry instead
            # of hard-coding individual keys (the original enumerated fields
            # such as 'max_iops'), so new entries in exadata_specs show up in
            # the prompt without code changes.
            spec_lines = "\n".join(f"- {key}: {value}" for key, value in specs.items())
            prompt += f"""
This was a PERFORMANCE TEST on Oracle Exadata {exadata_model} {rack_size}.
Theoretical Max:
{spec_lines}
Show actual vs theoretical and generate Recommended Next Steps to Bridge Performance Gap.
"""

    # Map the UI label to an API model id.
    # NOTE(review): all four ids are sent through the same OpenAI-style
    # `client` -- the Claude ids can only work if `client` points at an
    # OpenAI-compatible gateway (e.g. LiteLLM); confirm against client setup.
    model_choices = {
        "GPT-4o (Balanced - Recommended for most tasks)": "gpt-4o",
        "Claude 3 Opus (Best for Deep Diagnostic Analysis)": "claude-3-opus-20240229",
        "GPT-4-turbo (Budget-Friendly, Good Quality)": "gpt-4-turbo",
        "Claude 3 Sonnet (Good Balance, Lower Cost)": "claude-3-sonnet-20240229",
    }

    MODEL = model_choices.get(selected_model_key, "gpt-4o")  # Fallback to gpt-4o if invalid

    response = client.chat.completions.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": "You are an expert Oracle Database performance analyst."},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message.content.strip()
|
112 |
+
|
113 |
+
# --- Rater ---
|
114 |
+
# --- Rater ---
def rate_answer_rater(question, final_answer):
    """Ask the rater model to score *final_answer* (1-5 stars) with an explanation.

    Parameters
    ----------
    question : str
        Short label for what was asked (e.g. "AWR Analysis").  Bug fix: the
        original built the prompt from *final_answer* alone and silently
        ignored this parameter; it is now included for rating context.
    final_answer : str
        The analysis text to be rated.

    Returns
    -------
    str
        The rater model's raw reply, stripped of surrounding whitespace.
    """
    prompt = f"Rate this answer 1-5 stars with explanation:\n\nQuestion: {question}\n\nAnswer:\n{final_answer}"
    response = openai_rater.chat.completions.create(
        model="mistral/ministral-8b",
        messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message.content.strip()
|
121 |
+
|
122 |
+
# --- Main Logic ---
|
123 |
+
def _log_result(lines):
    """Append one '---'-delimited entry (a list of lines) to the session log file."""
    with open(log_filename, "a", encoding="utf-8") as log_file:
        log_file.write("\n---\n" + "\n".join(lines) + "\n")


# --- Main Logic ---
def process_awr(awr_text, correctness_threshold, performance_test_mode, exadata_model, rack_size, selected_model_key):
    """Analyze an AWR report, rate the analysis, and retry once if the rating is low.

    Returns a 3-tuple of (analysis text, rating text, status message) matching
    the three Gradio output widgets.
    """
    if not awr_text.strip():
        return "No AWR report provided.", "", ""

    answer = analyze_awr(awr_text, performance_test_mode, exadata_model, rack_size, selected_model_key)
    rating_text = rate_answer_rater("AWR Analysis", answer)

    # Extract the star rating.  Bug fix: the original regex r"(\d+)" grabbed
    # the FIRST integer anywhere in the rater's reply, which could be an IOPS
    # figure or a "10" from the explanation; restrict the match to a
    # standalone digit in the valid 0-5 star range.
    stars = 0
    match = re.search(r"\b([0-5])\b", rating_text)
    if match:
        stars = int(match.group(1))

    if stars < correctness_threshold:
        # One retry with a fresh analysis + fresh rating.
        answer_retry = analyze_awr(awr_text, performance_test_mode, exadata_model, rack_size, selected_model_key)
        rating_text_retry = rate_answer_rater("AWR Analysis (Retry)", answer_retry)

        _log_result([
            f"{datetime.now()} RETRY",
            f"Original: {answer}",
            f"Rating: {rating_text}",
            f"Retry: {answer_retry}",
            f"Retry Rating: {rating_text_retry}",
        ])
        # NOTE(review): status emoji restored from mojibake in the scraped
        # source -- confirm against the deployed app.
        return answer_retry, rating_text_retry, "✅ Retry Occurred (rating below threshold)"
    else:
        _log_result([
            f"{datetime.now()} SUCCESS",
            f"Answer: {answer}",
            f"Rating: {rating_text}",
        ])
        return answer, rating_text, "✅ Accepted on first try"
|
|
|
|
|
148 |
|
149 |
+
# --- Gradio UI ---
|
150 |
# --- Gradio UI ---
with gr.Blocks() as demo:
    # NOTE(review): header emoji restored from mojibake ("π") in the scraped
    # source -- confirm against the deployed app.
    gr.Markdown("## 📊 Oracle AWR Analyzer (AI + Rating + Retry + Exadata Gap Analysis + Model Selection)")

    # Inputs.
    awr_text = gr.Textbox(label="Paste AWR Report (HTML or TXT)", lines=30, placeholder="Paste full AWR here...")
    threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars for Retry)")
    performance_test_mode = gr.Checkbox(label="Performance Test Mode")
    # Exadata pickers stay hidden until Performance Test Mode is enabled.
    exadata_model = gr.Dropdown(choices=["X7", "X8", "X9", "X10", "X11M"], label="Exadata Model", visible=False)
    rack_size = gr.Dropdown(choices=["Quarter Rack", "Half Rack", "Full Rack"], label="Rack Size", visible=False)

    # These labels must match the keys of model_choices inside analyze_awr();
    # unknown labels fall back to gpt-4o there.
    model_selector = gr.Dropdown(
        choices=["GPT-4o (Balanced - Recommended for most tasks)", "Claude 3 Opus (Best for Deep Diagnostic Analysis)",
                 "GPT-4-turbo (Budget-Friendly, Good Quality)", "Claude 3 Sonnet (Good Balance, Lower Cost)"],
        label="Choose AI Model for Analysis",
        value="GPT-4o (Balanced - Recommended for most tasks)"
    )

    def toggle_visibility(mode):
        """Show or hide both Exadata dropdowns based on the checkbox state."""
        return gr.update(visible=mode), gr.update(visible=mode)

    performance_test_mode.change(toggle_visibility, inputs=performance_test_mode, outputs=[exadata_model, rack_size])

    # Outputs.
    analyze_btn = gr.Button("Analyze AWR")
    output = gr.Textbox(label="AWR Analysis Result", lines=20)
    rating = gr.Textbox(label="Rater Rating + Explanation", lines=4)
    retry_status = gr.Textbox(label="Retry Status")

    analyze_btn.click(process_awr, inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size, model_selector], outputs=[output, rating, retry_status])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
177 |
|
178 |
+
# Launch the Gradio server; debug=True surfaces tracebacks in the console/UI.
demo.launch(debug=True)
|