kgauvin603 committed on
Commit
fc06528
·
verified ·
1 Parent(s): 2983bfb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -39
app.py CHANGED
@@ -6,9 +6,7 @@ import openai
6
  from datetime import datetime
7
  from bs4 import BeautifulSoup
8
 
9
- # --- API Keys + Colab support ---
10
- import os
11
-
12
  openai_api_key = os.environ.get("OPENAI_API_KEY")
13
  openrouter_key = os.environ.get("OPENROUTER")
14
 
@@ -17,7 +15,6 @@ if not openai_api_key:
17
  if not openrouter_key:
18
  raise ValueError("OPENROUTER environment variable is not set.")
19
 
20
-
21
  client = openai.OpenAI(api_key=openai_api_key)
22
  openai_rater = openai.OpenAI(api_key=openrouter_key, base_url="https://openrouter.ai/api/v1")
23
 
@@ -63,51 +60,30 @@ def analyze_awr(content, performance_test_mode, exadata_model, rack_size):
63
  if len(cleaned_content) > max_chars:
64
  cleaned_content = cleaned_content[:max_chars] + "\n\n[TRUNCATED]..."
65
 
66
- # Build prompt
67
- prompt = f"""
68
  You are an expert Oracle Database performance analyst with deep knowledge of AWR reports, Oracle RAC internals, and Exadata architecture (Smart Scan, Flash Cache, IORM, RDMA, Storage Indexes).
69
 
70
- You must produce highly detailed diagnostic insights based on the AWR report provided below. Use numbers and thresholds whenever possible and explain why each observation matters. Do not simply say "high" or "low" — provide the metric, its value, and context (e.g., why 300 gc buffer busy waits/sec is high for OLTP). Explain implications in RAC/Exadata environments.
71
 
72
  ======== AWR REPORT START ========
73
  {cleaned_content}
74
  ======== AWR REPORT END ========
75
 
76
- Please provide the following sections with as much metric detail and technical context as possible:
77
-
78
  - **Performance Summary**
79
- - Overall DB load, CPU usage, and major wait events.
80
- - Discuss RAC-specific behaviors such as global cache waits.
81
-
82
  - **Detailed Analysis of Bottlenecks and/or Degradation Risks**
83
- - For each identified bottleneck, provide the metric (e.g. "gc buffer busy: 1500/sec") and explain why it is a problem.
84
- - Provide RAC-relevant interpretations (e.g. is GC messaging over interconnect too high).
85
- - Include flash cache and I/O specific risks.
86
-
87
  - **Performance Forecast and Predictions**
88
- - Given the current metrics, predict where the system is heading.
89
- - Use thresholds to indicate risk (e.g. "Redo size at 500MB/min approaching flash log limit").
90
-
91
  - **Specific Recommendations for Monitoring**
92
- - Suggest exactly which metrics should be tracked and why.
93
- - Include SQL_IDs, Global Cache metrics, Log IO, CPU.
94
-
95
  - **Exadata Statistics Performance Summary**
96
- - Include IO performance, flash cache hit %, Smart Scan utilization.
97
- - Mention storage server level metrics (latency, MB/s, read/write balance).
98
- - Indicate if IO saturation occurred based on latency and throughput.
99
-
100
  - **Recommended Next Steps to Bridge Performance Gap**
101
- - Generate action plans (e.g. SQL tuning, service affinity, adding storage cells, increasing LOG_BUFFER).
102
- - Clearly separate short term vs long term actions.
103
  """
104
 
105
-
106
- # Add Exadata comparison if performance test mode
107
- if performance_test_mode and exadata_model and rack_size:
108
- specs = exadata_specs.get(exadata_model, {}).get(rack_size, {})
109
- if specs:
110
- prompt += f"""
111
 
112
  This was a PERFORMANCE TEST on Oracle Exadata {exadata_model} {rack_size}.
113
  Theoretical Max:
@@ -117,8 +93,8 @@ Theoretical Max:
117
  Show actual vs theoretical and generate Recommended Next Steps to Bridge Performance Gap.
118
  """
119
 
120
- # --- Call GPT-4o (or turbo) ---
121
- MODEL = "gpt-4-turbo" # BEST (or change to gpt-4-turbo if needed)
122
 
123
  response = client.chat.completions.create(
124
  model=MODEL,
@@ -130,7 +106,6 @@ Show actual vs theoretical and generate Recommended Next Steps to Bridge Perform
130
 
131
  return response.choices[0].message.content.strip()
132
 
133
-
134
  # --- Rater ---
135
  def rate_answer_rater(question, final_answer):
136
  prompt = f"Rate this answer 1-5 stars with explanation:\n\n{final_answer}"
@@ -169,7 +144,7 @@ def process_awr(awr_text, correctness_threshold, performance_test_mode, exadata_
169
 
170
  # --- Gradio UI ---
171
  with gr.Blocks() as demo:
172
- gr.Markdown("## 📊 Oracle AWR Analyzer (AI + Rating + Retry + Exadata Gap Analysis)")
173
 
174
  awr_text = gr.Textbox(label="Paste AWR Report (HTML or TXT)", lines=30, placeholder="Paste full AWR here...")
175
  threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars for Retry)")
@@ -189,4 +164,4 @@ with gr.Blocks() as demo:
189
 
190
  analyze_btn.click(process_awr, inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size], outputs=[output, rating, retry_status])
191
 
192
- demo.launch(debug=True)
 
6
  from datetime import datetime
7
  from bs4 import BeautifulSoup
8
 
9
+ # --- API Keys ---
 
 
10
  openai_api_key = os.environ.get("OPENAI_API_KEY")
11
  openrouter_key = os.environ.get("OPENROUTER")
12
 
 
15
  if not openrouter_key:
16
  raise ValueError("OPENROUTER environment variable is not set.")
17
 
 
18
  client = openai.OpenAI(api_key=openai_api_key)
19
  openai_rater = openai.OpenAI(api_key=openrouter_key, base_url="https://openrouter.ai/api/v1")
20
 
 
60
  if len(cleaned_content) > max_chars:
61
  cleaned_content = cleaned_content[:max_chars] + "\n\n[TRUNCATED]..."
62
 
63
+ # Build prompt
64
+ prompt = f"""
65
  You are an expert Oracle Database performance analyst with deep knowledge of AWR reports, Oracle RAC internals, and Exadata architecture (Smart Scan, Flash Cache, IORM, RDMA, Storage Indexes).
66
 
67
+ You must produce highly detailed diagnostic insights based on the AWR report provided below. Use numbers and thresholds whenever possible and explain why each observation matters. Do not simply say "high" or "low" — provide the metric, its value, and context.
68
 
69
  ======== AWR REPORT START ========
70
  {cleaned_content}
71
  ======== AWR REPORT END ========
72
 
73
+ Please provide:
 
74
  - **Performance Summary**
 
 
 
75
  - **Detailed Analysis of Bottlenecks and/or Degradation Risks**
 
 
 
 
76
  - **Performance Forecast and Predictions**
 
 
 
77
  - **Specific Recommendations for Monitoring**
 
 
 
78
  - **Exadata Statistics Performance Summary**
 
 
 
 
79
  - **Recommended Next Steps to Bridge Performance Gap**
 
 
80
  """
81
 
82
+ # Add Exadata comparison if performance test mode
83
+ if performance_test_mode and exadata_model and rack_size:
84
+ specs = exadata_specs.get(exadata_model, {}).get(rack_size, {})
85
+ if specs:
86
+ prompt += f"""
 
87
 
88
  This was a PERFORMANCE TEST on Oracle Exadata {exadata_model} {rack_size}.
89
  Theoretical Max:
 
93
  Show actual vs theoretical and generate Recommended Next Steps to Bridge Performance Gap.
94
  """
95
 
96
+ # --- Call GPT ---
97
+ MODEL = "gpt-4-turbo"
98
 
99
  response = client.chat.completions.create(
100
  model=MODEL,
 
106
 
107
  return response.choices[0].message.content.strip()
108
 
 
109
  # --- Rater ---
110
  def rate_answer_rater(question, final_answer):
111
  prompt = f"Rate this answer 1-5 stars with explanation:\n\n{final_answer}"
 
144
 
145
  # --- Gradio UI ---
146
  with gr.Blocks() as demo:
147
+ gr.Markdown("## 📊 Oracle AWR Analyzer (AI + Rating + Exadata Gap Analysis)")
148
 
149
  awr_text = gr.Textbox(label="Paste AWR Report (HTML or TXT)", lines=30, placeholder="Paste full AWR here...")
150
  threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars for Retry)")
 
164
 
165
  analyze_btn.click(process_awr, inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size], outputs=[output, rating, retry_status])
166
 
167
+ demo.launch(share=True)