kgauvin603 committed (verified)
Commit b17847f · 1 Parent(s): 905c0db

Create app.py

Files changed (1):
  1. app.py +159 -0

app.py ADDED
@@ -0,0 +1,159 @@
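# Environment variables referenced below: OPENAI_API_KEY, OPENROUTER, OCI_USER,
# OCI_TENANCY, OCI_FINGERPRINT, OCI_REGION, OCI_PRIVATE_KEY, OCI_NAMESPACE,
# OCI_BUCKET_NAME.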
# === Imports ===
import os
import re
import gradio as gr
import openai
import oci
from datetime import datetime
from bs4 import BeautifulSoup

# --- API Keys ---
openai_api_key = os.environ.get("OPENAI_API_KEY")
if not openai_api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set.")

client = openai.OpenAI(api_key=openai_api_key)

openrouter_key = os.environ.get("OPENROUTER")
openrouter = openai.OpenAI(api_key=openrouter_key, base_url="https://openrouter.ai/api/v1")

# === OCI Object Storage Setup ===
oci_config = {
    "user": os.environ.get("OCI_USER"),
    "tenancy": os.environ.get("OCI_TENANCY"),
    "fingerprint": os.environ.get("OCI_FINGERPRINT"),
    "region": os.environ.get("OCI_REGION"),
    "key_content": os.environ.get("OCI_PRIVATE_KEY"),
}

namespace = os.environ.get("OCI_NAMESPACE")
bucket_name = os.environ.get("OCI_BUCKET_NAME")

try:
    object_storage = oci.object_storage.ObjectStorageClient(oci_config)
except Exception as e:
    print("Failed to initialize OCI Object Storage client:", e)
    object_storage = None  # storage-backed features will fail until the OCI config is fixed
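# Note: because the config above uses "key_content" rather than "key_file",
# OCI_PRIVATE_KEY is expected to hold the full PEM-encoded private key text,
# not a path to a key file.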
# --- Exadata Specs ---
exadata_specs = {
    "X7": {"Quarter Rack": {"max_iops": 350000, "max_throughput": 25}, "Half Rack": {"max_iops": 700000, "max_throughput": 50}, "Full Rack": {"max_iops": 1400000, "max_throughput": 100}},
    "X8": {"Quarter Rack": {"max_iops": 380000, "max_throughput": 28}, "Half Rack": {"max_iops": 760000, "max_throughput": 56}, "Full Rack": {"max_iops": 1520000, "max_throughput": 112}},
    "X9": {"Quarter Rack": {"max_iops": 450000, "max_throughput": 30}, "Half Rack": {"max_iops": 900000, "max_throughput": 60}, "Full Rack": {"max_iops": 1800000, "max_throughput": 120}},
    "X10": {"Quarter Rack": {"max_iops": 500000, "max_throughput": 35}, "Half Rack": {"max_iops": 1000000, "max_throughput": 70}, "Full Rack": {"max_iops": 2000000, "max_throughput": 140}},
    "X11M": {"Quarter Rack": {"max_iops": 600000, "max_throughput": 40}, "Half Rack": {"max_iops": 1200000, "max_throughput": 80}, "Full Rack": {"max_iops": 2400000, "max_throughput": 160}},
}

# --- Supported LLM Models ---
supported_llms = {
    "gpt-3.5-turbo": "Fastest / Lowest Cost - General AWR Healthcheck",
    "gpt-4-turbo": "Balanced - Production Performance Analysis",
    "gpt-4o": "Deepest Analysis - Exadata, RAC, Smart Scan, Critical Issues",
}
# --- Utils ---
def clean_awr_content(content):
    # AWR reports may be HTML or plain text; strip markup when HTML is detected.
    if "<html" in content.lower():
        soup = BeautifulSoup(content, "html.parser")
        return soup.get_text()
    return content

def upload_awr_file(file_obj):
    # Gradio may pass a file path or a temp-file wrapper depending on version;
    # resolve the path and read the bytes from disk before uploading to OCI.
    path = file_obj if isinstance(file_obj, str) else file_obj.name
    filename = os.path.basename(path)
    with open(path, "rb") as f:
        content = f.read()
    object_storage.put_object(namespace, bucket_name, filename, content)
    return f"\u2705 Uploaded {filename}"

def list_awr_files():
    try:
        objects = object_storage.list_objects(namespace, bucket_name)
        return [obj.name for obj in objects.data.objects if obj.name.endswith(".html") or obj.name.endswith(".txt")]
    except Exception as e:
        return [f"Error listing objects: {str(e)}"]

def get_awr_file_text(filename):
    try:
        response = object_storage.get_object(namespace, bucket_name, filename)
        raw = response.data.content.decode()
        return clean_awr_content(raw)
    except Exception as e:
        return f"Error loading file: {str(e)}"
def compare_awrs(file_list, llm_model):
    if not file_list:
        return "No files selected."
    combined_text = ""
    for fname in file_list:
        content = get_awr_file_text(fname)
        # Truncate each report so the combined prompt stays within context limits.
        combined_text += f"\n=== AWR: {fname} ===\n{content[:3000]}...\n"
    prompt = f"""
You are a senior Oracle performance engineer. You will compare multiple AWR reports and highlight:
- Key differences in workload or system behavior
- Major trends or anomalies
- Which report shows better performance and why
- Exadata-specific metrics like Smart Scan, Flash I/O
- Suggestions to unify or improve system behavior
AWR Reports:
{combined_text}
"""
    response = client.chat.completions.create(
        model=llm_model,
        messages=[{"role": "system", "content": "You are a comparative AWR analysis expert."},
                  {"role": "user", "content": prompt}]
    )
    return response.choices[0].message.content.strip()
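# --- Placeholder: process_awr ---
# The "Manual AWR Analysis" tab below imports process_awr from a module that is
# not part of this commit. The stand-in below is only a minimal sketch of the
# assumed signature (awr_text, threshold, performance_test_mode, exadata_model,
# rack_size, llm_model) -> (analysis, health, rating, retry_status); replace it
# with the real multi-agent implementation.
def process_awr(awr_text, threshold, performance_test_mode, exadata_model, rack_size, llm_model):
    if not awr_text or not awr_text.strip():
        return "No AWR text provided.", "", "", ""
    context = ""
    if performance_test_mode and exadata_model and rack_size:
        spec = exadata_specs.get(exadata_model, {}).get(rack_size, {})
        context = f"\nTarget platform: Exadata {exadata_model} {rack_size} (reference limits: {spec})."
    prompt = (
        "Analyze this Oracle AWR report. Summarize key findings, top wait events, "
        f"and tuning recommendations.{context}\n\n{awr_text[:6000]}"
    )
    response = client.chat.completions.create(
        model=llm_model,
        messages=[{"role": "system", "content": "You are an Oracle AWR analysis expert."},
                  {"role": "user", "content": prompt}]
    )
    analysis = response.choices[0].message.content.strip()
    # Health findings, star rating, and retry logic belong to the real agents;
    # return simple placeholders so the UI wiring can be exercised end to end.
    return analysis, "See analysis above.", f"Correctness threshold: {threshold} stars.", "No retries attempted."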
# === Gradio UI ===
with gr.Blocks() as demo:
    with gr.Tab("Manual AWR Analysis"):
        gr.Markdown("# \U0001f9e0 Multi-Agent Oracle AWR Analyzer (Production Edition)")
        awr_text = gr.Textbox(label="Paste AWR Report", lines=30)
        threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars)")
        performance_test_mode = gr.Checkbox(label="Performance Test Mode")
        exadata_model = gr.Dropdown(choices=list(exadata_specs.keys()), label="Exadata Model", visible=False)
        rack_size = gr.Dropdown(choices=["Quarter Rack", "Half Rack", "Full Rack"], label="Rack Size", visible=False)
        llm_selector = gr.Dropdown(choices=list(supported_llms.keys()), value="gpt-4-turbo", label="LLM Model")

        def toggle_visibility(mode):
            # Show the Exadata model/rack selectors only in Performance Test Mode.
            return gr.update(visible=mode), gr.update(visible=mode)

        performance_test_mode.change(toggle_visibility, inputs=performance_test_mode, outputs=[exadata_model, rack_size])
        analyze_btn = gr.Button("Analyze AWR Report")
        output = gr.Textbox(label="AWR Analysis", lines=20)
        health = gr.Textbox(label="Health Agent Findings", lines=10)
        rating = gr.Textbox(label="Rater", lines=3)
        retry_status = gr.Textbox(label="Retry Status")

        try:
            from your_existing_code import process_awr  # Replace with actual import or include function here
        except ImportError:
            pass  # fall back to the placeholder process_awr defined above
        analyze_btn.click(process_awr,
                          inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size, llm_selector],
                          outputs=[output, health, rating, retry_status])
    with gr.Tab("Compare AWRs from OCI"):
        upload_file = gr.File(label="Upload AWR Report", file_types=[".html", ".txt"])
        upload_status = gr.Textbox(label="Upload Status")
        upload_file.upload(fn=upload_awr_file, inputs=upload_file, outputs=upload_status)

        refresh_button = gr.Button("\U0001f503 Refresh File List")
        file_multiselect = gr.Dropdown(choices=[], label="Select AWR Files", multiselect=True)
        refresh_button.click(fn=lambda: gr.update(choices=list_awr_files()), outputs=file_multiselect)

        llm_compare = gr.Dropdown(choices=list(supported_llms.keys()), value="gpt-4-turbo", label="LLM Model for Comparison")
        compare_output = gr.Textbox(label="Comparison Output", lines=20)
        gr.Button("Compare Selected AWRs").click(fn=compare_awrs, inputs=[file_multiselect, llm_compare], outputs=compare_output)

if __name__ == "__main__":
    demo.launch(debug=True)
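# Running `python app.py` starts the Gradio server (by default on
# http://127.0.0.1:7860) once the OPENAI and OCI environment variables are set.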