# Oracle-TANGO / app.py
# === Imports ===
import os
import re
import gradio as gr
import openai
import oci
from datetime import datetime
from bs4 import BeautifulSoup
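# Required environment variables (all referenced below):
#   OPENAI_API_KEY                 - OpenAI API key used for analysis and comparison calls
#   OPENROUTER                     - OpenRouter API key (optional alternate provider)
#   OCI_USER, OCI_TENANCY, OCI_FINGERPRINT, OCI_REGION, OCI_PRIVATE_KEY
#                                  - OCI API-key authentication for Object Storage
#   OCI_NAMESPACE, OCI_BUCKET_NAME - bucket that stores uploaded AWR reports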
# --- API Keys ---
openai_api_key = os.environ.get("OPENAI_API_KEY")
if not openai_api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set.")
client = openai.OpenAI(api_key=openai_api_key)
# Optional OpenRouter client (not used below; kept available for routing to alternate models).
openrouter_key = os.environ.get("OPENROUTER")
openrouter = openai.OpenAI(api_key=openrouter_key, base_url="https://openrouter.ai/api/v1")
# --- OCI Object Storage Config ---
# === OCI Object Storage Setup ===
oci_config = {
"user": os.environ.get("OCI_USER"),
"tenancy": os.environ.get("OCI_TENANCY"),
"fingerprint": os.environ.get("OCI_FINGERPRINT"),
"region": os.environ.get("OCI_REGION"),
"key_content": os.environ.get("OCI_PRIVATE_KEY")
}
namespace = os.environ.get("OCI_NAMESPACE")
bucket_name = os.environ.get("OCI_BUCKET_NAME")
try:
    object_storage = oci.object_storage.ObjectStorageClient(oci_config)
except Exception as e:
    print("Failed to initialize OCI Object Storage client:", e)
    raise
# --- Exadata Specs ---
exadata_specs = {
"X7": {"Quarter Rack": {"max_iops": 350000, "max_throughput": 25}, "Half Rack": {"max_iops": 700000, "max_throughput": 50}, "Full Rack": {"max_iops": 1400000, "max_throughput": 100}},
"X8": {"Quarter Rack": {"max_iops": 380000, "max_throughput": 28}, "Half Rack": {"max_iops": 760000, "max_throughput": 56}, "Full Rack": {"max_iops": 1520000, "max_throughput": 112}},
"X9": {"Quarter Rack": {"max_iops": 450000, "max_throughput": 30}, "Half Rack": {"max_iops": 900000, "max_throughput": 60}, "Full Rack": {"max_iops": 1800000, "max_throughput": 120}},
"X10": {"Quarter Rack": {"max_iops": 500000, "max_throughput": 35}, "Half Rack": {"max_iops": 1000000, "max_throughput": 70}, "Full Rack": {"max_iops": 2000000, "max_throughput": 140}},
"X11M": {"Quarter Rack": {"max_iops": 600000, "max_throughput": 40}, "Half Rack": {"max_iops": 1200000, "max_throughput": 80}, "Full Rack": {"max_iops": 2400000, "max_throughput": 160}},
}
# --- Supported LLM Models ---
supported_llms = {
"gpt-3.5-turbo": "Fastest / Lowest Cost - General AWR Healthcheck",
"gpt-4-turbo": "Balanced - Production Performance Analysis",
"gpt-4o": "Deepest Analysis - Exadata, RAC, Smart Scan, Critical Issues",
}
# --- Utils ---
def clean_awr_content(content):
    if "<html" in content.lower():
        soup = BeautifulSoup(content, "html.parser")
        return soup.get_text()
    return content
def upload_awr_file(file_obj):
    # Gradio may hand this either a temp-file object or a plain path string, depending on version.
    path = file_obj if isinstance(file_obj, str) else file_obj.name
    filename = os.path.basename(path)
    with open(path, "rb") as f:
        content = f.read()
    object_storage.put_object(namespace, bucket_name, filename, content)
    return f"\u2705 Uploaded {filename}"
def list_awr_files():
    try:
        objects = object_storage.list_objects(namespace, bucket_name)
        return [obj.name for obj in objects.data.objects if obj.name.endswith((".html", ".txt"))]
    except Exception as e:
        return [f"Error listing objects: {str(e)}"]
def get_awr_file_text(filename):
    try:
        response = object_storage.get_object(namespace, bucket_name, filename)
        raw = response.data.content.decode()
        return clean_awr_content(raw)
    except Exception as e:
        return f"Error loading file: {str(e)}"
def compare_awrs(file_list, llm_model):
    if not file_list:
        return "No files selected."
    combined_text = ""
    for fname in file_list:
        content = get_awr_file_text(fname)
        combined_text += f"\n=== AWR: {fname} ===\n{content[:3000]}...\n"
    prompt = f"""
You are a senior Oracle performance engineer. You will compare multiple AWR reports and highlight:
- Key differences in workload or system behavior
- Major trends or anomalies
- Which report shows better performance and why
- Exadata-specific metrics like Smart Scan, Flash I/O
- Suggestions to unify or improve system behavior

AWR Reports:
{combined_text}
"""
    response = client.chat.completions.create(
        model=llm_model,
        messages=[
            {"role": "system", "content": "You are a comparative AWR analysis expert."},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message.content.strip()
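# --- Analysis Agent Placeholder ---
# The "Manual AWR Analysis" tab below wires its button to process_awr(), which this
# file imports from your_existing_code but never defines. The function below is a
# minimal, hypothetical sketch (not the original multi-agent pipeline): one analysis
# pass, one health-check pass, a self-rating, and a pass/fail check against the star
# threshold. It is only used as a fallback if the real import fails.
def process_awr(awr_text, threshold, performance_test_mode, exadata_model, rack_size, llm_model):
    if not awr_text or not awr_text.strip():
        return "No AWR text provided.", "", "", ""
    cleaned = clean_awr_content(awr_text)[:12000]  # keep the prompt within model context limits

    # Add an Exadata sizing hint when Performance Test Mode is enabled.
    exadata_note = ""
    if performance_test_mode and exadata_model in exadata_specs and rack_size in exadata_specs[exadata_model]:
        spec = exadata_specs[exadata_model][rack_size]
        exadata_note = (f"\nCompare observed I/O against an {exadata_model} {rack_size}: "
                        f"max_iops={spec['max_iops']}, max_throughput={spec['max_throughput']}.")

    analysis = client.chat.completions.create(
        model=llm_model,
        messages=[{"role": "system", "content": "You are a senior Oracle performance engineer."},
                  {"role": "user", "content": f"Analyze this AWR report.{exadata_note}\n\n{cleaned}"}],
    ).choices[0].message.content.strip()

    health = client.chat.completions.create(
        model=llm_model,
        messages=[{"role": "system", "content": "You are a database health-check agent."},
                  {"role": "user", "content": f"List health risks and warning signs in this AWR report:\n\n{cleaned}"}],
    ).choices[0].message.content.strip()

    rating_reply = client.chat.completions.create(
        model=llm_model,
        messages=[{"role": "system", "content": "Rate the following AWR analysis from 0 to 5 stars. Reply with a single digit."},
                  {"role": "user", "content": analysis}],
    ).choices[0].message.content.strip()
    match = re.search(r"[0-5]", rating_reply)
    stars = int(match.group()) if match else 0

    retry_status = "OK" if stars >= threshold else f"Below threshold ({stars} < {threshold}); consider re-running"
    return analysis, health, f"{stars} / 5 stars", retry_status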
# === Gradio UI ===
with gr.Blocks() as demo:
    with gr.Tab("Manual AWR Analysis"):
        gr.Markdown("# \U0001f9e0 Multi-Agent Oracle AWR Analyzer (Production Edition)")
        awr_text = gr.Textbox(label="Paste AWR Report", lines=30)
        threshold = gr.Slider(0, 5, value=3, step=1, label="Correctness Threshold (Stars)")
        performance_test_mode = gr.Checkbox(label="Performance Test Mode")
        exadata_model = gr.Dropdown(choices=list(exadata_specs.keys()), label="Exadata Model", visible=False)
        rack_size = gr.Dropdown(choices=["Quarter Rack", "Half Rack", "Full Rack"], label="Rack Size", visible=False)
        llm_selector = gr.Dropdown(choices=list(supported_llms.keys()), value="gpt-4-turbo", label="LLM Model")

        def toggle_visibility(mode):
            return gr.update(visible=mode), gr.update(visible=mode)

        performance_test_mode.change(toggle_visibility, inputs=performance_test_mode, outputs=[exadata_model, rack_size])

        analyze_btn = gr.Button("Analyze AWR Report")
        output = gr.Textbox(label="AWR Analysis", lines=20)
        health = gr.Textbox(label="Health Agent Findings", lines=10)
        rating = gr.Textbox(label="Rater", lines=3)
        retry_status = gr.Textbox(label="Retry Status")
        try:
            from your_existing_code import process_awr  # Replace with actual import or include function here
        except ImportError:
            pass  # fall back to the placeholder process_awr sketch defined above

        analyze_btn.click(
            process_awr,
            inputs=[awr_text, threshold, performance_test_mode, exadata_model, rack_size, llm_selector],
            outputs=[output, health, rating, retry_status],
        )
with gr.Tab("Compare AWRs from OCI"):
upload_file = gr.File(label="Upload AWR Report", file_types=[".html", ".txt"])
upload_status = gr.Textbox(label="Upload Status")
upload_file.upload(fn=upload_awr_file, inputs=upload_file, outputs=upload_status)
refresh_button = gr.Button("\U0001f503 Refresh File List")
file_multiselect = gr.Dropdown(choices=[], label="Select AWR Files", multiselect=True)
refresh_button.click(fn=lambda: gr.update(choices=list_awr_files()), outputs=file_multiselect)
llm_compare = gr.Dropdown(choices=list(supported_llms.keys()), value="gpt-4-turbo", label="LLM Model for Comparison")
compare_output = gr.Textbox(label="Comparison Output", lines=20)
gr.Button("Compare Selected AWRs").click(fn=compare_awrs, inputs=[file_multiselect, llm_compare], outputs=compare_output)
if __name__ == "__main__":
    demo.launch(debug=True)