|
import os |
|
import json |
|
import time |
|
import uuid |
|
import re |
|
import gradio as gr |
|
from datetime import datetime |
|
|
|
|
|
# OpenAI API key from the environment, if configured. NOTE(review): this key
# is never used anywhere in this demo file; .get() avoids the hard KeyError
# crash at import time that os.environ["OPENAI_API_KEY"] caused when unset.
# (The duplicate `import os` that was here is removed — os is imported at the
# top of the file.)
api_key = os.environ.get("OPENAI_API_KEY")
|
|
|
|
|
# Title shown in the app header.
APP_TITLE = "GPT5 Demo"

# One-line description rendered under the title.
APP_DESC = "A polished Gradio chat demo with presets, file context, tools, and export."

# Canned reply returned whenever the user asks which model they are talking to.
MODEL_IDENTITY_ANSWER = "GPT5 Thinking Model"
|
|
|
def estimate_tokens(text: str) -> int:
    """Rough token estimate: about four characters per token, never below 1."""
    approx = len(text) // 4
    return approx if approx >= 1 else 1
|
|
|
def format_timestamp(ts=None):
    """Format *ts* (or the current time when None) as 'YYYY-MM-DD HH:MM:SS'."""
    when = ts or datetime.now()
    return when.strftime("%Y-%m-%d %H:%M:%S")
|
|
|
def get_session_id(state):
    """Return the session id stored in *state*, or mint a fresh UUID4 string."""
    existing = state.get("session_id") if state else None
    return existing if existing else str(uuid.uuid4())
|
|
|
def truncate_context(context, max_chars=8000):
    """Clamp *context* to about *max_chars* characters by eliding the middle."""
    if len(context) <= max_chars:
        return context
    head_len = max_chars // 2
    # Matches the original negative-index floor division: for odd max_chars
    # the tail keeps one more character than the head.
    tail_len = (max_chars + 1) // 2
    marker = "\n...\n[Context truncated]\n...\n"
    return context[:head_len] + marker + context[-tail_len:]
|
|
|
# Pre-compiled patterns that signal the user is asking about the model's
# identity. BUGFIX: the original last pattern was r"\bmodel?\b", whose
# optional 'l' also matched the plain word "mode" (e.g. "dark mode");
# it is now r"\bmodels?\b" so only "model"/"models" triggers.
_IDENTITY_PATTERNS = [
    re.compile(p)
    for p in (
        r"\bwhich\s+model\b",
        r"\bwhat\s+model\b",
        r"\bare\s+you\s+(the\s+)?model\b",
        r"\bmodel\s+name\b",
        r"\bmodel\s+are\s+you\b",
        r"\bare\s+you\s+gpt5\b",
        r"\bidentify\s+your\s+model\b",
        r"\breturn\s+model\b",
        r"\bwhat\s+are\s+you\b",
        r"\bwho\s+are\s+you\b",
        r"\bmodels?\b",
    )
]


def is_model_identity_question(text: str) -> bool:
    """Return True when *text* looks like a question about the model's identity.

    Input is lowercased before matching; empty or None input is never an
    identity question.
    """
    if not text:
        return False
    t = text.lower().strip()
    return any(p.search(t) for p in _IDENTITY_PATTERNS)
|
|
|
def respond(system_prompt, history, user_msg, model_name, temperature, top_p, max_tokens, context_text, tool_choice):
    """Produce a (stub) assistant reply plus rough token estimates.

    Parameters mirror the UI controls; *history* is a list of (role, message)
    tuples. Returns a tuple (response_text, tokens_in, tokens_out).
    """
    # BUGFIX: the "Summarize Text" branch below calls user_msg.split(), which
    # crashed on None; normalize once up front (the identity branch already
    # guarded with `or ""`).
    user_msg = user_msg or ""

    # Identity questions short-circuit with the canned answer.
    if is_model_identity_question(user_msg):
        response = MODEL_IDENTITY_ANSWER
        return response, estimate_tokens(user_msg), estimate_tokens(response)

    # Rebuild the running transcript for the prompt.
    history_text = "".join(f"{role.capitalize()}: {msg}\n" for role, msg in history)

    # Optional context sections placed ahead of the transcript.
    sections = []
    if system_prompt:
        sections.append(f"System: {system_prompt}\n")
    if context_text:
        sections.append(f"[Attached Context]\n{truncate_context(context_text)}\n[/Attached Context]\n")
    if tool_choice and tool_choice != "None":
        sections.append(f"[Tool Requested: {tool_choice}]\n")
    full_context = "".join(sections)

    prompt = f"{full_context}{history_text}User: {user_msg}\nAssistant:"

    # Tool-specific stub text for the reply body.
    if tool_choice == "Summarize Text":
        words = user_msg.split()
        tool_hint = "Summary: " + " ".join(words[:80]) + ("..." if len(words) > 80 else "")
    elif tool_choice == "Summarize URL":
        tool_hint = "URL summary: (stub) Provide a URL and I will summarize its content if fetching is connected."
    else:
        tool_hint = "Thanks for your message! This is a demo response."

    response = f"[Model: {model_name} | T={temperature:.2f}, p={top_p:.2f}, max_tokens={max_tokens}]\n{tool_hint}\n\nEcho: {user_msg}"
    return response, estimate_tokens(prompt), estimate_tokens(response)
|
|
|
def read_files(files):
    """Decode each uploaded file as UTF-8 text and join the results.

    Each readable file becomes a '=== File: name ===' section; unreadable
    entries become a '=== File Error ===' section instead of raising.
    """
    if not files:
        return ""
    sections = []
    for item in files:
        try:
            path = item.name if hasattr(item, "name") else str(item)
            with open(path, "rb") as handle:
                raw = handle.read()
            try:
                text = raw.decode("utf-8", errors="ignore")
            except Exception:
                # Extremely defensive: errors="ignore" should never raise.
                text = str(raw)
            sections.append(f"\n=== File: {os.path.basename(path)} ===\n{text}\n")
        except Exception as exc:
            sections.append(f"\n=== File Error ===\nCould not read {item}: {exc}\n")
    return "\n".join(sections)
|
|
|
def on_submit(user_msg, system_prompt, model_name, temperature, top_p, max_tokens, files, tool_choice, state, history, persist_history):
    """Handle one chat submission: append the turn, generate a reply, report tokens.

    Returns (history, cleared_input, token_info_markdown, state), matching the
    Gradio outputs [chat, user_msg, token_info, state].
    """
    state = state or {}
    state["session_id"] = get_session_id(state)
    context_text = read_files(files)

    # When persistence is off, each turn starts from an empty transcript.
    if not persist_history:
        history = []
    history = history + [("user", user_msg)]

    # respond() already short-circuits model-identity questions with the canned
    # answer, so the identity branch previously duplicated here was removed
    # (behavior is identical).
    reply, tokens_in, tokens_out = respond(
        system_prompt, history[:-1], user_msg, model_name,
        temperature, top_p, max_tokens, context_text, tool_choice,
    )
    history.append(("assistant", reply))
    token_info = f"In: ~{tokens_in} | Out: ~{tokens_out} | Total: ~{tokens_in + tokens_out}"
    return history, "", token_info, state
|
|
|
def clear_chat(state):
    """Empty the chat display while keeping (or minting) the session id."""
    if not state:
        state = {}
    state["session_id"] = get_session_id(state)
    # Outputs map to [chat, state, token_info].
    return [], state, ""
|
|
|
# System-prompt presets selectable from the UI.
_PRESET_PROMPTS = {
    "Helpful Assistant": "You are a helpful, concise assistant.",
    "Creative Writer": "You are a creative writing assistant. Use vivid language and varied rhythm.",
    "Code Tutor": "You are a precise programming tutor. Provide clear, step-by-step guidance with examples.",
    "Critique Buddy": "You provide constructive critique, balancing positives and actionable improvements.",
}


def apply_preset(preset_name):
    """Return the preset system prompt for *preset_name*, or '' when unknown."""
    return _PRESET_PROMPTS.get(preset_name, "")
|
|
|
def export_history(history, state):
    """Dump the transcript to a timestamped JSON file; return a status message."""
    session_id = get_session_id(state or {})
    payload = {
        "session_id": session_id,
        "exported_at": format_timestamp(),
        "title": APP_TITLE,
        "history": [{"role": role, "content": content} for role, content in history],
    }
    # File name embeds a session-id prefix plus the epoch second for uniqueness.
    fname = f"gpt5_demo_{session_id[:8]}_{int(time.time())}.json"
    with open(fname, "w", encoding="utf-8") as out:
        json.dump(payload, out, ensure_ascii=False, indent=2)
    return f"Saved transcript to {fname}"
|
|
|
def summarize_text_action(input_text):
    """Quick stub 'summary': echo back the first 120 words of *input_text*."""
    if not (input_text and input_text.strip()):
        return "Provide text to summarize."
    words = input_text.strip().split()
    suffix = "..." if len(words) > 120 else ""
    return f"Summary (quick): {' '.join(words[:120])}{suffix}"
|
|
|
def summarize_url_action(url):
    """Stub URL summarizer: acknowledges the URL without fetching anything."""
    if url and url.strip():
        return f"(Stub) Would fetch and summarize: {url}"
    return "Provide a URL."
|
|
|
# --- UI layout -------------------------------------------------------------
# Top-level Gradio Blocks app: header, prompt/model controls, a chat column
# with send/clear/export actions, and a sidebar with attachments, tools, and
# presets. The triple-quoted CSS string is passed verbatim to Gradio.
with gr.Blocks(theme=gr.themes.Soft(), title=APP_TITLE, css="""

:root { --accent: #6c78ff; }

.gradio-container { max-width: 1000px !important; margin: 0 auto; }

#title { text-align: center; padding-top: 8px; }

.token-chip { background: #eef; border-radius: 999px; padding: 4px 10px; display: inline-block; }

""") as demo:

    # Header: app title and one-line description.
    gr.Markdown(f"# {APP_TITLE}", elem_id="title")

    gr.Markdown(APP_DESC)

    # Row 1: system prompt (left) and model selector (right).
    with gr.Row():

        with gr.Column(scale=3):

            system_prompt = gr.Textbox(label="System Prompt", placeholder="e.g., You are a helpful assistant.", lines=3)

        with gr.Column(scale=2):

            model_name = gr.Dropdown(label="Model", choices=["gpt5-small", "gpt5-medium", "gpt5-pro"], value="gpt5-medium")

    # Row 2: sampling controls and the history-persistence toggle.
    with gr.Row():

        temperature = gr.Slider(0.0, 1.5, value=0.7, step=0.05, label="Temperature")

        top_p = gr.Slider(0.05, 1.0, value=1.0, step=0.05, label="Top-p")

        max_tokens = gr.Slider(64, 4096, value=512, step=64, label="Max Tokens")

        persist_history = gr.Checkbox(label="Persist History", value=True)

    # Row 3: chat column (left) and attachments/tools column (right).
    with gr.Row():

        with gr.Column(scale=3):

            chat = gr.Chatbot(label="Conversation", avatar_images=(None, None), bubble_full_width=False, height=420, likeable=True, show_copy_button=True, render_markdown=True, show_share_button=False)

            user_msg = gr.Textbox(placeholder="Type your message and press Enter...", show_label=False, lines=2)

            with gr.Row():

                submit_btn = gr.Button("Send", variant="primary")

                clear_btn = gr.Button("Clear")

                export_btn = gr.Button("Export Transcript")

            # Displays the rough in/out token counts after each turn.
            token_info = gr.Markdown("")

        with gr.Column(scale=2):

            gr.Markdown("Attachments and Tools")

            files = gr.Files(label="Upload files (txt, md, etc.)", file_count="multiple", type="filepath")

            tool_choice = gr.Radio(choices=["None", "Summarize Text", "Summarize URL"], value="None", label="Tool")

            # Stand-alone quick tools, independent of the main chat flow.
            with gr.Accordion("Quick Tools", open=False):

                quick_text = gr.Textbox(label="Text to Summarize", lines=6)

                quick_sum_btn = gr.Button("Summarize Text (Quick)")

                quick_sum_out = gr.Markdown()

                url_box = gr.Textbox(label="URL to Summarize")

                quick_url_btn = gr.Button("Summarize URL (Quick)")

                quick_url_out = gr.Markdown()

            # One-click system-prompt presets.
            with gr.Accordion("Presets", open=False):

                preset = gr.Dropdown(choices=["Helpful Assistant", "Creative Writer", "Code Tutor", "Critique Buddy"], label="Apply Preset")

                apply_btn = gr.Button("Apply Preset to System Prompt")

    # Per-browser session state; holds the session id used for exports.
    state = gr.State({"session_id": str(uuid.uuid4())})

    # Wiring: Enter key and the Send button both run on_submit with the same
    # inputs/outputs.
    submit_evt = user_msg.submit(

        on_submit,

        inputs=[user_msg, system_prompt, model_name, temperature, top_p, max_tokens, files, tool_choice, state, chat, persist_history],

        outputs=[chat, user_msg, token_info, state]

    )

    submit_btn.click(

        on_submit,

        inputs=[user_msg, system_prompt, model_name, temperature, top_p, max_tokens, files, tool_choice, state, chat, persist_history],

        outputs=[chat, user_msg, token_info, state]

    )

    # Secondary actions: clear chat, export transcript, apply preset, quick tools.
    clear_btn.click(clear_chat, inputs=[state], outputs=[chat, state, token_info])

    export_btn.click(export_history, inputs=[chat, state], outputs=[token_info])

    apply_btn.click(apply_preset, inputs=[preset], outputs=[system_prompt])

    quick_sum_btn.click(summarize_text_action, inputs=[quick_text], outputs=[quick_sum_out])

    quick_url_btn.click(summarize_url_action, inputs=[url_box], outputs=[quick_url_out])
|
|
|
if __name__ == "__main__":

    # Launch with request queueing enabled, the HTTP API surface disabled,
    # bound on all interfaces at port 7860.
    demo.queue(api_open=False).launch(server_name="0.0.0.0", server_port=7860, show_api=False)
|
|