Abs6187 committed on
Commit
9817165
·
verified ·
1 Parent(s): feb6937

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +210 -73
app.py CHANGED
@@ -1,81 +1,218 @@
1
  import os
 
 
 
 
2
  import gradio as gr
3
- import requests
4
-
5
- class GPT5Model:
6
- """
7
- GPT5Model handles interactions with the GPT-5 API.
8
- It includes system prompts, request construction, timeout handling,
9
- and graceful error responses if the API can't be reached.
10
- """
11
- def __init__(self, api_key: str):
12
- self.api_key = api_key
13
- self.system_prompt = (
14
- "You are GPT-5, the most advanced AI model available. "
15
- "Answer accurately, intelligently, and helpfully."
16
- )
17
-
18
- def generate_response(self, prompt: str) -> str:
19
- """
20
- Sends a prompt to the GPT-5 API and returns the response text.
21
- If there's a connection error, timeout, or invalid API key, returns a friendly error message.
22
- """
23
- url = "https://api.pplx.ai/v1/generate"
24
- headers = {
25
- "Authorization": f"Bearer {self.api_key}",
26
- "Content-Type": "application/json"
27
- }
28
- full_prompt = f"{self.system_prompt}\nUser: {prompt}\nGPT-5:"
29
- payload = {"prompt": full_prompt, "max_tokens": 500}
30
 
31
- try:
32
- response = requests.post(url, json=payload, headers=headers, timeout=15)
33
- except requests.exceptions.Timeout:
34
- return "Error: Request timed out. Please check your network or try again later."
35
- except requests.exceptions.ConnectionError:
36
- return "Error: Could not reach API. Please check network settings."
37
- except requests.exceptions.RequestException as e:
38
- return f"Error: Unexpected error occurred: {e}"
39
-
40
- if response.status_code == 401:
41
- return "Error: API key invalid or expired."
42
- if response.status_code != 200:
43
- return f"Error: API returned status {response.status_code}."
 
 
 
 
44
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  try:
46
- data = response.json()
47
- text = data.get("choices", [{}])[0].get("text", "").strip()
48
- if not text:
49
- return "Error: No response content. API key may be exhausted."
50
- return text
51
- except ValueError:
52
- return "Error: Could not parse API response."
53
-
54
- # === Load API Key ===
55
- api_key = os.getenv("PPLX_API_KEY")
56
- if not api_key:
57
- raise EnvironmentError("API key not found. Please set PPLX_API_KEY environment variable.")
58
-
59
- model = GPT5Model(api_key)
60
-
61
- def respond(message, chat_history):
62
- reply = model.generate_response(message)
63
- chat_history.append((message, reply))
64
- return "", chat_history
65
-
66
- # === Gradio UI ===
67
- with gr.Blocks(css="""
68
- #title {text-align: center; font-size: 28px; font-weight: bold;}
69
- #footer {text-align: center; font-size: 14px; color: gray;}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  """) as demo:
71
- gr.Markdown("<div id='title'>🚀 GPT-5 Model Interface</div>")
72
- chatbot = gr.Chatbot(label="Conversation with GPT-5", height=400)
73
  with gr.Row():
74
- txt = gr.Textbox(show_label=False, placeholder="Type your message here…", container=False)
75
- send_btn = gr.Button("Send", variant="primary")
76
- gr.Markdown("<div id='footer'>Powered by GPT-5 API Simulation | © 2025</div>")
77
-
78
- send_btn.click(respond, [txt, chatbot], [txt, chatbot])
79
- txt.submit(respond, [txt, chatbot], [txt, chatbot])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
 
81
- demo.launch()
 
 
1
  import os
2
+ import json
3
+ import time
4
+ import uuid
5
+ import re
6
  import gradio as gr
7
+ from datetime import datetime
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
# Method to set OPENAI_API_KEY in Hugging Face Space.
# NOTE(review): never hardcode a real API key in source — configure it as a
# Space secret / environment variable instead. This unconditional assignment
# also overwrites any key already present in the environment; confirm intent.
os.environ["OPENAI_API_KEY"] = "your_api_key_here"

# UI copy shown in the header, plus the canned answer returned whenever the
# user asks what model they are talking to (see is_model_identity_question).
APP_TITLE = "GPT5 Demo"
APP_DESC = "A polished Gradio chat demo with presets, file context, tools, and export."
MODEL_IDENTITY_ANSWER = "GPT5 Thinking Model"
15
+
16
def estimate_tokens(text: str) -> int:
    """Rough token estimate: about four characters per token, minimum 1."""
    return max(1, len(text) // 4)
18
+
19
def format_timestamp(ts=None):
    """Render *ts* (default: the current time) as 'YYYY-MM-DD HH:MM:SS'."""
    moment = ts or datetime.now()
    return moment.strftime("%Y-%m-%d %H:%M:%S")
21
+
22
def get_session_id(state):
    """Return the session id stored in *state*, or mint a fresh UUID4 string."""
    existing = (state or {}).get("session_id")
    return existing if existing else str(uuid.uuid4())
26
 
27
def truncate_context(context, max_chars=8000):
    """Shorten *context* to roughly *max_chars* characters.

    Text over the limit keeps its first and last halves with an elision
    marker in between; shorter text is returned untouched.
    """
    if len(context) > max_chars:
        marker = "\n...\n[Context truncated]\n...\n"
        context = context[: max_chars // 2] + marker + context[-max_chars // 2 :]
    return context
33
+
34
def is_model_identity_question(text: str) -> bool:
    """Return True when *text* looks like a question about the model's identity.

    Matching is case-insensitive and regex-based; empty or None input is False.
    """
    if not text:
        return False
    t = text.lower().strip()
    patterns = [
        r"\bwhich\s+model\b",
        r"\bwhat\s+model\b",
        r"\bare\s+you\s+(the\s+)?model\b",
        r"\bmodel\s+name\b",
        r"\bmodel\s+are\s+you\b",
        r"\bare\s+you\s+gpt5\b",
        r"\bidentify\s+your\s+model\b",
        r"\breturn\s+model\b",
        r"\bwhat\s+are\s+you\b",
        r"\bwho\s+are\s+you\b",
        # Catch-all for any mention of "model". The previous pattern was
        # r"\bmodel?\b": the optional "l" also matched the word "mode"
        # (e.g. "enable dark mode"), wrongly triggering the identity answer.
        r"\bmodel\b",
    ]
    return any(re.search(p, t) for p in patterns)
52
+
53
def respond(system_prompt, history, user_msg, model_name, temperature, top_p, max_tokens, context_text, tool_choice):
    """Produce the demo reply for *user_msg*.

    Returns (response_text, estimated_input_tokens, estimated_output_tokens).
    No real model is called; the response echoes the input plus a stubbed
    tool hint, except identity questions which get the canned answer.
    """
    # Identity questions short-circuit to the fixed answer.
    if is_model_identity_question(user_msg):
        answer = MODEL_IDENTITY_ANSWER
        return answer, estimate_tokens(user_msg or ""), estimate_tokens(answer)

    # Flatten the (role, message) history into plain text lines.
    history_text = "".join(f"{role.capitalize()}: {msg}\n" for role, msg in history)

    # Assemble the prompt preamble: system prompt, attachments, tool request.
    parts = []
    if system_prompt:
        parts.append(f"System: {system_prompt}\n")
    if context_text:
        parts.append(f"[Attached Context]\n{truncate_context(context_text)}\n[/Attached Context]\n")
    if tool_choice and tool_choice != "None":
        parts.append(f"[Tool Requested: {tool_choice}]\n")
    full_context = "".join(parts)
    prompt = f"{full_context}{history_text}User: {user_msg}\nAssistant:"

    # Pick the stubbed tool output.
    if tool_choice == "Summarize Text":
        words = user_msg.split()
        tool_hint = "Summary: " + " ".join(words[:80]) + ("..." if len(words) > 80 else "")
    elif tool_choice == "Summarize URL":
        tool_hint = "URL summary: (stub) Provide a URL and I will summarize its content if fetching is connected."
    else:
        tool_hint = "Thanks for your message! This is a demo response."

    response = f"[Model: {model_name} | T={temperature:.2f}, p={top_p:.2f}, max_tokens={max_tokens}]\n{tool_hint}\n\nEcho: {user_msg}"
    return response, estimate_tokens(prompt), estimate_tokens(response)
81
+
82
def read_files(files):
    """Read each uploaded file and return its contents as one labelled text blob.

    *files* may be None/empty (returns ""). Each entry may be a plain path
    string or a Gradio file wrapper exposing a ``.name`` path attribute.
    Files are read as bytes and decoded as UTF-8 with undecodable bytes
    dropped; unreadable files are reported inline instead of raising.
    """
    if not files:
        return ""
    texts = []
    for f in files:
        try:
            path = f.name if hasattr(f, "name") else str(f)
            with open(path, "rb") as fh:
                raw = fh.read()
            # decode(errors="ignore") cannot raise UnicodeDecodeError, so the
            # original inner try/except fallback to str(raw) was dead code.
            text = raw.decode("utf-8", errors="ignore")
            texts.append(f"\n=== File: {os.path.basename(path)} ===\n{text}\n")
        except Exception as e:
            texts.append(f"\n=== File Error ===\nCould not read {f}: {e}\n")
    return "\n".join(texts)
99
+
100
def on_submit(user_msg, system_prompt, model_name, temperature, top_p, max_tokens, files, tool_choice, state, history, persist_history):
    """Handle a chat submission: extend history, build a reply and token stats.

    Returns (history, cleared_textbox_value, token_info_markdown, state).

    NOTE(review): history is accumulated as (role, message) tuples but the
    return value is fed straight into gr.Chatbot, which expects
    (user_msg, bot_msg) pairs — each turn would render as two malformed
    rows. Confirm against the installed Gradio version and convert to pairs
    (or the "messages" format) before display.
    """
    state = state or {}
    state["session_id"] = get_session_id(state)
    # Uploaded files become extra prompt context.
    context_text = read_files(files)
    if not persist_history:
        history = []
    history = history + [("user", user_msg)]
    # Identity questions are answered directly. This duplicates the same
    # check inside respond(); kept here so no prompt/file/token work is done.
    if is_model_identity_question(user_msg):
        reply = MODEL_IDENTITY_ANSWER
        tokens_in = estimate_tokens(user_msg)
        tokens_out = estimate_tokens(reply)
        history.append(("assistant", reply))
        token_info = f"In: ~{tokens_in} | Out: ~{tokens_out} | Total: ~{tokens_in + tokens_out}"
        return history, "", token_info, state
    # history[:-1] excludes the user message just appended; respond() adds
    # the current message to the prompt itself.
    reply, tokens_in, tokens_out = respond(system_prompt, history[:-1], user_msg, model_name, temperature, top_p, max_tokens, context_text, tool_choice)
    history.append(("assistant", reply))
    token_info = f"In: ~{tokens_in} | Out: ~{tokens_out} | Total: ~{tokens_in + tokens_out}"
    return history, "", token_info, state
118
+
119
def clear_chat(state):
    """Reset the conversation and token display; keep or mint the session id."""
    state = state or {}
    # Inlined get_session_id(): reuse the stored id when present.
    if not state.get("session_id"):
        state["session_id"] = str(uuid.uuid4())
    return [], state, ""
123
+
124
def apply_preset(preset_name):
    """Map a preset name to its system prompt; unknown names yield ''."""
    return {
        "Helpful Assistant": "You are a helpful, concise assistant.",
        "Creative Writer": "You are a creative writing assistant. Use vivid language and varied rhythm.",
        "Code Tutor": "You are a precise programming tutor. Provide clear, step-by-step guidance with examples.",
        "Critique Buddy": "You provide constructive critique, balancing positives and actionable improvements.",
    }.get(preset_name, "")
132
+
133
def export_history(history, state):
    """Write the (role, message) history to a JSON transcript file on disk.

    The filename embeds the session id prefix and a unix timestamp; the
    returned string reports where the transcript was saved.
    """
    state = state or {}
    session_id = get_session_id(state)
    transcript = {
        "session_id": session_id,
        "exported_at": format_timestamp(),
        "title": APP_TITLE,
        "history": [{"role": role, "content": content} for role, content in history],
    }
    fname = f"gpt5_demo_{session_id[:8]}_{int(time.time())}.json"
    with open(fname, "w", encoding="utf-8") as fh:
        json.dump(transcript, fh, ensure_ascii=False, indent=2)
    return f"Saved transcript to {fname}"
146
+
147
def summarize_text_action(input_text):
    """Return a quick summary: the first 120 words of *input_text*."""
    if not (input_text and input_text.strip()):
        return "Provide text to summarize."
    words = input_text.strip().split()
    suffix = "..." if len(words) > 120 else ""
    return f"Summary (quick): {' '.join(words[:120])}{suffix}"
153
+
154
def summarize_url_action(url):
    """Stub URL summarizer: echoes the URL it would fetch."""
    if url and url.strip():
        return f"(Stub) Would fetch and summarize: {url}"
    return "Provide a URL."
158
+
159
# === Gradio UI ===
# NOTE(review): Chatbot kwargs `avatar_images`, `bubble_full_width` and
# `likeable` are version-dependent in Gradio (deprecated/removed in newer
# releases) — confirm against the pinned Gradio version.
with gr.Blocks(theme=gr.themes.Soft(), title=APP_TITLE, css="""
:root { --accent: #6c78ff; }
.gradio-container { max-width: 1000px !important; margin: 0 auto; }
#title { text-align: center; padding-top: 8px; }
.token-chip { background: #eef; border-radius: 999px; padding: 4px 10px; display: inline-block; }
""") as demo:
    # Header.
    gr.Markdown(f"# {APP_TITLE}", elem_id="title")
    gr.Markdown(APP_DESC)
    # System prompt + model selection.
    with gr.Row():
        with gr.Column(scale=3):
            system_prompt = gr.Textbox(label="System Prompt", placeholder="e.g., You are a helpful assistant.", lines=3)
        with gr.Column(scale=2):
            model_name = gr.Dropdown(label="Model", choices=["gpt5-small", "gpt5-medium", "gpt5-pro"], value="gpt5-medium")
    # Generation controls.
    with gr.Row():
        temperature = gr.Slider(0.0, 1.5, value=0.7, step=0.05, label="Temperature")
        top_p = gr.Slider(0.05, 1.0, value=1.0, step=0.05, label="Top-p")
        max_tokens = gr.Slider(64, 4096, value=512, step=64, label="Max Tokens")
        persist_history = gr.Checkbox(label="Persist History", value=True)
    # Main area: chat on the left, attachments/tools/presets on the right.
    with gr.Row():
        with gr.Column(scale=3):
            chat = gr.Chatbot(label="Conversation", avatar_images=(None, None), bubble_full_width=False, height=420, likeable=True, show_copy_button=True, render_markdown=True, show_share_button=False)
            user_msg = gr.Textbox(placeholder="Type your message and press Enter...", show_label=False, lines=2)
            with gr.Row():
                submit_btn = gr.Button("Send", variant="primary")
                clear_btn = gr.Button("Clear")
                export_btn = gr.Button("Export Transcript")
            # Also reused for export status messages (see export_btn.click).
            token_info = gr.Markdown("")
        with gr.Column(scale=2):
            gr.Markdown("Attachments and Tools")
            files = gr.Files(label="Upload files (txt, md, etc.)", file_count="multiple", type="filepath")
            tool_choice = gr.Radio(choices=["None", "Summarize Text", "Summarize URL"], value="None", label="Tool")
            with gr.Accordion("Quick Tools", open=False):
                quick_text = gr.Textbox(label="Text to Summarize", lines=6)
                quick_sum_btn = gr.Button("Summarize Text (Quick)")
                quick_sum_out = gr.Markdown()
                url_box = gr.Textbox(label="URL to Summarize")
                quick_url_btn = gr.Button("Summarize URL (Quick)")
                quick_url_out = gr.Markdown()
            with gr.Accordion("Presets", open=False):
                preset = gr.Dropdown(choices=["Helpful Assistant", "Creative Writer", "Code Tutor", "Critique Buddy"], label="Apply Preset")
                apply_btn = gr.Button("Apply Preset to System Prompt")
    # Per-browser-session state carrying the session id.
    state = gr.State({"session_id": str(uuid.uuid4())})
    # Event wiring: Enter in the textbox and the Send button share one handler.
    # NOTE(review): submit_evt is never used afterwards — assignment could be dropped.
    submit_evt = user_msg.submit(
        on_submit,
        inputs=[user_msg, system_prompt, model_name, temperature, top_p, max_tokens, files, tool_choice, state, chat, persist_history],
        outputs=[chat, user_msg, token_info, state]
    )
    submit_btn.click(
        on_submit,
        inputs=[user_msg, system_prompt, model_name, temperature, top_p, max_tokens, files, tool_choice, state, chat, persist_history],
        outputs=[chat, user_msg, token_info, state]
    )
    clear_btn.click(clear_chat, inputs=[state], outputs=[chat, state, token_info])
    export_btn.click(export_history, inputs=[chat, state], outputs=[token_info])
    apply_btn.click(apply_preset, inputs=[preset], outputs=[system_prompt])
    quick_sum_btn.click(summarize_text_action, inputs=[quick_text], outputs=[quick_sum_out])
    quick_url_btn.click(summarize_url_action, inputs=[url_box], outputs=[quick_url_out])

# Script entry point: queue with the public API disabled, listen on all
# interfaces at port 7860 (the standard Hugging Face Spaces port).
if __name__ == "__main__":
    demo.queue(api_open=False).launch(server_name="0.0.0.0", server_port=7860, show_api=False)