Abs6187 committed on
Commit
6d5283f
·
verified ·
1 Parent(s): 96173a7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -211
app.py CHANGED
@@ -1,220 +1,55 @@
1
- import os
2
- import json
3
- import time
4
- import uuid
5
- import re
6
  import gradio as gr
7
- from datetime import datetime
8
 
9
# OPENAI_API_KEY is supplied via the Hugging Face Space's secrets.
# NOTE(review): `api_key` is never referenced elsewhere in this file, so a
# missing secret should not crash the whole app at import time — use .get()
# rather than a hard KeyError lookup.
api_key = os.environ.get("OPENAI_API_KEY", "")


APP_TITLE = "GPT5 Demo"
APP_DESC = "A polished Gradio chat demo with presets, file context, tools, and export."
# Canned answer returned for model-identity questions (see is_model_identity_question).
MODEL_IDENTITY_ANSWER = "GPT5 Thinking Model"
17
-
18
def estimate_tokens(text: str) -> int:
    """Rough token estimate: ~4 characters per token, never less than 1."""
    approx = len(text) // 4
    return max(1, approx)
20
-
21
def format_timestamp(ts=None):
    """Format *ts* (a datetime; defaults to now) as 'YYYY-MM-DD HH:MM:SS'."""
    when = ts if ts is not None else datetime.now()
    return when.strftime("%Y-%m-%d %H:%M:%S")
23
-
24
def get_session_id(state):
    """Return the session id stored in *state*, or mint a fresh UUID4 string."""
    existing = state.get("session_id") if state else None
    return existing if existing else str(uuid.uuid4())
28
-
29
def truncate_context(context, max_chars=8000):
    """Return *context* unchanged when short enough; otherwise keep the head
    and tail halves with an explicit truncation marker between them."""
    if len(context) > max_chars:
        marker = "\n...\n[Context truncated]\n...\n"
        return context[: max_chars // 2] + marker + context[-max_chars // 2 :]
    return context
35
-
36
def is_model_identity_question(text: str) -> bool:
    """Return True when *text* looks like a question about the model's identity.

    Matching is case-insensitive and keyword-based; the final pattern is a
    broad catch-all for any mention of the word "model(s)".
    """
    if not text:
        return False
    t = text.lower().strip()
    patterns = [
        r"\bwhich\s+model\b",
        r"\bwhat\s+model\b",
        r"\bare\s+you\s+(the\s+)?model\b",
        r"\bmodel\s+name\b",
        r"\bmodel\s+are\s+you\b",
        r"\bare\s+you\s+gpt5\b",
        r"\bidentify\s+your\s+model\b",
        r"\breturn\s+model\b",
        r"\bwhat\s+are\s+you\b",
        r"\bwho\s+are\s+you\b",
        # BUG FIX: was r"\bmodel?\b", i.e. "mode" + optional "l", which also
        # matched the unrelated word "mode". "models?" matches only
        # "model"/"models", as intended.
        r"\bmodels?\b",
    ]
    return any(re.search(p, t) for p in patterns)
54
-
55
def respond(system_prompt, history, user_msg, model_name, temperature, top_p, max_tokens, context_text, tool_choice):
    """Produce the demo assistant reply and token estimates.

    Returns (response_text, tokens_in, tokens_out). Identity questions are
    short-circuited with the canned MODEL_IDENTITY_ANSWER.
    """
    if is_model_identity_question(user_msg):
        answer = MODEL_IDENTITY_ANSWER
        return answer, estimate_tokens(user_msg or ""), estimate_tokens(answer)

    # Flatten prior turns into a plain-text transcript.
    transcript = "".join(f"{role.capitalize()}: {msg}\n" for role, msg in history)

    # Optional preamble pieces: system prompt, attached files, tool request.
    context_parts = []
    if system_prompt:
        context_parts.append(f"System: {system_prompt}\n")
    if context_text:
        context_parts.append(f"[Attached Context]\n{truncate_context(context_text)}\n[/Attached Context]\n")
    if tool_choice and tool_choice != "None":
        context_parts.append(f"[Tool Requested: {tool_choice}]\n")
    prompt = f"{''.join(context_parts)}{transcript}User: {user_msg}\nAssistant:"

    if tool_choice == "Summarize Text":
        words = user_msg.split()
        tool_hint = "Summary: " + " ".join(words[:80]) + ("..." if len(words) > 80 else "")
    elif tool_choice == "Summarize URL":
        tool_hint = "URL summary: (stub) Provide a URL and I will summarize its content if fetching is connected."
    else:
        tool_hint = "Thanks for your message! This is a demo response."

    response = f"[Model: {model_name} | T={temperature:.2f}, p={top_p:.2f}, max_tokens={max_tokens}]\n{tool_hint}\n\nEcho: {user_msg}"
    return response, estimate_tokens(prompt), estimate_tokens(response)
83
-
84
def read_files(files):
    """Read every uploaded file as UTF-8 text (ignoring undecodable bytes)
    and return all contents joined, each prefixed with a per-file header."""
    if not files:
        return ""
    sections = []
    for entry in files:
        try:
            path = entry.name if hasattr(entry, "name") else str(entry)
            with open(path, "rb") as handle:
                raw = handle.read()
            try:
                text = raw.decode("utf-8", errors="ignore")
            except Exception:
                # decode(errors="ignore") shouldn't raise; kept as a fallback.
                text = str(raw)
            sections.append(f"\n=== File: {os.path.basename(path)} ===\n{text}\n")
        except Exception as err:
            sections.append(f"\n=== File Error ===\nCould not read {entry}: {err}\n")
    return "\n".join(sections)
101
-
102
def on_submit(user_msg, system_prompt, model_name, temperature, top_p, max_tokens, files, tool_choice, state, history, persist_history):
    """Chat submit handler.

    Appends the user turn (discarding prior turns when persistence is off),
    computes the assistant reply, and returns
    (history, cleared_textbox_value, token_info_markdown, state).
    """
    state = state or {}
    state["session_id"] = get_session_id(state)
    attached = read_files(files)

    base = history if persist_history else []
    turns = base + [("user", user_msg)]  # new list: don't mutate the caller's

    if is_model_identity_question(user_msg):
        reply = MODEL_IDENTITY_ANSWER
        tokens_in, tokens_out = estimate_tokens(user_msg), estimate_tokens(reply)
    else:
        reply, tokens_in, tokens_out = respond(
            system_prompt, turns[:-1], user_msg, model_name,
            temperature, top_p, max_tokens, attached, tool_choice,
        )
    turns.append(("assistant", reply))
    info = f"In: ~{tokens_in} | Out: ~{tokens_out} | Total: ~{tokens_in + tokens_out}"
    return turns, "", info, state
120
-
121
def clear_chat(state):
    """Reset the chat history and token info while keeping the session id
    (minting one if absent). Mutates *state* in place, as Gradio expects."""
    if not state:
        state = {}
    state["session_id"] = get_session_id(state)
    return [], state, ""
125
-
126
def apply_preset(preset_name):
    """Return the canned system prompt for *preset_name*, or '' if unknown."""
    pairs = (
        ("Helpful Assistant", "You are a helpful, concise assistant."),
        ("Creative Writer", "You are a creative writing assistant. Use vivid language and varied rhythm."),
        ("Code Tutor", "You are a precise programming tutor. Provide clear, step-by-step guidance with examples."),
        ("Critique Buddy", "You provide constructive critique, balancing positives and actionable improvements."),
    )
    return next((prompt for name, prompt in pairs if name == preset_name), "")
134
 
135
def export_history(history, state):
    """Write the transcript to a timestamped JSON file in the working
    directory and return a one-line status message."""
    state = state or {}
    session_id = get_session_id(state)
    payload = {
        "session_id": session_id,
        "exported_at": format_timestamp(),
        "title": APP_TITLE,
        "history": [{"role": role, "content": content} for role, content in history],
    }
    fname = f"gpt5_demo_{session_id[:8]}_{int(time.time())}.json"
    with open(fname, "w", encoding="utf-8") as out:
        json.dump(payload, out, ensure_ascii=False, indent=2)
    return f"Saved transcript to {fname}"
148
 
149
def summarize_text_action(input_text):
    """Quick 'summary': echo the first 120 words, with '...' when clipped."""
    if not input_text or not input_text.strip():
        return "Provide text to summarize."
    words = input_text.strip().split()
    clipped = " ".join(words[:120])
    suffix = "..." if len(words) > 120 else ""
    return f"Summary (quick): {clipped}{suffix}"
155
-
156
def summarize_url_action(url):
    """Placeholder URL summarizer: validates input only, no fetching wired."""
    if url and url.strip():
        return f"(Stub) Would fetch and summarize: {url}"
    return "Provide a URL."
160
-
161
# --- UI layout: Gradio Blocks app (nesting reconstructed; original
# indentation was lost in the diff dump — confirm against the live app). ---
with gr.Blocks(theme=gr.themes.Soft(), title=APP_TITLE, css="""
:root { --accent: #6c78ff; }
.gradio-container { max-width: 1000px !important; margin: 0 auto; }
#title { text-align: center; padding-top: 8px; }
.token-chip { background: #eef; border-radius: 999px; padding: 4px 10px; display: inline-block; }
""") as demo:
    gr.Markdown(f"# {APP_TITLE}", elem_id="title")
    gr.Markdown(APP_DESC)
    # Top row: system prompt (left) and model/sampling controls (right).
    with gr.Row():
        with gr.Column(scale=3):
            system_prompt = gr.Textbox(label="System Prompt", placeholder="e.g., You are a helpful assistant.", lines=3)
        with gr.Column(scale=2):
            model_name = gr.Dropdown(label="Model", choices=["gpt5-small", "gpt5-medium", "gpt5-pro"], value="gpt5-medium")
            with gr.Row():
                temperature = gr.Slider(0.0, 1.5, value=0.7, step=0.05, label="Temperature")
                top_p = gr.Slider(0.05, 1.0, value=1.0, step=0.05, label="Top-p")
            max_tokens = gr.Slider(64, 4096, value=512, step=64, label="Max Tokens")
            persist_history = gr.Checkbox(label="Persist History", value=True)
    # Main row: conversation (left) and attachments/tools (right).
    with gr.Row():
        with gr.Column(scale=3):
            chat = gr.Chatbot(label="Conversation", avatar_images=(None, None), bubble_full_width=False, height=420, show_copy_button=True, render_markdown=True, show_share_button=False)
            user_msg = gr.Textbox(placeholder="Type your message and press Enter...", show_label=False, lines=2)
            with gr.Row():
                submit_btn = gr.Button("Send", variant="primary")
                clear_btn = gr.Button("Clear")
                export_btn = gr.Button("Export Transcript")
            # Markdown slot used for token-count info and export status messages.
            token_info = gr.Markdown("")
        with gr.Column(scale=2):
            gr.Markdown("Attachments and Tools")
            files = gr.Files(label="Upload files (txt, md, etc.)", file_count="multiple", type="filepath")
            tool_choice = gr.Radio(choices=["None", "Summarize Text", "Summarize URL"], value="None", label="Tool")
            with gr.Accordion("Quick Tools", open=False):
                quick_text = gr.Textbox(label="Text to Summarize", lines=6)
                quick_sum_btn = gr.Button("Summarize Text (Quick)")
                quick_sum_out = gr.Markdown()
                url_box = gr.Textbox(label="URL to Summarize")
                quick_url_btn = gr.Button("Summarize URL (Quick)")
                quick_url_out = gr.Markdown()
            with gr.Accordion("Presets", open=False):
                preset = gr.Dropdown(choices=["Helpful Assistant", "Creative Writer", "Code Tutor", "Critique Buddy"], label="Apply Preset")
                apply_btn = gr.Button("Apply Preset to System Prompt")
    # Per-browser-session state; seeded with a fresh session id.
    state = gr.State({"session_id": str(uuid.uuid4())})
    # Enter key and Send button trigger the same handler with the same wiring.
    submit_evt = user_msg.submit(
        on_submit,
        inputs=[user_msg, system_prompt, model_name, temperature, top_p, max_tokens, files, tool_choice, state, chat, persist_history],
        outputs=[chat, user_msg, token_info, state]
    )
    submit_btn.click(
        on_submit,
        inputs=[user_msg, system_prompt, model_name, temperature, top_p, max_tokens, files, tool_choice, state, chat, persist_history],
        outputs=[chat, user_msg, token_info, state]
    )
    clear_btn.click(clear_chat, inputs=[state], outputs=[chat, state, token_info])
    # Export reuses the token_info slot to show the saved-file message.
    export_btn.click(export_history, inputs=[chat, state], outputs=[token_info])
    apply_btn.click(apply_preset, inputs=[preset], outputs=[system_prompt])
    quick_sum_btn.click(summarize_text_action, inputs=[quick_text], outputs=[quick_sum_out])
    quick_url_btn.click(summarize_url_action, inputs=[url_box], outputs=[quick_url_out])
218
 
219
if __name__ == "__main__":
    # queue() enables request queuing; api_open/show_api disabled to hide the REST API.
    # 0.0.0.0:7860 is the standard binding for a Hugging Face Space container.
    demo.queue(api_open=False).launch(server_name="0.0.0.0", server_port=7860, show_api=False)
 
 
 
 
 
 
1
import os

import gradio as gr
import requests
3
 
4
# Endpoint tried first for chat completions.
PRIMARY_API = "https://api.openai.com/v1/chat/completions"  # Replace with your main API
# Fallback endpoint used when the primary request fails (Host1).
FALLBACK_API = "https://api.chatanywhere.tech/v1/chat/completions"

# SECURITY/BUG FIX: never hard-code credentials in source. Read the key from
# the environment (e.g. a Space secret); the old placeholder remains the
# default so behavior is unchanged when no secret is configured.
API_KEY = os.getenv("OPENAI_API_KEY", "YOUR_API_KEY")
8
 
9
def query_api(messages):
    """POST a chat-completion request, trying PRIMARY_API first and then
    FALLBACK_API on any failure.

    messages: OpenAI-style list of {"role": ..., "content": ...} dicts.
    Returns the assistant's reply text, or a human-readable apology string
    when both hosts fail.
    """
    headers = {"Authorization": f"Bearer {API_KEY}"}
    payload = {
        "model": "gpt-3.5-turbo",  # model name accepted by both hosts
        "messages": messages,
    }
    attempts = (
        (PRIMARY_API, "Primary API failed"),
        (FALLBACK_API, "Fallback API also failed"),
    )
    for url, failure_label in attempts:
        try:
            resp = requests.post(url, headers=headers, json=payload, timeout=10)
            resp.raise_for_status()
            return resp.json()["choices"][0]["message"]["content"]
        except Exception as exc:  # any failure (network, HTTP, parsing) moves to the next host
            print(f"[ERROR] {failure_label}: {exc}")
    return " Both the primary and fallback services are unavailable right now."
32
+
33
# Gradio Chatbot UI (without unsupported params).
# NOTE(review): this component is created at module level, OUTSIDE any
# gr.Blocks() context. Gradio only displays components created (or
# explicitly .render()ed) inside a Blocks context — confirm it actually
# appears in the launched app.
chat = gr.Chatbot(
    label="Conversation",
    avatar_images=(None, None),  # (user, bot) avatars both disabled
    bubble_full_width=False,
    height=420,
    show_copy_button=True,
    render_markdown=True
)
42
+
43
def respond(user_message, history):
    """Gradio submit handler: query the API with the running conversation
    and append the (user, bot) pair to the chat history.

    history is Gradio's list of (user_text, bot_text) tuples; the updated
    list is returned so the Chatbot component re-renders.
    """
    history = history or []
    # BUG FIX: previously only the latest message was sent, so the model had
    # no memory of the conversation. Rebuild the full message list from the
    # stored (user, bot) turns before appending the new message.
    messages = []
    for user_turn, bot_turn in history:
        messages.append({"role": "user", "content": user_turn})
        if bot_turn is not None:
            messages.append({"role": "assistant", "content": bot_turn})
    messages.append({"role": "user", "content": user_message})
    bot_reply = query_api(messages)
    history.append((user_message, bot_reply))
    return history
48
+
49
with gr.Blocks() as demo:
    # BUG FIX: `chat` was instantiated at module level; a component built
    # outside a Blocks context is not displayed unless it is render()ed
    # inside one. .render() returns the component, so `chatbot` keeps the
    # same meaning for the event wiring below.
    chatbot = chat.render()
    msg = gr.Textbox()
    # Submitting the textbox sends (message, history) and updates the chatbot.
    msg.submit(respond, [msg, chatbot], chatbot)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
if __name__ == "__main__":
    # Start the Gradio app with default host/port settings.
    demo.launch()