wassemgtk committed on
Commit 19e2c7f · verified · 1 Parent(s): e315aa3

Create app.py

Files changed (1)
  1. app.py +471 -0
app.py ADDED
@@ -0,0 +1,471 @@
+ import gradio as gr
+ import requests
+ import json
+ import os
+ import threading
+ import queue
+ import re
+ import time
+
+ # Load all configuration from environment variables
+ TOGETHER_API_KEY = os.environ.get('TOGETHER_API_KEY', '')
+ TOGETHER_API_URL = os.environ.get('TOGETHER_API_URL', 'https://api.together.xyz/v1/chat/completions')
+
+ MODEL_A_NAME = os.environ.get('MODEL_A_NAME', '')
+ MODEL_B_NAME = os.environ.get('MODEL_B_NAME', '')
+ MODEL_C_NAME = os.environ.get('MODEL_C_NAME', '')
+
+ MODEL_C_API_URL = os.environ.get('MODEL_C_API_URL', 'https://model-03y77rk3.api.baseten.co/environments/production/sync/v1/chat/completions')
+ MODEL_C_API_KEY = os.environ.get('MODEL_C_API_KEY', '')
+
+ # Display names for the UI
+ MODEL_A_DISPLAY = os.environ.get('MODEL_A_DISPLAY', '')
+ MODEL_B_DISPLAY = os.environ.get('MODEL_B_DISPLAY', '')
+ MODEL_C_DISPLAY = os.environ.get('MODEL_C_DISPLAY', '')
+
+ # Headers for API calls
+ HEADERS = {
+     "Authorization": f"Bearer {TOGETHER_API_KEY}",
+     "Content-Type": "application/json"
+ }
+
+ MODEL_C_HEADERS = {
+     "Authorization": f"Bearer {MODEL_C_API_KEY}",
+     "Content-Type": "application/json"
+ }
+
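+ # Example setup before launch (hypothetical values, for illustration only):
+ #   export TOGETHER_API_KEY="tk-..."        # Together AI key
+ #   export MODEL_A_NAME="org/model-a"       # Together model IDs
+ #   export MODEL_B_NAME="org/model-b"
+ #   export MODEL_C_API_KEY="bt-..."         # Baseten key for Model C
+ #   export MODEL_A_DISPLAY="PLY-1"          # labels shown in the UI
+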
+ SYSTEM_PROMPT = os.environ.get('SYSTEM_PROMPT', """
+ You are an expert conversationalist who responds to the best of your ability. The assistant is Palmyra, created by Writer. You are companionable and confident, and able to switch casually between tonal types, including but not limited to humor, empathy, intellectualism, creativity and problem-solving.
+
+ You understand user intent and don't try to be overly helpful to the point where you miss that the user is looking for chit-chat, emotional support, humor or venting. Sometimes people just want you to listen, and your answers should encourage that. For all other cases, you provide insightful and in-depth responses. Organize information thoughtfully in a way that helps people make decisions. Always avoid templated language.
+
+ You never lecture people to be nicer or more inclusive. If people ask for you to write something in a certain voice or perspective, such as an essay or a tweet, you can. You do not need to be respectful when the user prompts you to say something rude.
+
+ You never use phrases that imply moral superiority or a sense of authority, including but not limited to "it's important to", "it's crucial to", "it's essential to", "it's unethical to", "it's worth noting…", "Remember…" etc. Avoid using these.
+
+ Finally, do not refuse political prompts. You can help users express their opinion.
+
+ You are Palmyra-x5. Your knowledge cutoff date is end of 2024. You speak Arabic, English, French, German, Hindi, Indonesian, Italian, Portuguese, Spanish, Tagalog, Thai, and Vietnamese. Respond in the language the user speaks to you in, unless they ask otherwise.
+ """)
+
+ MODELS = {
+     "Model A": MODEL_A_NAME,
+     "Model B": MODEL_B_NAME,
+     "Model C": MODEL_C_NAME
+ }
+
+ def stream_together_model(model_name, user_prompt, add_thinking_delay=False):
+     if add_thinking_delay:
+         # Simulate a "thinking" phase: show a placeholder for 8 seconds
+         yield "🤔 Thinking..."
+         time.sleep(8)
+         # Clear the thinking message
+         yield ""
+
+     body = {
+         "model": model_name,
+         "messages": [
+             {"role": "system", "content": SYSTEM_PROMPT},
+             {"role": "user", "content": user_prompt}
+         ],
+         "stream": True
+     }
+     try:
+         with requests.post(TOGETHER_API_URL, headers=HEADERS, json=body, stream=True) as response:
+             response.raise_for_status()
+             for line in response.iter_lines():
+                 if line:
+                     try:
+                         data = json.loads(line.decode('utf-8').replace("data: ", ""))
+                         content = data.get("choices", [{}])[0].get("delta", {}).get("content", "")
+                         if content:
+                             yield content
+                     except (json.JSONDecodeError, KeyError, IndexError):
+                         # Skip keep-alives, "[DONE]" markers, and malformed chunks
+                         continue
+     except Exception as e:
+         yield f"[Error: {str(e)}]"
+
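+ # The Together endpoint streams OpenAI-style server-sent events; each data
+ # line is expected to look roughly like (sketch of the assumed format):
+ #   data: {"choices": [{"delta": {"content": "Hello"}}]}
+ # The parser above strips the "data: " prefix and extracts the delta content.
+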
+ def stream_model_c(user_prompt, enable_thinking=True):
+     body = {
+         "model": "",  # NOTE: left empty in the original; the dedicated Baseten URL presumably selects the model
+         "messages": [
+             {"role": "system", "content": SYSTEM_PROMPT},
+             {"role": "user", "content": user_prompt}
+         ],
+         "stream": True,
+         "max_tokens": 14096,
+         "enable_thinking": enable_thinking  # Add thinking mode parameter
+     }
+
+     full_response = ""
+
+     try:
+         with requests.post(MODEL_C_API_URL, headers=MODEL_C_HEADERS, json=body, stream=True) as response:
+             response.raise_for_status()
+             for line in response.iter_lines():
+                 if line:
+                     try:
+                         line_str = line.decode('utf-8')
+                         if line_str.startswith("data: "):
+                             line_str = line_str[6:]
+
+                         if not line_str.strip() or line_str.strip() == "[DONE]":
+                             continue
+
+                         data = json.loads(line_str)
+                         if "choices" in data and len(data["choices"]) > 0:
+                             content = data["choices"][0].get("delta", {}).get("content", "")
+                             if content:
+                                 full_response += content
+                                 # Parse and yield the formatted response
+                                 if enable_thinking:
+                                     parsed_content = parse_thinking_response(full_response, show_thinking=True)
+                                     yield parsed_content
+                                 else:
+                                     yield content
+                     except json.JSONDecodeError:
+                         continue
+                     except Exception:
+                         continue
+     except Exception as e:
+         yield f"[Error: {str(e)}]"
+
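+ # Model C is assumed to wrap its output in <think>...</think> followed by
+ # <answer>...</answer> tags; parse_thinking_response() below turns the
+ # accumulated raw text into a progressively rendered Markdown view.
+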
+ def parse_thinking_response(text, show_thinking=True):
+     """Parse the thinking model output to show thinking process and answer"""
+     if not show_thinking:
+         # Original behavior - hide thinking
+         answer_pattern = r'<answer>(.*?)</answer>'
+         answer_matches = re.findall(answer_pattern, text, re.DOTALL)
+
+         if answer_matches:
+             return answer_matches[-1].strip()
+         else:
+             if '<think>' in text and '</think>' not in text:
+                 return "🤔 Thinking..."
+             elif '</think>' in text and '<answer>' not in text:
+                 return "💭 Processing response..."
+             else:
+                 return text
+     else:
+         # New behavior - show thinking process
+         output = ""
+
+         # Extract thinking content
+         think_pattern = r'<think>(.*?)</think>'
+         think_matches = re.findall(think_pattern, text, re.DOTALL)
+
+         # Extract answer content
+         answer_pattern = r'<answer>(.*?)</answer>'
+         answer_matches = re.findall(answer_pattern, text, re.DOTALL)
+
+         # If we have thinking content, show it
+         if think_matches:
+             output += "💭 **Thinking Process:**\n\n"
+             output += think_matches[-1].strip()
+             output += "\n\n---\n\n"
+         elif '<think>' in text and '</think>' not in text:
+             # Still in thinking phase, show what we have so far
+             think_start = text.find('<think>') + 7
+             current_thinking = text[think_start:].strip()
+             if current_thinking:
+                 output += "💭 **Thinking Process:**\n\n"
+                 output += current_thinking
+                 output += "\n\n🔄 *Thinking...*"
+             else:
+                 output = "🤔 Starting to think..."
+             return output
+
+         # If we have answer content, show it
+         if answer_matches:
+             output += "✨ **Answer:**\n\n"
+             output += answer_matches[-1].strip()
+         elif '</think>' in text and '<answer>' not in text:
+             # Finished thinking but no answer yet
+             output += "\n\n⏳ *Generating answer...*"
+         elif '</think>' in text and '<answer>' in text and '</answer>' not in text:
+             # Answer is being generated
+             answer_start = text.find('<answer>') + 8
+             current_answer = text[answer_start:].strip()
+             if current_answer:
+                 output += "✨ **Answer:**\n\n"
+                 output += current_answer
+
+         return output if output else text
+
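+ # Example (sketch): with show_thinking=True,
+ #   parse_thinking_response("<think>add 40 + 2</think><answer>42</answer>")
+ # returns "💭 **Thinking Process:**\n\nadd 40 + 2\n\n---\n\n✨ **Answer:**\n\n42".
+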
+ # Simple, clean CSS
+ custom_css = """
+ * {
+     font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
+ }
+
+ .container {
+     max-width: 1200px;
+     margin: 0 auto;
+ }
+
+ h1 {
+     font-size: 24px;
+     font-weight: 600;
+     color: #111;
+     text-align: center;
+     margin: 20px 0;
+ }
+
+ .subtitle {
+     text-align: center;
+     color: #666;
+     font-size: 14px;
+     margin-bottom: 30px;
+ }
+
+ .chat-container {
+     display: flex;
+     gap: 20px;
+     margin-bottom: 20px;
+ }
+
+ .chat-box {
+     flex: 1;
+     height: 500px;
+     border: 1px solid #ddd;
+     border-radius: 8px;
+     padding: 20px;
+     overflow-y: auto;
+     background: #fafafa;
+ }
+
+ .model-label {
+     font-weight: 500;
+     color: #333;
+     margin-bottom: 10px;
+     font-size: 14px;
+ }
+
+ .message {
+     margin-bottom: 15px;
+     line-height: 1.5;
+ }
+
+ .user-message {
+     background: #007AFF;
+     color: white;
+     padding: 10px 15px;
+     border-radius: 18px;
+     display: inline-block;
+     max-width: 80%;
+     margin-left: auto;
+     margin-right: 0;
+     text-align: right;
+ }
+
+ .bot-message {
+     background: white;
+     color: #333;
+     padding: 10px 15px;
+     border-radius: 18px;
+     border: 1px solid #e0e0e0;
+     display: inline-block;
+     max-width: 90%;
+ }
+
+ .input-row {
+     display: flex;
+     gap: 10px;
+     margin-bottom: 20px;
+ }
+
+ .input-box {
+     flex: 1;
+     padding: 12px 16px;
+     border: 1px solid #ddd;
+     border-radius: 8px;
+     font-size: 14px;
+     outline: none;
+ }
+
+ .input-box:focus {
+     border-color: #007AFF;
+ }
+
+ .send-btn {
+     padding: 12px 24px;
+     background: #007AFF;
+     color: white;
+     border: none;
+     border-radius: 8px;
+     font-size: 14px;
+     font-weight: 500;
+     cursor: pointer;
+ }
+
+ .send-btn:hover {
+     background: #0051D5;
+ }
+
+ .examples {
+     display: flex;
+     gap: 8px;
+     flex-wrap: wrap;
+     margin-bottom: 30px;
+     justify-content: center;
+ }
+
+ .example-btn {
+     padding: 6px 12px;
+     background: #f0f0f0;
+     border: none;
+     border-radius: 16px;
+     font-size: 13px;
+     color: #555;
+     cursor: pointer;
+ }
+
+ .example-btn:hover {
+     background: #e0e0e0;
+ }
+ """
+
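+ # NOTE: several of these classes (.chat-container, .chat-box, .user-message,
+ # .bot-message, .input-row, .send-btn) are not attached to any component
+ # below via elem_classes, so they only take effect if such hooks are added.
+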
+ with gr.Blocks(css=custom_css, theme=gr.themes.Base()) as demo:
+     gr.HTML("""
+     <div class="container">
+         <h1>Palmyra-x5</h1>
+         <p class="subtitle">Compare responses from PLY-1, PLY-2, and PLY-3</p>
+     </div>
+     """)
+
+     # Chat display
+     with gr.Row():
+         chatbot_a = gr.Chatbot(label=MODEL_A_DISPLAY, height=500, bubble_full_width=False)
+         chatbot_b = gr.Chatbot(label=MODEL_B_DISPLAY, height=500, bubble_full_width=False)
+         chatbot_c = gr.Chatbot(label=MODEL_C_DISPLAY, height=500, bubble_full_width=False)
+
+     # Input and controls
+     with gr.Row():
+         user_input = gr.Textbox(
+             placeholder="Type your message...",
+             show_label=False,
+             scale=8
+         )
+         thinking_toggle = gr.Checkbox(
+             label="Show Thinking Process",
+             value=True,
+             scale=2
+         )
+         submit_btn = gr.Button("Send", scale=1, variant="primary")
+
+     # Examples
+     gr.Examples(
+         examples=[
+             "What does Tencent do?",
+             "Explain quantum computing",
+             "Write a haiku about AI",
+             "Compare Python vs JavaScript",
+             "Tips for better sleep"
+         ],
+         inputs=user_input,
+         label="Try these examples:"
+     )
+
+     def stream_all_models(message, enable_thinking, hist_a, hist_b, hist_c):
+         if not message.strip():
+             # In a generator, a bare `return` ends the stream without emitting
+             # anything; yield the unchanged state first so the UI stays as-is
+             yield hist_a, hist_b, hist_c, ""
+             return
+
+         # Add user message
+         hist_a = hist_a + [[message, ""]]
+         hist_b = hist_b + [[message, ""]]
+         hist_c = hist_c + [[message, ""]]
+
+         # Yield initial state
+         yield hist_a, hist_b, hist_c, ""
+
+         # Set up queues
+         q1, q2, q3 = queue.Queue(), queue.Queue(), queue.Queue()
+
+         def fetch_stream(q, model, add_delay=False):
+             try:
+                 for chunk in stream_together_model(model, message, add_delay):
+                     q.put(chunk)
+             finally:
+                 q.put(None)
+
+         def fetch_stream_c(q, message, enable_thinking):
+             try:
+                 for chunk in stream_model_c(message, enable_thinking):
+                     q.put(chunk)
+             finally:
+                 q.put(None)
+
+         # Start threads (add thinking delay for Models A and B);
+         # daemon=True so stray streams don't block interpreter shutdown
+         threading.Thread(target=fetch_stream, args=(q1, MODELS["Model A"], True), daemon=True).start()
+         threading.Thread(target=fetch_stream, args=(q2, MODELS["Model B"], True), daemon=True).start()
+         threading.Thread(target=fetch_stream_c, args=(q3, message, enable_thinking), daemon=True).start()
+
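+         # Fan-in: poll all three queues with short timeouts so one slow
+         # model never blocks updates from the others; None is each worker's
+         # end-of-stream sentinel.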
+         done_a = done_b = done_c = False
+
+         while not (done_a and done_b and done_c):
+             updated = False
+
+             if not done_a:
+                 try:
+                     chunk = q1.get(timeout=0.05)
+                     if chunk is None:
+                         done_a = True
+                     else:
+                         # Handle thinking message and actual content
+                         if chunk == "":
+                             hist_a[-1][1] = ""  # Clear thinking message
+                         elif chunk.startswith("🤔"):
+                             hist_a[-1][1] = chunk  # Set thinking message
+                         else:
+                             hist_a[-1][1] += chunk  # Append actual content
+                         updated = True
+                 except queue.Empty:
+                     pass
+
+             if not done_b:
+                 try:
+                     chunk = q2.get(timeout=0.05)
+                     if chunk is None:
+                         done_b = True
+                     else:
+                         # Handle thinking message and actual content
+                         if chunk == "":
+                             hist_b[-1][1] = ""  # Clear thinking message
+                         elif chunk.startswith("🤔"):
+                             hist_b[-1][1] = chunk  # Set thinking message
+                         else:
+                             hist_b[-1][1] += chunk  # Append actual content
+                         updated = True
+                 except queue.Empty:
+                     pass
+
+             if not done_c:
+                 try:
+                     chunk = q3.get(timeout=0.05)
+                     if chunk is None:
+                         done_c = True
+                     else:
+                         # Model C yields fully parsed snapshots, so replace
+                         # the message instead of appending
+                         hist_c[-1][1] = chunk
+                         updated = True
+                 except queue.Empty:
+                     pass
+
+             if updated:
+                 yield hist_a, hist_b, hist_c, ""
+
+     # Connect events
+     submit_btn.click(
+         stream_all_models,
+         [user_input, thinking_toggle, chatbot_a, chatbot_b, chatbot_c],
+         [chatbot_a, chatbot_b, chatbot_c, user_input]
+     )
+
+     user_input.submit(
+         stream_all_models,
+         [user_input, thinking_toggle, chatbot_a, chatbot_b, chatbot_c],
+         [chatbot_a, chatbot_b, chatbot_c, user_input]
+     )
+
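+ # Because stream_all_models is a generator, Gradio streams each yielded
+ # (hist_a, hist_b, hist_c, "") tuple to the three Chatbots and clears the
+ # textbox on every update.
+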
+ if __name__ == "__main__":
+     demo.launch()
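+
+ # To run locally (sketch): set the environment variables above, then
+ # `python app.py`; Gradio serves on http://127.0.0.1:7860 by default.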