ofirzaf committed
Commit 49d87c4 · 1 Parent(s): bc34ee0

Initial commit

Files changed (7)
  1. README.md +1 -1
  2. app.py +453 -0
  3. conversation.py +441 -0
  4. gradio_css.py +71 -0
  5. gradio_patch.py +168 -0
  6. requirements.txt +4 -0
  7. utils.py +50 -0
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Q8 Chat
-emoji: 🌍
+emoji: 🏃
 colorFrom: green
 colorTo: yellow
 sdk: gradio
app.py ADDED
@@ -0,0 +1,453 @@
"""
The gradio demo server for chatting with a single model.
"""

import datetime
import json
import os
import time
import uuid
import logging

import gradio as gr
import requests

from conversation import get_conv_template
from gradio_patch import Chatbot as grChatbot
from gradio_css import code_highlight_css
from utils import (
    WORKER_API_TIMEOUT,
    ErrorCode,
    server_error_msg,
    get_window_url_params_js,
)


logging.basicConfig(
    format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


headers = {"User-Agent": "fastchat Client"}

no_change_btn = gr.Button.update()
enable_btn = gr.Button.update(interactive=True)
disable_btn = gr.Button.update(interactive=False)

controller_url = os.environ['controller_url']
concurrency_count = os.environ['concurrency_count']

learn_more_md = ("""
### Notice
- All the models in this demo run on 4th Generation Intel® Xeon® (Sapphire Rapids) utilizing AMX operations and mixed precision inference
- This demo is based on the FastChat demo server. [[GitHub]](https://github.com/lm-sys/FastChat)

### Terms of use
By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It can produce factually incorrect output, and should not be relied on to produce factually accurate information. The service only provides limited safety measures and may generate lewd, biased or otherwise offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.

### License
The service is a research preview intended for non-commercial use only, subject to the [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
""")


def get_model_list(controller_url):
    ret = requests.post(controller_url + "/refresh_all_workers")
    assert ret.status_code == 200
    ret = requests.post(controller_url + "/list_models")
    models = ret.json()["models"]
    models.sort()
    logger.info(f"Models: {models}")
    return models


def load_demo_refresh_model_list(url_params):
    models = get_model_list(controller_url)
    selected_model = models[0] if len(models) > 0 else ""
    if "model" in url_params:
        model = url_params["model"]
        if model in models:
            selected_model = model

    dropdown_update = gr.Dropdown.update(
        choices=models, value=selected_model, visible=True
    )

    state = None
    return (
        state,
        dropdown_update,
        gr.Chatbot.update(visible=True),
        gr.Textbox.update(visible=True),
        gr.Button.update(visible=True),
        gr.Row.update(visible=True),
        gr.Accordion.update(visible=True),
    )


def load_demo_reload_model(url_params, request: gr.Request):
    logger.info(
        f"load_demo_reload_model. ip: {request.client.host}. params: {url_params}"
    )
    return load_demo_refresh_model_list(url_params)


def load_demo_single(models, url_params):
    dropdown_update = gr.Dropdown.update(visible=True)
    if "model" in url_params:
        model = url_params["model"]
        if model in models:
            dropdown_update = gr.Dropdown.update(value=model, visible=True)

    state = None
    return (
        state,
        dropdown_update,
        gr.Chatbot.update(visible=True),
        gr.Textbox.update(visible=True),
        gr.Button.update(visible=True),
        gr.Row.update(visible=True),
        gr.Accordion.update(visible=True),
    )


def load_demo(url_params, request: gr.Request):
    logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
    return load_demo_single(models, url_params)


def regenerate(state, request: gr.Request):
    logger.info(f"regenerate. ip: {request.client.host}")
    state.messages[-1][-1] = None
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 5


def clear_history(request: gr.Request):
    logger.info(f"clear_history. ip: {request.client.host}")
    state = None
    return (state, [], "") + (disable_btn,) * 5


def add_text(state, text, request: gr.Request):
    logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}")

    if state is None:
        state = get_conv_template("vicuna_v1.1")

    if len(text) <= 0:
        state.skip_next = True
        return (state, state.to_gradio_chatbot(), "") + (no_change_btn,) * 5

    text = text[:1536]  # Hard cut-off
    state.append_message(state.roles[0], text)
    state.append_message(state.roles[1], None)
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 5


def post_process_code(code):
    sep = "\n```"
    if sep in code:
        blocks = code.split(sep)
        if len(blocks) % 2 == 1:
            for i in range(1, len(blocks), 2):
                blocks[i] = blocks[i].replace("\\_", "_")
        code = sep.join(blocks)
    return code


def model_worker_stream_iter(
    conv, model_name, worker_addr, prompt, temperature, top_p, max_new_tokens
):
    # Make requests
    gen_params = {
        "model": model_name,
        "prompt": prompt,
        "temperature": temperature,
        "top_p": top_p,
        "max_new_tokens": max_new_tokens,
        "stop": conv.stop_str,
        "stop_token_ids": conv.stop_token_ids,
        "echo": False,
    }
    logger.info(f"==== request ====\n{gen_params}")

    # Stream output
    response = requests.post(
        worker_addr + "/worker_generate_stream",
        headers=headers,
        json=gen_params,
        stream=True,
        timeout=WORKER_API_TIMEOUT,
    )
    for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
        if chunk:
            data = json.loads(chunk.decode())
            yield data


def http_bot(
    state, model_selector, temperature, top_p, max_new_tokens, request: gr.Request
):
    logger.info(f"http_bot. ip: {request.client.host}")
    start_tstamp = time.time()
    model_name = model_selector
    temperature = float(temperature)
    top_p = float(top_p)
    max_new_tokens = int(max_new_tokens)

    if state.skip_next:
        # This generate call is skipped due to invalid inputs
        yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
        return

    if len(state.messages) == state.offset + 2:
        # First round of conversation
        new_state = get_conv_template(model_name.lower())
        new_state.conv_id = uuid.uuid4().hex
        new_state.model_name = state.model_name or model_selector
        new_state.append_message(new_state.roles[0], state.messages[-2][1])
        new_state.append_message(new_state.roles[1], None)
        state = new_state

    # Construct prompt
    conv = state
    if "chatglm" in model_name:
        prompt = list(list(x) for x in conv.messages[conv.offset :])
    else:
        prompt = conv.get_prompt()
    stream_iter = model_worker_stream_iter(
        conv, model_name, controller_url, prompt, temperature, top_p, max_new_tokens
    )

    state.messages[-1][-1] = "▌"
    yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5

    try:
        for data in stream_iter:
            if data["error_code"] == 0:
                output = data["text"].strip()
                if "vicuna" in model_name:
                    output = post_process_code(output)
                state.messages[-1][-1] = output + "▌"
                yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
            else:
                output = data["text"] + f"\n\n(error_code: {data['error_code']})"
                state.messages[-1][-1] = output
                yield (state, state.to_gradio_chatbot()) + (
                    disable_btn,
                    disable_btn,
                    disable_btn,
                    enable_btn,
                    enable_btn,
                )
                return
            time.sleep(0.02)
    except requests.exceptions.RequestException as e:
        state.messages[-1][-1] = (
            f"{server_error_msg}\n\n"
            f"(error_code: {ErrorCode.GRADIO_REQUEST_ERROR}, {e})"
        )
        yield (state, state.to_gradio_chatbot()) + (
            disable_btn,
            disable_btn,
            disable_btn,
            enable_btn,
            enable_btn,
        )
        return
    except Exception as e:
        state.messages[-1][-1] = (
            f"{server_error_msg}\n\n"
            f"(error_code: {ErrorCode.GRADIO_STREAM_UNKNOWN_ERROR}, {e})"
        )
        yield (state, state.to_gradio_chatbot()) + (
            disable_btn,
            disable_btn,
            disable_btn,
            enable_btn,
            enable_btn,
        )
        return

    state.messages[-1][-1] = state.messages[-1][-1][:-1]
    yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5

    finish_tstamp = time.time()
    logger.info(f"{output}")

    # TODO
    # with open(get_conv_log_filename(), "a") as fout:
    #     data = {
    #         "tstamp": round(finish_tstamp, 4),
    #         "type": "chat",
    #         "model": model_name,
    #         "gen_params": {
    #             "temperature": temperature,
    #             "top_p": top_p,
    #             "max_new_tokens": max_new_tokens,
    #         },
    #         "start": round(start_tstamp, 4),
    #         "finish": round(start_tstamp, 4),
    #         "state": state.dict(),
    #         "ip": request.client.host,
    #     }
    #     fout.write(json.dumps(data) + "\n")


block_css = (
    code_highlight_css
    + """
pre {
    white-space: pre-wrap;       /* Since CSS 2.1 */
    white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
    white-space: -pre-wrap;      /* Opera 4-6 */
    white-space: -o-pre-wrap;    /* Opera 7 */
    word-wrap: break-word;       /* Internet Explorer 5.5+ */
}
#notice_markdown th {
    display: none;
}
"""
)


def build_single_model_ui(models):
    notice_markdown = ("""
# <p style="text-align: center;">Chat with Intel Labs optimized Large Language Models</p>

### Choose a model to chat with
""")

    state = gr.State()
    gr.Markdown(notice_markdown, elem_id="notice_markdown")

    with gr.Row(elem_id="model_selector_row"):
        model_selector = gr.Dropdown(
            choices=models,
            value=models[0] if len(models) > 0 else "",
            interactive=True,
            show_label=False,
        ).style(container=False)

    chatbot = grChatbot(
        elem_id="chatbot", label="Scroll down and start chatting", visible=False,
    ).style(height=550)
    with gr.Row():
        with gr.Column(scale=20):
            textbox = gr.Textbox(
                show_label=False,
                placeholder="Type your message...",
                visible=False,
            ).style(container=False)
        with gr.Column(scale=1, min_width=50):
            send_btn = gr.Button(value="Send", visible=False)

    with gr.Row(visible=False) as button_row:
        regenerate_btn = gr.Button(value="Regenerate", interactive=False)
        clear_btn = gr.Button(value="Clear history", interactive=False)

    with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
        temperature = gr.Slider(
            minimum=0.0,
            maximum=1.0,
            value=0.1,
            step=0.1,
            interactive=True,
            label="Temperature",
        )
        top_p = gr.Slider(
            minimum=0.0,
            maximum=1.0,
            value=1.0,
            step=0.1,
            interactive=True,
            label="Top P",
        )
        max_output_tokens = gr.Slider(
            minimum=0,
            maximum=1024,
            value=512,
            step=64,
            interactive=True,
            label="Max output tokens",
        )

    gr.Markdown(learn_more_md)

    btn_list = [regenerate_btn, clear_btn]
    regenerate_btn.click(regenerate, state, [state, chatbot, textbox] + btn_list).then(
        http_bot,
        [state, model_selector, temperature, top_p, max_output_tokens],
        [state, chatbot] + btn_list,
    )
    clear_btn.click(clear_history, None, [state, chatbot, textbox] + btn_list)

    model_selector.change(clear_history, None, [state, chatbot, textbox] + btn_list)

    textbox.submit(
        add_text, [state, textbox], [state, chatbot, textbox] + btn_list
    ).then(
        http_bot,
        [state, model_selector, temperature, top_p, max_output_tokens],
        [state, chatbot] + btn_list,
    )
    send_btn.click(
        add_text, [state, textbox], [state, chatbot, textbox] + btn_list
    ).then(
        http_bot,
        [state, model_selector, temperature, top_p, max_output_tokens],
        [state, chatbot] + btn_list,
    )

    return state, model_selector, chatbot, textbox, send_btn, button_row, parameter_row


def build_demo(models):
    with gr.Blocks(
        title="Chat with Open Large Language Models",
        theme=gr.themes.Soft(),
        css=block_css,
    ) as demo:
        url_params = gr.JSON(visible=False)

        with gr.Row():
            gr.Column(scale=1, min_width=0)
            with gr.Column(scale=9):
                (
                    state,
                    model_selector,
                    chatbot,
                    textbox,
                    send_btn,
                    button_row,
                    parameter_row,
                ) = build_single_model_ui(models)
            gr.Column(scale=1, min_width=0)

        demo.load(
            load_demo_reload_model,
            [url_params],
            [
                state,
                model_selector,
                chatbot,
                textbox,
                send_btn,
                button_row,
                parameter_row,
            ],
            _js=get_window_url_params_js,
        )

    return demo


if __name__ == "__main__":
    models = get_model_list(controller_url)

    demo = build_demo(models)
    demo.queue(
        concurrency_count=concurrency_count, status_update_rate=10, api_open=False
    ).launch()
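app.py reads the controller address and the Gradio queue size from environment variables, so a Space or local run has to provide them before launch. A minimal local-launch sketch, assuming a FastChat controller is already serving registered model workers; the URL, port, and concurrency value below are placeholders, not part of this commit:

```python
# Hypothetical local launch of the demo (values are placeholders, not from the commit).
import os
import subprocess

os.environ["controller_url"] = "http://localhost:21001"  # address of a running FastChat controller
os.environ["concurrency_count"] = "10"                   # forwarded to demo.queue(...)

# app.py reads both variables at import time, queries /list_models, then launches Gradio.
subprocess.run(["python", "app.py"], check=True)
```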
conversation.py ADDED
@@ -0,0 +1,441 @@
"""
Conversation prompt templates.
"""

import dataclasses
from enum import auto, Enum
from typing import List, Tuple, Any, Dict


class SeparatorStyle(Enum):
    """Separator styles."""

    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    NO_COLON_SINGLE = auto()
    BAIZE = auto()
    DOLLY = auto()
    RWKV = auto()
    PHOENIX = auto()
    MPT = auto()


@dataclasses.dataclass
class Conversation:
    """A class that keeps all conversation history."""

    # The name of this template
    name: str
    # System prompts
    system: str
    # Two roles
    roles: List[str]
    # All messages
    messages: List[List[str]]
    # Offset of few shot examples
    offset: int
    # Separators
    sep_style: SeparatorStyle
    sep: str
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: str = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    # Used for the state in the gradio servers.
    # TODO(lmzheng): refactor this
    conv_id: Any = None
    skip_next: bool = False
    model_name: str = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
            ret = self.system + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = self.system + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
            ret = self.system
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.BAIZE:
            ret = self.system + "\n"
            for role, message in self.messages:
                if message:
                    ret += role + message + "\n"
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = self.system
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ":\n" + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += "\n\n"
                else:
                    ret += role + ":\n"
            return ret
        elif self.sep_style == SeparatorStyle.RWKV:
            ret = self.system
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += (
                        role
                        + ": "
                        + message.replace("\r\n", "\n").replace("\n\n", "\n")
                    )
                    ret += "\n\n"
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.PHOENIX:
            ret = self.system
            for role, message in self.messages:
                if message:
                    ret += role + ": " + "<s>" + message + "</s>"
                else:
                    ret += role + ": " + "<s>"
            return ret
        elif self.sep_style == SeparatorStyle.MPT:
            ret = self.system + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def to_gradio_chatbot(self):
        """Convert the history to gradio chatbot format"""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        ret = [{"role": "system", "content": self.system}]

        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append({"role": "user", "content": msg})
            else:
                if msg is not None:
                    ret.append({"role": "assistant", "content": msg})
        return ret

    def copy(self):
        return Conversation(
            name=self.name,
            system=self.system,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
            conv_id=self.conv_id,
            model_name=self.model_name,
        )

    def dict(self):
        return {
            "name": self.name,
            "system": self.system,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
            "conv_id": self.conv_id,
            "model_name": self.model_name,
        }


# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}


def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template."""
    if not override:
        assert template.name not in conv_templates, f"{template.name} has been registered."
    conv_templates[template.name] = template


def get_conv_template(name: str) -> Conversation:
    """Get a conversation template."""
    for t_name in conv_templates:
        if t_name in name:
            return conv_templates[t_name].copy()


# A template with one conversation example
register_conv_template(
    Conversation(
        name="one_shot",
        system="A chat between a curious human and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
        roles=("Human", "Assistant"),
        messages=(
            (
                "Human",
                "What are the key differences between renewable and non-renewable energy sources?",
            ),
            (
                "Assistant",
                "Renewable energy sources are those that can be replenished naturally in a relatively "
                "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
                "Non-renewable energy sources, on the other hand, are finite and will eventually be "
                "depleted, such as coal, oil, and natural gas. Here are some key differences between "
                "renewable and non-renewable energy sources:\n"
                "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
                "energy sources are finite and will eventually run out.\n"
                "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
                "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
                "and other negative effects.\n"
                "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
                "have lower operational costs than non-renewable sources.\n"
                "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
                "locations than non-renewable sources.\n"
                "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
                "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
                "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
                "non-renewable sources are not, and their depletion can lead to economic and social instability.",
            ),
        ),
        offset=2,
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n### ",
        stop_str="###",
    )
)

# Vicuna v1.1 template
register_conv_template(
    Conversation(
        name="vicuna_v1.1",
        system="A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions.",
        roles=("USER", "ASSISTANT"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

# Koala default template
register_conv_template(
    Conversation(
        name="koala_v1",
        system="BEGINNING OF CONVERSATION:",
        roles=("USER", "GPT"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

# Dolly V2 default template
register_conv_template(
    Conversation(
        name="dolly_v2",
        system="Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n",
        roles=("### Instruction", "### Response"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.DOLLY,
        sep="\n\n",
        sep2="### End",
    )
)

# OpenAssistant Pythia default template
register_conv_template(
    Conversation(
        name="oasst_pythia",
        system="",
        roles=("<|prompter|>", "<|assistant|>"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="<|endoftext|>",
    )
)

# StableLM Alpha default template
register_conv_template(
    Conversation(
        name="stablelm",
        system="""<|SYSTEM|># StableLM Tuned (Alpha version)
- StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
- StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
- StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
- StableLM will refuse to participate in anything that could harm a human.
""",
        roles=("<|USER|>", "<|ASSISTANT|>"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
        stop_token_ids=[50278, 50279, 50277, 1, 0],
    )
)

# Baize default template
register_conv_template(
    Conversation(
        name="baize",
        system="The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.",
        roles=("[|Human|]", "[|AI|]"),
        messages=(
            ("[|Human|]", "Hello!"),
            ("[|AI|]", "Hi!"),
        ),
        offset=2,
        sep_style=SeparatorStyle.BAIZE,
        sep="[|Human|]",
        stop_str="[|Human|]",
    )
)

# RWKV-4-Raven default template
register_conv_template(
    Conversation(
        name="rwkv",
        system="The following is a coherent verbose detailed conversation between Bob and Alice.\n\n",
        roles=("Bob", "Alice"),
        messages=(
            ("Bob", "Hi"),
            (
                "Alice",
                "Hi. I am your assistant and I will answer all questions. Please feel free to ask any question and I will always answer it.",
            ),
        ),
        offset=2,
        sep_style=SeparatorStyle.RWKV,
        sep="",
        stop_str="\n\n",
    )
)

# Buddy default template
register_conv_template(
    Conversation(
        name="openbuddy",
        system="""Consider a conversation between User (a human) and Assistant (named Buddy).
Buddy is an INTP-T, a friendly, intelligent and multilingual AI assistant, by OpenBuddy team. GitHub: https://github.com/OpenBuddy/OpenBuddy
Buddy cannot access the Internet.
Buddy can fluently speak the user's language (e.g. English, Chinese).
Buddy can generate poems, stories, code, essays, songs, parodies, and more.
Buddy possesses vast knowledge about the world, history, and culture.
Buddy's responses are always safe, creative, high-quality, human-like, and interesting.
Buddy strictly refuses to discuss political, NSFW, or other unsafe topics.

User: Hi.
Assistant: Hi, I'm Buddy, your AI assistant. How can I help you today?""",
        roles=("User", "Assistant"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
    )
)

# Phoenix default template
register_conv_template(
    Conversation(
        name="phoenix",
        system="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
        roles=("Human", "Assistant"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.PHOENIX,
        sep="</s>",
    )
)

# ChatGPT default template
register_conv_template(
    Conversation(
        name="chatgpt",
        system="You are a helpful assistant.",
        roles=("user", "assistant"),
        messages=(),
        offset=0,
        sep_style=None,
        sep=None,
    )
)

# Claude default template
register_conv_template(
    Conversation(
        name="claude",
        system="",
        roles=("Human", "Assistant"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n\n",
    )
)

# MPT default template
register_conv_template(
    Conversation(
        name="mpt",
        system="""<|im_start|>system
- You are a helpful assistant chatbot trained by MosaicML.
- You answer questions.
- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.""",
        roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.MPT,
        sep="<|im_end|>\n",
        stop_token_ids=[50278, 0],
    )
)

if __name__ == "__main__":
    conv = get_conv_template("vicuna_v1.1")
    conv.append_message(conv.roles[0], "Hello!")
    conv.append_message(conv.roles[1], "Hi!")
    conv.append_message(conv.roles[0], "How are you?")
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())
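Because get_conv_template matches a registered template name as a substring of the requested model name, supporting a new model is mostly a matter of registering one more template. A hedged sketch using the module's own API; the template name, roles, and separators below are illustrative, not part of this commit:

```python
# Illustrative only: register a custom template and fetch it by (partial) model name.
register_conv_template(
    Conversation(
        name="my_model",                 # hypothetical template name
        system="A helpful assistant.",
        roles=("USER", "ASSISTANT"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
    )
)

conv = get_conv_template("my_model-7b-q8")  # substring match resolves to "my_model"
conv.append_message(conv.roles[0], "Hello!")
conv.append_message(conv.roles[1], None)
print(conv.get_prompt())
```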
gradio_css.py ADDED
@@ -0,0 +1,71 @@
code_highlight_css = """
#chatbot .hll { background-color: #ffffcc }
#chatbot .c { color: #408080; font-style: italic }
#chatbot .err { border: 1px solid #FF0000 }
#chatbot .k { color: #008000; font-weight: bold }
#chatbot .o { color: #666666 }
#chatbot .ch { color: #408080; font-style: italic }
#chatbot .cm { color: #408080; font-style: italic }
#chatbot .cp { color: #BC7A00 }
#chatbot .cpf { color: #408080; font-style: italic }
#chatbot .c1 { color: #408080; font-style: italic }
#chatbot .cs { color: #408080; font-style: italic }
#chatbot .gd { color: #A00000 }
#chatbot .ge { font-style: italic }
#chatbot .gr { color: #FF0000 }
#chatbot .gh { color: #000080; font-weight: bold }
#chatbot .gi { color: #00A000 }
#chatbot .go { color: #888888 }
#chatbot .gp { color: #000080; font-weight: bold }
#chatbot .gs { font-weight: bold }
#chatbot .gu { color: #800080; font-weight: bold }
#chatbot .gt { color: #0044DD }
#chatbot .kc { color: #008000; font-weight: bold }
#chatbot .kd { color: #008000; font-weight: bold }
#chatbot .kn { color: #008000; font-weight: bold }
#chatbot .kp { color: #008000 }
#chatbot .kr { color: #008000; font-weight: bold }
#chatbot .kt { color: #B00040 }
#chatbot .m { color: #666666 }
#chatbot .s { color: #BA2121 }
#chatbot .na { color: #7D9029 }
#chatbot .nb { color: #008000 }
#chatbot .nc { color: #0000FF; font-weight: bold }
#chatbot .no { color: #880000 }
#chatbot .nd { color: #AA22FF }
#chatbot .ni { color: #999999; font-weight: bold }
#chatbot .ne { color: #D2413A; font-weight: bold }
#chatbot .nf { color: #0000FF }
#chatbot .nl { color: #A0A000 }
#chatbot .nn { color: #0000FF; font-weight: bold }
#chatbot .nt { color: #008000; font-weight: bold }
#chatbot .nv { color: #19177C }
#chatbot .ow { color: #AA22FF; font-weight: bold }
#chatbot .w { color: #bbbbbb }
#chatbot .mb { color: #666666 }
#chatbot .mf { color: #666666 }
#chatbot .mh { color: #666666 }
#chatbot .mi { color: #666666 }
#chatbot .mo { color: #666666 }
#chatbot .sa { color: #BA2121 }
#chatbot .sb { color: #BA2121 }
#chatbot .sc { color: #BA2121 }
#chatbot .dl { color: #BA2121 }
#chatbot .sd { color: #BA2121; font-style: italic }
#chatbot .s2 { color: #BA2121 }
#chatbot .se { color: #BB6622; font-weight: bold }
#chatbot .sh { color: #BA2121 }
#chatbot .si { color: #BB6688; font-weight: bold }
#chatbot .sx { color: #008000 }
#chatbot .sr { color: #BB6688 }
#chatbot .s1 { color: #BA2121 }
#chatbot .ss { color: #19177C }
#chatbot .bp { color: #008000 }
#chatbot .fm { color: #0000FF }
#chatbot .vc { color: #19177C }
#chatbot .vg { color: #19177C }
#chatbot .vi { color: #19177C }
#chatbot .vm { color: #19177C }
#chatbot .il { color: #666666 }
"""
# .highlight { background: #f8f8f8; }
gradio_patch.py ADDED
@@ -0,0 +1,168 @@
"""
Adopted from https://github.com/gradio-app/gradio/blob/main/gradio/components.py
Fix a markdown render problem.
"""
from __future__ import annotations

from gradio.components import *
from markdown2 import Markdown
import nh3


class _Keywords(Enum):
    NO_VALUE = "NO_VALUE"  # Used as a sentinel to determine if nothing is provided as a argument for `value` in `Component.update()`
    FINISHED_ITERATING = "FINISHED_ITERATING"  # Used to skip processing of a component's value (needed for generators + state)


@document("style")
class Chatbot(Changeable, Selectable, IOComponent, JSONSerializable):
    """
    Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images.
    Preprocessing: this component does *not* accept input.
    Postprocessing: expects function to return a {List[Tuple[str | None | Tuple, str | None | Tuple]]}, a list of tuples with user message and response messages. Messages should be strings, tuples, or Nones. If the message is a string, it can include Markdown. If it is a tuple, it should consist of (string filepath to image/video/audio, [optional string alt text]). Messages that are `None` are not displayed.

    Demos: chatbot_simple, chatbot_multimodal
    """

    def __init__(
        self,
        value: List[Tuple[str | None, str | None]] | Callable | None = None,
        color_map: Dict[str, str] | None = None,  # Parameter moved to Chatbot.style()
        *,
        label: str | None = None,
        every: float | None = None,
        show_label: bool = True,
        visible: bool = True,
        elem_id: str | None = None,
        elem_classes: List[str] | str | None = None,
        **kwargs,
    ):
        """
        Parameters:
            value: Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.
            label: component name in interface.
            every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
            show_label: if True, will display label.
            visible: If False, component will be hidden.
            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
        """
        if color_map is not None:
            warnings.warn(
                "The 'color_map' parameter has been deprecated.",
            )
        # self.md = utils.get_markdown_parser()
        self.md = Markdown(extras=["fenced-code-blocks", "tables", "break-on-newline"])
        self.select: EventListenerMethod
        """
        Event listener for when the user selects message from Chatbot.
        Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index.
        See EventData documentation on how to use this event data.
        """

        IOComponent.__init__(
            self,
            label=label,
            every=every,
            show_label=show_label,
            visible=visible,
            elem_id=elem_id,
            elem_classes=elem_classes,
            value=value,
            **kwargs,
        )

    def get_config(self):
        return {
            "value": self.value,
            "selectable": self.selectable,
            **IOComponent.get_config(self),
        }

    @staticmethod
    def update(
        value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
        label: str | None = None,
        show_label: bool | None = None,
        visible: bool | None = None,
    ):
        updated_config = {
            "label": label,
            "show_label": show_label,
            "visible": visible,
            "value": value,
            "__type__": "update",
        }
        return updated_config

    def _process_chat_messages(
        self, chat_message: str | Tuple | List | Dict | None
    ) -> str | Dict | None:
        if chat_message is None:
            return None
        elif isinstance(chat_message, (tuple, list)):
            mime_type = processing_utils.get_mimetype(chat_message[0])
            return {
                "name": chat_message[0],
                "mime_type": mime_type,
                "alt_text": chat_message[1] if len(chat_message) > 1 else None,
                "data": None,  # These last two fields are filled in by the frontend
                "is_file": True,
            }
        elif isinstance(
            chat_message, dict
        ):  # This happens for previously processed messages
            return chat_message
        elif isinstance(chat_message, str):
            # return self.md.render(chat_message)
            return str(self.md.convert(chat_message))
        else:
            raise ValueError(f"Invalid message for Chatbot component: {chat_message}")

    def postprocess(
        self,
        y: List[
            Tuple[str | Tuple | List | Dict | None, str | Tuple | List | Dict | None]
        ],
    ) -> List[Tuple[str | Dict | None, str | Dict | None]]:
        """
        Parameters:
            y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
        Returns:
            List of tuples representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information.
        """
        if y is None:
            return []
        processed_messages = []
        for message_pair in y:
            assert isinstance(
                message_pair, (tuple, list)
            ), f"Expected a list of lists or list of tuples. Received: {message_pair}"
            assert (
                len(message_pair) == 2
            ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
            processed_messages.append(
                (
                    # self._process_chat_messages(message_pair[0]),
                    '<pre style="font-family: var(--font)">'
                    + nh3.clean(message_pair[0])
                    + "</pre>",
                    self._process_chat_messages(message_pair[1]),
                )
            )
        return processed_messages

    def style(self, height: int | None = None, **kwargs):
        """
        This method can be used to change the appearance of the Chatbot component.
        """
        if height is not None:
            self._style["height"] = height
        if kwargs.get("color_map") is not None:
            warnings.warn("The 'color_map' parameter has been deprecated.")

        Component.style(
            self,
            **kwargs,
        )
        return self
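The patched postprocess treats the two sides of each chat pair differently: the user message is sanitized with nh3 and wrapped in a <pre> tag rather than rendered, while the model reply goes through markdown2. A standalone illustration of those two paths (not part of the commit; the sample strings are made up):

```python
# Standalone sketch of the two rendering paths used by the patched Chatbot.
import nh3
from markdown2 import Markdown

md = Markdown(extras=["fenced-code-blocks", "tables", "break-on-newline"])

user_msg = "<b>hi</b> & **not rendered as markdown**"
bot_msg = "Here you go:\n```python\nprint('hi')\n```"

# User side: sanitized with nh3 and wrapped in <pre> instead of going through Markdown.
print('<pre style="font-family: var(--font)">' + nh3.clean(user_msg) + "</pre>")
# Model side: converted from Markdown to HTML (fenced code becomes <pre><code>).
print(md.convert(bot_msg))
```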
requirements.txt ADDED
@@ -0,0 +1,4 @@
gradio
requests
markdown2
nh3
utils.py ADDED
@@ -0,0 +1,50 @@
from enum import IntEnum


CONTROLLER_HEART_BEAT_EXPIRATION = 90
WORKER_HEART_BEAT_INTERVAL = 30
WORKER_API_TIMEOUT = 20

LOGDIR = "."


class ErrorCode(IntEnum):
    """
    https://platform.openai.com/docs/guides/error-codes/api-errors
    """

    VALIDATION_TYPE_ERROR = 40001

    INVALID_AUTH_KEY = 40101
    INCORRECT_AUTH_KEY = 40102
    NO_PERMISSION = 40103

    INVALID_MODEL = 40301
    PARAM_OUT_OF_RANGE = 40302
    CONTEXT_OVERFLOW = 40303

    RATE_LIMIT = 42901
    QUOTA_EXCEEDED = 42902
    ENGINE_OVERLOADED = 42903

    INTERNAL_ERROR = 50001
    CUDA_OUT_OF_MEMORY = 50002
    GRADIO_REQUEST_ERROR = 50003
    GRADIO_STREAM_UNKNOWN_ERROR = 50004
    CONTROLLER_NO_WORKER = 50005
    CONTROLLER_WORKER_TIMEOUT = 50006


get_window_url_params_js = """
function() {
    const params = new URLSearchParams(window.location.search);
    url_params = Object.fromEntries(params);
    console.log("url_params", url_params);
    return url_params;
}
"""


server_error_msg = (
    "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
)