Update backend/prompts.py
backend/prompts.py (+600, -599)
CHANGED
@@ -1,599 +1,600 @@

class PromptFormat:

    def __init__(self):
        pass

    def format(self, prompt, response, system_prompt, settings):
        raise NotImplementedError

    def stop_conditions(self, tokenizer, settings):
        raise NotImplementedError

    def is_instruct(self):
        raise NotImplementedError

    def encode_special_tokens(self):
        return True

    def context_bos(self):
        return False

    @staticmethod
    def supports_system_prompt():
        return True


class PromptFormat_raw(PromptFormat):

    description = "Model-agnostic mode simulating a raw chatlog between two or more users"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return False

    def stop_conditions(self, tokenizer, settings):
        raise NotImplementedError

    def format(self, prompt, response, system_prompt, settings):
        raise NotImplementedError

    def encode_special_tokens(self):
        return True


class PromptFormat_llama(PromptFormat):

    description = "Llama-chat, Llama2-chat and Mistral-instruct models"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id]

    def format(self, prompt, response, system_prompt, settings):
        text = "<s>[INST] "
        if system_prompt and system_prompt.strip() != "":
            text += "<<SYS>>\n"
            text += system_prompt
            text += "\n<</SYS>>\n\n "
        text += prompt
        text += " [/INST]"
        if response:
            text += response
            text += "</s>"
        return text

class PromptFormat_mistral(PromptFormat):

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id]

    def context_bos(self):
        return True

class PromptFormat_mistralv1(PromptFormat_mistral):
    """
    <s> [INST] user message [/INST] assistant message</s> [INST] new user message [/INST]
    """
    description = "Mistral tokenizer v1"

    def __init__(self):
        super().__init__()
        pass

    def format(self, p, r, sp, settings):
        if sp and sp.strip():
            text = f" [INST] {sp.strip()}\n\n {p.strip()} [/INST]"
        else:
            text = f" [INST] {p.strip()} [/INST]"
        if r:
            text += f" {r.strip()}</s>"
        return text

class PromptFormat_mistralv2v3(PromptFormat_mistral):
    """
    <s>[INST] user message[/INST] assistant message</s>[INST] new user message[/INST]
    """
    description = "Mistral tokenizer v2/v3"

    def __init__(self):
        super().__init__()
        pass

    def format(self, p, r, sp, settings):
        if sp and sp.strip():
            text = f"[INST] {sp.strip()}\n\n {p.strip()}[/INST]"
        else:
            text = f"[INST] {p.strip()}[/INST]"
        if r:
            text += f" {r.strip()}</s>"
        return text

class PromptFormat_mistralTekken(PromptFormat_mistral):
    """
    <s>[INST]user message[/INST]assistant message</s>[INST]new user message[/INST]
    """
    description = "Mistral tokenizer V3 (Tekken)"

    def format(self, p, r, sp, settings):
        if sp and sp.strip():
            text = f"[INST]{sp.strip()}\n\n{p.strip()}[/INST]"
        else:
            text = f"[INST]{p.strip()}[/INST]"
        if r:
            text += f"{r.strip()}</s>"
        return text


class PromptFormat_llama3(PromptFormat):

    description = "Llama-3 instruct template."

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.single_id("<|eot_id|>"),
             tokenizer.single_id("<|start_header_id|>"),
             tokenizer.eos_token_id]

    def format(self, prompt, response, system_prompt, settings):
        text = ""
        if system_prompt and system_prompt.strip() != "":
            text += "<|start_header_id|>system<|end_header_id|>\n\n"
            text += system_prompt
            text += "<|eot_id|>"
        text += "<|start_header_id|>user<|end_header_id|>\n\n"
        text += prompt
        text += "<|eot_id|>"
        text += "<|start_header_id|>assistant<|end_header_id|>\n\n"
        if response:
            text += response
            text += "<|eot_id|>"
        return text

    def context_bos(self):
        return True


class PromptFormat_phi3(PromptFormat):

    description = "Phi-3 instruct"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.single_id("<|end|>"),
             tokenizer.single_id("<|assistant|>"),
             tokenizer.single_id("<|endoftext|>"),
             tokenizer.eos_token_id]

    def format(self, prompt, response, system_prompt, settings):
        text = ""
        if system_prompt and system_prompt.strip() != "":
            text += "<|system|>\n"
            text += system_prompt
            text += "<|end|>\n"
        text += "<|user|>\n"
        text += prompt
        text += "<|end|>\n"
        text += "<|assistant|>\n"
        if response:
            text += response
            text += "<|end|>"
        return text

    def context_bos(self):
        return True

class PromptFormat_phi4(PromptFormat):

    description = "Phi-4"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id,
             """<|im_end|>"""]

    def format(self, prompt, response, system_prompt, settings):
        text = ""
        if system_prompt and system_prompt.strip() != "":
            text += "<|im_start|>system\n"
            text += system_prompt
            text += "\n<|im_end|>\n"
        text += "<|im_start|>user\n"
        text += prompt
        text += "<|im_end|>\n"
        text += "<|im_start|>assistant\n"
        if response:
            text += response
            text += "<|im_end|>\n"
        return text

    def context_bos(self):
        return True

class PromptFormat_mistrallite(PromptFormat):

    description = "MistralLite format"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id]

    def format(self, prompt, response, system_prompt, settings):
        text = "<|prompter|>"
        if system_prompt and system_prompt.strip() != "":
            text += system_prompt
            text += "</s><|assistant|>Understood.</s><|prompter|>"
        text += prompt
        text += "</s><|assistant|>"
        if response:
            text += response
            text += "</s>"
        return text

# class PromptFormat_codellama(PromptFormat_llama):
#
#     description = "CodeLlama-instruct"
#
#     def __init__(self):
#         super().__init__()
#         pass
#
#     def default_system_prompt(self):
#         return \
#             """You are a helpful coding assistant. Always answer as helpfully as possible."""


class PromptFormat_chatml(PromptFormat):

    description = "ChatML format, as used by e.g. (Mistral)Orca"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id,
             """<|im_end|>"""]

    def format(self, prompt, response, system_prompt, settings):
        text = ""
        if system_prompt and system_prompt.strip() != "":
            text += "<|im_start|>system\n"
            text += system_prompt
            text += "\n<|im_end|>\n"
        text += "<|im_start|>user\n"
        text += prompt
        text += "<|im_end|>\n"
        text += "<|im_start|>assistant\n"
        if response:
            text += response
            text += "<|im_end|>\n"
        return text

    def context_bos(self):
        return True


class PromptFormat_tinyllama(PromptFormat_chatml):

    description = "ChatML format, but ignoring special/added tokens. Use for TinyLlama-chat v0.3"

    def encode_special_tokens(self):
        return False


class PromptFormat_phind_codellama(PromptFormat):

    description = "Vicuna/Alpaca-like format for Phind-CodeLlama"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id, "\n### "]

    def format(self, prompt, response, system_prompt, settings):
        text = ""
        if system_prompt and system_prompt.strip() != "":
            text += "### System Prompt\n"
            text += system_prompt
            text += "\n\n"
        text += "### User Message\n"
        text += prompt
        text += "\n\n### Assistant\n"
        if response:
            text += response
            text += "\n\n"
        return text


class PromptFormat_deepseek_chat(PromptFormat):

    description = "Deepseek LLM chat format"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id, "\n\nAssistant:"]

    def format(self, prompt, response, system_prompt, settings):
        text = ""
        if system_prompt and system_prompt.strip() != "":
            text += system_prompt
            text += "\n\n"
        text += "User: "
        text += prompt
        text += "\n\nAssistant:"
        if response:
            text += response
            text += "\n\n"
        return text


class PromptFormat_deepseek_instruct(PromptFormat):

    description = "Deepseek instruct format for 'coder' models"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id, "<|EOT|>"]

    def format(self, prompt, response, system_prompt, settings):
        text = ""
        if system_prompt and system_prompt.strip() != "":
            text += "<|begin▁of▁sentence|>"
            text += system_prompt
            text += "\n"
        text += "### Instruction:\n"
        text += prompt
        text += "\n### Response:\n"
        if response:
            text += response
            text += "\n<|EOT|>\n"
        return text


class PromptFormat_openchat(PromptFormat):

    description = "OpenChat"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id,
             "<|end_of_turn|>",
             "<|endoftext|>",
             "GPT4 Correct User:"
             ]

    def format(self, prompt, response, system_prompt, settings):
        text = ""
        if system_prompt and system_prompt.strip() != "":
            text += system_prompt
            text += "<|end_of_turn|>"
        text += "GPT4 Correct User:"
        text += prompt
        text += "<|end_of_turn|>"
        text += "GPT4 Correct Assistant:"
        if response:
            text += response
            text += "<|end_of_turn|>"
        return text


class PromptFormat_gemma(PromptFormat):

    description = "Gemma"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id,
             "<end_of_turn>",
             ]

    def format(self, prompt, response, system_prompt, settings):
        text = ""
        if system_prompt is not None:
            text += "<bos>"
            # s = system_prompt.strip()
            # if s != "":
            #     text += "<start_of_turn>user\n"
            #     text += s + "<end_of_turn>\n"
            #     text += "<start_of_turn>model\n"
            #     text += "Okay!<end_of_turn>\n"
        text += "<start_of_turn>user\n"
        text += prompt
        text += "<end_of_turn>\n"
        text += "<start_of_turn>model\n"
        if response:
            text += response
            text += "<end_of_turn>\n"
        return text

    @staticmethod
    def supports_system_prompt():
        return False


class PromptFormat_cohere(PromptFormat):

    description = "Cohere"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id,
             "<|END_OF_TURN_TOKEN|>",
             ]

    def format(self, prompt, response, system_prompt, settings):
        text = ""
        if system_prompt is not None:
            text += "<BOS_TOKEN>"
            text += "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>"
            text += system_prompt.strip()
            text += "<|END_OF_TURN_TOKEN|>"
        text += "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>"
        text += prompt
        text += "<|END_OF_TURN_TOKEN|>"
        text += "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
        if response:
            text += response
            text += "<|END_OF_TURN_TOKEN|>"
        return text


class PromptFormat_granite(PromptFormat):

    description = "Granite"

    def __init__(self):
        super().__init__()
        pass

    def is_instruct(self):
        return True

    def stop_conditions(self, tokenizer, settings):
        return \
            [tokenizer.eos_token_id,
             "\n\nQuestion:",
             ]

    def format(self, prompt, response, system_prompt, settings):
        text = ""
        if system_prompt is not None:
            text += "System:\n"
            text += system_prompt.strip()
            text += "\n\n"
        text += "Question:\n"
        text += prompt
        text += "\n\n"
        text += "Answer:\n"
        if response:
            text += response
            text += "\n\n"
        return text

    def context_bos(self):
        return True


prompt_formats = \
{
    "Chat-RP": PromptFormat_raw,
    "Llama-chat": PromptFormat_llama,
    "Llama3-instruct": PromptFormat_llama3,
    "ChatML": PromptFormat_chatml,
    "TinyLlama-chat": PromptFormat_tinyllama,
    "MistralLite": PromptFormat_mistrallite,
    "Phind-CodeLlama": PromptFormat_phind_codellama,
    "Deepseek-chat": PromptFormat_deepseek_chat,
    "Deepseek-instruct": PromptFormat_deepseek_instruct,
    "OpenChat": PromptFormat_openchat,
    "Gemma": PromptFormat_gemma,
    "Cohere": PromptFormat_cohere,
    "Phi3-instruct": PromptFormat_phi3,
    "Phi4": PromptFormat_phi4,
    "Granite": PromptFormat_granite,
    "Mistral V1": PromptFormat_mistralv1,
    "Mistral V2/V3": PromptFormat_mistralv2v3,
    "Mistral V3 (Tekken)": PromptFormat_mistralTekken,
}

def list_prompt_formats():
    global prompt_formats
    prompts = [
        {
            "name": k,
            "supports_system_prompt": v.supports_system_prompt()
        }
        for k, v in prompt_formats.items()
    ]
    return prompts
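
For orientation, a minimal sketch of how a caller might drive these classes. Only prompt_formats, format(), stop_conditions(), supports_system_prompt() and context_bos() come from the file above; DummyTokenizer and its hard-coded token ids are stand-ins invented for this example, not part of the backend.

# Usage sketch (illustrative only, not part of backend/prompts.py).

class DummyTokenizer:
    # Stand-in for the app's tokenizer wrapper; mocks only the two members
    # the prompt formats actually touch. Ids are Llama-3 values, assumed here.
    eos_token_id = 128001

    def single_id(self, token):
        return {"<|eot_id|>": 128009, "<|start_header_id|>": 128006}[token]

fmt = prompt_formats["Llama3-instruct"]()   # look up a format by its UI name
tok = DummyTokenizer()

system = "You are a helpful assistant." if fmt.supports_system_prompt() else None

# response=None leaves the assistant turn open so the model completes it.
context = fmt.format("What is the capital of France?", None, system, settings={})

stops = fmt.stop_conditions(tok, settings={})   # token ids / strings that end generation

A frontend would presumably tokenize context (prepending a BOS token when fmt.context_bos() returns True) and stop streaming once one of stops is produced.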