AlexHung29629 committed
Commit 2ef161a · verified · 1 Parent(s): a238d15

Update app.py

Files changed (1):
  1. app.py +26 -1
app.py CHANGED
@@ -18,6 +18,31 @@ MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
+CHAT_TEMPLATE="""{%- set default_system_message = "A user will ask you to solve a task. You should first draft your thinking process (inner monologue) until you have derived the final answer. Afterwards, write a self-contained summary of your thoughts (i.e. your summary should be succinct but contain all the critical steps you needed to reach the conclusion). You should use Markdown and Latex to format your response. Write both your thoughts and summary in the same language as the task posed by the user.\n\nYour thinking process must follow the template below:\n<think>\nYour thoughts or/and draft, like working through an exercise on scratch paper. Be as casual and as long as you want until you are confident to generate a correct answer.\n</think>\n\nHere, provide a concise summary that reflects your reasoning and presents a clear final answer to the user.\n\nProblem:" %}
+
+{{- bos_token }}
+
+{%- if messages[0]['role'] == 'system' %}
+    {%- set system_message = messages[0]['content'] %}
+    {%- set loop_messages = messages[1:] %}
+{%- else %}
+    {%- set system_message = default_system_message %}
+    {%- set loop_messages = messages %}
+{%- endif %}
+{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}
+
+{%- for message in loop_messages %}
+    {%- if message['role'] == 'user' %}
+        {{- '[INST]' + message['content'] + '[/INST]' }}
+    {%- elif message['role'] == 'system' %}
+        {{- '[SYSTEM_PROMPT]' + message['content'] + '[/SYSTEM_PROMPT]' }}
+    {%- elif message['role'] == 'assistant' %}
+        {{- message['content'] + eos_token }}
+    {%- else %}
+        {{- raise_exception('Only user, system and assistant roles are supported!') }}
+    {%- endif %}
+{%- endfor %}"""
+
 if torch.cuda.is_available():
     model_id = "mistralai/Mistral-Small-24B-Instruct-2501"
     model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
@@ -36,7 +61,7 @@ def generate(
 ) -> Iterator[str]:
     conversation = [*chat_history, {"role": "user", "content": message}]
 
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+    input_ids = tokenizer.apply_chat_template(conversation, chat_template=CHAT_TEMPLATE, return_tensors="pt")
     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")