Update app.py
app.py CHANGED

@@ -116,11 +116,11 @@ def talk(prompt, history):
     ]
     # indicates the end of a sequence
 
-
-
-
-
-
+    input_ids = tokenizer.apply_chat_template(
+        messages,
+        add_generation_prompt=True,
+        return_tensors="pt"
+    )
     # preparing tokens for model input
     # add_generation_prompt argument tells the template to add tokens that indicate the start of a bot response
     # print(input_ids)

@@ -153,7 +153,7 @@ def talk(prompt, history):
     outputs = []
     print(messages)
     print(*messages)
-
+    # input_ids = tokenizer(*messages)
 
     start = time.time()
     NUM_TOKENS=0