Commit: Update web_demo.py
Files changed: web_demo.py (+4 lines, -5 lines)

web_demo.py CHANGED
@@ -167,7 +167,7 @@ def _launch_demo(model, tokenizer, audio_tokenizer):
     model.generation_config.do_sample = False


-
+
    add_generation_prompt =True
    input_ids = tokenizer.apply_chat_template(
        messages,
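Note on the first hunk: the -/+ pair at line 170 carries no visible content in the rendered diff, so it reads as a whitespace-only change. The surrounding context shows the demo building model input via the tokenizer's chat template. Below is a minimal sketch of that standard Hugging Face call; the messages value is illustrative, not taken from web_demo.py.

# Sketch of the apply_chat_template call used here; the messages content
# is an illustrative placeholder, not from web_demo.py.
messages = [{"role": "user", "content": "Convert this text to speech."}]
input_ids = tokenizer.apply_chat_template(
    messages,
    tokenize=True,                # return token ids rather than a string
    add_generation_prompt=True,   # append the assistant header so the model starts a reply
)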
@@ -184,7 +184,7 @@ def _launch_demo(model, tokenizer, audio_tokenizer):

    input_ids = torch.tensor([input_ids], dtype=torch.long).to("cuda")

-
+    print("input", tokenizer.decode(input_ids[0], skip_special_tokens=False), flush=True)


    if audio_path_list == []:
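Note on the second hunk: the added print decodes the already-templated ids back to text so the exact prompt can be checked in the Space logs. At this point input_ids is a 2-D tensor of shape (1, seq_len), so input_ids[0] selects the single sequence; skip_special_tokens=False keeps the template's control tokens visible, and flush=True pushes the line through buffered stdout. A small round-trip sketch:

# Decoding with and without special tokens; useful when verifying that the
# chat template rendered as expected.
text_with_markers = tokenizer.decode(input_ids[0], skip_special_tokens=False)
text_plain = tokenizer.decode(input_ids[0], skip_special_tokens=True)
print("input", text_with_markers, flush=True)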
@@ -328,8 +328,7 @@ def _launch_demo(model, tokenizer, audio_tokenizer):

 def main():

-
-    model_name_or_path = "VITA-MLLM/VITA-Audio-Plus-Boost"
+    model_name_or_path = "VITA-MLLM/VITA-Audio-Plus-Vanilla"

    device_map = "cuda:0"

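Note on the third hunk: the demo switches checkpoints from VITA-MLLM/VITA-Audio-Plus-Boost to VITA-MLLM/VITA-Audio-Plus-Vanilla. A hedged sketch of the load path this change feeds into, assuming the usual transformers auto-class flow with trust_remote_code (which custom VITA-Audio modeling code would need); the exact classes and kwargs in web_demo.py may differ:

from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: VITA-Audio ships custom modeling code, hence trust_remote_code.
model_name_or_path = "VITA-MLLM/VITA-Audio-Plus-Vanilla"
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    device_map="cuda:0",
    trust_remote_code=True,
)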
@@ -385,7 +384,7 @@ def main():
    model.generation_config.top_p = 1.0
    model.generation_config.num_beams = 1
    model.generation_config.pad_token_id = tokenizer.pad_token_id
-
+    model.generation_config.mtp_inference_mode = [8192,10]


    _launch_demo(model, tokenizer, audio_tokenizer)
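Note on the fourth hunk: with num_beams=1 (and do_sample=False set earlier in _launch_demo) the demo decodes greedily, and pad_token_id is pinned to the tokenizer's pad token. mtp_inference_mode is a VITA-Audio-specific generation_config field consumed by the model's custom generate() path; reading [8192,10] as one (interval, tokens-per-step) pair for the model's multiple-token-prediction heads is an assumption, not something this diff confirms. A consolidated sketch of the settings in main():

# Generation settings from main(); mtp_inference_mode is VITA-Audio-specific.
model.generation_config.top_p = 1.0
model.generation_config.num_beams = 1            # single-beam decoding
model.generation_config.pad_token_id = tokenizer.pad_token_id
# Assumption: [8192, 10] schedules the multiple-token-prediction heads at
# inference; the exact semantics live in the repo's custom generation code.
model.generation_config.mtp_inference_mode = [8192, 10]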