Commit bb70f8e · Parent: f8681d6
Update llama/m2ugen.py
llama/m2ugen.py CHANGED (+1, -3)
@@ -621,9 +621,7 @@ class M2UGen(nn.Module):
                 return audio_outputs
             else:
                 print("Generating Music...")
-
-                gen_inputs = self.generation_processor(text=music_caption, padding='max_length',
-                                                       max_length=128, truncation=True, return_tensors="pt").to("cuda:1")
+                gen_inputs = self.generation_processor(text=music_caption, return_tensors="pt").to("cuda:1")
                 #gen_emb = self.generation_model.generate(**gen_inputs, guidance_scale=3.5, encoder_only=True)
                 audio_outputs = self.generation_model.generate(**gen_inputs, guidance_scale=3.5,
                                                                max_new_tokens=int(256 / 5 * audio_length_in_s))
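
The change drops the padding='max_length', max_length=128, truncation=True arguments, so the music caption is tokenized at its natural length before being passed to the generation model, presumably to avoid handing the text encoder a sequence padded out to 128 tokens. Below is a minimal sketch of the resulting call path, assuming generation_processor and generation_model correspond to the Hugging Face MusicGen processor and MusicgenForConditionalGeneration (the checkpoint name, caption, and duration are illustrative and not taken from this commit):

from transformers import AutoProcessor, MusicgenForConditionalGeneration

# Illustrative checkpoint; the Space loads its own weights and device mapping.
processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")

music_caption = "an upbeat acoustic guitar melody"  # hypothetical caption
audio_length_in_s = 10                              # hypothetical duration

# After the commit: the caption is tokenized as-is, with no fixed-length padding or truncation.
gen_inputs = processor(text=music_caption, return_tensors="pt")

# MusicGen emits roughly 50 audio tokens per second; 256 / 5 mirrors the ratio used in the diff.
audio_outputs = model.generate(**gen_inputs,
                               guidance_scale=3.5,
                               max_new_tokens=int(256 / 5 * audio_length_in_s))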