"""
Copyright (c) Meta Platforms, Inc. and affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import os
from tempfile import NamedTemporaryFile

import gradio as gr
import torch
from scipy.io.wavfile import write

from audiocraft.data.audio import audio_write
from audiocraft.models import MusicGen
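# Lazily loaded MusicGen model, kept in a module-level global so it is only
# downloaded and initialized once per process.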
MODEL = None

def split_process(audio, chosen_out_track):
    """Split the uploaded audio into stems with Demucs and return the chosen one."""
    os.makedirs("out", exist_ok=True)
    # Gradio's numpy audio component yields a (sample_rate, samples) tuple.
    write('test.wav', audio[0], audio[1])
    os.system("python3 -m demucs.separate -n mdx_extra_q -j 4 test.wav -o out")
    # Demucs writes the four stems (vocals, bass, drums, other) under
    # ./out/mdx_extra_q/test/.
    if chosen_out_track == "vocals":
        return "./out/mdx_extra_q/test/vocals.wav"
    elif chosen_out_track == "bass":
        return "./out/mdx_extra_q/test/bass.wav"
    elif chosen_out_track == "drums":
        return "./out/mdx_extra_q/test/drums.wav"
    elif chosen_out_track == "other":
        return "./out/mdx_extra_q/test/other.wav"
    elif chosen_out_track == "all-in":
        # "all-in" skips separation and conditions on the full mix.
        return "test.wav"

def load_model(version):
    print("Loading model", version)
    return MusicGen.get_pretrained(version)
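
# Other pretrained sizes ("small", "medium", "large") exist, but only the
# "melody" checkpoint accepts an audio prompt, so predict() loads it unconditionally.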

def predict(music_prompt, melody, duration, cfg_coef):
    """Generate music from a text prompt, optionally conditioned on a stem."""
    global MODEL
    if MODEL is None or MODEL.name != "melody":
        MODEL = load_model("melody")
    if duration > MODEL.lm.cfg.dataset.segment_duration:
        raise gr.Error("MusicGen currently supports durations of up to 30 seconds!")
    MODEL.set_generation_params(
        use_sampling=True,
        top_k=250,
        top_p=0,
        temperature=1.0,
        cfg_coef=cfg_coef,
        duration=duration,
    )
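    # Note: top_p=0 leaves nucleus sampling disabled, so decoding relies on
    # top-k (250) filtering only.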
    if melody:
        # Convert the (sample_rate, np.ndarray) tuple from Gradio into a float
        # tensor shaped [batch, channels, samples].
        sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t().unsqueeze(0)
        print(melody.shape)
        if melody.dim() == 2:
            # Mono input comes out 2-D; add the missing channel dimension.
            melody = melody[None]
        # Trim the conditioning audio to the model's maximum segment length.
        melody = melody[..., :int(sr * MODEL.lm.cfg.dataset.segment_duration)]
        output = MODEL.generate_with_chroma(
            descriptions=[music_prompt],
            melody_wavs=melody,
            melody_sample_rate=sr,
            progress=False
        )
    else:
        output = MODEL.generate(descriptions=[music_prompt], progress=False)
    output = output.detach().cpu().float()[0]
    with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
        # audio_write applies loudness normalization and writes the .wav data.
        audio_write(file.name, output, MODEL.sample_rate, strategy="loudness", add_suffix=False)
        # waveform_video = gr.make_waveform(file.name)
    return file.name
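
# Hypothetical direct call, bypassing the UI (returns the path of a temp .wav):
#   wav_path = predict("lofi slow bpm electro chill with organic samples", None, 10, 3.0)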
css="""
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
            # Split Audio Tracks to MusicGen
            Upload an audio file, split the audio tracks with Demucs, choose a track as the conditioning sound for MusicGen, and get a remix! <br/>
            *** Careful: the MusicGen model loaded here can only handle up to 30 seconds of audio, so please use the Gradio audio component's editing feature to trim your audio before conditioning. ***
            <br/>
            <br/>
            [Duplicate this Space](https://huggingface.co/spaces/fffiloni/SplitTrack2MusicGen?duplicate=true) for longer audio, more control and no queue.
            """
        )
        with gr.Column():
            uploaded_sound = gr.Audio(type="numpy", label="Input", source="upload")
            with gr.Row():
                chosen_track = gr.Radio(["vocals", "bass", "drums", "other", "all-in"], label="Track", info="Which track from your audio do you want to mash up?", value="vocals")
                load_sound_btn = gr.Button('Load your chosen track')
            # split_vocals = gr.Audio(type="filepath", label="Vocals")
            # split_bass = gr.Audio(type="filepath", label="Bass")
            # split_drums = gr.Audio(type="filepath", label="Drums")
            # split_others = gr.Audio(type="filepath", label="Other")
            with gr.Row():
                music_prompt = gr.Textbox(label="Musical Prompt", info="Describe what kind of music you wish for", interactive=True, placeholder="lofi slow bpm electro chill with organic samples")
                melody = gr.Audio(source="upload", type="numpy", label="Track Condition (from previous step)", interactive=False)
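                # "melody" stays non-interactive: it is populated by the Demucs
                # split step wired up in load_sound_btn.click() below.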
            with gr.Row():
                # model = gr.Radio(["melody", "medium", "small", "large"], label="MusicGen Model", value="melody", interactive=True)
                duration = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Generated Music Duration", interactive=True)
                cfg_coef = gr.Slider(label="Classifier Free Guidance", minimum=1.0, maximum=10.0, step=0.1, value=3.0, interactive=True)
            with gr.Row():
                submit = gr.Button("Submit")
            # with gr.Row():
            #     topk = gr.Number(label="Top-k", value=250, interactive=True)
            #     topp = gr.Number(label="Top-p", value=0, interactive=True)
            #     temperature = gr.Number(label="Temperature", value=1.0, interactive=True)
            #     cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
            output = gr.Audio(label="Generated Music")
            gr.Examples(
                fn=predict,
                examples=[
                    [
                        "An 80s driving pop song with heavy drums and synth pads in the background",
                        None,
                        10,
                        3.0
                    ],
                    [
                        "A cheerful country song with acoustic guitars",
                        None,
                        10,
                        3.0
                    ],
                    [
                        "90s rock song with electric guitar and heavy drums",
                        None,
                        10,
                        3.0
                    ],
                    [
                        "a light and cheerful EDM track, with syncopated drums, airy pads, and strong emotions bpm: 130",
                        None,
                        10,
                        3.0
                    ],
                    [
                        "lofi slow bpm electro chill with organic samples",
                        None,
                        10,
                        3.0
                    ],
                ],
                inputs=[music_prompt, melody, duration, cfg_coef],
                outputs=[output]
            )
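
    # Wire the two-step flow: split the upload with Demucs first, then generate
    # music conditioned on the chosen stem.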
    load_sound_btn.click(split_process, inputs=[uploaded_sound, chosen_track], outputs=[melody], api_name="splt_trck")
    submit.click(predict, inputs=[music_prompt, melody, duration, cfg_coef], outputs=[output])
demo.queue(max_size=32).launch()