Spaces: Build error
Update app.py
app.py CHANGED
@@ -123,7 +123,7 @@ def prepare_output_dir(input_file, output_dir):
         raise RuntimeError(f"Failed to prepare output directory {out_dir}: {e}")
     return out_dir
 
-def roformer_separator(audio, model_key, seg_size, override_seg_size, overlap, pitch_shift, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True)):
+def roformer_separator(audio, model_key, seg_size, override_seg_size, batch_size, overlap, pitch_shift, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True, desc="Audio separated...")):
     """Separate audio using Roformer model."""
     base_name = os.path.splitext(os.path.basename(audio))[0]
     print_message(audio, model_key)
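A note on the progress tracker touched above: per the Gradio documentation, `gr.Progress` is constructed with only `track_tqdm`, and per-stage descriptions are supplied when the tracker is called inside the handler. A minimal sketch of that documented pattern (the function body and stage names are illustrative, not this app's code):

```python
import gradio as gr

def separate(audio, progress=gr.Progress(track_tqdm=True)):
    # Stage descriptions are passed at call time via `desc`,
    # not to the gr.Progress constructor.
    progress(0.2, desc="Model loaded...")
    # ... load the model ...
    progress(0.7, desc="Audio separated...")
    # ... run the separation ...
    return audio
```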
@@ -139,18 +139,15 @@ def roformer_separator(audio, model_key, seg_size, override_seg_size, overlap, p
             amplification_threshold=amp_thresh,
             use_autocast=use_autocast,
             mdxc_params={
-                "batch_size": 1,
                 "segment_size": seg_size,
                 "override_model_segment_size": override_seg_size,
+                "batch_size": batch_size,
                 "overlap": overlap,
                 "pitch_shift": pitch_shift,
             }
         )
 
-        progress(0.2, desc="Model loaded...")
         separator.load_model(model_filename=model)
-
-        progress(0.7, desc="Audio separated...")
         separation = separator.separate(audio, f"{base_name}_(Stem1)", f"{base_name}_(Stem2)")
         print(f"Separation complete!\nResults: {', '.join(separation)}")
 
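The hunk above stops hard-coding `"batch_size": 1` and instead forwards the user-supplied `batch_size` through `mdxc_params`. For context, a minimal sketch of the same `Separator` call pattern outside the Gradio handlers, assuming the python-audio-separator package this app uses (the model filename and input path are illustrative):

```python
from audio_separator.separator import Separator

separator = Separator(
    model_file_dir="/tmp/audio-separator-models/",
    output_dir="output",
    output_format="wav",
    normalization_threshold=0.9,
    amplification_threshold=0.6,
    mdxc_params={
        "segment_size": 256,
        "override_model_segment_size": False,
        "batch_size": 1,  # now taken from the UI instead of being hard-coded
        "overlap": 8,
        "pitch_shift": 0,
    },
)
separator.load_model(model_filename="model_bs_roformer_ep_317_sdr_12.9755.ckpt")
output_files = separator.separate("input.wav")  # returns the paths of the written stems
print(output_files)
```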
@@ -159,7 +156,7 @@ def roformer_separator(audio, model_key, seg_size, override_seg_size, overlap, p
     except Exception as e:
         raise RuntimeError(f"Roformer separation failed: {e}") from e
 
-def mdx23c_separator(audio, model, seg_size, override_seg_size, overlap, pitch_shift, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True)):
+def mdx23c_separator(audio, model, seg_size, override_seg_size, batch_size, overlap, pitch_shift, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True, desc="Audio separated...")):
     """Separate audio using MDX23C model."""
     base_name = os.path.splitext(os.path.basename(audio))[0]
     print_message(audio, model)
@@ -174,18 +171,15 @@ def mdx23c_separator(audio, model, seg_size, override_seg_size, overlap, pitch_s
             amplification_threshold=amp_thresh,
             use_autocast=use_autocast,
             mdxc_params={
-                "batch_size": 1,
                 "segment_size": seg_size,
                 "override_model_segment_size": override_seg_size,
+                "batch_size": batch_size,
                 "overlap": overlap,
                 "pitch_shift": pitch_shift,
             }
         )
 
-        progress(0.2, desc="Model loaded...")
         separator.load_model(model_filename=model)
-
-        progress(0.7, desc="Audio separated...")
         separation = separator.separate(audio, f"{base_name}_(Stem1)", f"{base_name}_(Stem2)")
         print(f"Separation complete!\nResults: {', '.join(separation)}")
 
@@ -194,7 +188,7 @@ def mdx23c_separator(audio, model, seg_size, override_seg_size, overlap, pitch_s
     except Exception as e:
         raise RuntimeError(f"MDX23C separation failed: {e}") from e
 
-def mdx_separator(audio, model, hop_length, seg_size, overlap, denoise, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True)):
+def mdx_separator(audio, model, hop_length, seg_size, overlap, batch_size, denoise, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True, desc="Audio separated...")):
     """Separate audio using MDX-NET model."""
     base_name = os.path.splitext(os.path.basename(audio))[0]
     print_message(audio, model)
@@ -209,18 +203,15 @@ def mdx_separator(audio, model, hop_length, seg_size, overlap, denoise, model_di
             amplification_threshold=amp_thresh,
             use_autocast=use_autocast,
             mdx_params={
-                "batch_size": 1,
                 "hop_length": hop_length,
                 "segment_size": seg_size,
                 "overlap": overlap,
+                "batch_size": batch_size,
                 "enable_denoise": denoise,
             }
         )
 
-        progress(0.2, desc="Model loaded...")
         separator.load_model(model_filename=model)
-
-        progress(0.7, desc="Audio separated...")
         separation = separator.separate(audio, f"{base_name}_(Stem1)", f"{base_name}_(Stem2)")
         print(f"Separation complete!\nResults: {', '.join(separation)}")
 
@@ -229,7 +220,7 @@ def mdx_separator(audio, model, hop_length, seg_size, overlap, denoise, model_di
     except Exception as e:
         raise RuntimeError(f"MDX-NET separation failed: {e}") from e
 
-def vr_separator(audio, model, window_size, aggression, tta, post_process, post_process_threshold, high_end_process, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True)):
+def vr_separator(audio, model, batch_size, window_size, aggression, tta, post_process, post_process_threshold, high_end_process, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True, desc="Audio separated...")):
     """Separate audio using VR ARCH model."""
     base_name = os.path.splitext(os.path.basename(audio))[0]
     print_message(audio, model)
@@ -244,7 +235,7 @@ def vr_separator(audio, model, window_size, aggression, tta, post_process, post_
             amplification_threshold=amp_thresh,
             use_autocast=use_autocast,
             vr_params={
-                "batch_size": 1,
+                "batch_size": batch_size,
                 "window_size": window_size,
                 "aggression": aggression,
                 "enable_tta": tta,
@@ -254,10 +245,7 @@ def vr_separator(audio, model, window_size, aggression, tta, post_process, post_
             }
         )
 
-        progress(0.2, desc="Model loaded...")
         separator.load_model(model_filename=model)
-
-        progress(0.7, desc="Audio separated...")
         separation = separator.separate(audio, f"{base_name}_(Stem1)", f"{base_name}_(Stem2)")
         print(f"Separation complete!\nResults: {', '.join(separation)}")
 
@@ -266,7 +254,7 @@ def vr_separator(audio, model, window_size, aggression, tta, post_process, post_
     except Exception as e:
         raise RuntimeError(f"VR ARCH separation failed: {e}") from e
 
-def demucs_separator(audio, model, seg_size, shifts, overlap, segments_enabled, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True)):
+def demucs_separator(audio, model, seg_size, shifts, overlap, segments_enabled, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress(track_tqdm=True, desc="Audio separated...")):
     """Separate audio using Demucs model."""
     print_message(audio, model)
     try:
@@ -287,10 +275,7 @@ def demucs_separator(audio, model, seg_size, shifts, overlap, segments_enabled,
             }
         )
 
-        progress(0.2, desc="Model loaded...")
         separator.load_model(model_filename=model)
-
-        progress(0.7, desc="Audio separated...")
         separation = separator.separate(audio)
         print(f"Separation complete!\nResults: {', '.join(separation)}")
 
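The Demucs path above calls `separator.separate(audio)` without custom stem names, which typically yields one output file per stem. For reference, a sketch of the corresponding parameter block, assuming the python-audio-separator `demucs_params` keys that match the Demucs sliders defined later in this diff (the values and model filename are illustrative):

```python
from audio_separator.separator import Separator

# Hypothetical values mirroring the Demucs controls in the UI below.
separator = Separator(
    output_dir="output",
    output_format="wav",
    demucs_params={
        "segment_size": 40,
        "shifts": 2,
        "overlap": 0.25,
        "segments_enabled": True,
    },
)
separator.load_model(model_filename="htdemucs_ft.yaml")
stems = separator.separate("input.wav")  # one file per stem (e.g. vocals, drums, bass, other)
```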
@@ -323,13 +308,13 @@ with gr.Blocks(
     gr.Markdown("<h1> Audio-Separator by Politrees </h1>")
     with gr.Accordion("General settings", open=False):
         with gr.Group():
-            model_file_dir = gr.Textbox(value="/tmp/audio-separator-models/", label="Directory
+            model_file_dir = gr.Textbox(value="/tmp/audio-separator-models/", label="Directory to cache model files", info="The directory where model files are stored.", placeholder="/tmp/audio-separator-models/", interactive=False)
             with gr.Row():
-                output_dir = gr.Textbox(value="output", label="File output directory", placeholder="output", interactive=False)
-                output_format = gr.Dropdown(value="wav", choices=["wav", "flac", "mp3"], label="Output Format")
+                output_dir = gr.Textbox(value="output", label="File output directory", info="The directory where output files will be saved.", placeholder="output", interactive=False)
+                output_format = gr.Dropdown(value="wav", choices=["wav", "flac", "mp3"], label="Output Format", info="The format of the output audio file.")
             with gr.Row():
-                norm_threshold = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.9, label="Normalization", info="
-                amp_threshold = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.6, label="Amplification", info="
+                norm_threshold = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.9, label="Normalization threshold", info="The threshold for audio normalization.")
+                amp_threshold = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.6, label="Amplification threshold", info="The threshold for audio amplification.")
 
     with gr.Tab("Roformer"):
         with gr.Group():
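The settings hunk above pairs each control with an `info` tooltip and uses `interactive=False` to keep fixed paths visible but read-only. A small self-contained sketch of that same component pattern (values mirror the app's defaults shown above):

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Group():
        out_dir = gr.Textbox(
            value="output",
            label="File output directory",
            info="The directory where output files will be saved.",
            interactive=False,  # rendered for the user, but not editable
        )
        norm_threshold = gr.Slider(
            minimum=0.1, maximum=1, step=0.1, value=0.9,
            label="Normalization threshold",
            info="The threshold for audio normalization.",
        )

demo.launch()
```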
@@ -338,7 +323,8 @@ with gr.Blocks(
             with gr.Row():
                 roformer_seg_size = gr.Slider(minimum=32, maximum=4000, step=32, value=256, label="Segment Size", info="Larger consumes more resources, but may give better results.")
                 roformer_override_seg_size = gr.Checkbox(value=False, label="Override segment size", info="Override model default segment size instead of using the model default value.")
-
+                roformer_batch_size = gr.Slider(minimum=1, maximum=16, step=1, value=1, label="Batch Size", info="Larger consumes more RAM but may process slightly faster.", interactive=False)
+                roformer_overlap = gr.Slider(minimum=2, maximum=10, step=1, value=8, label="Overlap", info="Amount of overlap between prediction windows. Lower is better but slower.")
                 roformer_pitch_shift = gr.Slider(minimum=-12, maximum=12, step=1, value=0, label="Pitch shift", info="Shift audio pitch by a number of semitones while processing. may improve output for deep/high vocals.")
             with gr.Row():
                 roformer_audio = gr.Audio(label="Input Audio", type="filepath")
@@ -355,7 +341,8 @@ with gr.Blocks(
             with gr.Row():
                 mdx23c_seg_size = gr.Slider(minimum=32, maximum=4000, step=32, value=256, label="Segment Size", info="Larger consumes more resources, but may give better results.")
                 mdx23c_override_seg_size = gr.Checkbox(value=False, label="Override segment size", info="Override model default segment size instead of using the model default value.")
-
+                mdx23c_batch_size = gr.Slider(minimum=1, maximum=16, step=1, value=1, label="Batch Size", info="Larger consumes more RAM but may process slightly faster.", interactive=False)
+                mdx23c_overlap = gr.Slider(minimum=2, maximum=50, step=1, value=8, label="Overlap", info="Amount of overlap between prediction windows. Higher is better but slower.")
                 mdx23c_pitch_shift = gr.Slider(minimum=-12, maximum=12, step=1, value=0, label="Pitch shift", info="Shift audio pitch by a number of semitones while processing. may improve output for deep/high vocals.")
             with gr.Row():
                 mdx23c_audio = gr.Audio(label="Input Audio", type="filepath")
@@ -370,10 +357,11 @@ with gr.Blocks(
             with gr.Row():
                 mdx_model = gr.Dropdown(label="Select the Model", choices=MDXNET_MODELS)
             with gr.Row():
-                mdx_hop_length = gr.Slider(minimum=32, maximum=2048, step=32, value=1024, label="Hop Length")
+                mdx_hop_length = gr.Slider(minimum=32, maximum=2048, step=32, value=1024, label="Hop Length", info="Usually called stride in neural networks; only change if you know what you're doing.")
                 mdx_seg_size = gr.Slider(minimum=32, maximum=4000, step=32, value=256, label="Segment Size", info="Larger consumes more resources, but may give better results.")
-                mdx_overlap = gr.Slider(minimum=0.001, maximum=0.999, step=0.001, value=0.25, label="Overlap")
-
+                mdx_overlap = gr.Slider(minimum=0.001, maximum=0.999, step=0.001, value=0.25, label="Overlap", info="Amount of overlap between prediction windows. Higher is better but slower.")
+                mdx_batch_size = gr.Slider(minimum=1, maximum=16, step=1, value=1, label="Batch Size", info="Larger consumes more RAM but may process slightly faster.", interactive=False)
+                mdx_denoise = gr.Checkbox(value=False, label="Denoise", info="Enable denoising after separation.")
             with gr.Row():
                 mdx_audio = gr.Audio(label="Input Audio", type="filepath")
             with gr.Row():
@@ -387,10 +375,11 @@ with gr.Blocks(
             with gr.Row():
                 vr_model = gr.Dropdown(label="Select the Model", choices=VR_ARCH_MODELS)
             with gr.Row():
-
+                vr_batch_size = gr.Slider(minimum=1, maximum=16, step=1, value=1, label="Batch Size", info="Larger consumes more RAM but may process slightly faster.", interactive=False)
+                vr_window_size = gr.Slider(minimum=320, maximum=1024, step=32, value=512, label="Window Size", info="Balance quality and speed. 1024 = fast but lower, 320 = slower but better quality.")
                 vr_aggression = gr.Slider(minimum=1, maximum=50, step=1, value=5, label="Agression", info="Intensity of primary stem extraction.")
                 vr_tta = gr.Checkbox(value=False, label="TTA", info="Enable Test-Time-Augmentation; slow but improves quality.")
-                vr_post_process = gr.Checkbox(value=False, label="Post Process", info="
+                vr_post_process = gr.Checkbox(value=False, label="Post Process", info="Identify leftover artifacts within vocal output; may improve separation for some songs.")
                 vr_post_process_threshold = gr.Slider(minimum=0.1, maximum=0.3, step=0.1, value=0.2, label="Post Process Threshold", info="Threshold for post-processing.")
                 vr_high_end_process = gr.Checkbox(value=False, label="High End Process", info="Mirror the missing frequency range of the output.")
             with gr.Row():
@@ -406,10 +395,10 @@ with gr.Blocks(
             with gr.Row():
                 demucs_model = gr.Dropdown(label="Select the Model", choices=DEMUCS_MODELS)
             with gr.Row():
-                demucs_seg_size = gr.Slider(minimum=1, maximum=100, step=1, value=40, label="Segment Size")
+                demucs_seg_size = gr.Slider(minimum=1, maximum=100, step=1, value=40, label="Segment Size", info="Size of segments into which the audio is split. Higher = slower but better quality.")
                 demucs_shifts = gr.Slider(minimum=0, maximum=20, step=1, value=2, label="Shifts", info="Number of predictions with random shifts, higher = slower but better quality.")
-                demucs_overlap = gr.Slider(minimum=0.001, maximum=0.999, step=0.001, value=0.25, label="Overlap")
-                demucs_segments_enabled = gr.Checkbox(value=True, label="Segment-wise processing")
+                demucs_overlap = gr.Slider(minimum=0.001, maximum=0.999, step=0.001, value=0.25, label="Overlap", info="Overlap between prediction windows. Higher = slower but better quality.")
+                demucs_segments_enabled = gr.Checkbox(value=True, label="Segment-wise processing", info="Enable segment-wise processing.")
             with gr.Row():
                 demucs_audio = gr.Audio(label="Input Audio", type="filepath")
             with gr.Row():
@@ -433,6 +422,7 @@ with gr.Blocks(
                 roformer_model,
                 roformer_seg_size,
                 roformer_override_seg_size,
+                roformer_batch_size,
                 roformer_overlap,
                 roformer_pitch_shift,
                 model_file_dir,
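In this hunk and the three that follow, each new `*_batch_size` component is spliced into the event's `inputs` list at the position of the new `batch_size` parameter. Gradio binds `inputs` to the handler positionally, so list order must match the function signature; a minimal sketch of that wiring (handler and component names are illustrative):

```python
import gradio as gr

def handler(audio, model, seg_size, override_seg_size, batch_size, overlap):
    # Arguments arrive in the same order as the inputs list below.
    return f"batch_size={batch_size}, overlap={overlap}"

with gr.Blocks() as demo:
    audio = gr.Audio(type="filepath")
    model = gr.Dropdown(choices=["example-model"], value="example-model")
    seg_size = gr.Slider(minimum=32, maximum=4000, step=32, value=256)
    override = gr.Checkbox(value=False)
    batch_size = gr.Slider(minimum=1, maximum=16, step=1, value=1)
    overlap = gr.Slider(minimum=2, maximum=10, step=1, value=8)
    result = gr.Textbox()
    gr.Button("Separate").click(
        handler,
        inputs=[audio, model, seg_size, override, batch_size, overlap],
        outputs=result,
    )
```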
@@ -450,6 +440,7 @@ with gr.Blocks(
                 mdx23c_model,
                 mdx23c_seg_size,
                 mdx23c_override_seg_size,
+                mdx23c_batch_size,
                 mdx23c_overlap,
                 mdx23c_pitch_shift,
                 model_file_dir,
@@ -468,6 +459,7 @@ with gr.Blocks(
                 mdx_hop_length,
                 mdx_seg_size,
                 mdx_overlap,
+                mdx_batch_size,
                 mdx_denoise,
                 model_file_dir,
                 output_dir,
@@ -482,6 +474,7 @@ with gr.Blocks(
             inputs=[
                 vr_audio,
                 vr_model,
+                vr_batch_size,
                 vr_window_size,
                 vr_aggression,
                 vr_tta,