# NeoPy — uploaded with huggingface_hub (commit 8bb5911, verified).
# NOTE: this header was residual web-page text; converted to a comment so the module parses.
import gradio as gr
from functools import lru_cache
import logging, os
from tabs.typing_extra import generate_switch, RVC_MODELS_DIR, update_models_list, show_hop_slider
# Module-level logger (fixes NameError: the except-branch below previously
# referenced an undefined `logger`).
logger = logging.getLogger(__name__)


@lru_cache(maxsize=1)
def get_current_models(models_dir):
    """Return the sorted list of model sub-directories under ``models_dir``.

    Plain files and the special ``'mute'`` entry are excluded. The result is
    cached (``maxsize=1``) because the UI only needs it once at build time;
    the Refresh button repopulates the dropdown via ``update_models_list``.

    Args:
        models_dir: path to the directory holding one sub-directory per model.

    Returns:
        Sorted list of model directory names.

    Raises:
        gr.Error: if the directory cannot be read (missing path, permissions).
    """
    try:
        models_list = [
            item
            for item in os.listdir(models_dir)
            if item != 'mute' and os.path.isdir(os.path.join(models_dir, item))
        ]
        return sorted(models_list)
    except OSError as e:
        # logger.exception records the full traceback — the original code
        # called an un-imported format_exc() here, raising NameError instead
        # of logging the actual failure.
        logger.exception("Error accessing models directory: %s", e)
        raise gr.Error(f"Failed to list models. Check directory permissions: {e}") from e
def inference_tab(status_message, rvc_model):
    """Build the "Inference" tab UI and wire its event handlers.

    Args:
        status_message: shared Gradio component used as an output target for
            status text (presumably a Textbox created by the caller — confirm).
        rvc_model: incoming component reference. NOTE(review): this parameter
            is immediately shadowed by the Dropdown created below, so the
            value passed in is never used.

    Returns:
        The newly created ``rvc_model`` Dropdown, so the caller can hold a
        reference to the live component.
    """
    with gr.TabItem("Inference"):
        # --- model selection + pitch ------------------------------------
        with gr.Row(equal_height=True):
            # Shadows the `rvc_model` parameter; choices come from the
            # cached directory scan at tab-build time.
            rvc_model = gr.Dropdown(
                label="Voice Model",
                info="Select a model for voice conversion.",
                choices=get_current_models(RVC_MODELS_DIR),
                allow_custom_value=False
            )
            pitch = gr.Slider(
                minimum=-12,
                maximum=12,
                value=0,
                step=1,
                label="Pitch Change (Vocals Only)",
                info="Adjust vocal pitch in semitones."
            )
        with gr.Row(equal_height=True):
            # Refresh rescans the models directory and updates the dropdown
            # plus the shared status component.
            refresh_btn = gr.Button("Refresh Models", variant="secondary")
            refresh_btn.click(
                fn=update_models_list,
                outputs=[rvc_model, status_message]
            )
        # --- song input -------------------------------------------------
        with gr.Group():
            with gr.Row(equal_height=True):
                with gr.Column(visible=True, variant="panel", scale=1):
                    song_input = gr.Textbox(
                        label="Song Input",
                        placeholder="Enter a YouTube link or local file path",
                        lines=1
                    )
        # --- RVC conversion settings (collapsed by default) -------------
        with gr.Accordion("RVC Settings", open=False):
            with gr.Row(equal_height=True):
                with gr.Column(variant="panel", scale=1):
                    f0_method = gr.Dropdown(
                        choices=[
                            "rmvpe", "crepe", "fcpe",
                            "hybrid[rmvpe+fcpe]", "crepe-tiny"
                        ],
                        value="rmvpe",
                        label="Pitch Detection Algorithm",
                        info="Algorithm for detecting vocal pitch."
                    )
            with gr.Row(equal_height=True):
                filter_radius = gr.Slider(0, 7, value=3, step=1, label="Filter Radius")
                rms_mix_rate = gr.Slider(0, 1, value=0.25, step=0.01, label="RMS Mix Rate")
                protect = gr.Slider(0, 0.5, value=0.33, step=0.01, label="Protect Rate")
                index_rate = gr.Slider(0, 1, value=0.5, step=0.01, label="Search Feature Ratio")
                # Hidden unless the selected algorithm uses it; toggled by
                # the f0_method.change handler below via show_hop_slider.
                crepe_hop_length = gr.Slider(
                    32, 320, value=128, step=1,
                    label="Crepe Hop Length",
                    visible=False,
                    info="Hop length for crepe pitch detection."
                )
            f0_method.change(
                fn=show_hop_slider,
                inputs=f0_method,
                outputs=crepe_hop_length
            )
        # --- audio mixing settings (collapsed by default) ----------------
        with gr.Accordion("Audio Settings", open=False):
            with gr.Column(variant="panel", scale=1):
                with gr.Row(equal_height=True):
                    main_gain = gr.Slider(-20, 20, value=0, step=1, label="Main Vocals Gain (dB)")
                    backup_gain = gr.Slider(-20, 20, value=0, step=1, label="Backup Vocals Gain (dB)")
                    inst_gain = gr.Slider(-20, 20, value=0, step=1, label="Instrumental Gain (dB)")
                with gr.Row(equal_height=True):
                    keep_files = gr.Checkbox(label="Keep Intermediate Files", value=True)
                    vocal_only = gr.Checkbox(label="Vocal-Only Conversion", value=False)
        # --- conversion trigger + output --------------------------------
        with gr.Row(equal_height=True):
            generate_btn = gr.Button("Convert", variant="primary", scale=2)
            ai_cover = gr.Audio(label="Output Cover", type="filepath", interactive=False, scale=9)
            with gr.Column(min_width=160):
                output_format = gr.Dropdown(
                    choices=["mp3", "wav", "flac"],
                    value="mp3",
                    label="Output Format"
                )
        # Input order must match generate_switch's signature — keep in sync.
        generate_btn.click(
            fn=generate_switch,
            inputs=[
                song_input, rvc_model, pitch, keep_files, main_gain,
                backup_gain, inst_gain, index_rate, filter_radius, rms_mix_rate,
                f0_method, crepe_hop_length, protect, output_format, vocal_only
            ],
            outputs=[ai_cover, status_message],
            queue=True,
            show_progress="full"
        )
    return rvc_model