Upload 79 files
This view is limited to 50 files because it contains too many changes.
- .gitignore +22 -0
- rvc/configs/32000.json +42 -0
- rvc/configs/40000.json +42 -0
- rvc/configs/44100.json +42 -0
- rvc/configs/48000.json +42 -0
- rvc/configs/config.py +99 -0
- rvc/infer/infer.py +495 -0
- rvc/infer/pipeline.py +690 -0
- rvc/lib/algorithm/__init__.py +0 -0
- rvc/lib/algorithm/attentions.py +243 -0
- rvc/lib/algorithm/commons.py +117 -0
- rvc/lib/algorithm/discriminators.py +175 -0
- rvc/lib/algorithm/encoders.py +209 -0
- rvc/lib/algorithm/generators/hifigan.py +230 -0
- rvc/lib/algorithm/generators/hifigan_mrf.py +385 -0
- rvc/lib/algorithm/generators/hifigan_nsf.py +237 -0
- rvc/lib/algorithm/generators/refinegan.py +475 -0
- rvc/lib/algorithm/modules.py +117 -0
- rvc/lib/algorithm/normalization.py +26 -0
- rvc/lib/algorithm/residuals.py +267 -0
- rvc/lib/algorithm/synthesizers.py +244 -0
- rvc/lib/predictors/F0Extractor.py +99 -0
- rvc/lib/predictors/FCPE.py +918 -0
- rvc/lib/predictors/RMVPE.py +537 -0
- rvc/lib/tools/analyzer.py +76 -0
- rvc/lib/tools/gdown.py +285 -0
- rvc/lib/tools/launch_tensorboard.py +21 -0
- rvc/lib/tools/model_download.py +226 -0
- rvc/lib/tools/prerequisites_download.py +153 -0
- rvc/lib/tools/pretrained_selector.py +13 -0
- rvc/lib/tools/split_audio.py +79 -0
- rvc/lib/tools/tts.py +29 -0
- rvc/lib/tools/tts_voices.json +0 -0
- rvc/lib/utils.py +142 -0
- rvc/lib/zluda.py +76 -0
- rvc/models/embedders/.gitkeep +1 -0
- rvc/models/embedders/embedders_custom/.gitkeep +1 -0
- rvc/models/formant/.gitkeep +1 -0
- rvc/models/predictors/.gitkeep +0 -0
- rvc/models/pretraineds/.gitkeep +0 -0
- rvc/models/pretraineds/custom/.gitkeep +1 -0
- rvc/models/pretraineds/hifi-gan/.gitkeep +0 -0
- rvc/train/data_utils.py +379 -0
- rvc/train/extract/extract.py +248 -0
- rvc/train/extract/preparing_files.py +75 -0
- rvc/train/losses.py +132 -0
- rvc/train/mel_processing.py +234 -0
- rvc/train/preprocess/preprocess.py +345 -0
- rvc/train/preprocess/slicer.py +235 -0
- rvc/train/process/change_info.py +22 -0
.gitignore
ADDED
@@ -0,0 +1,22 @@
*.exe
*.pt
*.onnx
*.pyc
*.pth
*.index
*.mp3
*.flac
*.ogg
*.m4a
*.bin
*.wav
*.txt
*.zip
*.png
*.safetensors

logs
rvc/models
env
venv
.venv
rvc/configs/32000.json
ADDED
@@ -0,0 +1,42 @@
{
  "train": {
    "log_interval": 200,
    "seed": 1234,
    "learning_rate": 1e-4,
    "betas": [0.8, 0.99],
    "eps": 1e-9,
    "lr_decay": 0.999875,
    "segment_size": 12800,
    "c_mel": 45,
    "c_kl": 1.0
  },
  "data": {
    "max_wav_value": 32768.0,
    "sample_rate": 32000,
    "filter_length": 1024,
    "hop_length": 320,
    "win_length": 1024,
    "n_mel_channels": 80,
    "mel_fmin": 0.0,
    "mel_fmax": null
  },
  "model": {
    "inter_channels": 192,
    "hidden_channels": 192,
    "filter_channels": 768,
    "text_enc_hidden_dim": 768,
    "n_heads": 2,
    "n_layers": 6,
    "kernel_size": 3,
    "p_dropout": 0,
    "resblock": "1",
    "resblock_kernel_sizes": [3,7,11],
    "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
    "upsample_rates": [10,8,2,2],
    "upsample_initial_channel": 512,
    "upsample_kernel_sizes": [20,16,4,4],
    "use_spectral_norm": false,
    "gin_channels": 256,
    "spk_embed_dim": 109
  }
}
rvc/configs/40000.json
ADDED
@@ -0,0 +1,42 @@
{
  "train": {
    "log_interval": 200,
    "seed": 1234,
    "learning_rate": 1e-4,
    "betas": [0.8, 0.99],
    "eps": 1e-9,
    "lr_decay": 0.999875,
    "segment_size": 12800,
    "c_mel": 45,
    "c_kl": 1.0
  },
  "data": {
    "max_wav_value": 32768.0,
    "sample_rate": 40000,
    "filter_length": 2048,
    "hop_length": 400,
    "win_length": 2048,
    "n_mel_channels": 125,
    "mel_fmin": 0.0,
    "mel_fmax": null
  },
  "model": {
    "inter_channels": 192,
    "hidden_channels": 192,
    "filter_channels": 768,
    "text_enc_hidden_dim": 768,
    "n_heads": 2,
    "n_layers": 6,
    "kernel_size": 3,
    "p_dropout": 0,
    "resblock": "1",
    "resblock_kernel_sizes": [3,7,11],
    "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
    "upsample_rates": [10,10,2,2],
    "upsample_initial_channel": 512,
    "upsample_kernel_sizes": [16,16,4,4],
    "use_spectral_norm": false,
    "gin_channels": 256,
    "spk_embed_dim": 109
  }
}
rvc/configs/44100.json
ADDED
@@ -0,0 +1,42 @@
{
  "train": {
    "log_interval": 200,
    "seed": 1234,
    "learning_rate": 0.0001,
    "betas": [0.8, 0.99],
    "eps": 1e-09,
    "lr_decay": 0.999875,
    "segment_size": 15876,
    "c_mel": 45,
    "c_kl": 1.0
  },
  "data": {
    "max_wav_value": 32768.0,
    "sample_rate": 44100,
    "filter_length": 2048,
    "hop_length": 441,
    "win_length": 2048,
    "n_mel_channels": 160,
    "mel_fmin": 0.0,
    "mel_fmax": null
  },
  "model": {
    "inter_channels": 192,
    "hidden_channels": 192,
    "filter_channels": 768,
    "text_enc_hidden_dim": 768,
    "n_heads": 2,
    "n_layers": 6,
    "kernel_size": 3,
    "p_dropout": 0,
    "resblock": "1",
    "resblock_kernel_sizes": [3,7,11],
    "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
    "upsample_rates": [7,7,3,3],
    "upsample_initial_channel": 512,
    "upsample_kernel_sizes": [14,14,6,6],
    "use_spectral_norm": false,
    "gin_channels": 256,
    "spk_embed_dim": 109
  }
}
rvc/configs/48000.json
ADDED
@@ -0,0 +1,42 @@
{
  "train": {
    "log_interval": 200,
    "seed": 1234,
    "learning_rate": 1e-4,
    "betas": [0.8, 0.99],
    "eps": 1e-9,
    "lr_decay": 0.999875,
    "segment_size": 17280,
    "c_mel": 45,
    "c_kl": 1.0
  },
  "data": {
    "max_wav_value": 32768.0,
    "sample_rate": 48000,
    "filter_length": 2048,
    "hop_length": 480,
    "win_length": 2048,
    "n_mel_channels": 128,
    "mel_fmin": 0.0,
    "mel_fmax": null
  },
  "model": {
    "inter_channels": 192,
    "hidden_channels": 192,
    "filter_channels": 768,
    "text_enc_hidden_dim": 768,
    "n_heads": 2,
    "n_layers": 6,
    "kernel_size": 3,
    "p_dropout": 0,
    "resblock": "1",
    "resblock_kernel_sizes": [3,7,11],
    "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
    "upsample_rates": [12,10,2,2],
    "upsample_initial_channel": 512,
    "upsample_kernel_sizes": [24,20,4,4],
    "use_spectral_norm": false,
    "gin_channels": 256,
    "spk_embed_dim": 109
  }
}
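The four sample-rate configs above differ mainly in filter/window size, hop_length, mel-band count, upsample_rates, and segment_size. As a quick cross-check (a sketch only, assuming the four JSON files are readable from the current directory), the generator's upsample_rates multiply out to hop_length and each segment_size is an exact multiple of hop_length, so every training segment maps to a whole number of frames:

import json
import math

for name in ("32000.json", "40000.json", "44100.json", "48000.json"):
    with open(name) as f:
        cfg = json.load(f)
    hop = cfg["data"]["hop_length"]
    seg = cfg["train"]["segment_size"]
    # upsample_rates multiply out to hop_length; segment_size divides evenly by it
    assert math.prod(cfg["model"]["upsample_rates"]) == hop
    assert seg % hop == 0
    print(f"{name}: {seg // hop} frames per training segment")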
rvc/configs/config.py
ADDED
@@ -0,0 +1,99 @@
import torch
import json
import os

version_config_paths = [
    os.path.join("48000.json"),
    os.path.join("40000.json"),
    os.path.join("44100.json"),
    os.path.join("32000.json"),
]


def singleton(cls):
    instances = {}

    def get_instance(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]

    return get_instance


@singleton
class Config:
    def __init__(self):
        self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
        self.gpu_name = (
            torch.cuda.get_device_name(int(self.device.split(":")[-1]))
            if self.device.startswith("cuda")
            else None
        )
        self.json_config = self.load_config_json()
        self.gpu_mem = None
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    def load_config_json(self):
        configs = {}
        for config_file in version_config_paths:
            config_path = os.path.join("rvc", "configs", config_file)
            with open(config_path, "r") as f:
                configs[config_file] = json.load(f)
        return configs

    def device_config(self):
        if self.device.startswith("cuda"):
            self.set_cuda_config()
        else:
            self.device = "cpu"

        # Configuration for 6GB GPU memory
        x_pad, x_query, x_center, x_max = (1, 6, 38, 41)
        if self.gpu_mem is not None and self.gpu_mem <= 4:
            # Configuration for 5GB GPU memory
            x_pad, x_query, x_center, x_max = (1, 5, 30, 32)

        return x_pad, x_query, x_center, x_max

    def set_cuda_config(self):
        i_device = int(self.device.split(":")[-1])
        self.gpu_name = torch.cuda.get_device_name(i_device)
        self.gpu_mem = torch.cuda.get_device_properties(i_device).total_memory // (
            1024**3
        )


def max_vram_gpu(gpu):
    if torch.cuda.is_available():
        gpu_properties = torch.cuda.get_device_properties(gpu)
        total_memory_gb = round(gpu_properties.total_memory / 1024 / 1024 / 1024)
        return total_memory_gb
    else:
        return "8"


def get_gpu_info():
    ngpu = torch.cuda.device_count()
    gpu_infos = []
    if torch.cuda.is_available() or ngpu != 0:
        for i in range(ngpu):
            gpu_name = torch.cuda.get_device_name(i)
            mem = int(
                torch.cuda.get_device_properties(i).total_memory / 1024 / 1024 / 1024
                + 0.4
            )
            gpu_infos.append(f"{i}: {gpu_name} ({mem} GB)")
    if len(gpu_infos) > 0:
        gpu_info = "\n".join(gpu_infos)
    else:
        gpu_info = "Unfortunately, there is no compatible GPU available to support your training."
    return gpu_info


def get_number_of_gpus():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        return "-".join(map(str, range(num_gpus)))
    else:
        return "-"
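Because Config is wrapped in the singleton decorator defined above, every caller shares one instance; a minimal usage sketch (assuming the rvc package is importable and the process runs from the repository root, since load_config_json reads rvc/configs/*.json relative to the working directory):

from rvc.configs.config import Config

cfg_a = Config()
cfg_b = Config()
assert cfg_a is cfg_b  # the decorator caches and returns the first instance

# Device string plus the x_pad / x_query / x_center / x_max tuple chosen in device_config()
print(cfg_a.device, cfg_a.x_pad, cfg_a.x_query, cfg_a.x_center, cfg_a.x_max)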
rvc/infer/infer.py
ADDED
@@ -0,0 +1,495 @@
import os
import sys
import soxr
import time
import torch
import librosa
import logging
import traceback
import numpy as np
import soundfile as sf
import noisereduce as nr
from pedalboard import (
    Pedalboard,
    Chorus,
    Distortion,
    Reverb,
    PitchShift,
    Limiter,
    Gain,
    Bitcrush,
    Clipping,
    Compressor,
    Delay,
)

now_dir = os.getcwd()
sys.path.append(now_dir)

from rvc.infer.pipeline import Pipeline as VC
from rvc.lib.utils import load_audio_infer, load_embedding
from rvc.lib.tools.split_audio import process_audio, merge_audio
from rvc.lib.algorithm.synthesizers import Synthesizer
from rvc.configs.config import Config

logging.getLogger("httpx").setLevel(logging.WARNING)
logging.getLogger("httpcore").setLevel(logging.WARNING)
logging.getLogger("faiss").setLevel(logging.WARNING)
logging.getLogger("faiss.loader").setLevel(logging.WARNING)


class VoiceConverter:
    """
    A class for performing voice conversion using the Retrieval-Based Voice Conversion (RVC) method.
    """

    def __init__(self):
        """
        Initializes the VoiceConverter with default configuration, and sets up models and parameters.
        """
        self.config = Config()  # Load configuration
        self.hubert_model = (
            None  # Initialize the Hubert model (for embedding extraction)
        )
        self.last_embedder_model = None  # Last used embedder model
        self.tgt_sr = None  # Target sampling rate for the output audio
        self.net_g = None  # Generator network for voice conversion
        self.vc = None  # Voice conversion pipeline instance
        self.cpt = None  # Checkpoint for loading model weights
        self.version = None  # Model version
        self.n_spk = None  # Number of speakers in the model
        self.use_f0 = None  # Whether the model uses F0
        self.loaded_model = None

    def load_hubert(self, embedder_model: str, embedder_model_custom: str = None):
        """
        Loads the HuBERT model for speaker embedding extraction.

        Args:
            embedder_model (str): Path to the pre-trained HuBERT model.
            embedder_model_custom (str): Path to the custom HuBERT model.
        """
        self.hubert_model = load_embedding(embedder_model, embedder_model_custom)
        self.hubert_model = self.hubert_model.to(self.config.device).float()
        self.hubert_model.eval()

    @staticmethod
    def remove_audio_noise(data, sr, reduction_strength=0.7):
        """
        Removes noise from an audio file using the NoiseReduce library.

        Args:
            data (numpy.ndarray): The audio data as a NumPy array.
            sr (int): The sample rate of the audio data.
            reduction_strength (float): Strength of the noise reduction. Default is 0.7.
        """
        try:
            reduced_noise = nr.reduce_noise(
                y=data, sr=sr, prop_decrease=reduction_strength
            )
            return reduced_noise
        except Exception as error:
            print(f"An error occurred removing audio noise: {error}")
            return None

    @staticmethod
    def convert_audio_format(input_path, output_path, output_format):
        """
        Converts an audio file to a specified output format.

        Args:
            input_path (str): Path to the input audio file.
            output_path (str): Path to the output audio file.
            output_format (str): Desired audio format (e.g., "WAV", "MP3").
        """
        try:
            if output_format != "WAV":
                print(f"Saving audio as {output_format}...")
                audio, sample_rate = librosa.load(input_path, sr=None)
                common_sample_rates = [
                    8000,
                    11025,
                    12000,
                    16000,
                    22050,
                    24000,
                    32000,
                    44100,
                    48000,
                ]
                target_sr = min(common_sample_rates, key=lambda x: abs(x - sample_rate))
                audio = librosa.resample(
                    audio, orig_sr=sample_rate, target_sr=target_sr, res_type="soxr_vhq"
                )
                sf.write(output_path, audio, target_sr, format=output_format.lower())
            return output_path
        except Exception as error:
            print(f"An error occurred converting the audio format: {error}")

    @staticmethod
    def post_process_audio(
        audio_input,
        sample_rate,
        **kwargs,
    ):
        board = Pedalboard()
        if kwargs.get("reverb", False):
            reverb = Reverb(
                room_size=kwargs.get("reverb_room_size", 0.5),
                damping=kwargs.get("reverb_damping", 0.5),
                wet_level=kwargs.get("reverb_wet_level", 0.33),
                dry_level=kwargs.get("reverb_dry_level", 0.4),
                width=kwargs.get("reverb_width", 1.0),
                freeze_mode=kwargs.get("reverb_freeze_mode", 0),
            )
            board.append(reverb)
        if kwargs.get("pitch_shift", False):
            pitch_shift = PitchShift(semitones=kwargs.get("pitch_shift_semitones", 0))
            board.append(pitch_shift)
        if kwargs.get("limiter", False):
            limiter = Limiter(
                threshold_db=kwargs.get("limiter_threshold", -6),
                release_ms=kwargs.get("limiter_release", 0.05),
            )
            board.append(limiter)
        if kwargs.get("gain", False):
            gain = Gain(gain_db=kwargs.get("gain_db", 0))
            board.append(gain)
        if kwargs.get("distortion", False):
            distortion = Distortion(drive_db=kwargs.get("distortion_gain", 25))
            board.append(distortion)
        if kwargs.get("chorus", False):
            chorus = Chorus(
                rate_hz=kwargs.get("chorus_rate", 1.0),
                depth=kwargs.get("chorus_depth", 0.25),
                centre_delay_ms=kwargs.get("chorus_delay", 7),
                feedback=kwargs.get("chorus_feedback", 0.0),
                mix=kwargs.get("chorus_mix", 0.5),
            )
            board.append(chorus)
        if kwargs.get("bitcrush", False):
            bitcrush = Bitcrush(bit_depth=kwargs.get("bitcrush_bit_depth", 8))
            board.append(bitcrush)
        if kwargs.get("clipping", False):
            clipping = Clipping(threshold_db=kwargs.get("clipping_threshold", 0))
            board.append(clipping)
        if kwargs.get("compressor", False):
            compressor = Compressor(
                threshold_db=kwargs.get("compressor_threshold", 0),
                ratio=kwargs.get("compressor_ratio", 1),
                attack_ms=kwargs.get("compressor_attack", 1.0),
                release_ms=kwargs.get("compressor_release", 100),
            )
            board.append(compressor)
        if kwargs.get("delay", False):
            delay = Delay(
                delay_seconds=kwargs.get("delay_seconds", 0.5),
                feedback=kwargs.get("delay_feedback", 0.0),
                mix=kwargs.get("delay_mix", 0.5),
            )
            board.append(delay)
        return board(audio_input, sample_rate)

    def convert_audio(
        self,
        audio_input_path: str,
        audio_output_path: str,
        model_path: str,
        index_path: str,
        pitch: int = 0,
        f0_file: str = None,
        f0_method: str = "rmvpe",
        index_rate: float = 0.75,
        volume_envelope: float = 1,
        protect: float = 0.5,
        hop_length: int = 128,
        split_audio: bool = False,
        f0_autotune: bool = False,
        f0_autotune_strength: float = 1,
        filter_radius: int = 3,
        embedder_model: str = "contentvec",
        embedder_model_custom: str = None,
        clean_audio: bool = False,
        clean_strength: float = 0.5,
        export_format: str = "WAV",
        post_process: bool = False,
        resample_sr: int = 0,
        sid: int = 0,
        **kwargs,
    ):
        """
        Performs voice conversion on the input audio.

        Args:
            pitch (int): Key for F0 up-sampling.
            filter_radius (int): Radius for filtering.
            index_rate (float): Rate for index matching.
            volume_envelope (int): RMS mix rate.
            protect (float): Protection rate for certain audio segments.
            hop_length (int): Hop length for audio processing.
            f0_method (str): Method for F0 extraction.
            audio_input_path (str): Path to the input audio file.
            audio_output_path (str): Path to the output audio file.
            model_path (str): Path to the voice conversion model.
            index_path (str): Path to the index file.
            split_audio (bool): Whether to split the audio for processing.
            f0_autotune (bool): Whether to use F0 autotune.
            clean_audio (bool): Whether to clean the audio.
            clean_strength (float): Strength of the audio cleaning.
            export_format (str): Format for exporting the audio.
            f0_file (str): Path to the F0 file.
            embedder_model (str): Path to the embedder model.
            embedder_model_custom (str): Path to the custom embedder model.
            resample_sr (int, optional): Resample sampling rate. Default is 0.
            sid (int, optional): Speaker ID. Default is 0.
            **kwargs: Additional keyword arguments.
        """
        if not model_path:
            print("No model path provided. Aborting conversion.")
            return

        self.get_vc(model_path, sid)

        try:
            start_time = time.time()
            print(f"Converting audio '{audio_input_path}'...")

            audio = load_audio_infer(
                audio_input_path,
                16000,
                **kwargs,
            )
            audio_max = np.abs(audio).max() / 0.95

            if audio_max > 1:
                audio /= audio_max

            if not self.hubert_model or embedder_model != self.last_embedder_model:
                self.load_hubert(embedder_model, embedder_model_custom)
                self.last_embedder_model = embedder_model

            file_index = (
                index_path.strip()
                .strip('"')
                .strip("\n")
                .strip('"')
                .strip()
                .replace("trained", "added")
            )

            if self.tgt_sr != resample_sr >= 16000:
                self.tgt_sr = resample_sr

            if split_audio:
                chunks, intervals = process_audio(audio, 16000)
                print(f"Audio split into {len(chunks)} chunks for processing.")
            else:
                chunks = []
                chunks.append(audio)

            converted_chunks = []
            for c in chunks:
                audio_opt = self.vc.pipeline(
                    model=self.hubert_model,
                    net_g=self.net_g,
                    sid=sid,
                    audio=c,
                    pitch=pitch,
                    f0_method=f0_method,
                    file_index=file_index,
                    index_rate=index_rate,
                    pitch_guidance=self.use_f0,
                    filter_radius=filter_radius,
                    volume_envelope=volume_envelope,
                    version=self.version,
                    protect=protect,
                    hop_length=hop_length,
                    f0_autotune=f0_autotune,
                    f0_autotune_strength=f0_autotune_strength,
                    f0_file=f0_file,
                )
                converted_chunks.append(audio_opt)
                if split_audio:
                    print(f"Converted audio chunk {len(converted_chunks)}")

            if split_audio:
                audio_opt = merge_audio(
                    chunks, converted_chunks, intervals, 16000, self.tgt_sr
                )
            else:
                audio_opt = converted_chunks[0]

            if clean_audio:
                cleaned_audio = self.remove_audio_noise(
                    audio_opt, self.tgt_sr, clean_strength
                )
                if cleaned_audio is not None:
                    audio_opt = cleaned_audio

            if post_process:
                audio_opt = self.post_process_audio(
                    audio_input=audio_opt,
                    sample_rate=self.tgt_sr,
                    **kwargs,
                )

            sf.write(audio_output_path, audio_opt, self.tgt_sr, format="WAV")
            output_path_format = audio_output_path.replace(
                ".wav", f".{export_format.lower()}"
            )
            audio_output_path = self.convert_audio_format(
                audio_output_path, output_path_format, export_format
            )

            elapsed_time = time.time() - start_time
            print(
                f"Conversion completed at '{audio_output_path}' in {elapsed_time:.2f} seconds."
            )
        except Exception as error:
            print(f"An error occurred during audio conversion: {error}")
            print(traceback.format_exc())

    def convert_audio_batch(
        self,
        audio_input_paths: str,
        audio_output_path: str,
        **kwargs,
    ):
        """
        Performs voice conversion on a batch of input audio files.

        Args:
            audio_input_paths (str): List of paths to the input audio files.
            audio_output_path (str): Path to the output audio file.
            resample_sr (int, optional): Resample sampling rate. Default is 0.
            sid (int, optional): Speaker ID. Default is 0.
            **kwargs: Additional keyword arguments.
        """
        pid = os.getpid()
        try:
            with open(
                os.path.join(now_dir, "assets", "infer_pid.txt"), "w"
            ) as pid_file:
                pid_file.write(str(pid))
            start_time = time.time()
            print(f"Converting audio batch '{audio_input_paths}'...")
            audio_files = [
                f
                for f in os.listdir(audio_input_paths)
                if f.endswith(
                    (
                        "wav",
                        "mp3",
                        "flac",
                        "ogg",
                        "opus",
                        "m4a",
                        "mp4",
                        "aac",
                        "alac",
                        "wma",
                        "aiff",
                        "webm",
                        "ac3",
                    )
                )
            ]
            print(f"Detected {len(audio_files)} audio files for inference.")
            for a in audio_files:
                new_input = os.path.join(audio_input_paths, a)
                new_output = os.path.splitext(a)[0] + "_output.wav"
                new_output = os.path.join(audio_output_path, new_output)
                if os.path.exists(new_output):
                    continue
                self.convert_audio(
                    audio_input_path=new_input,
                    audio_output_path=new_output,
                    **kwargs,
                )
            print(f"Conversion completed at '{audio_input_paths}'.")
            elapsed_time = time.time() - start_time
            print(f"Batch conversion completed in {elapsed_time:.2f} seconds.")
        except Exception as error:
            print(f"An error occurred during audio batch conversion: {error}")
            print(traceback.format_exc())
        finally:
            os.remove(os.path.join(now_dir, "assets", "infer_pid.txt"))

    def get_vc(self, weight_root, sid):
        """
        Loads the voice conversion model and sets up the pipeline.

        Args:
            weight_root (str): Path to the model weights.
            sid (int): Speaker ID.
        """
        if sid == "" or sid == []:
            self.cleanup_model()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        if not self.loaded_model or self.loaded_model != weight_root:
            self.load_model(weight_root)
            if self.cpt is not None:
                self.setup_network()
                self.setup_vc_instance()
            self.loaded_model = weight_root

    def cleanup_model(self):
        """
        Cleans up the model and releases resources.
        """
        if self.hubert_model is not None:
            del self.net_g, self.n_spk, self.vc, self.hubert_model, self.tgt_sr
            self.hubert_model = self.net_g = self.n_spk = self.vc = self.tgt_sr = None
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        del self.net_g, self.cpt
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        self.cpt = None

    def load_model(self, weight_root):
        """
        Loads the model weights from the specified path.

        Args:
            weight_root (str): Path to the model weights.
        """
        self.cpt = (
            torch.load(weight_root, map_location="cpu", weights_only=True)
            if os.path.isfile(weight_root)
            else None
        )

    def setup_network(self):
        """
        Sets up the network configuration based on the loaded checkpoint.
        """
        if self.cpt is not None:
            self.tgt_sr = self.cpt["config"][-1]
            self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0]
            self.use_f0 = self.cpt.get("f0", 1)

            self.version = self.cpt.get("version", "v1")
            self.text_enc_hidden_dim = 768 if self.version == "v2" else 256
            self.vocoder = self.cpt.get("vocoder", "HiFi-GAN")
            self.net_g = Synthesizer(
                *self.cpt["config"],
                use_f0=self.use_f0,
                text_enc_hidden_dim=self.text_enc_hidden_dim,
                vocoder=self.vocoder,
            )
            del self.net_g.enc_q
            self.net_g.load_state_dict(self.cpt["weight"], strict=False)
            self.net_g = self.net_g.to(self.config.device).float()
            self.net_g.eval()

    def setup_vc_instance(self):
        """
        Sets up the voice conversion pipeline instance based on the target sampling rate and configuration.
        """
        if self.cpt is not None:
            self.vc = VC(self.tgt_sr, self.config)
            self.n_spk = self.cpt["config"][-3]
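A minimal driver for the VoiceConverter class above, sketched with placeholder file paths (the .pth checkpoint, the .index file, and the predictor/embedder models the pipeline expects under rvc/models are assumptions and must already exist on disk):

from rvc.infer.infer import VoiceConverter

converter = VoiceConverter()
converter.convert_audio(
    audio_input_path="input.wav",                     # placeholder input file
    audio_output_path="output.wav",                   # written as WAV, then converted to export_format
    model_path="logs/my_model/my_model.pth",          # placeholder RVC checkpoint
    index_path="logs/my_model/added_my_model.index",  # optional FAISS index ("" skips retrieval)
    pitch=0,
    f0_method="rmvpe",
    export_format="WAV",
)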
rvc/infer/pipeline.py
ADDED
@@ -0,0 +1,690 @@
import os
import gc
import re
import sys
import torch
import torch.nn.functional as F
import torchcrepe
import faiss
import librosa
import numpy as np
from scipy import signal
from torch import Tensor

now_dir = os.getcwd()
sys.path.append(now_dir)

from rvc.lib.predictors.RMVPE import RMVPE0Predictor
from rvc.lib.predictors.FCPE import FCPEF0Predictor

import logging

logging.getLogger("faiss").setLevel(logging.WARNING)

FILTER_ORDER = 5
CUTOFF_FREQUENCY = 48  # Hz
SAMPLE_RATE = 16000  # Hz
bh, ah = signal.butter(
    N=FILTER_ORDER, Wn=CUTOFF_FREQUENCY, btype="high", fs=SAMPLE_RATE
)

input_audio_path2wav = {}


class AudioProcessor:
    """
    A class for processing audio signals, specifically for adjusting RMS levels.
    """

    def change_rms(
        source_audio: np.ndarray,
        source_rate: int,
        target_audio: np.ndarray,
        target_rate: int,
        rate: float,
    ):
        """
        Adjust the RMS level of target_audio to match the RMS of source_audio, with a given blending rate.

        Args:
            source_audio: The source audio signal as a NumPy array.
            source_rate: The sampling rate of the source audio.
            target_audio: The target audio signal to adjust.
            target_rate: The sampling rate of the target audio.
            rate: The blending rate between the source and target RMS levels.
        """
        # Calculate RMS of both audio data
        rms1 = librosa.feature.rms(
            y=source_audio,
            frame_length=source_rate // 2 * 2,
            hop_length=source_rate // 2,
        )
        rms2 = librosa.feature.rms(
            y=target_audio,
            frame_length=target_rate // 2 * 2,
            hop_length=target_rate // 2,
        )

        # Interpolate RMS to match target audio length
        rms1 = F.interpolate(
            torch.from_numpy(rms1).float().unsqueeze(0),
            size=target_audio.shape[0],
            mode="linear",
        ).squeeze()
        rms2 = F.interpolate(
            torch.from_numpy(rms2).float().unsqueeze(0),
            size=target_audio.shape[0],
            mode="linear",
        ).squeeze()
        rms2 = torch.maximum(rms2, torch.zeros_like(rms2) + 1e-6)

        # Adjust target audio RMS based on the source audio RMS
        adjusted_audio = (
            target_audio
            * (torch.pow(rms1, 1 - rate) * torch.pow(rms2, rate - 1)).numpy()
        )
        return adjusted_audio


class Autotune:
    """
    A class for applying autotune to a given fundamental frequency (F0) contour.
    """

    def __init__(self, ref_freqs):
        """
        Initializes the Autotune class with a set of reference frequencies.

        Args:
            ref_freqs: A list of reference frequencies representing musical notes.
        """
        self.ref_freqs = ref_freqs
        self.note_dict = self.ref_freqs  # No interpolation needed

    def autotune_f0(self, f0, f0_autotune_strength):
        """
        Autotunes a given F0 contour by snapping each frequency to the closest reference frequency.

        Args:
            f0: The input F0 contour as a NumPy array.
        """
        autotuned_f0 = np.zeros_like(f0)
        for i, freq in enumerate(f0):
            closest_note = min(self.note_dict, key=lambda x: abs(x - freq))
            autotuned_f0[i] = freq + (closest_note - freq) * f0_autotune_strength
        return autotuned_f0


class Pipeline:
    """
    The main pipeline class for performing voice conversion, including preprocessing, F0 estimation,
    voice conversion using a model, and post-processing.
    """

    def __init__(self, tgt_sr, config):
        """
        Initializes the Pipeline class with target sampling rate and configuration parameters.

        Args:
            tgt_sr: The target sampling rate for the output audio.
            config: A configuration object containing various parameters for the pipeline.
        """
        self.x_pad = config.x_pad
        self.x_query = config.x_query
        self.x_center = config.x_center
        self.x_max = config.x_max
        self.sample_rate = 16000
        self.window = 160
        self.t_pad = self.sample_rate * self.x_pad
        self.t_pad_tgt = tgt_sr * self.x_pad
        self.t_pad2 = self.t_pad * 2
        self.t_query = self.sample_rate * self.x_query
        self.t_center = self.sample_rate * self.x_center
        self.t_max = self.sample_rate * self.x_max
        self.time_step = self.window / self.sample_rate * 1000
        self.f0_min = 50
        self.f0_max = 1100
        self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
        self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
        self.device = config.device
        self.ref_freqs = [
            49.00,  # G1
            51.91,  # G#1 / Ab1
            55.00,  # A1
            58.27,  # A#1 / Bb1
            61.74,  # B1
            65.41,  # C2
            69.30,  # C#2 / Db2
            73.42,  # D2
            77.78,  # D#2 / Eb2
            82.41,  # E2
            87.31,  # F2
            92.50,  # F#2 / Gb2
            98.00,  # G2
            103.83,  # G#2 / Ab2
            110.00,  # A2
            116.54,  # A#2 / Bb2
            123.47,  # B2
            130.81,  # C3
            138.59,  # C#3 / Db3
            146.83,  # D3
            155.56,  # D#3 / Eb3
            164.81,  # E3
            174.61,  # F3
            185.00,  # F#3 / Gb3
            196.00,  # G3
            207.65,  # G#3 / Ab3
            220.00,  # A3
            233.08,  # A#3 / Bb3
            246.94,  # B3
            261.63,  # C4
            277.18,  # C#4 / Db4
            293.66,  # D4
            311.13,  # D#4 / Eb4
            329.63,  # E4
            349.23,  # F4
            369.99,  # F#4 / Gb4
            392.00,  # G4
            415.30,  # G#4 / Ab4
            440.00,  # A4
            466.16,  # A#4 / Bb4
            493.88,  # B4
            523.25,  # C5
            554.37,  # C#5 / Db5
            587.33,  # D5
            622.25,  # D#5 / Eb5
            659.25,  # E5
            698.46,  # F5
            739.99,  # F#5 / Gb5
            783.99,  # G5
            830.61,  # G#5 / Ab5
            880.00,  # A5
            932.33,  # A#5 / Bb5
            987.77,  # B5
            1046.50,  # C6
        ]
        self.autotune = Autotune(self.ref_freqs)
        self.note_dict = self.autotune.note_dict
        self.model_rmvpe = RMVPE0Predictor(
            os.path.join("rvc", "models", "predictors", "rmvpe.pt"),
            device=self.device,
        )

    def get_f0_crepe(
        self,
        x,
        f0_min,
        f0_max,
        p_len,
        hop_length,
        model="full",
    ):
        """
        Estimates the fundamental frequency (F0) of a given audio signal using the Crepe model.

        Args:
            x: The input audio signal as a NumPy array.
            f0_min: Minimum F0 value to consider.
            f0_max: Maximum F0 value to consider.
            p_len: Desired length of the F0 output.
            hop_length: Hop length for the Crepe model.
            model: Crepe model size to use ("full" or "tiny").
        """
        x = x.astype(np.float32)
        x /= np.quantile(np.abs(x), 0.999)
        audio = torch.from_numpy(x).to(self.device, copy=True)
        audio = torch.unsqueeze(audio, dim=0)
        if audio.ndim == 2 and audio.shape[0] > 1:
            audio = torch.mean(audio, dim=0, keepdim=True).detach()
        audio = audio.detach()
        pitch: Tensor = torchcrepe.predict(
            audio,
            self.sample_rate,
            hop_length,
            f0_min,
            f0_max,
            model,
            batch_size=hop_length * 2,
            device=self.device,
            pad=True,
        )
        p_len = p_len or x.shape[0] // hop_length
        source = np.array(pitch.squeeze(0).cpu().float().numpy())
        source[source < 0.001] = np.nan
        target = np.interp(
            np.arange(0, len(source) * p_len, len(source)) / p_len,
            np.arange(0, len(source)),
            source,
        )
        f0 = np.nan_to_num(target)
        return f0

    def get_f0_hybrid(
        self,
        methods_str,
        x,
        f0_min,
        f0_max,
        p_len,
        hop_length,
    ):
        """
        Estimates the fundamental frequency (F0) using a hybrid approach combining multiple methods.

        Args:
            methods_str: A string specifying the methods to combine (e.g., "hybrid[crepe+rmvpe]").
            x: The input audio signal as a NumPy array.
            f0_min: Minimum F0 value to consider.
            f0_max: Maximum F0 value to consider.
            p_len: Desired length of the F0 output.
            hop_length: Hop length for F0 estimation methods.
        """
        methods_str = re.search("hybrid\[(.+)\]", methods_str)
        if methods_str:
            methods = [method.strip() for method in methods_str.group(1).split("+")]
        f0_computation_stack = []
        print(f"Calculating f0 pitch estimations for methods: {', '.join(methods)}")
        x = x.astype(np.float32)
        x /= np.quantile(np.abs(x), 0.999)
        for method in methods:
            f0 = None
            if method == "crepe":
                f0 = self.get_f0_crepe_computation(
                    x, f0_min, f0_max, p_len, int(hop_length)
                )
            elif method == "rmvpe":
                f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
                f0 = f0[1:]
            elif method == "fcpe":
                self.model_fcpe = FCPEF0Predictor(
                    os.path.join("rvc", "models", "predictors", "fcpe.pt"),
                    f0_min=int(f0_min),
                    f0_max=int(f0_max),
                    dtype=torch.float32,
                    device=self.device,
                    sample_rate=self.sample_rate,
                    threshold=0.03,
                )
                f0 = self.model_fcpe.compute_f0(x, p_len=p_len)
                del self.model_fcpe
                gc.collect()
            f0_computation_stack.append(f0)

        f0_computation_stack = [fc for fc in f0_computation_stack if fc is not None]
        f0_median_hybrid = None
        if len(f0_computation_stack) == 1:
            f0_median_hybrid = f0_computation_stack[0]
        else:
            f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0)
        return f0_median_hybrid

    def get_f0(
        self,
        input_audio_path,
        x,
        p_len,
        pitch,
        f0_method,
        filter_radius,
        hop_length,
        f0_autotune,
        f0_autotune_strength,
        inp_f0=None,
    ):
        """
        Estimates the fundamental frequency (F0) of a given audio signal using various methods.

        Args:
            input_audio_path: Path to the input audio file.
            x: The input audio signal as a NumPy array.
            p_len: Desired length of the F0 output.
            pitch: Key to adjust the pitch of the F0 contour.
            f0_method: Method to use for F0 estimation (e.g., "crepe").
            filter_radius: Radius for median filtering the F0 contour.
            hop_length: Hop length for F0 estimation methods.
            f0_autotune: Whether to apply autotune to the F0 contour.
            inp_f0: Optional input F0 contour to use instead of estimating.
        """
        global input_audio_path2wav
        if f0_method == "crepe":
            f0 = self.get_f0_crepe(x, self.f0_min, self.f0_max, p_len, int(hop_length))
        elif f0_method == "crepe-tiny":
            f0 = self.get_f0_crepe(
                x, self.f0_min, self.f0_max, p_len, int(hop_length), "tiny"
            )
        elif f0_method == "rmvpe":
            f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
        elif f0_method == "fcpe":
            self.model_fcpe = FCPEF0Predictor(
                os.path.join("rvc", "models", "predictors", "fcpe.pt"),
                f0_min=int(self.f0_min),
                f0_max=int(self.f0_max),
                dtype=torch.float32,
                device=self.device,
                sample_rate=self.sample_rate,
                threshold=0.03,
            )
            f0 = self.model_fcpe.compute_f0(x, p_len=p_len)
            del self.model_fcpe
            gc.collect()
        elif "hybrid" in f0_method:
            input_audio_path2wav[input_audio_path] = x.astype(np.double)
            f0 = self.get_f0_hybrid(
                f0_method,
                x,
                self.f0_min,
                self.f0_max,
                p_len,
                hop_length,
            )

        if f0_autotune is True:
            f0 = Autotune.autotune_f0(self, f0, f0_autotune_strength)

        f0 *= pow(2, pitch / 12)
        tf0 = self.sample_rate // self.window
        if inp_f0 is not None:
            delta_t = np.round(
                (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
            ).astype("int16")
            replace_f0 = np.interp(
                list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
            )
            shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
            f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
                :shape
            ]
        f0bak = f0.copy()
        f0_mel = 1127 * np.log(1 + f0 / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * 254 / (
            self.f0_mel_max - self.f0_mel_min
        ) + 1
        f0_mel[f0_mel <= 1] = 1
        f0_mel[f0_mel > 255] = 255
        f0_coarse = np.rint(f0_mel).astype(int)

        return f0_coarse, f0bak

    def voice_conversion(
        self,
        model,
        net_g,
        sid,
        audio0,
        pitch,
        pitchf,
        index,
        big_npy,
        index_rate,
        version,
        protect,
    ):
        """
        Performs voice conversion on a given audio segment.

        Args:
            model: The feature extractor model.
            net_g: The generative model for synthesizing speech.
            sid: Speaker ID for the target voice.
            audio0: The input audio segment.
            pitch: Quantized F0 contour for pitch guidance.
            pitchf: Original F0 contour for pitch guidance.
            index: FAISS index for speaker embedding retrieval.
            big_npy: Speaker embeddings stored in a NumPy array.
            index_rate: Blending rate for speaker embedding retrieval.
            version: Model version (Keep to support old models).
            protect: Protection level for preserving the original pitch.
        """
        with torch.no_grad():
            pitch_guidance = pitch != None and pitchf != None
            # prepare source audio
            feats = torch.from_numpy(audio0).float()
            feats = feats.mean(-1) if feats.dim() == 2 else feats
            assert feats.dim() == 1, feats.dim()
            feats = feats.view(1, -1).to(self.device)
            # extract features
            feats = model(feats)["last_hidden_state"]
            feats = (
                model.final_proj(feats[0]).unsqueeze(0) if version == "v1" else feats
            )
            # make a copy for pitch guidance and protection
            feats0 = feats.clone() if pitch_guidance else None
            if (
                index
            ):  # set by parent function, only true if index is available, loaded, and index rate > 0
                feats = self._retrieve_speaker_embeddings(
                    feats, index, big_npy, index_rate
                )
            # feature upsampling
            feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(
                0, 2, 1
            )
            # adjust the length if the audio is short
            p_len = min(audio0.shape[0] // self.window, feats.shape[1])
            if pitch_guidance:
                feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
                    0, 2, 1
                )
                pitch, pitchf = pitch[:, :p_len], pitchf[:, :p_len]
                # Pitch protection blending
                if protect < 0.5:
                    pitchff = pitchf.clone()
                    pitchff[pitchf > 0] = 1
                    pitchff[pitchf < 1] = protect
                    feats = feats * pitchff.unsqueeze(-1) + feats0 * (
                        1 - pitchff.unsqueeze(-1)
                    )
                    feats = feats.to(feats0.dtype)
            else:
                pitch, pitchf = None, None
            p_len = torch.tensor([p_len], device=self.device).long()
            audio1 = (
                (net_g.infer(feats.float(), p_len, pitch, pitchf.float(), sid)[0][0, 0])
                .data.cpu()
                .float()
                .numpy()
            )
            # clean up
            del feats, feats0, p_len
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        return audio1

    def _retrieve_speaker_embeddings(self, feats, index, big_npy, index_rate):
        npy = feats[0].cpu().numpy()
        score, ix = index.search(npy, k=8)
        weight = np.square(1 / score)
        weight /= weight.sum(axis=1, keepdims=True)
        npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
        feats = (
            torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
            + (1 - index_rate) * feats
        )
        return feats

    def pipeline(
        self,
        model,
        net_g,
        sid,
        audio,
        pitch,
        f0_method,
        file_index,
        index_rate,
        pitch_guidance,
        filter_radius,
        volume_envelope,
        version,
        protect,
        hop_length,
        f0_autotune,
        f0_autotune_strength,
        f0_file,
    ):
        """
        The main pipeline function for performing voice conversion.

        Args:
            model: The feature extractor model.
            net_g: The generative model for synthesizing speech.
            sid: Speaker ID for the target voice.
            audio: The input audio signal.
            input_audio_path: Path to the input audio file.
            pitch: Key to adjust the pitch of the F0 contour.
            f0_method: Method to use for F0 estimation.
            file_index: Path to the FAISS index file for speaker embedding retrieval.
            index_rate: Blending rate for speaker embedding retrieval.
            pitch_guidance: Whether to use pitch guidance during voice conversion.
            filter_radius: Radius for median filtering the F0 contour.
            tgt_sr: Target sampling rate for the output audio.
            resample_sr: Resampling rate for the output audio.
            volume_envelope: Blending rate for adjusting the RMS level of the output audio.
            version: Model version.
            protect: Protection level for preserving the original pitch.
            hop_length: Hop length for F0 estimation methods.
            f0_autotune: Whether to apply autotune to the F0 contour.
            f0_file: Path to a file containing an F0 contour to use.
        """
        if file_index != "" and os.path.exists(file_index) and index_rate > 0:
            try:
                index = faiss.read_index(file_index)
                big_npy = index.reconstruct_n(0, index.ntotal)
            except Exception as error:
                print(f"An error occurred reading the FAISS index: {error}")
                index = big_npy = None
        else:
            index = big_npy = None
        audio = signal.filtfilt(bh, ah, audio)
        audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
        opt_ts = []
        if audio_pad.shape[0] > self.t_max:
|
| 562 |
+
audio_sum = np.zeros_like(audio)
|
| 563 |
+
for i in range(self.window):
|
| 564 |
+
audio_sum += audio_pad[i : i - self.window]
|
| 565 |
+
for t in range(self.t_center, audio.shape[0], self.t_center):
|
| 566 |
+
opt_ts.append(
|
| 567 |
+
t
|
| 568 |
+
- self.t_query
|
| 569 |
+
+ np.where(
|
| 570 |
+
np.abs(audio_sum[t - self.t_query : t + self.t_query])
|
| 571 |
+
== np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
|
| 572 |
+
)[0][0]
|
| 573 |
+
)
|
| 574 |
+
s = 0
|
| 575 |
+
audio_opt = []
|
| 576 |
+
t = None
|
| 577 |
+
audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
|
| 578 |
+
p_len = audio_pad.shape[0] // self.window
|
| 579 |
+
inp_f0 = None
|
| 580 |
+
if hasattr(f0_file, "name"):
|
| 581 |
+
try:
|
| 582 |
+
with open(f0_file.name, "r") as f:
|
| 583 |
+
lines = f.read().strip("\n").split("\n")
|
| 584 |
+
inp_f0 = []
|
| 585 |
+
for line in lines:
|
| 586 |
+
inp_f0.append([float(i) for i in line.split(",")])
|
| 587 |
+
inp_f0 = np.array(inp_f0, dtype="float32")
|
| 588 |
+
except Exception as error:
|
| 589 |
+
print(f"An error occurred reading the F0 file: {error}")
|
| 590 |
+
sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
|
| 591 |
+
if pitch_guidance:
|
| 592 |
+
pitch, pitchf = self.get_f0(
|
| 593 |
+
"input_audio_path", # questionable purpose of making a key for an array
|
| 594 |
+
audio_pad,
|
| 595 |
+
p_len,
|
| 596 |
+
pitch,
|
| 597 |
+
f0_method,
|
| 598 |
+
filter_radius,
|
| 599 |
+
hop_length,
|
| 600 |
+
f0_autotune,
|
| 601 |
+
f0_autotune_strength,
|
| 602 |
+
inp_f0,
|
| 603 |
+
)
|
| 604 |
+
pitch = pitch[:p_len]
|
| 605 |
+
pitchf = pitchf[:p_len]
|
| 606 |
+
if self.device == "mps":
|
| 607 |
+
pitchf = pitchf.astype(np.float32)
|
| 608 |
+
pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
|
| 609 |
+
pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
|
| 610 |
+
for t in opt_ts:
|
| 611 |
+
t = t // self.window * self.window
|
| 612 |
+
if pitch_guidance:
|
| 613 |
+
audio_opt.append(
|
| 614 |
+
self.voice_conversion(
|
| 615 |
+
model,
|
| 616 |
+
net_g,
|
| 617 |
+
sid,
|
| 618 |
+
audio_pad[s : t + self.t_pad2 + self.window],
|
| 619 |
+
pitch[:, s // self.window : (t + self.t_pad2) // self.window],
|
| 620 |
+
pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
|
| 621 |
+
index,
|
| 622 |
+
big_npy,
|
| 623 |
+
index_rate,
|
| 624 |
+
version,
|
| 625 |
+
protect,
|
| 626 |
+
)[self.t_pad_tgt : -self.t_pad_tgt]
|
| 627 |
+
)
|
| 628 |
+
else:
|
| 629 |
+
audio_opt.append(
|
| 630 |
+
self.voice_conversion(
|
| 631 |
+
model,
|
| 632 |
+
net_g,
|
| 633 |
+
sid,
|
| 634 |
+
audio_pad[s : t + self.t_pad2 + self.window],
|
| 635 |
+
None,
|
| 636 |
+
None,
|
| 637 |
+
index,
|
| 638 |
+
big_npy,
|
| 639 |
+
index_rate,
|
| 640 |
+
version,
|
| 641 |
+
protect,
|
| 642 |
+
)[self.t_pad_tgt : -self.t_pad_tgt]
|
| 643 |
+
)
|
| 644 |
+
s = t
|
| 645 |
+
if pitch_guidance:
|
| 646 |
+
audio_opt.append(
|
| 647 |
+
self.voice_conversion(
|
| 648 |
+
model,
|
| 649 |
+
net_g,
|
| 650 |
+
sid,
|
| 651 |
+
audio_pad[t:],
|
| 652 |
+
pitch[:, t // self.window :] if t is not None else pitch,
|
| 653 |
+
pitchf[:, t // self.window :] if t is not None else pitchf,
|
| 654 |
+
index,
|
| 655 |
+
big_npy,
|
| 656 |
+
index_rate,
|
| 657 |
+
version,
|
| 658 |
+
protect,
|
| 659 |
+
)[self.t_pad_tgt : -self.t_pad_tgt]
|
| 660 |
+
)
|
| 661 |
+
else:
|
| 662 |
+
audio_opt.append(
|
| 663 |
+
self.voice_conversion(
|
| 664 |
+
model,
|
| 665 |
+
net_g,
|
| 666 |
+
sid,
|
| 667 |
+
audio_pad[t:],
|
| 668 |
+
None,
|
| 669 |
+
None,
|
| 670 |
+
index,
|
| 671 |
+
big_npy,
|
| 672 |
+
index_rate,
|
| 673 |
+
version,
|
| 674 |
+
protect,
|
| 675 |
+
)[self.t_pad_tgt : -self.t_pad_tgt]
|
| 676 |
+
)
|
| 677 |
+
audio_opt = np.concatenate(audio_opt)
|
| 678 |
+
if volume_envelope != 1:
|
| 679 |
+
audio_opt = AudioProcessor.change_rms(
|
| 680 |
+
audio, self.sample_rate, audio_opt, self.sample_rate, volume_envelope
|
| 681 |
+
)
|
| 682 |
+
audio_max = np.abs(audio_opt).max() / 0.99
|
| 683 |
+
if audio_max > 1:
|
| 684 |
+
audio_opt /= audio_max
|
| 685 |
+
if pitch_guidance:
|
| 686 |
+
del pitch, pitchf
|
| 687 |
+
del sid
|
| 688 |
+
if torch.cuda.is_available():
|
| 689 |
+
torch.cuda.empty_cache()
|
| 690 |
+
return audio_opt
|
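The retrieval step implemented by _retrieve_speaker_embeddings above can be exercised in isolation. This sketch rebuilds the same k-nearest lookup with inverse-square-distance weighting and an index_rate blend against a toy FAISS flat index; the dimensions, the random data, and index_rate are placeholders, not values from this repository.

import numpy as np
import faiss

dim = 768                                            # v2 content-feature width
big_npy = np.random.rand(2000, dim).astype("float32")
index = faiss.IndexFlatL2(dim)
index.add(big_npy)

feats = np.random.rand(150, dim).astype("float32")   # frames of one input utterance
index_rate = 0.75

score, ix = index.search(feats, k=8)                 # squared L2 distances and row indices
weight = np.square(1 / score)                        # closer neighbours weigh more
weight /= weight.sum(axis=1, keepdims=True)
retrieved = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
blended = index_rate * retrieved + (1 - index_rate) * feats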
rvc/lib/algorithm/__init__.py
ADDED
|
File without changes
|
rvc/lib/algorithm/attentions.py
ADDED
|
@@ -0,0 +1,243 @@
| 1 |
+
import math
|
| 2 |
+
import torch
|
| 3 |
+
from rvc.lib.algorithm.commons import convert_pad_shape
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class MultiHeadAttention(torch.nn.Module):
|
| 7 |
+
"""
|
| 8 |
+
Multi-head attention module with optional relative positional encoding and proximal bias.
|
| 9 |
+
|
| 10 |
+
Args:
|
| 11 |
+
channels (int): Number of input channels.
|
| 12 |
+
out_channels (int): Number of output channels.
|
| 13 |
+
n_heads (int): Number of attention heads.
|
| 14 |
+
p_dropout (float, optional): Dropout probability. Defaults to 0.0.
|
| 15 |
+
window_size (int, optional): Window size for relative positional encoding. Defaults to None.
|
| 16 |
+
heads_share (bool, optional): Whether to share relative positional embeddings across heads. Defaults to True.
|
| 17 |
+
block_length (int, optional): Block length for local attention. Defaults to None.
|
| 18 |
+
proximal_bias (bool, optional): Whether to use proximal bias in self-attention. Defaults to False.
|
| 19 |
+
proximal_init (bool, optional): Whether to initialize the key projection weights the same as query projection weights. Defaults to False.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
def __init__(
|
| 23 |
+
self,
|
| 24 |
+
channels: int,
|
| 25 |
+
out_channels: int,
|
| 26 |
+
n_heads: int,
|
| 27 |
+
p_dropout: float = 0.0,
|
| 28 |
+
window_size: int = None,
|
| 29 |
+
heads_share: bool = True,
|
| 30 |
+
block_length: int = None,
|
| 31 |
+
proximal_bias: bool = False,
|
| 32 |
+
proximal_init: bool = False,
|
| 33 |
+
):
|
| 34 |
+
super().__init__()
|
| 35 |
+
assert (
|
| 36 |
+
channels % n_heads == 0
|
| 37 |
+
), "Channels must be divisible by the number of heads."
|
| 38 |
+
|
| 39 |
+
self.channels = channels
|
| 40 |
+
self.out_channels = out_channels
|
| 41 |
+
self.n_heads = n_heads
|
| 42 |
+
self.k_channels = channels // n_heads
|
| 43 |
+
self.window_size = window_size
|
| 44 |
+
self.block_length = block_length
|
| 45 |
+
self.proximal_bias = proximal_bias
|
| 46 |
+
|
| 47 |
+
# Define projections
|
| 48 |
+
self.conv_q = torch.nn.Conv1d(channels, channels, 1)
|
| 49 |
+
self.conv_k = torch.nn.Conv1d(channels, channels, 1)
|
| 50 |
+
self.conv_v = torch.nn.Conv1d(channels, channels, 1)
|
| 51 |
+
self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
|
| 52 |
+
|
| 53 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
| 54 |
+
|
| 55 |
+
# Relative positional encodings
|
| 56 |
+
if window_size:
|
| 57 |
+
n_heads_rel = 1 if heads_share else n_heads
|
| 58 |
+
rel_stddev = self.k_channels**-0.5
|
| 59 |
+
self.emb_rel_k = torch.nn.Parameter(
|
| 60 |
+
torch.randn(n_heads_rel, 2 * window_size + 1, self.k_channels)
|
| 61 |
+
* rel_stddev
|
| 62 |
+
)
|
| 63 |
+
self.emb_rel_v = torch.nn.Parameter(
|
| 64 |
+
torch.randn(n_heads_rel, 2 * window_size + 1, self.k_channels)
|
| 65 |
+
* rel_stddev
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
# Initialize weights
|
| 69 |
+
torch.nn.init.xavier_uniform_(self.conv_q.weight)
|
| 70 |
+
torch.nn.init.xavier_uniform_(self.conv_k.weight)
|
| 71 |
+
torch.nn.init.xavier_uniform_(self.conv_v.weight)
|
| 72 |
+
torch.nn.init.xavier_uniform_(self.conv_o.weight)
|
| 73 |
+
|
| 74 |
+
if proximal_init:
|
| 75 |
+
with torch.no_grad():
|
| 76 |
+
self.conv_k.weight.copy_(self.conv_q.weight)
|
| 77 |
+
self.conv_k.bias.copy_(self.conv_q.bias)
|
| 78 |
+
|
| 79 |
+
def forward(self, x, c, attn_mask=None):
|
| 80 |
+
# Compute query, key, value projections
|
| 81 |
+
q, k, v = self.conv_q(x), self.conv_k(c), self.conv_v(c)
|
| 82 |
+
|
| 83 |
+
# Compute attention
|
| 84 |
+
x, self.attn = self.attention(q, k, v, mask=attn_mask)
|
| 85 |
+
|
| 86 |
+
# Final output projection
|
| 87 |
+
return self.conv_o(x)
|
| 88 |
+
|
| 89 |
+
def attention(self, query, key, value, mask=None):
|
| 90 |
+
# Reshape and compute scaled dot-product attention
|
| 91 |
+
b, d, t_s, t_t = (*key.size(), query.size(2))
|
| 92 |
+
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
|
| 93 |
+
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
| 94 |
+
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
| 95 |
+
|
| 96 |
+
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
|
| 97 |
+
|
| 98 |
+
if self.window_size:
|
| 99 |
+
assert t_s == t_t, "Relative attention only supports self-attention."
|
| 100 |
+
scores += self._compute_relative_scores(query, t_s)
|
| 101 |
+
|
| 102 |
+
if self.proximal_bias:
|
| 103 |
+
assert t_s == t_t, "Proximal bias only supports self-attention."
|
| 104 |
+
scores += self._attention_bias_proximal(t_s).to(scores.device, scores.dtype)
|
| 105 |
+
|
| 106 |
+
if mask is not None:
|
| 107 |
+
scores = scores.masked_fill(mask == 0, -1e4)
|
| 108 |
+
if self.block_length:
|
| 109 |
+
block_mask = (
|
| 110 |
+
torch.ones_like(scores)
|
| 111 |
+
.triu(-self.block_length)
|
| 112 |
+
.tril(self.block_length)
|
| 113 |
+
)
|
| 114 |
+
scores = scores.masked_fill(block_mask == 0, -1e4)
|
| 115 |
+
|
| 116 |
+
# Apply softmax and dropout
|
| 117 |
+
p_attn = self.drop(torch.nn.functional.softmax(scores, dim=-1))
|
| 118 |
+
|
| 119 |
+
# Compute attention output
|
| 120 |
+
output = torch.matmul(p_attn, value)
|
| 121 |
+
|
| 122 |
+
if self.window_size:
|
| 123 |
+
output += self._apply_relative_values(p_attn, t_s)
|
| 124 |
+
|
| 125 |
+
return output.transpose(2, 3).contiguous().view(b, d, t_t), p_attn
|
| 126 |
+
|
| 127 |
+
def _compute_relative_scores(self, query, length):
|
| 128 |
+
rel_emb = self._get_relative_embeddings(self.emb_rel_k, length)
|
| 129 |
+
rel_logits = self._matmul_with_relative_keys(
|
| 130 |
+
query / math.sqrt(self.k_channels), rel_emb
|
| 131 |
+
)
|
| 132 |
+
return self._relative_position_to_absolute_position(rel_logits)
|
| 133 |
+
|
| 134 |
+
def _apply_relative_values(self, p_attn, length):
|
| 135 |
+
rel_weights = self._absolute_position_to_relative_position(p_attn)
|
| 136 |
+
rel_emb = self._get_relative_embeddings(self.emb_rel_v, length)
|
| 137 |
+
return self._matmul_with_relative_values(rel_weights, rel_emb)
|
| 138 |
+
|
| 139 |
+
# Helper methods
|
| 140 |
+
def _matmul_with_relative_values(self, x, y):
|
| 141 |
+
return torch.matmul(x, y.unsqueeze(0))
|
| 142 |
+
|
| 143 |
+
def _matmul_with_relative_keys(self, x, y):
|
| 144 |
+
return torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
|
| 145 |
+
|
| 146 |
+
def _get_relative_embeddings(self, embeddings, length):
|
| 147 |
+
pad_length = max(length - (self.window_size + 1), 0)
|
| 148 |
+
start = max((self.window_size + 1) - length, 0)
|
| 149 |
+
end = start + 2 * length - 1
|
| 150 |
+
|
| 151 |
+
if pad_length > 0:
|
| 152 |
+
embeddings = torch.nn.functional.pad(
|
| 153 |
+
embeddings,
|
| 154 |
+
convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
|
| 155 |
+
)
|
| 156 |
+
return embeddings[:, start:end]
|
| 157 |
+
|
| 158 |
+
def _relative_position_to_absolute_position(self, x):
|
| 159 |
+
batch, heads, length, _ = x.size()
|
| 160 |
+
x = torch.nn.functional.pad(
|
| 161 |
+
x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])
|
| 162 |
+
)
|
| 163 |
+
x_flat = x.view(batch, heads, length * 2 * length)
|
| 164 |
+
x_flat = torch.nn.functional.pad(
|
| 165 |
+
x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
|
| 166 |
+
)
|
| 167 |
+
return x_flat.view(batch, heads, length + 1, 2 * length - 1)[
|
| 168 |
+
:, :, :length, length - 1 :
|
| 169 |
+
]
|
| 170 |
+
|
| 171 |
+
def _absolute_position_to_relative_position(self, x):
|
| 172 |
+
batch, heads, length, _ = x.size()
|
| 173 |
+
x = torch.nn.functional.pad(
|
| 174 |
+
x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
|
| 175 |
+
)
|
| 176 |
+
x_flat = x.view(batch, heads, length**2 + length * (length - 1))
|
| 177 |
+
x_flat = torch.nn.functional.pad(
|
| 178 |
+
x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])
|
| 179 |
+
)
|
| 180 |
+
return x_flat.view(batch, heads, length, 2 * length)[:, :, :, 1:]
|
| 181 |
+
|
| 182 |
+
def _attention_bias_proximal(self, length):
|
| 183 |
+
r = torch.arange(length, dtype=torch.float32)
|
| 184 |
+
diff = r.unsqueeze(0) - r.unsqueeze(1)
|
| 185 |
+
return -torch.log1p(torch.abs(diff)).unsqueeze(0).unsqueeze(0)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
class FFN(torch.nn.Module):
|
| 189 |
+
"""
|
| 190 |
+
Feed-forward network module.
|
| 191 |
+
|
| 192 |
+
Args:
|
| 193 |
+
in_channels (int): Number of input channels.
|
| 194 |
+
out_channels (int): Number of output channels.
|
| 195 |
+
filter_channels (int): Number of filter channels in the convolution layers.
|
| 196 |
+
kernel_size (int): Kernel size of the convolution layers.
|
| 197 |
+
p_dropout (float, optional): Dropout probability. Defaults to 0.0.
|
| 198 |
+
activation (str, optional): Activation function to use. Defaults to None.
|
| 199 |
+
causal (bool, optional): Whether to use causal padding in the convolution layers. Defaults to False.
|
| 200 |
+
"""
|
| 201 |
+
|
| 202 |
+
def __init__(
|
| 203 |
+
self,
|
| 204 |
+
in_channels: int,
|
| 205 |
+
out_channels: int,
|
| 206 |
+
filter_channels: int,
|
| 207 |
+
kernel_size: int,
|
| 208 |
+
p_dropout: float = 0.0,
|
| 209 |
+
activation: str = None,
|
| 210 |
+
causal: bool = False,
|
| 211 |
+
):
|
| 212 |
+
super().__init__()
|
| 213 |
+
self.padding_fn = self._causal_padding if causal else self._same_padding
|
| 214 |
+
|
| 215 |
+
self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size)
|
| 216 |
+
self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size)
|
| 217 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
| 218 |
+
|
| 219 |
+
self.activation = activation
|
| 220 |
+
|
| 221 |
+
def forward(self, x, x_mask):
|
| 222 |
+
x = self.conv_1(self.padding_fn(x * x_mask))
|
| 223 |
+
x = self._apply_activation(x)
|
| 224 |
+
x = self.drop(x)
|
| 225 |
+
x = self.conv_2(self.padding_fn(x * x_mask))
|
| 226 |
+
return x * x_mask
|
| 227 |
+
|
| 228 |
+
def _apply_activation(self, x):
|
| 229 |
+
if self.activation == "gelu":
|
| 230 |
+
return x * torch.sigmoid(1.702 * x)
|
| 231 |
+
return torch.relu(x)
|
| 232 |
+
|
| 233 |
+
def _causal_padding(self, x):
|
| 234 |
+
pad_l, pad_r = self.conv_1.kernel_size[0] - 1, 0
|
| 235 |
+
return torch.nn.functional.pad(
|
| 236 |
+
x, convert_pad_shape([[0, 0], [0, 0], [pad_l, pad_r]])
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
def _same_padding(self, x):
|
| 240 |
+
pad = (self.conv_1.kernel_size[0] - 1) // 2
|
| 241 |
+
return torch.nn.functional.pad(
|
| 242 |
+
x, convert_pad_shape([[0, 0], [0, 0], [pad, pad]])
|
| 243 |
+
)
|
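A quick shape check for the attention module above, assuming the package is importable under the layout shown in this upload; the tensor sizes are arbitrary.

import torch
from rvc.lib.algorithm.attentions import MultiHeadAttention

attn = MultiHeadAttention(channels=192, out_channels=192, n_heads=2, window_size=10)
x = torch.randn(1, 192, 50)            # (batch, channels, time)
attn_mask = torch.ones(1, 1, 50, 50)   # all-ones self-attention mask, as built by Encoder.forward
y = attn(x, x, attn_mask=attn_mask)
print(y.shape)                         # torch.Size([1, 192, 50])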
rvc/lib/algorithm/commons.py
ADDED
|
@@ -0,0 +1,117 @@
| 1 |
+
import torch
|
| 2 |
+
from typing import Optional
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def init_weights(m, mean=0.0, std=0.01):
|
| 6 |
+
"""
|
| 7 |
+
Initialize the weights of a module.
|
| 8 |
+
|
| 9 |
+
Args:
|
| 10 |
+
m: The module to initialize.
|
| 11 |
+
mean: The mean of the normal distribution.
|
| 12 |
+
std: The standard deviation of the normal distribution.
|
| 13 |
+
"""
|
| 14 |
+
classname = m.__class__.__name__
|
| 15 |
+
if classname.find("Conv") != -1:
|
| 16 |
+
m.weight.data.normal_(mean, std)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def get_padding(kernel_size, dilation=1):
|
| 20 |
+
"""
|
| 21 |
+
Calculate the padding needed for a convolution.
|
| 22 |
+
|
| 23 |
+
Args:
|
| 24 |
+
kernel_size: The size of the kernel.
|
| 25 |
+
dilation: The dilation of the convolution.
|
| 26 |
+
"""
|
| 27 |
+
return int((kernel_size * dilation - dilation) / 2)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def convert_pad_shape(pad_shape):
|
| 31 |
+
"""
|
| 32 |
+
Convert the pad shape to a list of integers.
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
pad_shape: The pad shape.
|
| 36 |
+
"""
|
| 37 |
+
l = pad_shape[::-1]
|
| 38 |
+
pad_shape = [item for sublist in l for item in sublist]
|
| 39 |
+
return pad_shape
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def slice_segments(
|
| 43 |
+
x: torch.Tensor, ids_str: torch.Tensor, segment_size: int = 4, dim: int = 2
|
| 44 |
+
):
|
| 45 |
+
"""
|
| 46 |
+
Slice segments from a tensor, handling tensors with different numbers of dimensions.
|
| 47 |
+
|
| 48 |
+
Args:
|
| 49 |
+
x (torch.Tensor): The tensor to slice.
|
| 50 |
+
ids_str (torch.Tensor): The starting indices of the segments.
|
| 51 |
+
segment_size (int, optional): The size of each segment. Defaults to 4.
|
| 52 |
+
dim (int, optional): The dimension to slice across (2D or 3D tensors). Defaults to 2.
|
| 53 |
+
"""
|
| 54 |
+
if dim == 2:
|
| 55 |
+
ret = torch.zeros_like(x[:, :segment_size])
|
| 56 |
+
elif dim == 3:
|
| 57 |
+
ret = torch.zeros_like(x[:, :, :segment_size])
|
| 58 |
+
|
| 59 |
+
for i in range(x.size(0)):
|
| 60 |
+
idx_str = ids_str[i].item()
|
| 61 |
+
idx_end = idx_str + segment_size
|
| 62 |
+
if dim == 2:
|
| 63 |
+
ret[i] = x[i, idx_str:idx_end]
|
| 64 |
+
else:
|
| 65 |
+
ret[i] = x[i, :, idx_str:idx_end]
|
| 66 |
+
|
| 67 |
+
return ret
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def rand_slice_segments(x, x_lengths=None, segment_size=4):
|
| 71 |
+
"""
|
| 72 |
+
Randomly slice segments from a tensor.
|
| 73 |
+
|
| 74 |
+
Args:
|
| 75 |
+
x: The tensor to slice.
|
| 76 |
+
x_lengths: The lengths of the sequences.
|
| 77 |
+
segment_size: The size of each segment.
|
| 78 |
+
"""
|
| 79 |
+
b, d, t = x.size()
|
| 80 |
+
if x_lengths is None:
|
| 81 |
+
x_lengths = t
|
| 82 |
+
ids_str_max = x_lengths - segment_size + 1
|
| 83 |
+
ids_str = (torch.rand([b], device=x.device) * ids_str_max).to(dtype=torch.long)
|
| 84 |
+
ret = slice_segments(x, ids_str, segment_size, dim=3)
|
| 85 |
+
return ret, ids_str
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
@torch.jit.script
|
| 89 |
+
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
|
| 90 |
+
"""
|
| 91 |
+
Fused add tanh sigmoid multiply operation.
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
input_a: The first input tensor.
|
| 95 |
+
input_b: The second input tensor.
|
| 96 |
+
n_channels: The number of channels.
|
| 97 |
+
"""
|
| 98 |
+
n_channels_int = n_channels[0]
|
| 99 |
+
in_act = input_a + input_b
|
| 100 |
+
t_act = torch.tanh(in_act[:, :n_channels_int, :])
|
| 101 |
+
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
|
| 102 |
+
acts = t_act * s_act
|
| 103 |
+
return acts
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def sequence_mask(length: torch.Tensor, max_length: Optional[int] = None):
|
| 107 |
+
"""
|
| 108 |
+
Generate a sequence mask.
|
| 109 |
+
|
| 110 |
+
Args:
|
| 111 |
+
length: The lengths of the sequences.
|
| 112 |
+
max_length: The maximum length of the sequences.
|
| 113 |
+
"""
|
| 114 |
+
if max_length is None:
|
| 115 |
+
max_length = length.max()
|
| 116 |
+
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
|
| 117 |
+
return x.unsqueeze(0) < length.unsqueeze(1)
|
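For orientation, the two helpers most callers rely on, sequence_mask and rand_slice_segments, behave as in the sketch below (same package-layout assumption; shapes are arbitrary).

import torch
from rvc.lib.algorithm.commons import sequence_mask, rand_slice_segments

lengths = torch.tensor([40, 32])
mask = sequence_mask(lengths, max_length=40)   # (2, 40) bool, True inside each sequence

z = torch.randn(2, 192, 40)                    # (batch, channels, frames)
segments, ids_str = rand_slice_segments(z, lengths, segment_size=16)
print(mask.shape, segments.shape)              # torch.Size([2, 40]) torch.Size([2, 192, 16])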
rvc/lib/algorithm/discriminators.py
ADDED
|
@@ -0,0 +1,175 @@
| 1 |
+
import torch
|
| 2 |
+
from torch.utils.checkpoint import checkpoint
|
| 3 |
+
from torch.nn.utils.parametrizations import spectral_norm, weight_norm
|
| 4 |
+
|
| 5 |
+
from rvc.lib.algorithm.commons import get_padding
|
| 6 |
+
from rvc.lib.algorithm.residuals import LRELU_SLOPE
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class MultiPeriodDiscriminator(torch.nn.Module):
|
| 10 |
+
"""
|
| 11 |
+
Multi-period discriminator.
|
| 12 |
+
|
| 13 |
+
This class implements a multi-period discriminator, which is used to
|
| 14 |
+
discriminate between real and fake audio signals. The discriminator
|
| 15 |
+
is composed of a series of convolutional layers that are applied to
|
| 16 |
+
the input signal at different periods.
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
use_spectral_norm (bool): Whether to use spectral normalization.
|
| 20 |
+
Defaults to False.
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
def __init__(self, use_spectral_norm: bool = False, checkpointing: bool = False):
|
| 24 |
+
super(MultiPeriodDiscriminator, self).__init__()
|
| 25 |
+
periods = [2, 3, 5, 7, 11, 17, 23, 37]
|
| 26 |
+
self.checkpointing = checkpointing
|
| 27 |
+
self.discriminators = torch.nn.ModuleList(
|
| 28 |
+
[
|
| 29 |
+
DiscriminatorS(
|
| 30 |
+
use_spectral_norm=use_spectral_norm, checkpointing=checkpointing
|
| 31 |
+
)
|
| 32 |
+
]
|
| 33 |
+
+ [
|
| 34 |
+
DiscriminatorP(
|
| 35 |
+
p, use_spectral_norm=use_spectral_norm, checkpointing=checkpointing
|
| 36 |
+
)
|
| 37 |
+
for p in periods
|
| 38 |
+
]
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
def forward(self, y, y_hat):
|
| 42 |
+
y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
|
| 43 |
+
for d in self.discriminators:
|
| 44 |
+
if self.training and self.checkpointing:
|
| 45 |
+
|
| 46 |
+
def forward_discriminator(d, y, y_hat):
|
| 47 |
+
y_d_r, fmap_r = d(y)
|
| 48 |
+
y_d_g, fmap_g = d(y_hat)
|
| 49 |
+
return y_d_r, fmap_r, y_d_g, fmap_g
|
| 50 |
+
|
| 51 |
+
y_d_r, fmap_r, y_d_g, fmap_g = checkpoint(
|
| 52 |
+
forward_discriminator, d, y, y_hat, use_reentrant=False
|
| 53 |
+
)
|
| 54 |
+
else:
|
| 55 |
+
y_d_r, fmap_r = d(y)
|
| 56 |
+
y_d_g, fmap_g = d(y_hat)
|
| 57 |
+
y_d_rs.append(y_d_r)
|
| 58 |
+
y_d_gs.append(y_d_g)
|
| 59 |
+
fmap_rs.append(fmap_r)
|
| 60 |
+
fmap_gs.append(fmap_g)
|
| 61 |
+
|
| 62 |
+
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class DiscriminatorS(torch.nn.Module):
|
| 66 |
+
"""
|
| 67 |
+
Discriminator for the short-term component.
|
| 68 |
+
|
| 69 |
+
This class implements a discriminator for the short-term component
|
| 70 |
+
of the audio signal. The discriminator is composed of a series of
|
| 71 |
+
convolutional layers that are applied to the input signal.
|
| 72 |
+
"""
|
| 73 |
+
|
| 74 |
+
def __init__(self, use_spectral_norm: bool = False, checkpointing: bool = False):
|
| 75 |
+
super(DiscriminatorS, self).__init__()
|
| 76 |
+
self.checkpointing = checkpointing
|
| 77 |
+
norm_f = spectral_norm if use_spectral_norm else weight_norm
|
| 78 |
+
self.convs = torch.nn.ModuleList(
|
| 79 |
+
[
|
| 80 |
+
norm_f(torch.nn.Conv1d(1, 16, 15, 1, padding=7)),
|
| 81 |
+
norm_f(torch.nn.Conv1d(16, 64, 41, 4, groups=4, padding=20)),
|
| 82 |
+
norm_f(torch.nn.Conv1d(64, 256, 41, 4, groups=16, padding=20)),
|
| 83 |
+
norm_f(torch.nn.Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
|
| 84 |
+
norm_f(torch.nn.Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
|
| 85 |
+
norm_f(torch.nn.Conv1d(1024, 1024, 5, 1, padding=2)),
|
| 86 |
+
]
|
| 87 |
+
)
|
| 88 |
+
self.conv_post = norm_f(torch.nn.Conv1d(1024, 1, 3, 1, padding=1))
|
| 89 |
+
self.lrelu = torch.nn.LeakyReLU(LRELU_SLOPE, inplace=True)
|
| 90 |
+
|
| 91 |
+
def forward(self, x):
|
| 92 |
+
fmap = []
|
| 93 |
+
for conv in self.convs:
|
| 94 |
+
if self.training and self.checkpointing:
|
| 95 |
+
x = checkpoint(conv, x, use_reentrant=False)
|
| 96 |
+
x = checkpoint(self.lrelu, x, use_reentrant=False)
|
| 97 |
+
else:
|
| 98 |
+
x = self.lrelu(conv(x))
|
| 99 |
+
fmap.append(x)
|
| 100 |
+
x = self.conv_post(x)
|
| 101 |
+
fmap.append(x)
|
| 102 |
+
x = torch.flatten(x, 1, -1)
|
| 103 |
+
return x, fmap
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class DiscriminatorP(torch.nn.Module):
|
| 107 |
+
"""
|
| 108 |
+
Discriminator for the long-term component.
|
| 109 |
+
|
| 110 |
+
This class implements a discriminator for the long-term component
|
| 111 |
+
of the audio signal. The discriminator is composed of a series of
|
| 112 |
+
convolutional layers that are applied to the input signal at a given
|
| 113 |
+
period.
|
| 114 |
+
|
| 115 |
+
Args:
|
| 116 |
+
period (int): Period of the discriminator.
|
| 117 |
+
kernel_size (int): Kernel size of the convolutional layers. Defaults to 5.
|
| 118 |
+
stride (int): Stride of the convolutional layers. Defaults to 3.
|
| 119 |
+
use_spectral_norm (bool): Whether to use spectral normalization. Defaults to False.
|
| 120 |
+
"""
|
| 121 |
+
|
| 122 |
+
def __init__(
|
| 123 |
+
self,
|
| 124 |
+
period: int,
|
| 125 |
+
kernel_size: int = 5,
|
| 126 |
+
stride: int = 3,
|
| 127 |
+
use_spectral_norm: bool = False,
|
| 128 |
+
checkpointing: bool = False,
|
| 129 |
+
):
|
| 130 |
+
super(DiscriminatorP, self).__init__()
|
| 131 |
+
self.checkpointing = checkpointing
|
| 132 |
+
self.period = period
|
| 133 |
+
norm_f = spectral_norm if use_spectral_norm else weight_norm
|
| 134 |
+
|
| 135 |
+
in_channels = [1, 32, 128, 512, 1024]
|
| 136 |
+
out_channels = [32, 128, 512, 1024, 1024]
|
| 137 |
+
|
| 138 |
+
self.convs = torch.nn.ModuleList(
|
| 139 |
+
[
|
| 140 |
+
norm_f(
|
| 141 |
+
torch.nn.Conv2d(
|
| 142 |
+
in_ch,
|
| 143 |
+
out_ch,
|
| 144 |
+
(kernel_size, 1),
|
| 145 |
+
(stride, 1),
|
| 146 |
+
padding=(get_padding(kernel_size, 1), 0),
|
| 147 |
+
)
|
| 148 |
+
)
|
| 149 |
+
for in_ch, out_ch in zip(in_channels, out_channels)
|
| 150 |
+
]
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
self.conv_post = norm_f(torch.nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
|
| 154 |
+
self.lrelu = torch.nn.LeakyReLU(LRELU_SLOPE, inplace=True)
|
| 155 |
+
|
| 156 |
+
def forward(self, x):
|
| 157 |
+
fmap = []
|
| 158 |
+
b, c, t = x.shape
|
| 159 |
+
if t % self.period != 0:
|
| 160 |
+
n_pad = self.period - (t % self.period)
|
| 161 |
+
x = torch.nn.functional.pad(x, (0, n_pad), "reflect")
|
| 162 |
+
x = x.view(b, c, -1, self.period)
|
| 163 |
+
|
| 164 |
+
for conv in self.convs:
|
| 165 |
+
if self.training and self.checkpointing:
|
| 166 |
+
x = checkpoint(conv, x, use_reentrant=False)
|
| 167 |
+
x = checkpoint(self.lrelu, x, use_reentrant=False)
|
| 168 |
+
else:
|
| 169 |
+
x = self.lrelu(conv(x))
|
| 170 |
+
fmap.append(x)
|
| 171 |
+
|
| 172 |
+
x = self.conv_post(x)
|
| 173 |
+
fmap.append(x)
|
| 174 |
+
x = torch.flatten(x, 1, -1)
|
| 175 |
+
return x, fmap
|
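A minimal forward pass through the discriminator stack above (batch size and waveform length are arbitrary; same package-layout assumption).

import torch
from rvc.lib.algorithm.discriminators import MultiPeriodDiscriminator

mpd = MultiPeriodDiscriminator(use_spectral_norm=False)
real = torch.randn(1, 1, 8192)    # (batch, 1, samples)
fake = torch.randn(1, 1, 8192)
y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(real, fake)
print(len(y_d_rs))                # 9: one DiscriminatorS plus one DiscriminatorP per period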
rvc/lib/algorithm/encoders.py
ADDED
|
@@ -0,0 +1,209 @@
| 1 |
+
import math
|
| 2 |
+
import torch
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
from rvc.lib.algorithm.commons import sequence_mask
|
| 6 |
+
from rvc.lib.algorithm.modules import WaveNet
|
| 7 |
+
from rvc.lib.algorithm.normalization import LayerNorm
|
| 8 |
+
from rvc.lib.algorithm.attentions import FFN, MultiHeadAttention
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Encoder(torch.nn.Module):
|
| 12 |
+
"""
|
| 13 |
+
Encoder module for the Transformer model.
|
| 14 |
+
|
| 15 |
+
Args:
|
| 16 |
+
hidden_channels (int): Number of hidden channels in the encoder.
|
| 17 |
+
filter_channels (int): Number of filter channels in the feed-forward network.
|
| 18 |
+
n_heads (int): Number of attention heads.
|
| 19 |
+
n_layers (int): Number of encoder layers.
|
| 20 |
+
kernel_size (int, optional): Kernel size of the convolution layers in the feed-forward network. Defaults to 1.
|
| 21 |
+
p_dropout (float, optional): Dropout probability. Defaults to 0.0.
|
| 22 |
+
window_size (int, optional): Window size for relative positional encoding. Defaults to 10.
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
def __init__(
|
| 26 |
+
self,
|
| 27 |
+
hidden_channels: int,
|
| 28 |
+
filter_channels: int,
|
| 29 |
+
n_heads: int,
|
| 30 |
+
n_layers: int,
|
| 31 |
+
kernel_size: int = 1,
|
| 32 |
+
p_dropout: float = 0.0,
|
| 33 |
+
window_size: int = 10,
|
| 34 |
+
):
|
| 35 |
+
super().__init__()
|
| 36 |
+
|
| 37 |
+
self.hidden_channels = hidden_channels
|
| 38 |
+
self.n_layers = n_layers
|
| 39 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
| 40 |
+
|
| 41 |
+
self.attn_layers = torch.nn.ModuleList(
|
| 42 |
+
[
|
| 43 |
+
MultiHeadAttention(
|
| 44 |
+
hidden_channels,
|
| 45 |
+
hidden_channels,
|
| 46 |
+
n_heads,
|
| 47 |
+
p_dropout=p_dropout,
|
| 48 |
+
window_size=window_size,
|
| 49 |
+
)
|
| 50 |
+
for _ in range(n_layers)
|
| 51 |
+
]
|
| 52 |
+
)
|
| 53 |
+
self.norm_layers_1 = torch.nn.ModuleList(
|
| 54 |
+
[LayerNorm(hidden_channels) for _ in range(n_layers)]
|
| 55 |
+
)
|
| 56 |
+
self.ffn_layers = torch.nn.ModuleList(
|
| 57 |
+
[
|
| 58 |
+
FFN(
|
| 59 |
+
hidden_channels,
|
| 60 |
+
hidden_channels,
|
| 61 |
+
filter_channels,
|
| 62 |
+
kernel_size,
|
| 63 |
+
p_dropout=p_dropout,
|
| 64 |
+
)
|
| 65 |
+
for _ in range(n_layers)
|
| 66 |
+
]
|
| 67 |
+
)
|
| 68 |
+
self.norm_layers_2 = torch.nn.ModuleList(
|
| 69 |
+
[LayerNorm(hidden_channels) for _ in range(n_layers)]
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
def forward(self, x, x_mask):
|
| 73 |
+
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
| 74 |
+
x = x * x_mask
|
| 75 |
+
|
| 76 |
+
for i in range(self.n_layers):
|
| 77 |
+
y = self.attn_layers[i](x, x, attn_mask)
|
| 78 |
+
y = self.drop(y)
|
| 79 |
+
x = self.norm_layers_1[i](x + y)
|
| 80 |
+
|
| 81 |
+
y = self.ffn_layers[i](x, x_mask)
|
| 82 |
+
y = self.drop(y)
|
| 83 |
+
x = self.norm_layers_2[i](x + y)
|
| 84 |
+
|
| 85 |
+
return x * x_mask
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class TextEncoder(torch.nn.Module):
|
| 89 |
+
"""
|
| 90 |
+
Text Encoder with configurable embedding dimension.
|
| 91 |
+
|
| 92 |
+
Args:
|
| 93 |
+
out_channels (int): Output channels of the encoder.
|
| 94 |
+
hidden_channels (int): Hidden channels of the encoder.
|
| 95 |
+
filter_channels (int): Filter channels of the encoder.
|
| 96 |
+
n_heads (int): Number of attention heads.
|
| 97 |
+
n_layers (int): Number of encoder layers.
|
| 98 |
+
kernel_size (int): Kernel size of the convolutional layers.
|
| 99 |
+
p_dropout (float): Dropout probability.
|
| 100 |
+
embedding_dim (int): Embedding dimension for phone embeddings (v1 = 256, v2 = 768).
|
| 101 |
+
f0 (bool, optional): Whether to use F0 embedding. Defaults to True.
|
| 102 |
+
"""
|
| 103 |
+
|
| 104 |
+
def __init__(
|
| 105 |
+
self,
|
| 106 |
+
out_channels: int,
|
| 107 |
+
hidden_channels: int,
|
| 108 |
+
filter_channels: int,
|
| 109 |
+
n_heads: int,
|
| 110 |
+
n_layers: int,
|
| 111 |
+
kernel_size: int,
|
| 112 |
+
p_dropout: float,
|
| 113 |
+
embedding_dim: int,
|
| 114 |
+
f0: bool = True,
|
| 115 |
+
):
|
| 116 |
+
super().__init__()
|
| 117 |
+
self.hidden_channels = hidden_channels
|
| 118 |
+
self.out_channels = out_channels
|
| 119 |
+
self.emb_phone = torch.nn.Linear(embedding_dim, hidden_channels)
|
| 120 |
+
self.lrelu = torch.nn.LeakyReLU(0.1, inplace=True)
|
| 121 |
+
self.emb_pitch = torch.nn.Embedding(256, hidden_channels) if f0 else None
|
| 122 |
+
|
| 123 |
+
self.encoder = Encoder(
|
| 124 |
+
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
|
| 125 |
+
)
|
| 126 |
+
self.proj = torch.nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
| 127 |
+
|
| 128 |
+
def forward(
|
| 129 |
+
self, phone: torch.Tensor, pitch: Optional[torch.Tensor], lengths: torch.Tensor
|
| 130 |
+
):
|
| 131 |
+
x = self.emb_phone(phone)
|
| 132 |
+
if pitch is not None and self.emb_pitch:
|
| 133 |
+
x += self.emb_pitch(pitch)
|
| 134 |
+
|
| 135 |
+
x *= math.sqrt(self.hidden_channels)
|
| 136 |
+
x = self.lrelu(x)
|
| 137 |
+
x = x.transpose(1, -1) # [B, H, T]
|
| 138 |
+
|
| 139 |
+
x_mask = sequence_mask(lengths, x.size(2)).unsqueeze(1).to(x.dtype)
|
| 140 |
+
x = self.encoder(x, x_mask)
|
| 141 |
+
stats = self.proj(x) * x_mask
|
| 142 |
+
|
| 143 |
+
m, logs = torch.split(stats, self.out_channels, dim=1)
|
| 144 |
+
return m, logs, x_mask
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class PosteriorEncoder(torch.nn.Module):
|
| 148 |
+
"""
|
| 149 |
+
Posterior Encoder for inferring latent representation.
|
| 150 |
+
|
| 151 |
+
Args:
|
| 152 |
+
in_channels (int): Number of channels in the input.
|
| 153 |
+
out_channels (int): Number of channels in the output.
|
| 154 |
+
hidden_channels (int): Number of hidden channels in the encoder.
|
| 155 |
+
kernel_size (int): Kernel size of the convolutional layers.
|
| 156 |
+
dilation_rate (int): Dilation rate of the convolutional layers.
|
| 157 |
+
n_layers (int): Number of layers in the encoder.
|
| 158 |
+
gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 0.
|
| 159 |
+
"""
|
| 160 |
+
|
| 161 |
+
def __init__(
|
| 162 |
+
self,
|
| 163 |
+
in_channels: int,
|
| 164 |
+
out_channels: int,
|
| 165 |
+
hidden_channels: int,
|
| 166 |
+
kernel_size: int,
|
| 167 |
+
dilation_rate: int,
|
| 168 |
+
n_layers: int,
|
| 169 |
+
gin_channels: int = 0,
|
| 170 |
+
):
|
| 171 |
+
super().__init__()
|
| 172 |
+
self.out_channels = out_channels
|
| 173 |
+
self.pre = torch.nn.Conv1d(in_channels, hidden_channels, 1)
|
| 174 |
+
self.enc = WaveNet(
|
| 175 |
+
hidden_channels,
|
| 176 |
+
kernel_size,
|
| 177 |
+
dilation_rate,
|
| 178 |
+
n_layers,
|
| 179 |
+
gin_channels=gin_channels,
|
| 180 |
+
)
|
| 181 |
+
self.proj = torch.nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
| 182 |
+
|
| 183 |
+
def forward(
|
| 184 |
+
self, x: torch.Tensor, x_lengths: torch.Tensor, g: Optional[torch.Tensor] = None
|
| 185 |
+
):
|
| 186 |
+
x_mask = sequence_mask(x_lengths, x.size(2)).unsqueeze(1).to(x.dtype)
|
| 187 |
+
|
| 188 |
+
x = self.pre(x) * x_mask
|
| 189 |
+
x = self.enc(x, x_mask, g=g)
|
| 190 |
+
|
| 191 |
+
stats = self.proj(x) * x_mask
|
| 192 |
+
m, logs = torch.split(stats, self.out_channels, dim=1)
|
| 193 |
+
|
| 194 |
+
z = m + torch.randn_like(m) * torch.exp(logs)
|
| 195 |
+
z *= x_mask
|
| 196 |
+
|
| 197 |
+
return z, m, logs, x_mask
|
| 198 |
+
|
| 199 |
+
def remove_weight_norm(self):
|
| 200 |
+
self.enc.remove_weight_norm()
|
| 201 |
+
|
| 202 |
+
def __prepare_scriptable__(self):
|
| 203 |
+
for hook in self.enc._forward_pre_hooks.values():
|
| 204 |
+
if (
|
| 205 |
+
hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
|
| 206 |
+
and hook.__class__.__name__ == "WeightNorm"
|
| 207 |
+
):
|
| 208 |
+
torch.nn.utils.remove_weight_norm(self.enc)
|
| 209 |
+
return self
|
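The encoder shapes can be sanity-checked as below. The hyperparameters mirror a typical v2-style setup but are assumptions here, not values read from a config in this upload.

import torch
from rvc.lib.algorithm.encoders import TextEncoder

enc = TextEncoder(
    out_channels=192, hidden_channels=192, filter_channels=768,
    n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.0,
    embedding_dim=768, f0=True,
)
phone = torch.randn(1, 100, 768)           # content features: (batch, frames, embedding_dim)
pitch = torch.randint(1, 256, (1, 100))    # coarse F0 codes in 1..255
lengths = torch.tensor([100])
m, logs, x_mask = enc(phone, pitch, lengths)
print(m.shape, logs.shape, x_mask.shape)   # (1, 192, 100) (1, 192, 100) (1, 1, 100)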
rvc/lib/algorithm/generators/hifigan.py
ADDED
|
@@ -0,0 +1,230 @@
| 1 |
+
import torch
|
| 2 |
+
import numpy as np
|
| 3 |
+
from torch.nn.utils import remove_weight_norm
|
| 4 |
+
from torch.nn.utils.parametrizations import weight_norm
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
from rvc.lib.algorithm.residuals import LRELU_SLOPE, ResBlock
|
| 8 |
+
from rvc.lib.algorithm.commons import init_weights
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class HiFiGANGenerator(torch.nn.Module):
|
| 12 |
+
"""
|
| 13 |
+
HiFi-GAN Generator module for audio synthesis.
|
| 14 |
+
|
| 15 |
+
This module implements the generator part of the HiFi-GAN architecture,
|
| 16 |
+
which uses transposed convolutions for upsampling and residual blocks for
|
| 17 |
+
refining the audio output. It can also incorporate global conditioning.
|
| 18 |
+
|
| 19 |
+
Args:
|
| 20 |
+
initial_channel (int): Number of input channels to the initial convolutional layer.
|
| 21 |
+
resblock_kernel_sizes (list): List of kernel sizes for the residual blocks.
|
| 22 |
+
resblock_dilation_sizes (list): List of lists of dilation rates for the residual blocks, corresponding to each kernel size.
|
| 23 |
+
upsample_rates (list): List of upsampling factors for each upsampling layer.
|
| 24 |
+
upsample_initial_channel (int): Number of output channels from the initial convolutional layer, which is also the input to the first upsampling layer.
|
| 25 |
+
upsample_kernel_sizes (list): List of kernel sizes for the transposed convolutional layers used for upsampling.
|
| 26 |
+
gin_channels (int, optional): Number of input channels for the global conditioning. If 0, no global conditioning is used. Defaults to 0.
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
def __init__(
|
| 30 |
+
self,
|
| 31 |
+
initial_channel: int,
|
| 32 |
+
resblock_kernel_sizes: list,
|
| 33 |
+
resblock_dilation_sizes: list,
|
| 34 |
+
upsample_rates: list,
|
| 35 |
+
upsample_initial_channel: int,
|
| 36 |
+
upsample_kernel_sizes: list,
|
| 37 |
+
gin_channels: int = 0,
|
| 38 |
+
):
|
| 39 |
+
super(HiFiGANGenerator, self).__init__()
|
| 40 |
+
self.num_kernels = len(resblock_kernel_sizes)
|
| 41 |
+
self.num_upsamples = len(upsample_rates)
|
| 42 |
+
self.conv_pre = torch.nn.Conv1d(
|
| 43 |
+
initial_channel, upsample_initial_channel, 7, 1, padding=3
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
self.ups = torch.nn.ModuleList()
|
| 47 |
+
self.resblocks = torch.nn.ModuleList()
|
| 48 |
+
|
| 49 |
+
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
| 50 |
+
self.ups.append(
|
| 51 |
+
weight_norm(
|
| 52 |
+
torch.nn.ConvTranspose1d(
|
| 53 |
+
upsample_initial_channel // (2**i),
|
| 54 |
+
upsample_initial_channel // (2 ** (i + 1)),
|
| 55 |
+
k,
|
| 56 |
+
u,
|
| 57 |
+
padding=(k - u) // 2,
|
| 58 |
+
)
|
| 59 |
+
)
|
| 60 |
+
)
|
| 61 |
+
ch = upsample_initial_channel // (2 ** (i + 1))
|
| 62 |
+
for j, (k, d) in enumerate(
|
| 63 |
+
zip(resblock_kernel_sizes, resblock_dilation_sizes)
|
| 64 |
+
):
|
| 65 |
+
self.resblocks.append(ResBlock(ch, k, d))
|
| 66 |
+
|
| 67 |
+
self.conv_post = torch.nn.Conv1d(ch, 1, 7, 1, padding=3, bias=False)
|
| 68 |
+
self.ups.apply(init_weights)
|
| 69 |
+
|
| 70 |
+
if gin_channels != 0:
|
| 71 |
+
self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
| 72 |
+
|
| 73 |
+
def forward(self, x: torch.Tensor, g: Optional[torch.Tensor] = None):
|
| 74 |
+
# new tensor
|
| 75 |
+
x = self.conv_pre(x)
|
| 76 |
+
|
| 77 |
+
if g is not None:
|
| 78 |
+
# in-place call
|
| 79 |
+
x += self.cond(g)
|
| 80 |
+
|
| 81 |
+
for i in range(self.num_upsamples):
|
| 82 |
+
# in-place call
|
| 83 |
+
x = torch.nn.functional.leaky_relu_(x, LRELU_SLOPE)
|
| 84 |
+
x = self.ups[i](x)
|
| 85 |
+
xs = None
|
| 86 |
+
for j in range(self.num_kernels):
|
| 87 |
+
if xs is None:
|
| 88 |
+
xs = self.resblocks[i * self.num_kernels + j](x)
|
| 89 |
+
else:
|
| 90 |
+
xs += self.resblocks[i * self.num_kernels + j](x)
|
| 91 |
+
x = xs / self.num_kernels
|
| 92 |
+
# in-place call
|
| 93 |
+
x = torch.nn.functional.leaky_relu_(x)
|
| 94 |
+
x = self.conv_post(x)
|
| 95 |
+
# in-place call
|
| 96 |
+
x = torch.tanh_(x)
|
| 97 |
+
|
| 98 |
+
return x
|
| 99 |
+
|
| 100 |
+
def __prepare_scriptable__(self):
|
| 101 |
+
for l in [*self.ups, *self.resblocks]:
|
| 102 |
+
for hook in l._forward_pre_hooks.values():
|
| 103 |
+
if (
|
| 104 |
+
hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
|
| 105 |
+
and hook.__class__.__name__ == "WeightNorm"
|
| 106 |
+
):
|
| 107 |
+
torch.nn.utils.remove_weight_norm(l)
|
| 108 |
+
return self
|
| 109 |
+
|
| 110 |
+
def remove_weight_norm(self):
|
| 111 |
+
for l in self.ups:
|
| 112 |
+
remove_weight_norm(l)
|
| 113 |
+
for l in self.resblocks:
|
| 114 |
+
l.remove_weight_norm()
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class SineGenerator(torch.nn.Module):
|
| 118 |
+
"""
|
| 119 |
+
Sine wave generator with optional harmonic overtones and noise.
|
| 120 |
+
|
| 121 |
+
This module generates sine waves for a fundamental frequency and its harmonics.
|
| 122 |
+
It can also add Gaussian noise and apply a voiced/unvoiced mask.
|
| 123 |
+
|
| 124 |
+
Args:
|
| 125 |
+
sampling_rate (int): The sampling rate of the audio in Hz.
|
| 126 |
+
num_harmonics (int, optional): The number of harmonic overtones to generate. Defaults to 0.
|
| 127 |
+
sine_amplitude (float, optional): The amplitude of the sine wave components. Defaults to 0.1.
|
| 128 |
+
noise_stddev (float, optional): The standard deviation of the additive Gaussian noise. Defaults to 0.003.
|
| 129 |
+
voiced_threshold (float, optional): The threshold for the fundamental frequency (F0) to determine if a frame is voiced. Defaults to 0.0.
|
| 130 |
+
"""
|
| 131 |
+
|
| 132 |
+
def __init__(
|
| 133 |
+
self,
|
| 134 |
+
sampling_rate: int,
|
| 135 |
+
num_harmonics: int = 0,
|
| 136 |
+
sine_amplitude: float = 0.1,
|
| 137 |
+
noise_stddev: float = 0.003,
|
| 138 |
+
voiced_threshold: float = 0.0,
|
| 139 |
+
):
|
| 140 |
+
super(SineGenerator, self).__init__()
|
| 141 |
+
self.sampling_rate = sampling_rate
|
| 142 |
+
self.num_harmonics = num_harmonics
|
| 143 |
+
self.sine_amplitude = sine_amplitude
|
| 144 |
+
self.noise_stddev = noise_stddev
|
| 145 |
+
self.voiced_threshold = voiced_threshold
|
| 146 |
+
self.waveform_dim = self.num_harmonics + 1 # fundamental + harmonics
|
| 147 |
+
|
| 148 |
+
def _compute_voiced_unvoiced(self, f0: torch.Tensor):
|
| 149 |
+
"""
|
| 150 |
+
Generates a binary mask indicating voiced/unvoiced frames based on the fundamental frequency.
|
| 151 |
+
|
| 152 |
+
Args:
|
| 153 |
+
f0 (torch.Tensor): Fundamental frequency tensor of shape (batch_size, length).
|
| 154 |
+
"""
|
| 155 |
+
uv_mask = (f0 > self.voiced_threshold).float()
|
| 156 |
+
return uv_mask
|
| 157 |
+
|
| 158 |
+
def _generate_sine_wave(self, f0: torch.Tensor, upsampling_factor: int):
|
| 159 |
+
"""
|
| 160 |
+
Generates sine waves for the fundamental frequency and its harmonics.
|
| 161 |
+
|
| 162 |
+
Args:
|
| 163 |
+
f0 (torch.Tensor): Fundamental frequency tensor of shape (batch_size, length, 1).
|
| 164 |
+
upsampling_factor (int): The factor by which to upsample the sine wave.
|
| 165 |
+
"""
|
| 166 |
+
batch_size, length, _ = f0.shape
|
| 167 |
+
|
| 168 |
+
# Create an upsampling grid
|
| 169 |
+
upsampling_grid = torch.arange(
|
| 170 |
+
1, upsampling_factor + 1, dtype=f0.dtype, device=f0.device
|
| 171 |
+
)
|
| 172 |
+
|
| 173 |
+
# Calculate phase increments
|
| 174 |
+
phase_increments = (f0 / self.sampling_rate) * upsampling_grid
|
| 175 |
+
phase_remainder = torch.fmod(phase_increments[:, :-1, -1:] + 0.5, 1.0) - 0.5
|
| 176 |
+
cumulative_phase = phase_remainder.cumsum(dim=1).fmod(1.0).to(f0.dtype)
|
| 177 |
+
phase_increments += torch.nn.functional.pad(
|
| 178 |
+
cumulative_phase, (0, 0, 1, 0), mode="constant"
|
| 179 |
+
)
|
| 180 |
+
|
| 181 |
+
# Reshape to match the sine wave shape
|
| 182 |
+
phase_increments = phase_increments.reshape(batch_size, -1, 1)
|
| 183 |
+
|
| 184 |
+
# Scale for harmonics
|
| 185 |
+
harmonic_scale = torch.arange(
|
| 186 |
+
1, self.waveform_dim + 1, dtype=f0.dtype, device=f0.device
|
| 187 |
+
).reshape(1, 1, -1)
|
| 188 |
+
phase_increments *= harmonic_scale
|
| 189 |
+
|
| 190 |
+
# Add random phase offset (except for the fundamental)
|
| 191 |
+
random_phase = torch.rand(1, 1, self.waveform_dim, device=f0.device)
|
| 192 |
+
random_phase[..., 0] = 0 # Fundamental frequency has no random offset
|
| 193 |
+
phase_increments += random_phase
|
| 194 |
+
|
| 195 |
+
# Generate sine waves
|
| 196 |
+
sine_waves = torch.sin(2 * np.pi * phase_increments)
|
| 197 |
+
return sine_waves
|
| 198 |
+
|
| 199 |
+
def forward(self, f0: torch.Tensor, upsampling_factor: int):
|
| 200 |
+
with torch.no_grad():
|
| 201 |
+
# Expand `f0` to include waveform dimensions
|
| 202 |
+
f0 = f0.unsqueeze(-1)
|
| 203 |
+
|
| 204 |
+
# Generate sine waves
|
| 205 |
+
sine_waves = (
|
| 206 |
+
self._generate_sine_wave(f0, upsampling_factor) * self.sine_amplitude
|
| 207 |
+
)
|
| 208 |
+
|
| 209 |
+
# Compute voiced/unvoiced mask
|
| 210 |
+
voiced_mask = self._compute_voiced_unvoiced(f0)
|
| 211 |
+
|
| 212 |
+
# Upsample voiced/unvoiced mask
|
| 213 |
+
voiced_mask = torch.nn.functional.interpolate(
|
| 214 |
+
voiced_mask.transpose(2, 1),
|
| 215 |
+
scale_factor=float(upsampling_factor),
|
| 216 |
+
mode="nearest",
|
| 217 |
+
).transpose(2, 1)
|
| 218 |
+
|
| 219 |
+
# Compute noise amplitude
|
| 220 |
+
noise_amplitude = voiced_mask * self.noise_stddev + (1 - voiced_mask) * (
|
| 221 |
+
self.sine_amplitude / 3
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
# Add Gaussian noise
|
| 225 |
+
noise = noise_amplitude * torch.randn_like(sine_waves)
|
| 226 |
+
|
| 227 |
+
# Combine sine waves and noise
|
| 228 |
+
sine_waveforms = sine_waves * voiced_mask + noise
|
| 229 |
+
|
| 230 |
+
return sine_waveforms, voiced_mask, noise
|
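A shape walk-through for the HiFi-GAN generator above. The channel counts and upsample factors are illustrative placeholders chosen so the product of the rates equals a 320-sample hop, not values taken from this repository's JSON configs.

import torch
from rvc.lib.algorithm.generators.hifigan import HiFiGANGenerator

gen = HiFiGANGenerator(
    initial_channel=192,
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[10, 8, 2, 2],           # 10 * 8 * 2 * 2 = 320 samples per latent frame
    upsample_initial_channel=512,
    upsample_kernel_sizes=[20, 16, 4, 4],
    gin_channels=256,
)
z = torch.randn(1, 192, 50)    # latent frames
g = torch.randn(1, 256, 1)     # speaker embedding for global conditioning
wav = gen(z, g)
print(wav.shape)               # torch.Size([1, 1, 16000]) -> 50 frames * 320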
rvc/lib/algorithm/generators/hifigan_mrf.py
ADDED
|
@@ -0,0 +1,385 @@
| 1 |
+
import math
|
| 2 |
+
from typing import Optional
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
from torch.nn.utils import remove_weight_norm
|
| 7 |
+
from torch.nn.utils.parametrizations import weight_norm
|
| 8 |
+
from torch.utils.checkpoint import checkpoint
|
| 9 |
+
|
| 10 |
+
LRELU_SLOPE = 0.1
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class MRFLayer(torch.nn.Module):
|
| 14 |
+
"""
|
| 15 |
+
A single layer of the Multi-Receptive Field (MRF) block.
|
| 16 |
+
|
| 17 |
+
This layer consists of two 1D convolutional layers with weight normalization
|
| 18 |
+
and Leaky ReLU activation in between. The first convolution has a dilation,
|
| 19 |
+
while the second has a dilation of 1. A skip connection is added from the input
|
| 20 |
+
to the output.
|
| 21 |
+
|
| 22 |
+
Args:
|
| 23 |
+
channels (int): The number of input and output channels.
|
| 24 |
+
kernel_size (int): The kernel size of the convolutional layers.
|
| 25 |
+
dilation (int): The dilation rate for the first convolutional layer.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
def __init__(self, channels, kernel_size, dilation):
|
| 29 |
+
super().__init__()
|
| 30 |
+
self.conv1 = weight_norm(
|
| 31 |
+
torch.nn.Conv1d(
|
| 32 |
+
channels,
|
| 33 |
+
channels,
|
| 34 |
+
kernel_size,
|
| 35 |
+
padding=(kernel_size * dilation - dilation) // 2,
|
| 36 |
+
dilation=dilation,
|
| 37 |
+
)
|
| 38 |
+
)
|
| 39 |
+
self.conv2 = weight_norm(
|
| 40 |
+
torch.nn.Conv1d(
|
| 41 |
+
channels, channels, kernel_size, padding=kernel_size // 2, dilation=1
|
| 42 |
+
)
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
def forward(self, x: torch.Tensor):
|
| 46 |
+
# new tensor
|
| 47 |
+
y = torch.nn.functional.leaky_relu(x, LRELU_SLOPE)
|
| 48 |
+
y = self.conv1(y)
|
| 49 |
+
# in-place call
|
| 50 |
+
y = torch.nn.functional.leaky_relu_(y, LRELU_SLOPE)
|
| 51 |
+
y = self.conv2(y)
|
| 52 |
+
return x + y
|
| 53 |
+
|
| 54 |
+
def remove_weight_norm(self):
|
| 55 |
+
remove_weight_norm(self.conv1)
|
| 56 |
+
remove_weight_norm(self.conv2)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class MRFBlock(torch.nn.Module):
|
| 60 |
+
"""
|
| 61 |
+
A Multi-Receptive Field (MRF) block.
|
| 62 |
+
|
| 63 |
+
This block consists of multiple MRFLayers with different dilation rates.
|
| 64 |
+
It applies each layer sequentially to the input.
|
| 65 |
+
|
| 66 |
+
Args:
|
| 67 |
+
channels (int): The number of input and output channels for the MRFLayers.
|
| 68 |
+
kernel_size (int): The kernel size for the convolutional layers in the MRFLayers.
|
| 69 |
+
dilations (list[int]): A list of dilation rates for the MRFLayers.
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
def __init__(self, channels, kernel_size, dilations):
|
| 73 |
+
super().__init__()
|
| 74 |
+
self.layers = torch.nn.ModuleList()
|
| 75 |
+
for dilation in dilations:
|
| 76 |
+
self.layers.append(MRFLayer(channels, kernel_size, dilation))
|
| 77 |
+
|
| 78 |
+
def forward(self, x: torch.Tensor):
|
| 79 |
+
for layer in self.layers:
|
| 80 |
+
x = layer(x)
|
| 81 |
+
return x
|
| 82 |
+
|
| 83 |
+
def remove_weight_norm(self):
|
| 84 |
+
for layer in self.layers:
|
| 85 |
+
layer.remove_weight_norm()
|
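
For orientation, the snippet below is a hypothetical smoke test (not part of this upload) showing how the MRF pieces compose: every MRFLayer is a same-padded residual convolution, so an MRFBlock preserves the (batch, channels, time) shape, which is what lets the generator further down average several blocks with different kernel sizes.

```python
# Hypothetical smoke test; MRFBlock/MRFLayer are the classes defined above.
import torch

block = MRFBlock(channels=64, kernel_size=3, dilations=(1, 3, 5))
x = torch.randn(2, 64, 100)   # (batch, channels, frames)
y = block(x)
assert y.shape == x.shape     # residual, same-padded convs keep the shape
```
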
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class SineGenerator(torch.nn.Module):
|
| 89 |
+
"""
|
| 90 |
+
Definition of sine generator
|
| 91 |
+
|
| 92 |
+
Generates sine waveforms with optional harmonics and additive noise.
|
| 93 |
+
Can be used to create harmonic noise source for neural vocoders.
|
| 94 |
+
|
| 95 |
+
Args:
|
| 96 |
+
samp_rate (int): Sampling rate in Hz.
|
| 97 |
+
harmonic_num (int): Number of harmonic overtones (default 0).
|
| 98 |
+
sine_amp (float): Amplitude of sine-waveform (default 0.1).
|
| 99 |
+
noise_std (float): Standard deviation of Gaussian noise (default 0.003).
|
| 100 |
+
voiced_threshold (float): F0 threshold for voiced/unvoiced classification (default 0).
|
| 101 |
+
"""
|
| 102 |
+
|
| 103 |
+
def __init__(
|
| 104 |
+
self,
|
| 105 |
+
samp_rate: int,
|
| 106 |
+
harmonic_num: int = 0,
|
| 107 |
+
sine_amp: float = 0.1,
|
| 108 |
+
noise_std: float = 0.003,
|
| 109 |
+
voiced_threshold: float = 0,
|
| 110 |
+
):
|
| 111 |
+
super(SineGenerator, self).__init__()
|
| 112 |
+
self.sine_amp = sine_amp
|
| 113 |
+
self.noise_std = noise_std
|
| 114 |
+
self.harmonic_num = harmonic_num
|
| 115 |
+
self.dim = self.harmonic_num + 1
|
| 116 |
+
self.sampling_rate = samp_rate
|
| 117 |
+
self.voiced_threshold = voiced_threshold
|
| 118 |
+
|
| 119 |
+
def _f02uv(self, f0: torch.Tensor):
|
| 120 |
+
"""
|
| 121 |
+
Generates voiced/unvoiced (UV) signal based on the fundamental frequency (F0).
|
| 122 |
+
|
| 123 |
+
Args:
|
| 124 |
+
f0 (torch.Tensor): Fundamental frequency tensor of shape (batch_size, length, 1).
|
| 125 |
+
"""
|
| 126 |
+
# generate uv signal
|
| 127 |
+
uv = torch.ones_like(f0)
|
| 128 |
+
uv = uv * (f0 > self.voiced_threshold)
|
| 129 |
+
return uv
|
| 130 |
+
|
| 131 |
+
def _f02sine(self, f0_values: torch.Tensor):
|
| 132 |
+
"""
|
| 133 |
+
Generates sine waveforms based on the fundamental frequency (F0) and its harmonics.
|
| 134 |
+
|
| 135 |
+
Args:
|
| 136 |
+
f0_values (torch.Tensor): Tensor of fundamental frequency and its harmonics,
|
| 137 |
+
shape (batch_size, length, dim), where dim indicates
|
| 138 |
+
the fundamental tone and overtones.
|
| 139 |
+
"""
|
| 140 |
+
# convert to F0 in rad. The integer part n can be ignored
|
| 141 |
+
# because 2 * np.pi * n doesn't affect phase
|
| 142 |
+
rad_values = (f0_values / self.sampling_rate) % 1
|
| 143 |
+
|
| 144 |
+
# initial phase noise (no noise for fundamental component)
|
| 145 |
+
rand_ini = torch.rand(
|
| 146 |
+
f0_values.shape[0], f0_values.shape[2], device=f0_values.device
|
| 147 |
+
)
|
| 148 |
+
rand_ini[:, 0] = 0
|
| 149 |
+
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
|
| 150 |
+
|
| 151 |
+
        # instantaneous phase: sine[t] = sin(2*pi * sum_{i=1}^{t} rad_i)
|
| 152 |
+
tmp_over_one = torch.cumsum(rad_values, 1) % 1
|
| 153 |
+
tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
|
| 154 |
+
cumsum_shift = torch.zeros_like(rad_values)
|
| 155 |
+
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
|
| 156 |
+
|
| 157 |
+
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
|
| 158 |
+
|
| 159 |
+
return sines
|
| 160 |
+
|
| 161 |
+
def forward(self, f0: torch.Tensor):
|
| 162 |
+
with torch.no_grad():
|
| 163 |
+
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
|
| 164 |
+
# fundamental component
|
| 165 |
+
f0_buf[:, :, 0] = f0[:, :, 0]
|
| 166 |
+
for idx in np.arange(self.harmonic_num):
|
| 167 |
+
f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
|
| 168 |
+
|
| 169 |
+
sine_waves = self._f02sine(f0_buf) * self.sine_amp
|
| 170 |
+
|
| 171 |
+
uv = self._f02uv(f0)
|
| 172 |
+
|
| 173 |
+
noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
|
| 174 |
+
noise = noise_amp * torch.randn_like(sine_waves)
|
| 175 |
+
|
| 176 |
+
sine_waves = sine_waves * uv + noise
|
| 177 |
+
return sine_waves, uv, noise
|
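
The `_f02sine` routine above is, at heart, numerical integration of per-sample phase increments; the `cumsum_shift` bookkeeping only keeps the accumulated phase wrapped. A standalone NumPy sketch of the same idea, for illustration only:

```python
import numpy as np

sr = 16000
f0 = np.full(sr, 220.0)                 # one second of a constant 220 Hz F0
increments = (f0 / sr) % 1.0            # phase increment per sample, in turns
phase = np.cumsum(increments)           # running phase (wrapping is optional here)
sine = 0.1 * np.sin(2 * np.pi * phase)  # sine_amp * sin(2*pi * integrated phase)
```
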
| 178 |
+
|
| 179 |
+
|
| 180 |
+
class SourceModuleHnNSF(torch.nn.Module):
|
| 181 |
+
"""
|
| 182 |
+
Generates harmonic and noise source features.
|
| 183 |
+
|
| 184 |
+
This module uses the SineGenerator to create harmonic signals based on the
|
| 185 |
+
fundamental frequency (F0) and merges them into a single excitation signal.
|
| 186 |
+
|
| 187 |
+
Args:
|
| 188 |
+
        sampling_rate (int): Sampling rate in Hz.
|
| 189 |
+
harmonic_num (int, optional): Number of harmonics above F0. Defaults to 0.
|
| 190 |
+
sine_amp (float, optional): Amplitude of sine source signal. Defaults to 0.1.
|
| 191 |
+
add_noise_std (float, optional): Standard deviation of additive Gaussian noise. Defaults to 0.003.
|
| 192 |
+
        voiced_threshold (float, optional): Threshold to set voiced/unvoiced given F0. Defaults to 0.
|
| 193 |
+
"""
|
| 194 |
+
|
| 195 |
+
def __init__(
|
| 196 |
+
self,
|
| 197 |
+
sampling_rate: int,
|
| 198 |
+
harmonic_num: int = 0,
|
| 199 |
+
sine_amp: float = 0.1,
|
| 200 |
+
add_noise_std: float = 0.003,
|
| 201 |
+
voiced_threshold: float = 0,
|
| 202 |
+
):
|
| 203 |
+
super(SourceModuleHnNSF, self).__init__()
|
| 204 |
+
|
| 205 |
+
self.sine_amp = sine_amp
|
| 206 |
+
self.noise_std = add_noise_std
|
| 207 |
+
|
| 208 |
+
# to produce sine waveforms
|
| 209 |
+
self.l_sin_gen = SineGenerator(
|
| 210 |
+
sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshold
|
| 211 |
+
)
|
| 212 |
+
|
| 213 |
+
# to merge source harmonics into a single excitation
|
| 214 |
+
self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
|
| 215 |
+
self.l_tanh = torch.nn.Tanh()
|
| 216 |
+
|
| 217 |
+
def forward(self, x: torch.Tensor):
|
| 218 |
+
sine_wavs, uv, _ = self.l_sin_gen(x)
|
| 219 |
+
sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype)
|
| 220 |
+
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
|
| 221 |
+
|
| 222 |
+
return sine_merge, None, None
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
class HiFiGANMRFGenerator(torch.nn.Module):
|
| 226 |
+
"""
|
| 227 |
+
HiFi-GAN generator with Multi-Receptive Field (MRF) blocks.
|
| 228 |
+
|
| 229 |
+
This generator takes an input feature sequence and fundamental frequency (F0)
|
| 230 |
+
as input and generates an audio waveform. It utilizes transposed convolutions
|
| 231 |
+
for upsampling and MRF blocks for feature refinement. It can also condition
|
| 232 |
+
on global conditioning features.
|
| 233 |
+
|
| 234 |
+
Args:
|
| 235 |
+
in_channel (int): Number of input channels.
|
| 236 |
+
upsample_initial_channel (int): Number of channels after the initial convolution.
|
| 237 |
+
upsample_rates (list[int]): List of upsampling rates for the transposed convolutions.
|
| 238 |
+
upsample_kernel_sizes (list[int]): List of kernel sizes for the transposed convolutions.
|
| 239 |
+
resblock_kernel_sizes (list[int]): List of kernel sizes for the convolutional layers in the MRF blocks.
|
| 240 |
+
resblock_dilations (list[list[int]]): List of lists of dilation rates for the MRF blocks.
|
| 241 |
+
gin_channels (int): Number of global conditioning input channels (0 if no global conditioning).
|
| 242 |
+
sample_rate (int): Sampling rate of the audio.
|
| 243 |
+
harmonic_num (int): Number of harmonics to generate.
|
| 244 |
+
checkpointing (bool): Whether to use checkpointing to save memory during training (default: False).
|
| 245 |
+
"""
|
| 246 |
+
|
| 247 |
+
def __init__(
|
| 248 |
+
self,
|
| 249 |
+
in_channel: int,
|
| 250 |
+
upsample_initial_channel: int,
|
| 251 |
+
upsample_rates: list[int],
|
| 252 |
+
upsample_kernel_sizes: list[int],
|
| 253 |
+
resblock_kernel_sizes: list[int],
|
| 254 |
+
resblock_dilations: list[list[int]],
|
| 255 |
+
gin_channels: int,
|
| 256 |
+
sample_rate: int,
|
| 257 |
+
harmonic_num: int,
|
| 258 |
+
checkpointing: bool = False,
|
| 259 |
+
):
|
| 260 |
+
super().__init__()
|
| 261 |
+
self.num_kernels = len(resblock_kernel_sizes)
|
| 262 |
+
self.checkpointing = checkpointing
|
| 263 |
+
|
| 264 |
+
self.f0_upsample = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
|
| 265 |
+
self.m_source = SourceModuleHnNSF(sample_rate, harmonic_num)
|
| 266 |
+
|
| 267 |
+
self.conv_pre = weight_norm(
|
| 268 |
+
torch.nn.Conv1d(
|
| 269 |
+
in_channel, upsample_initial_channel, kernel_size=7, stride=1, padding=3
|
| 270 |
+
)
|
| 271 |
+
)
|
| 272 |
+
self.upsamples = torch.nn.ModuleList()
|
| 273 |
+
self.noise_convs = torch.nn.ModuleList()
|
| 274 |
+
|
| 275 |
+
stride_f0s = [
|
| 276 |
+
math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1
|
| 277 |
+
for i in range(len(upsample_rates))
|
| 278 |
+
]
|
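
The `stride_f0s` values computed above set how strongly each `noise_conv` downsamples the full-rate harmonic source so that it matches the intermediate feature rate at every upsampling stage. A standalone sketch with assumed rates (not read from any shipped config):

```python
import math

upsample_rates = [10, 10, 2, 2]   # assumed example; total upsampling factor 400
stride_f0s = [
    math.prod(upsample_rates[i + 1:]) if i + 1 < len(upsample_rates) else 1
    for i in range(len(upsample_rates))
]
print(stride_f0s)                 # [40, 4, 2, 1]: stage i still lacks this factor
```
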
| 279 |
+
|
| 280 |
+
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
| 281 |
+
# handling odd upsampling rates
|
| 282 |
+
if u % 2 == 0:
|
| 283 |
+
# old method
|
| 284 |
+
padding = (k - u) // 2
|
| 285 |
+
else:
|
| 286 |
+
padding = u // 2 + u % 2
|
| 287 |
+
|
| 288 |
+
self.upsamples.append(
|
| 289 |
+
weight_norm(
|
| 290 |
+
torch.nn.ConvTranspose1d(
|
| 291 |
+
upsample_initial_channel // (2**i),
|
| 292 |
+
upsample_initial_channel // (2 ** (i + 1)),
|
| 293 |
+
kernel_size=k,
|
| 294 |
+
stride=u,
|
| 295 |
+
padding=padding,
|
| 296 |
+
output_padding=u % 2,
|
| 297 |
+
)
|
| 298 |
+
)
|
| 299 |
+
)
|
| 300 |
+
""" handling odd upsampling rates
|
| 301 |
+
# s k p
|
| 302 |
+
# 40 80 20
|
| 303 |
+
# 32 64 16
|
| 304 |
+
# 4 8 2
|
| 305 |
+
# 2 3 1
|
| 306 |
+
# 63 125 31
|
| 307 |
+
# 9 17 4
|
| 308 |
+
# 3 5 1
|
| 309 |
+
# 1 1 0
|
| 310 |
+
"""
|
| 311 |
+
stride = stride_f0s[i]
|
| 312 |
+
kernel = 1 if stride == 1 else stride * 2 - stride % 2
|
| 313 |
+
padding = 0 if stride == 1 else (kernel - stride) // 2
|
| 314 |
+
|
| 315 |
+
self.noise_convs.append(
|
| 316 |
+
torch.nn.Conv1d(
|
| 317 |
+
1,
|
| 318 |
+
upsample_initial_channel // (2 ** (i + 1)),
|
| 319 |
+
kernel_size=kernel,
|
| 320 |
+
stride=stride,
|
| 321 |
+
padding=padding,
|
| 322 |
+
)
|
| 323 |
+
)
|
| 324 |
+
self.mrfs = torch.nn.ModuleList()
|
| 325 |
+
for i in range(len(self.upsamples)):
|
| 326 |
+
channel = upsample_initial_channel // (2 ** (i + 1))
|
| 327 |
+
self.mrfs.append(
|
| 328 |
+
torch.nn.ModuleList(
|
| 329 |
+
[
|
| 330 |
+
MRFBlock(channel, kernel_size=k, dilations=d)
|
| 331 |
+
for k, d in zip(resblock_kernel_sizes, resblock_dilations)
|
| 332 |
+
]
|
| 333 |
+
)
|
| 334 |
+
)
|
| 335 |
+
self.conv_post = weight_norm(
|
| 336 |
+
torch.nn.Conv1d(channel, 1, kernel_size=7, stride=1, padding=3)
|
| 337 |
+
)
|
| 338 |
+
if gin_channels != 0:
|
| 339 |
+
self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
| 340 |
+
|
| 341 |
+
def forward(
|
| 342 |
+
self, x: torch.Tensor, f0: torch.Tensor, g: Optional[torch.Tensor] = None
|
| 343 |
+
):
|
| 344 |
+
f0 = self.f0_upsample(f0[:, None, :]).transpose(-1, -2)
|
| 345 |
+
har_source, _, _ = self.m_source(f0)
|
| 346 |
+
har_source = har_source.transpose(-1, -2)
|
| 347 |
+
# new tensor
|
| 348 |
+
x = self.conv_pre(x)
|
| 349 |
+
|
| 350 |
+
if g is not None:
|
| 351 |
+
# in-place call
|
| 352 |
+
x += self.cond(g)
|
| 353 |
+
|
| 354 |
+
for ups, mrf, noise_conv in zip(self.upsamples, self.mrfs, self.noise_convs):
|
| 355 |
+
# in-place call
|
| 356 |
+
x = torch.nn.functional.leaky_relu_(x, LRELU_SLOPE)
|
| 357 |
+
|
| 358 |
+
if self.training and self.checkpointing:
|
| 359 |
+
x = checkpoint(ups, x, use_reentrant=False)
|
| 360 |
+
else:
|
| 361 |
+
x = ups(x)
|
| 362 |
+
|
| 363 |
+
x += noise_conv(har_source)
|
| 364 |
+
|
| 365 |
+
def mrf_sum(x, layers):
|
| 366 |
+
return sum(layer(x) for layer in layers) / self.num_kernels
|
| 367 |
+
|
| 368 |
+
if self.training and self.checkpointing:
|
| 369 |
+
x = checkpoint(mrf_sum, x, mrf, use_reentrant=False)
|
| 370 |
+
else:
|
| 371 |
+
x = mrf_sum(x, mrf)
|
| 372 |
+
# in-place call
|
| 373 |
+
x = torch.nn.functional.leaky_relu_(x)
|
| 374 |
+
x = self.conv_post(x)
|
| 375 |
+
# in-place call
|
| 376 |
+
x = torch.tanh_(x)
|
| 377 |
+
return x
|
| 378 |
+
|
| 379 |
+
def remove_weight_norm(self):
|
| 380 |
+
remove_weight_norm(self.conv_pre)
|
| 381 |
+
for up in self.upsamples:
|
| 382 |
+
remove_weight_norm(up)
|
| 383 |
+
for mrf in self.mrfs:
|
| 384 |
+
mrf.remove_weight_norm()
|
| 385 |
+
remove_weight_norm(self.conv_post)
|
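
To make the expected tensor shapes concrete, here is a hedged usage sketch of `HiFiGANMRFGenerator`. The hyperparameters are illustrative assumptions, not values read from this repository's configs.

```python
import torch

gen = HiFiGANMRFGenerator(
    in_channel=192,
    upsample_initial_channel=512,
    upsample_rates=[10, 10, 2, 2],
    upsample_kernel_sizes=[20, 20, 4, 4],
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilations=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    gin_channels=256,
    sample_rate=40000,
    harmonic_num=8,
)
feats = torch.randn(1, 192, 100)      # (batch, in_channel, frames)
f0 = torch.full((1, 100), 150.0)      # per-frame F0 in Hz
g = torch.randn(1, 256, 1)            # speaker embedding
wav = gen(feats, f0, g)               # (1, 1, 100 * 10*10*2*2) = (1, 1, 40000)
```
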
rvc/lib/algorithm/generators/hifigan_nsf.py
ADDED
|
@@ -0,0 +1,237 @@
| 1 |
+
import math
|
| 2 |
+
from typing import Optional
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch.nn.utils import remove_weight_norm
|
| 6 |
+
from torch.nn.utils.parametrizations import weight_norm
|
| 7 |
+
from torch.utils.checkpoint import checkpoint
|
| 8 |
+
|
| 9 |
+
from rvc.lib.algorithm.commons import init_weights
|
| 10 |
+
from rvc.lib.algorithm.generators.hifigan import SineGenerator
|
| 11 |
+
from rvc.lib.algorithm.residuals import LRELU_SLOPE, ResBlock
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class SourceModuleHnNSF(torch.nn.Module):
|
| 15 |
+
"""
|
| 16 |
+
Source Module for generating harmonic and noise components for audio synthesis.
|
| 17 |
+
|
| 18 |
+
This module generates a harmonic source signal using sine waves and adds
|
| 19 |
+
optional noise. It's often used in neural vocoders as a source of excitation.
|
| 20 |
+
|
| 21 |
+
Args:
|
| 22 |
+
sample_rate (int): Sampling rate of the audio in Hz.
|
| 23 |
+
harmonic_num (int, optional): Number of harmonic overtones to generate above the fundamental frequency (F0). Defaults to 0.
|
| 24 |
+
sine_amp (float, optional): Amplitude of the sine wave components. Defaults to 0.1.
|
| 25 |
+
add_noise_std (float, optional): Standard deviation of the additive white Gaussian noise. Defaults to 0.003.
|
| 26 |
+
voiced_threshod (float, optional): Threshold for the fundamental frequency (F0) to determine if a frame is voiced. If F0 is below this threshold, it's considered unvoiced. Defaults to 0.
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
def __init__(
|
| 30 |
+
self,
|
| 31 |
+
sample_rate: int,
|
| 32 |
+
harmonic_num: int = 0,
|
| 33 |
+
sine_amp: float = 0.1,
|
| 34 |
+
add_noise_std: float = 0.003,
|
| 35 |
+
voiced_threshod: float = 0,
|
| 36 |
+
):
|
| 37 |
+
super(SourceModuleHnNSF, self).__init__()
|
| 38 |
+
|
| 39 |
+
self.sine_amp = sine_amp
|
| 40 |
+
self.noise_std = add_noise_std
|
| 41 |
+
|
| 42 |
+
self.l_sin_gen = SineGenerator(
|
| 43 |
+
sample_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
|
| 44 |
+
)
|
| 45 |
+
self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
|
| 46 |
+
self.l_tanh = torch.nn.Tanh()
|
| 47 |
+
|
| 48 |
+
def forward(self, x: torch.Tensor, upsample_factor: int = 1):
|
| 49 |
+
sine_wavs, uv, _ = self.l_sin_gen(x, upsample_factor)
|
| 50 |
+
sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype)
|
| 51 |
+
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
|
| 52 |
+
return sine_merge, None, None
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class HiFiGANNSFGenerator(torch.nn.Module):
|
| 56 |
+
"""
|
| 57 |
+
Generator module based on the Neural Source Filter (NSF) architecture.
|
| 58 |
+
|
| 59 |
+
This generator synthesizes audio by first generating a source excitation signal
|
| 60 |
+
(harmonic and noise) and then filtering it through a series of upsampling and
|
| 61 |
+
residual blocks. Global conditioning can be applied to influence the generation.
|
| 62 |
+
|
| 63 |
+
Args:
|
| 64 |
+
initial_channel (int): Number of input channels to the initial convolutional layer.
|
| 65 |
+
resblock_kernel_sizes (list): List of kernel sizes for the residual blocks.
|
| 66 |
+
resblock_dilation_sizes (list): List of lists of dilation rates for the residual blocks, corresponding to each kernel size.
|
| 67 |
+
upsample_rates (list): List of upsampling factors for each upsampling layer.
|
| 68 |
+
upsample_initial_channel (int): Number of output channels from the initial convolutional layer, which is also the input to the first upsampling layer.
|
| 69 |
+
upsample_kernel_sizes (list): List of kernel sizes for the transposed convolutional layers used for upsampling.
|
| 70 |
+
gin_channels (int): Number of input channels for the global conditioning. If 0, no global conditioning is used.
|
| 71 |
+
sr (int): Sampling rate of the audio.
|
| 72 |
+
checkpointing (bool, optional): Whether to use gradient checkpointing to save memory during training. Defaults to False.
|
| 73 |
+
"""
|
| 74 |
+
|
| 75 |
+
def __init__(
|
| 76 |
+
self,
|
| 77 |
+
initial_channel: int,
|
| 78 |
+
resblock_kernel_sizes: list,
|
| 79 |
+
resblock_dilation_sizes: list,
|
| 80 |
+
upsample_rates: list,
|
| 81 |
+
upsample_initial_channel: int,
|
| 82 |
+
upsample_kernel_sizes: list,
|
| 83 |
+
gin_channels: int,
|
| 84 |
+
sr: int,
|
| 85 |
+
checkpointing: bool = False,
|
| 86 |
+
):
|
| 87 |
+
super(HiFiGANNSFGenerator, self).__init__()
|
| 88 |
+
|
| 89 |
+
self.num_kernels = len(resblock_kernel_sizes)
|
| 90 |
+
self.num_upsamples = len(upsample_rates)
|
| 91 |
+
self.checkpointing = checkpointing
|
| 92 |
+
self.f0_upsamp = torch.nn.Upsample(scale_factor=math.prod(upsample_rates))
|
| 93 |
+
self.m_source = SourceModuleHnNSF(sample_rate=sr, harmonic_num=0)
|
| 94 |
+
|
| 95 |
+
self.conv_pre = torch.nn.Conv1d(
|
| 96 |
+
initial_channel, upsample_initial_channel, 7, 1, padding=3
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
self.ups = torch.nn.ModuleList()
|
| 100 |
+
self.noise_convs = torch.nn.ModuleList()
|
| 101 |
+
|
| 102 |
+
channels = [
|
| 103 |
+
upsample_initial_channel // (2 ** (i + 1))
|
| 104 |
+
for i in range(len(upsample_rates))
|
| 105 |
+
]
|
| 106 |
+
stride_f0s = [
|
| 107 |
+
math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1
|
| 108 |
+
for i in range(len(upsample_rates))
|
| 109 |
+
]
|
| 110 |
+
|
| 111 |
+
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
| 112 |
+
# handling odd upsampling rates
|
| 113 |
+
if u % 2 == 0:
|
| 114 |
+
# old method
|
| 115 |
+
padding = (k - u) // 2
|
| 116 |
+
else:
|
| 117 |
+
padding = u // 2 + u % 2
|
| 118 |
+
|
| 119 |
+
self.ups.append(
|
| 120 |
+
weight_norm(
|
| 121 |
+
torch.nn.ConvTranspose1d(
|
| 122 |
+
upsample_initial_channel // (2**i),
|
| 123 |
+
channels[i],
|
| 124 |
+
k,
|
| 125 |
+
u,
|
| 126 |
+
padding=padding,
|
| 127 |
+
output_padding=u % 2,
|
| 128 |
+
)
|
| 129 |
+
)
|
| 130 |
+
)
|
| 131 |
+
""" handling odd upsampling rates
|
| 132 |
+
# s k p
|
| 133 |
+
# 40 80 20
|
| 134 |
+
# 32 64 16
|
| 135 |
+
# 4 8 2
|
| 136 |
+
# 2 3 1
|
| 137 |
+
# 63 125 31
|
| 138 |
+
# 9 17 4
|
| 139 |
+
# 3 5 1
|
| 140 |
+
# 1 1 0
|
| 141 |
+
"""
|
| 142 |
+
stride = stride_f0s[i]
|
| 143 |
+
kernel = 1 if stride == 1 else stride * 2 - stride % 2
|
| 144 |
+
padding = 0 if stride == 1 else (kernel - stride) // 2
|
| 145 |
+
|
| 146 |
+
self.noise_convs.append(
|
| 147 |
+
torch.nn.Conv1d(
|
| 148 |
+
1,
|
| 149 |
+
channels[i],
|
| 150 |
+
kernel_size=kernel,
|
| 151 |
+
stride=stride,
|
| 152 |
+
padding=padding,
|
| 153 |
+
)
|
| 154 |
+
)
|
| 155 |
+
|
| 156 |
+
self.resblocks = torch.nn.ModuleList(
|
| 157 |
+
[
|
| 158 |
+
ResBlock(channels[i], k, d)
|
| 159 |
+
for i in range(len(self.ups))
|
| 160 |
+
for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes)
|
| 161 |
+
]
|
| 162 |
+
)
|
| 163 |
+
|
| 164 |
+
self.conv_post = torch.nn.Conv1d(channels[-1], 1, 7, 1, padding=3, bias=False)
|
| 165 |
+
self.ups.apply(init_weights)
|
| 166 |
+
|
| 167 |
+
if gin_channels != 0:
|
| 168 |
+
self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
| 169 |
+
|
| 170 |
+
self.upp = math.prod(upsample_rates)
|
| 171 |
+
self.lrelu_slope = LRELU_SLOPE
|
| 172 |
+
|
| 173 |
+
def forward(
|
| 174 |
+
self, x: torch.Tensor, f0: torch.Tensor, g: Optional[torch.Tensor] = None
|
| 175 |
+
):
|
| 176 |
+
har_source, _, _ = self.m_source(f0, self.upp)
|
| 177 |
+
har_source = har_source.transpose(1, 2)
|
| 178 |
+
# new tensor
|
| 179 |
+
x = self.conv_pre(x)
|
| 180 |
+
|
| 181 |
+
if g is not None:
|
| 182 |
+
# in-place call
|
| 183 |
+
x += self.cond(g)
|
| 184 |
+
|
| 185 |
+
for i, (ups, noise_convs) in enumerate(zip(self.ups, self.noise_convs)):
|
| 186 |
+
# in-place call
|
| 187 |
+
x = torch.nn.functional.leaky_relu_(x, self.lrelu_slope)
|
| 188 |
+
|
| 189 |
+
# Apply upsampling layer
|
| 190 |
+
if self.training and self.checkpointing:
|
| 191 |
+
x = checkpoint(ups, x, use_reentrant=False)
|
| 192 |
+
else:
|
| 193 |
+
x = ups(x)
|
| 194 |
+
|
| 195 |
+
# Add noise excitation
|
| 196 |
+
x += noise_convs(har_source)
|
| 197 |
+
|
| 198 |
+
# Apply residual blocks
|
| 199 |
+
def resblock_forward(x, blocks):
|
| 200 |
+
return sum(block(x) for block in blocks) / len(blocks)
|
| 201 |
+
|
| 202 |
+
blocks = self.resblocks[i * self.num_kernels : (i + 1) * self.num_kernels]
|
| 203 |
+
|
| 204 |
+
# Checkpoint or regular computation for ResBlocks
|
| 205 |
+
if self.training and self.checkpointing:
|
| 206 |
+
x = checkpoint(resblock_forward, x, blocks, use_reentrant=False)
|
| 207 |
+
else:
|
| 208 |
+
x = resblock_forward(x, blocks)
|
| 209 |
+
# in-place call
|
| 210 |
+
x = torch.nn.functional.leaky_relu_(x)
|
| 211 |
+
# in-place call
|
| 212 |
+
x = torch.tanh_(self.conv_post(x))
|
| 213 |
+
|
| 214 |
+
return x
|
| 215 |
+
|
| 216 |
+
def remove_weight_norm(self):
|
| 217 |
+
for l in self.ups:
|
| 218 |
+
remove_weight_norm(l)
|
| 219 |
+
for l in self.resblocks:
|
| 220 |
+
l.remove_weight_norm()
|
| 221 |
+
|
| 222 |
+
def __prepare_scriptable__(self):
|
| 223 |
+
for l in self.ups:
|
| 224 |
+
for hook in l._forward_pre_hooks.values():
|
| 225 |
+
if (
|
| 226 |
+
hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
|
| 227 |
+
and hook.__class__.__name__ == "WeightNorm"
|
| 228 |
+
):
|
| 229 |
+
remove_weight_norm(l)
|
| 230 |
+
for l in self.resblocks:
|
| 231 |
+
for hook in l._forward_pre_hooks.values():
|
| 232 |
+
if (
|
| 233 |
+
hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
|
| 234 |
+
and hook.__class__.__name__ == "WeightNorm"
|
| 235 |
+
):
|
| 236 |
+
remove_weight_norm(l)
|
| 237 |
+
return self
|
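
Both HiFi-GAN variants wrap their heaviest sub-modules in `torch.utils.checkpoint` when `checkpointing=True` and the model is in training mode, recomputing activations in the backward pass instead of storing them. A stripped-down sketch of the pattern (the layer and shapes are arbitrary):

```python
import torch
from torch.utils.checkpoint import checkpoint

layer = torch.nn.Conv1d(16, 16, kernel_size=3, padding=1)
x = torch.randn(2, 16, 400, requires_grad=True)

y = checkpoint(layer, x, use_reentrant=False)  # activations recomputed on backward
y.sum().backward()                             # gradients flow as usual
```
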
rvc/lib/algorithm/generators/refinegan.py
ADDED
|
@@ -0,0 +1,475 @@
| 1 |
+
import math
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
from torch import nn
|
| 5 |
+
from torch.nn import functional as F
|
| 6 |
+
from torch.nn.utils.parametrizations import weight_norm
|
| 7 |
+
from torch.nn.utils.parametrize import remove_parametrizations
|
| 8 |
+
from torch.utils.checkpoint import checkpoint
|
| 9 |
+
|
| 10 |
+
from rvc.lib.algorithm.commons import get_padding
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ResBlock(nn.Module):
|
| 14 |
+
"""
|
| 15 |
+
Residual block with multiple dilated convolutions.
|
| 16 |
+
|
| 17 |
+
This block applies a sequence of dilated convolutional layers with Leaky ReLU activation.
|
| 18 |
+
It's designed to capture information at different scales due to the varying dilation rates.
|
| 19 |
+
|
| 20 |
+
Args:
|
| 21 |
+
in_channels (int): Number of input channels.
|
| 22 |
+
out_channels (int): Number of output channels.
|
| 23 |
+
kernel_size (int, optional): Kernel size for the convolutional layers. Defaults to 7.
|
| 24 |
+
dilation (tuple[int], optional): Tuple of dilation rates for the convolutional layers. Defaults to (1, 3, 5).
|
| 25 |
+
leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation. Defaults to 0.2.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
def __init__(
|
| 29 |
+
self,
|
| 30 |
+
*,
|
| 31 |
+
in_channels: int,
|
| 32 |
+
out_channels: int,
|
| 33 |
+
kernel_size: int = 7,
|
| 34 |
+
dilation: tuple[int] = (1, 3, 5),
|
| 35 |
+
leaky_relu_slope: float = 0.2,
|
| 36 |
+
):
|
| 37 |
+
super(ResBlock, self).__init__()
|
| 38 |
+
|
| 39 |
+
self.leaky_relu_slope = leaky_relu_slope
|
| 40 |
+
self.in_channels = in_channels
|
| 41 |
+
self.out_channels = out_channels
|
| 42 |
+
|
| 43 |
+
self.convs1 = nn.ModuleList(
|
| 44 |
+
[
|
| 45 |
+
weight_norm(
|
| 46 |
+
nn.Conv1d(
|
| 47 |
+
in_channels=in_channels if idx == 0 else out_channels,
|
| 48 |
+
out_channels=out_channels,
|
| 49 |
+
kernel_size=kernel_size,
|
| 50 |
+
stride=1,
|
| 51 |
+
dilation=d,
|
| 52 |
+
padding=get_padding(kernel_size, d),
|
| 53 |
+
)
|
| 54 |
+
)
|
| 55 |
+
for idx, d in enumerate(dilation)
|
| 56 |
+
]
|
| 57 |
+
)
|
| 58 |
+
self.convs1.apply(self.init_weights)
|
| 59 |
+
|
| 60 |
+
self.convs2 = nn.ModuleList(
|
| 61 |
+
[
|
| 62 |
+
weight_norm(
|
| 63 |
+
nn.Conv1d(
|
| 64 |
+
in_channels=out_channels,
|
| 65 |
+
out_channels=out_channels,
|
| 66 |
+
kernel_size=kernel_size,
|
| 67 |
+
stride=1,
|
| 68 |
+
dilation=d,
|
| 69 |
+
padding=get_padding(kernel_size, d),
|
| 70 |
+
)
|
| 71 |
+
)
|
| 72 |
+
for idx, d in enumerate(dilation)
|
| 73 |
+
]
|
| 74 |
+
)
|
| 75 |
+
self.convs2.apply(self.init_weights)
|
| 76 |
+
|
| 77 |
+
def forward(self, x: torch.Tensor):
|
| 78 |
+
for idx, (c1, c2) in enumerate(zip(self.convs1, self.convs2)):
|
| 79 |
+
# new tensor
|
| 80 |
+
xt = F.leaky_relu(x, self.leaky_relu_slope)
|
| 81 |
+
xt = c1(xt)
|
| 82 |
+
# in-place call
|
| 83 |
+
xt = F.leaky_relu_(xt, self.leaky_relu_slope)
|
| 84 |
+
xt = c2(xt)
|
| 85 |
+
|
| 86 |
+
if idx != 0 or self.in_channels == self.out_channels:
|
| 87 |
+
x = xt + x
|
| 88 |
+
else:
|
| 89 |
+
x = xt
|
| 90 |
+
|
| 91 |
+
return x
|
| 92 |
+
|
| 93 |
+
def remove_parametrizations(self):
|
| 94 |
+
for c1, c2 in zip(self.convs1, self.convs2):
|
| 95 |
+
            remove_parametrizations(c1, "weight")
|
| 96 |
+
            remove_parametrizations(c2, "weight")
|
| 97 |
+
|
| 98 |
+
def init_weights(self, m):
|
| 99 |
+
if type(m) == nn.Conv1d:
|
| 100 |
+
m.weight.data.normal_(0, 0.01)
|
| 101 |
+
m.bias.data.fill_(0.0)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
class AdaIN(nn.Module):
|
| 105 |
+
"""
|
| 106 |
+
Adaptive Instance Normalization layer.
|
| 107 |
+
|
| 108 |
+
This layer applies a scaling factor to the input based on a learnable weight.
|
| 109 |
+
|
| 110 |
+
Args:
|
| 111 |
+
channels (int): Number of input channels.
|
| 112 |
+
leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation applied after scaling. Defaults to 0.2.
|
| 113 |
+
"""
|
| 114 |
+
|
| 115 |
+
def __init__(
|
| 116 |
+
self,
|
| 117 |
+
*,
|
| 118 |
+
channels: int,
|
| 119 |
+
leaky_relu_slope: float = 0.2,
|
| 120 |
+
):
|
| 121 |
+
super().__init__()
|
| 122 |
+
|
| 123 |
+
self.weight = nn.Parameter(torch.ones(channels))
|
| 124 |
+
# safe to use in-place as it is used on a new x+gaussian tensor
|
| 125 |
+
self.activation = nn.LeakyReLU(leaky_relu_slope, inplace=True)
|
| 126 |
+
|
| 127 |
+
def forward(self, x: torch.Tensor):
|
| 128 |
+
gaussian = torch.randn_like(x) * self.weight[None, :, None]
|
| 129 |
+
|
| 130 |
+
return self.activation(x + gaussian)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class ParallelResBlock(nn.Module):
|
| 134 |
+
"""
|
| 135 |
+
Parallel residual block that applies multiple residual blocks with different kernel sizes in parallel.
|
| 136 |
+
|
| 137 |
+
Args:
|
| 138 |
+
in_channels (int): Number of input channels.
|
| 139 |
+
out_channels (int): Number of output channels.
|
| 140 |
+
kernel_sizes (tuple[int], optional): Tuple of kernel sizes for the parallel residual blocks. Defaults to (3, 7, 11).
|
| 141 |
+
dilation (tuple[int], optional): Tuple of dilation rates for the convolutional layers within the residual blocks. Defaults to (1, 3, 5).
|
| 142 |
+
leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation. Defaults to 0.2.
|
| 143 |
+
"""
|
| 144 |
+
|
| 145 |
+
def __init__(
|
| 146 |
+
self,
|
| 147 |
+
*,
|
| 148 |
+
in_channels: int,
|
| 149 |
+
out_channels: int,
|
| 150 |
+
kernel_sizes: tuple[int] = (3, 7, 11),
|
| 151 |
+
dilation: tuple[int] = (1, 3, 5),
|
| 152 |
+
leaky_relu_slope: float = 0.2,
|
| 153 |
+
):
|
| 154 |
+
super().__init__()
|
| 155 |
+
|
| 156 |
+
self.in_channels = in_channels
|
| 157 |
+
self.out_channels = out_channels
|
| 158 |
+
|
| 159 |
+
self.input_conv = nn.Conv1d(
|
| 160 |
+
in_channels=in_channels,
|
| 161 |
+
out_channels=out_channels,
|
| 162 |
+
kernel_size=7,
|
| 163 |
+
stride=1,
|
| 164 |
+
padding=3,
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
self.blocks = nn.ModuleList(
|
| 168 |
+
[
|
| 169 |
+
nn.Sequential(
|
| 170 |
+
AdaIN(channels=out_channels),
|
| 171 |
+
ResBlock(
|
| 172 |
+
in_channels=out_channels,
|
| 173 |
+
out_channels=out_channels,
|
| 174 |
+
kernel_size=kernel_size,
|
| 175 |
+
dilation=dilation,
|
| 176 |
+
leaky_relu_slope=leaky_relu_slope,
|
| 177 |
+
),
|
| 178 |
+
AdaIN(channels=out_channels),
|
| 179 |
+
)
|
| 180 |
+
for kernel_size in kernel_sizes
|
| 181 |
+
]
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
def forward(self, x: torch.Tensor):
|
| 185 |
+
x = self.input_conv(x)
|
| 186 |
+
|
| 187 |
+
results = [block(x) for block in self.blocks]
|
| 188 |
+
|
| 189 |
+
return torch.mean(torch.stack(results), dim=0)
|
| 190 |
+
|
| 191 |
+
def remove_parametrizations(self):
|
| 192 |
+
for block in self.blocks:
|
| 193 |
+
block[1].remove_parametrizations()
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
class SineGenerator(nn.Module):
|
| 197 |
+
"""
|
| 198 |
+
Definition of sine generator
|
| 199 |
+
|
| 200 |
+
Generates sine waveforms with optional harmonics and additive noise.
|
| 201 |
+
Can be used to create harmonic noise source for neural vocoders.
|
| 202 |
+
|
| 203 |
+
Args:
|
| 204 |
+
samp_rate (int): Sampling rate in Hz.
|
| 205 |
+
harmonic_num (int): Number of harmonic overtones (default 0).
|
| 206 |
+
sine_amp (float): Amplitude of sine-waveform (default 0.1).
|
| 207 |
+
noise_std (float): Standard deviation of Gaussian noise (default 0.003).
|
| 208 |
+
voiced_threshold (float): F0 threshold for voiced/unvoiced classification (default 0).
|
| 209 |
+
"""
|
| 210 |
+
|
| 211 |
+
def __init__(
|
| 212 |
+
self,
|
| 213 |
+
samp_rate,
|
| 214 |
+
harmonic_num=0,
|
| 215 |
+
sine_amp=0.1,
|
| 216 |
+
noise_std=0.003,
|
| 217 |
+
voiced_threshold=0,
|
| 218 |
+
):
|
| 219 |
+
super(SineGenerator, self).__init__()
|
| 220 |
+
self.sine_amp = sine_amp
|
| 221 |
+
self.noise_std = noise_std
|
| 222 |
+
self.harmonic_num = harmonic_num
|
| 223 |
+
self.dim = self.harmonic_num + 1
|
| 224 |
+
self.sampling_rate = samp_rate
|
| 225 |
+
self.voiced_threshold = voiced_threshold
|
| 226 |
+
|
| 227 |
+
self.merge = nn.Sequential(
|
| 228 |
+
nn.Linear(self.dim, 1, bias=False),
|
| 229 |
+
nn.Tanh(),
|
| 230 |
+
)
|
| 231 |
+
|
| 232 |
+
def _f02uv(self, f0):
|
| 233 |
+
# generate uv signal
|
| 234 |
+
uv = torch.ones_like(f0)
|
| 235 |
+
uv = uv * (f0 > self.voiced_threshold)
|
| 236 |
+
return uv
|
| 237 |
+
|
| 238 |
+
def _f02sine(self, f0_values):
|
| 239 |
+
"""f0_values: (batchsize, length, dim)
|
| 240 |
+
where dim indicates fundamental tone and overtones
|
| 241 |
+
"""
|
| 242 |
+
# convert to F0 in rad. The integer part n can be ignored
|
| 243 |
+
# because 2 * np.pi * n doesn't affect phase
|
| 244 |
+
rad_values = (f0_values / self.sampling_rate) % 1
|
| 245 |
+
|
| 246 |
+
# initial phase noise (no noise for fundamental component)
|
| 247 |
+
rand_ini = torch.rand(
|
| 248 |
+
f0_values.shape[0], f0_values.shape[2], device=f0_values.device
|
| 249 |
+
)
|
| 250 |
+
rand_ini[:, 0] = 0
|
| 251 |
+
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
|
| 252 |
+
|
| 253 |
+
        # instantaneous phase: sine[t] = sin(2*pi * sum_{i=1}^{t} rad_i)
|
| 254 |
+
tmp_over_one = torch.cumsum(rad_values, 1) % 1
|
| 255 |
+
tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
|
| 256 |
+
cumsum_shift = torch.zeros_like(rad_values)
|
| 257 |
+
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
|
| 258 |
+
|
| 259 |
+
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
|
| 260 |
+
|
| 261 |
+
return sines
|
| 262 |
+
|
| 263 |
+
def forward(self, f0):
|
| 264 |
+
with torch.no_grad():
|
| 265 |
+
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
|
| 266 |
+
# fundamental component
|
| 267 |
+
f0_buf[:, :, 0] = f0[:, :, 0]
|
| 268 |
+
for idx in np.arange(self.harmonic_num):
|
| 269 |
+
f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
|
| 270 |
+
|
| 271 |
+
sine_waves = self._f02sine(f0_buf) * self.sine_amp
|
| 272 |
+
|
| 273 |
+
uv = self._f02uv(f0)
|
| 274 |
+
|
| 275 |
+
noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
|
| 276 |
+
noise = noise_amp * torch.randn_like(sine_waves)
|
| 277 |
+
|
| 278 |
+
sine_waves = sine_waves * uv + noise
|
| 279 |
+
# correct DC offset
|
| 280 |
+
sine_waves = sine_waves - sine_waves.mean(dim=1, keepdim=True)
|
| 281 |
+
# merge with grad
|
| 282 |
+
return self.merge(sine_waves)
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
class RefineGANGenerator(nn.Module):
|
| 286 |
+
"""
|
| 287 |
+
RefineGAN generator for audio synthesis.
|
| 288 |
+
|
| 289 |
+
This generator uses a combination of downsampling, residual blocks, and parallel residual blocks
|
| 290 |
+
to refine an input mel-spectrogram and fundamental frequency (F0) into an audio waveform.
|
| 291 |
+
It can also incorporate global conditioning.
|
| 292 |
+
|
| 293 |
+
Args:
|
| 294 |
+
sample_rate (int, optional): Sampling rate of the audio. Defaults to 44100.
|
| 295 |
+
downsample_rates (tuple[int], optional): Downsampling rates for the downsampling blocks. Defaults to (2, 2, 8, 8).
|
| 296 |
+
upsample_rates (tuple[int], optional): Upsampling rates for the upsampling blocks. Defaults to (8, 8, 2, 2).
|
| 297 |
+
leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation. Defaults to 0.2.
|
| 298 |
+
num_mels (int, optional): Number of mel-frequency bins in the input mel-spectrogram. Defaults to 128.
|
| 299 |
+
start_channels (int, optional): Number of channels in the initial convolutional layer. Defaults to 16.
|
| 300 |
+
gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 256.
|
| 301 |
+
checkpointing (bool, optional): Whether to use checkpointing for memory efficiency. Defaults to False.
|
| 302 |
+
"""
|
| 303 |
+
|
| 304 |
+
def __init__(
|
| 305 |
+
self,
|
| 306 |
+
*,
|
| 307 |
+
sample_rate: int = 44100,
|
| 308 |
+
downsample_rates: tuple[int] = (2, 2, 8, 8),
|
| 309 |
+
upsample_rates: tuple[int] = (8, 8, 2, 2),
|
| 310 |
+
leaky_relu_slope: float = 0.2,
|
| 311 |
+
num_mels: int = 128,
|
| 312 |
+
start_channels: int = 16,
|
| 313 |
+
gin_channels: int = 256,
|
| 314 |
+
checkpointing: bool = False,
|
| 315 |
+
upsample_initial_channel=512,
|
| 316 |
+
):
|
| 317 |
+
super().__init__()
|
| 318 |
+
|
| 319 |
+
self.upsample_rates = upsample_rates
|
| 320 |
+
self.leaky_relu_slope = leaky_relu_slope
|
| 321 |
+
self.checkpointing = checkpointing
|
| 322 |
+
|
| 323 |
+
self.upp = np.prod(upsample_rates)
|
| 324 |
+
self.m_source = SineGenerator(sample_rate)
|
| 325 |
+
|
| 326 |
+
# expanded f0 sinegen -> match mel_conv
|
| 327 |
+
self.pre_conv = weight_norm(
|
| 328 |
+
nn.Conv1d(
|
| 329 |
+
in_channels=1,
|
| 330 |
+
out_channels=upsample_initial_channel // 2,
|
| 331 |
+
kernel_size=7,
|
| 332 |
+
stride=1,
|
| 333 |
+
padding=3,
|
| 334 |
+
bias=False,
|
| 335 |
+
)
|
| 336 |
+
)
|
| 337 |
+
|
| 338 |
+
stride_f0s = [
|
| 339 |
+
math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1
|
| 340 |
+
for i in range(len(upsample_rates))
|
| 341 |
+
]
|
| 342 |
+
|
| 343 |
+
channels = upsample_initial_channel
|
| 344 |
+
|
| 345 |
+
self.downsample_blocks = nn.ModuleList([])
|
| 346 |
+
for i, u in enumerate(upsample_rates):
|
| 347 |
+
# handling odd upsampling rates
|
| 348 |
+
stride = stride_f0s[i]
|
| 349 |
+
kernel = 1 if stride == 1 else stride * 2 - stride % 2
|
| 350 |
+
padding = 0 if stride == 1 else (kernel - stride) // 2
|
| 351 |
+
|
| 352 |
+
# f0 input gets upscaled to full segment size, then downscaled back to match each upscale step
|
| 353 |
+
|
| 354 |
+
self.downsample_blocks.append(
|
| 355 |
+
nn.Conv1d(
|
| 356 |
+
in_channels=1,
|
| 357 |
+
out_channels=channels // 2 ** (i + 2),
|
| 358 |
+
kernel_size=kernel,
|
| 359 |
+
stride=stride,
|
| 360 |
+
padding=padding,
|
| 361 |
+
)
|
| 362 |
+
)
|
| 363 |
+
|
| 364 |
+
self.mel_conv = weight_norm(
|
| 365 |
+
nn.Conv1d(
|
| 366 |
+
in_channels=num_mels,
|
| 367 |
+
out_channels=channels // 2,
|
| 368 |
+
kernel_size=7,
|
| 369 |
+
stride=1,
|
| 370 |
+
padding=3,
|
| 371 |
+
)
|
| 372 |
+
)
|
| 373 |
+
|
| 374 |
+
if gin_channels != 0:
|
| 375 |
+
self.cond = nn.Conv1d(256, channels // 2, 1)
|
| 376 |
+
|
| 377 |
+
self.upsample_blocks = nn.ModuleList([])
|
| 378 |
+
self.upsample_conv_blocks = nn.ModuleList([])
|
| 379 |
+
self.filters = nn.ModuleList([])
|
| 380 |
+
|
| 381 |
+
for rate in upsample_rates:
|
| 382 |
+
new_channels = channels // 2
|
| 383 |
+
|
| 384 |
+
self.upsample_blocks.append(nn.Upsample(scale_factor=rate, mode="linear"))
|
| 385 |
+
|
| 386 |
+
low_pass = nn.Conv1d(
|
| 387 |
+
channels,
|
| 388 |
+
channels,
|
| 389 |
+
kernel_size=15,
|
| 390 |
+
padding=7,
|
| 391 |
+
groups=channels,
|
| 392 |
+
bias=False,
|
| 393 |
+
)
|
| 394 |
+
|
| 395 |
+
low_pass.weight.data.fill_(1.0 / 15)
|
| 396 |
+
|
| 397 |
+
self.filters.append(low_pass)
|
| 398 |
+
|
| 399 |
+
self.upsample_conv_blocks.append(
|
| 400 |
+
ParallelResBlock(
|
| 401 |
+
in_channels=channels + channels // 4,
|
| 402 |
+
out_channels=new_channels,
|
| 403 |
+
kernel_sizes=(3, 7, 11),
|
| 404 |
+
dilation=(1, 3, 5),
|
| 405 |
+
leaky_relu_slope=leaky_relu_slope,
|
| 406 |
+
)
|
| 407 |
+
)
|
| 408 |
+
|
| 409 |
+
channels = new_channels
|
| 410 |
+
|
| 411 |
+
self.conv_post = weight_norm(
|
| 412 |
+
nn.Conv1d(
|
| 413 |
+
in_channels=channels,
|
| 414 |
+
out_channels=1,
|
| 415 |
+
kernel_size=7,
|
| 416 |
+
stride=1,
|
| 417 |
+
padding=3,
|
| 418 |
+
)
|
| 419 |
+
)
|
| 420 |
+
|
| 421 |
+
def forward(self, mel: torch.Tensor, f0: torch.Tensor, g: torch.Tensor = None):
|
| 422 |
+
|
| 423 |
+
f0 = F.interpolate(
|
| 424 |
+
f0.unsqueeze(1), size=mel.shape[-1] * self.upp, mode="linear"
|
| 425 |
+
)
|
| 426 |
+
har_source = self.m_source(f0.transpose(1, 2)).transpose(1, 2)
|
| 427 |
+
|
| 428 |
+
x = self.pre_conv(har_source)
|
| 429 |
+
x = F.interpolate(x, size=mel.shape[-1], mode="linear")
|
| 430 |
+
# expanding spectrogram from 192 to 256 channels
|
| 431 |
+
mel = self.mel_conv(mel)
|
| 432 |
+
|
| 433 |
+
if g is not None:
|
| 434 |
+
# adding expanded speaker embedding
|
| 435 |
+
mel += self.cond(g)
|
| 436 |
+
x = torch.cat([mel, x], dim=1)
|
| 437 |
+
|
| 438 |
+
for ups, res, down, flt in zip(
|
| 439 |
+
self.upsample_blocks,
|
| 440 |
+
self.upsample_conv_blocks,
|
| 441 |
+
self.downsample_blocks,
|
| 442 |
+
self.filters,
|
| 443 |
+
):
|
| 444 |
+
# in-place call
|
| 445 |
+
x = F.leaky_relu_(x, self.leaky_relu_slope)
|
| 446 |
+
|
| 447 |
+
if self.training and self.checkpointing:
|
| 448 |
+
x = checkpoint(ups, x, use_reentrant=False)
|
| 449 |
+
x = checkpoint(flt, x, use_reentrant=False)
|
| 450 |
+
x = torch.cat([x, down(har_source)], dim=1)
|
| 451 |
+
x = checkpoint(res, x, use_reentrant=False)
|
| 452 |
+
else:
|
| 453 |
+
x = ups(x)
|
| 454 |
+
x = flt(x)
|
| 455 |
+
x = torch.cat([x, down(har_source)], dim=1)
|
| 456 |
+
x = res(x)
|
| 457 |
+
|
| 458 |
+
# in-place call
|
| 459 |
+
x = F.leaky_relu_(x, self.leaky_relu_slope)
|
| 460 |
+
x = self.conv_post(x)
|
| 461 |
+
# in-place call
|
| 462 |
+
x = torch.tanh_(x)
|
| 463 |
+
|
| 464 |
+
return x
|
| 465 |
+
|
| 466 |
+
def remove_parametrizations(self):
|
| 467 |
+
        remove_parametrizations(self.pre_conv, "weight")  # the harmonic-source conv is named pre_conv in __init__
|
| 468 |
+
        remove_parametrizations(self.mel_conv, "weight")
|
| 469 |
+
        remove_parametrizations(self.conv_post, "weight")
|
| 470 |
+
|
| 471 |
+
        # downsample_blocks hold plain Conv1d layers without weight_norm, so there is nothing to remove for them
|
| 473 |
+
|
| 474 |
+
for block in self.upsample_conv_blocks:
|
| 475 |
+
block.remove_parametrizations()
|
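
As with the other generators, a hedged usage sketch helps pin down the shapes `RefineGANGenerator` expects; every hyperparameter below is an illustrative assumption.

```python
import torch

gen = RefineGANGenerator(
    sample_rate=44100,
    upsample_rates=(8, 8, 2, 2),      # total upsampling factor 256
    num_mels=128,
    gin_channels=256,
    upsample_initial_channel=512,
)
mel = torch.randn(1, 128, 50)         # (batch, num_mels, frames)
f0 = torch.full((1, 50), 200.0)       # per-frame F0 in Hz
g = torch.randn(1, 256, 1)            # speaker embedding
wav = gen(mel, f0, g)                 # (1, 1, 50 * 256)
```
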
rvc/lib/algorithm/modules.py
ADDED
|
@@ -0,0 +1,117 @@
| 1 |
+
import torch
|
| 2 |
+
from rvc.lib.algorithm.commons import fused_add_tanh_sigmoid_multiply
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class WaveNet(torch.nn.Module):
|
| 6 |
+
"""
|
| 7 |
+
WaveNet residual blocks as used in WaveGlow.
|
| 8 |
+
|
| 9 |
+
Args:
|
| 10 |
+
hidden_channels (int): Number of hidden channels.
|
| 11 |
+
kernel_size (int): Size of the convolutional kernel.
|
| 12 |
+
dilation_rate (int): Dilation rate of the convolution.
|
| 13 |
+
n_layers (int): Number of convolutional layers.
|
| 14 |
+
gin_channels (int, optional): Number of conditioning channels. Defaults to 0.
|
| 15 |
+
p_dropout (float, optional): Dropout probability. Defaults to 0.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
def __init__(
|
| 19 |
+
self,
|
| 20 |
+
hidden_channels: int,
|
| 21 |
+
kernel_size: int,
|
| 22 |
+
dilation_rate,
|
| 23 |
+
n_layers: int,
|
| 24 |
+
gin_channels: int = 0,
|
| 25 |
+
p_dropout: int = 0,
|
| 26 |
+
):
|
| 27 |
+
super().__init__()
|
| 28 |
+
assert kernel_size % 2 == 1, "Kernel size must be odd for proper padding."
|
| 29 |
+
|
| 30 |
+
self.hidden_channels = hidden_channels
|
| 31 |
+
self.kernel_size = (kernel_size,)
|
| 32 |
+
self.dilation_rate = dilation_rate
|
| 33 |
+
self.n_layers = n_layers
|
| 34 |
+
self.gin_channels = gin_channels
|
| 35 |
+
self.p_dropout = p_dropout
|
| 36 |
+
self.n_channels_tensor = torch.IntTensor([hidden_channels]) # Static tensor
|
| 37 |
+
|
| 38 |
+
self.in_layers = torch.nn.ModuleList()
|
| 39 |
+
self.res_skip_layers = torch.nn.ModuleList()
|
| 40 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
| 41 |
+
|
| 42 |
+
# Conditional layer for global conditioning
|
| 43 |
+
if gin_channels:
|
| 44 |
+
self.cond_layer = torch.nn.utils.parametrizations.weight_norm(
|
| 45 |
+
torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1),
|
| 46 |
+
name="weight",
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
# Precompute dilations and paddings
|
| 50 |
+
dilations = [dilation_rate**i for i in range(n_layers)]
|
| 51 |
+
paddings = [(kernel_size * d - d) // 2 for d in dilations]
|
| 52 |
+
|
| 53 |
+
# Initialize layers
|
| 54 |
+
for i in range(n_layers):
|
| 55 |
+
self.in_layers.append(
|
| 56 |
+
torch.nn.utils.parametrizations.weight_norm(
|
| 57 |
+
torch.nn.Conv1d(
|
| 58 |
+
hidden_channels,
|
| 59 |
+
2 * hidden_channels,
|
| 60 |
+
kernel_size,
|
| 61 |
+
dilation=dilations[i],
|
| 62 |
+
padding=paddings[i],
|
| 63 |
+
),
|
| 64 |
+
name="weight",
|
| 65 |
+
)
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
res_skip_channels = (
|
| 69 |
+
hidden_channels if i == n_layers - 1 else 2 * hidden_channels
|
| 70 |
+
)
|
| 71 |
+
self.res_skip_layers.append(
|
| 72 |
+
torch.nn.utils.parametrizations.weight_norm(
|
| 73 |
+
torch.nn.Conv1d(hidden_channels, res_skip_channels, 1),
|
| 74 |
+
name="weight",
|
| 75 |
+
)
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
def forward(self, x, x_mask, g=None):
|
| 79 |
+
output = x.clone().zero_()
|
| 80 |
+
|
| 81 |
+
# Apply conditional layer if global conditioning is provided
|
| 82 |
+
g = self.cond_layer(g) if g is not None else None
|
| 83 |
+
|
| 84 |
+
for i in range(self.n_layers):
|
| 85 |
+
x_in = self.in_layers[i](x)
|
| 86 |
+
g_l = (
|
| 87 |
+
g[
|
| 88 |
+
:,
|
| 89 |
+
i * 2 * self.hidden_channels : (i + 1) * 2 * self.hidden_channels,
|
| 90 |
+
:,
|
| 91 |
+
]
|
| 92 |
+
if g is not None
|
| 93 |
+
else 0
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
# Activation with fused Tanh-Sigmoid
|
| 97 |
+
acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, self.n_channels_tensor)
|
| 98 |
+
acts = self.drop(acts)
|
| 99 |
+
|
| 100 |
+
# Residual and skip connections
|
| 101 |
+
res_skip_acts = self.res_skip_layers[i](acts)
|
| 102 |
+
if i < self.n_layers - 1:
|
| 103 |
+
res_acts = res_skip_acts[:, : self.hidden_channels, :]
|
| 104 |
+
x = (x + res_acts) * x_mask
|
| 105 |
+
output = output + res_skip_acts[:, self.hidden_channels :, :]
|
| 106 |
+
else:
|
| 107 |
+
output = output + res_skip_acts
|
| 108 |
+
|
| 109 |
+
return output * x_mask
|
| 110 |
+
|
| 111 |
+
def remove_weight_norm(self):
|
| 112 |
+
if self.gin_channels:
|
| 113 |
+
torch.nn.utils.remove_weight_norm(self.cond_layer)
|
| 114 |
+
for layer in self.in_layers:
|
| 115 |
+
torch.nn.utils.remove_weight_norm(layer)
|
| 116 |
+
for layer in self.res_skip_layers:
|
| 117 |
+
torch.nn.utils.remove_weight_norm(layer)
|
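
`fused_add_tanh_sigmoid_multiply` itself lives in `rvc/lib/algorithm/commons.py` and is not part of this excerpt; the sketch below shows the standard WaveNet gated activation it is expected to compute, assuming the conventional channel split (details of the real helper, such as how it handles an integer `g_l`, may differ).

```python
import torch

def gated_activation(x_in: torch.Tensor, g_l: torch.Tensor, n_channels: int):
    z = x_in + g_l                               # add (optional) conditioning
    t_act = torch.tanh(z[:, :n_channels, :])     # "filter" half of the 2*C channels
    s_act = torch.sigmoid(z[:, n_channels:, :])  # "gate" half
    return t_act * s_act                         # gated output with C channels
```
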
rvc/lib/algorithm/normalization.py
ADDED
|
@@ -0,0 +1,26 @@
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class LayerNorm(torch.nn.Module):
|
| 5 |
+
"""
|
| 6 |
+
Layer normalization module.
|
| 7 |
+
|
| 8 |
+
Args:
|
| 9 |
+
channels (int): Number of channels.
|
| 10 |
+
eps (float, optional): Epsilon value for numerical stability. Defaults to 1e-5.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
def __init__(self, channels: int, eps: float = 1e-5):
|
| 14 |
+
super().__init__()
|
| 15 |
+
self.eps = eps
|
| 16 |
+
self.gamma = torch.nn.Parameter(torch.ones(channels))
|
| 17 |
+
self.beta = torch.nn.Parameter(torch.zeros(channels))
|
| 18 |
+
|
| 19 |
+
def forward(self, x):
|
| 20 |
+
# Transpose to (batch_size, time_steps, channels) for layer_norm
|
| 21 |
+
x = x.transpose(1, -1)
|
| 22 |
+
x = torch.nn.functional.layer_norm(
|
| 23 |
+
x, (x.size(-1),), self.gamma, self.beta, self.eps
|
| 24 |
+
)
|
| 25 |
+
# Transpose back to (batch_size, channels, time_steps)
|
| 26 |
+
return x.transpose(1, -1)
|
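
A short, illustrative usage note: unlike `torch.nn.LayerNorm`, which expects channel-last input, this wrapper takes the (batch, channels, time) layout used throughout the model and handles the transposes internally.

```python
import torch

ln = LayerNorm(channels=8)
x = torch.randn(2, 8, 16)   # (batch, channels, time)
y = ln(x)                   # normalized across channels at every time step
assert y.shape == x.shape
```
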
rvc/lib/algorithm/residuals.py
ADDED
|
@@ -0,0 +1,267 @@
| 1 |
+
import torch
|
| 2 |
+
from itertools import chain
|
| 3 |
+
from typing import Optional, Tuple
|
| 4 |
+
from torch.nn.utils import remove_weight_norm
|
| 5 |
+
from torch.nn.utils.parametrizations import weight_norm
|
| 6 |
+
|
| 7 |
+
from rvc.lib.algorithm.modules import WaveNet
|
| 8 |
+
from rvc.lib.algorithm.commons import get_padding, init_weights
|
| 9 |
+
|
| 10 |
+
LRELU_SLOPE = 0.1
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def create_conv1d_layer(channels, kernel_size, dilation):
|
| 14 |
+
return weight_norm(
|
| 15 |
+
torch.nn.Conv1d(
|
| 16 |
+
channels,
|
| 17 |
+
channels,
|
| 18 |
+
kernel_size,
|
| 19 |
+
1,
|
| 20 |
+
dilation=dilation,
|
| 21 |
+
padding=get_padding(kernel_size, dilation),
|
| 22 |
+
)
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def apply_mask(tensor: torch.Tensor, mask: Optional[torch.Tensor]):
|
| 27 |
+
    return tensor * mask if mask is not None else tensor
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def apply_mask_(tensor: torch.Tensor, mask: Optional[torch.Tensor]):
|
| 31 |
+
    return tensor.mul_(mask) if mask is not None else tensor
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class ResBlock(torch.nn.Module):
|
| 35 |
+
"""
|
| 36 |
+
A residual block module that applies a series of 1D convolutional layers with residual connections.
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
def __init__(
|
| 40 |
+
self, channels: int, kernel_size: int = 3, dilations: Tuple[int] = (1, 3, 5)
|
| 41 |
+
):
|
| 42 |
+
"""
|
| 43 |
+
Initializes the ResBlock.
|
| 44 |
+
|
| 45 |
+
Args:
|
| 46 |
+
channels (int): Number of input and output channels for the convolution layers.
|
| 47 |
+
kernel_size (int): Size of the convolution kernel. Defaults to 3.
|
| 48 |
+
dilations (Tuple[int]): Tuple of dilation rates for the convolution layers in the first set.
|
| 49 |
+
"""
|
| 50 |
+
super().__init__()
|
| 51 |
+
# Create convolutional layers with specified dilations and initialize weights
|
| 52 |
+
self.convs1 = self._create_convs(channels, kernel_size, dilations)
|
| 53 |
+
self.convs2 = self._create_convs(channels, kernel_size, [1] * len(dilations))
|
| 54 |
+
|
| 55 |
+
@staticmethod
|
| 56 |
+
def _create_convs(channels: int, kernel_size: int, dilations: Tuple[int]):
|
| 57 |
+
"""
|
| 58 |
+
Creates a list of 1D convolutional layers with specified dilations.
|
| 59 |
+
|
| 60 |
+
Args:
|
| 61 |
+
channels (int): Number of input and output channels for the convolution layers.
|
| 62 |
+
kernel_size (int): Size of the convolution kernel.
|
| 63 |
+
dilations (Tuple[int]): Tuple of dilation rates for each convolution layer.
|
| 64 |
+
"""
|
| 65 |
+
layers = torch.nn.ModuleList(
|
| 66 |
+
[create_conv1d_layer(channels, kernel_size, d) for d in dilations]
|
| 67 |
+
)
|
| 68 |
+
layers.apply(init_weights)
|
| 69 |
+
return layers
|
| 70 |
+
|
| 71 |
+
def forward(self, x: torch.Tensor, x_mask: torch.Tensor = None):
|
| 72 |
+
for conv1, conv2 in zip(self.convs1, self.convs2):
|
| 73 |
+
x_residual = x
|
| 74 |
+
# new tensor
|
| 75 |
+
x = torch.nn.functional.leaky_relu(x, LRELU_SLOPE)
|
| 76 |
+
# in-place call
|
| 77 |
+
x = apply_mask_(x, x_mask)
|
| 78 |
+
# in-place call
|
| 79 |
+
x = torch.nn.functional.leaky_relu_(conv1(x), LRELU_SLOPE)
|
| 80 |
+
# in-place call
|
| 81 |
+
x = apply_mask_(x, x_mask)
|
| 82 |
+
x = conv2(x)
|
| 83 |
+
# in-place call
|
| 84 |
+
x += x_residual
|
| 85 |
+
# in-place call
|
| 86 |
+
return apply_mask_(x, x_mask)
|
| 87 |
+
|
| 88 |
+
def remove_weight_norm(self):
|
| 89 |
+
for conv in chain(self.convs1, self.convs2):
|
| 90 |
+
remove_weight_norm(conv)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class Flip(torch.nn.Module):
|
| 94 |
+
"""
|
| 95 |
+
Flip module for flow-based models.
|
| 96 |
+
|
| 97 |
+
This module flips the input along the time dimension.
|
| 98 |
+
"""
|
| 99 |
+
|
| 100 |
+
def forward(self, x, *args, reverse=False, **kwargs):
|
| 101 |
+
x = torch.flip(x, [1])
|
| 102 |
+
if not reverse:
|
| 103 |
+
logdet = torch.zeros(x.size(0), dtype=x.dtype, device=x.device)
|
| 104 |
+
return x, logdet
|
| 105 |
+
else:
|
| 106 |
+
return x
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
class ResidualCouplingBlock(torch.nn.Module):
|
| 110 |
+
"""
|
| 111 |
+
Residual Coupling Block for normalizing flow.
|
| 112 |
+
|
| 113 |
+
Args:
|
| 114 |
+
channels (int): Number of channels in the input.
|
| 115 |
+
hidden_channels (int): Number of hidden channels in the coupling layer.
|
| 116 |
+
kernel_size (int): Kernel size of the convolutional layers.
|
| 117 |
+
dilation_rate (int): Dilation rate of the convolutional layers.
|
| 118 |
+
n_layers (int): Number of layers in the coupling layer.
|
| 119 |
+
n_flows (int, optional): Number of coupling layers in the block. Defaults to 4.
|
| 120 |
+
gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 0.
|
| 121 |
+
"""
|
| 122 |
+
|
| 123 |
+
def __init__(
|
| 124 |
+
self,
|
| 125 |
+
channels: int,
|
| 126 |
+
hidden_channels: int,
|
| 127 |
+
kernel_size: int,
|
| 128 |
+
dilation_rate: int,
|
| 129 |
+
n_layers: int,
|
| 130 |
+
n_flows: int = 4,
|
| 131 |
+
gin_channels: int = 0,
|
| 132 |
+
):
|
| 133 |
+
super(ResidualCouplingBlock, self).__init__()
|
| 134 |
+
self.channels = channels
|
| 135 |
+
self.hidden_channels = hidden_channels
|
| 136 |
+
self.kernel_size = kernel_size
|
| 137 |
+
self.dilation_rate = dilation_rate
|
| 138 |
+
self.n_layers = n_layers
|
| 139 |
+
self.n_flows = n_flows
|
| 140 |
+
self.gin_channels = gin_channels
|
| 141 |
+
|
| 142 |
+
self.flows = torch.nn.ModuleList()
|
| 143 |
+
for _ in range(n_flows):
|
| 144 |
+
self.flows.append(
|
| 145 |
+
ResidualCouplingLayer(
|
| 146 |
+
channels,
|
| 147 |
+
hidden_channels,
|
| 148 |
+
kernel_size,
|
| 149 |
+
dilation_rate,
|
| 150 |
+
n_layers,
|
| 151 |
+
gin_channels=gin_channels,
|
| 152 |
+
mean_only=True,
|
| 153 |
+
)
|
| 154 |
+
)
|
| 155 |
+
self.flows.append(Flip())
|
| 156 |
+
|
| 157 |
+
def forward(
|
| 158 |
+
self,
|
| 159 |
+
x: torch.Tensor,
|
| 160 |
+
x_mask: torch.Tensor,
|
| 161 |
+
g: Optional[torch.Tensor] = None,
|
| 162 |
+
reverse: bool = False,
|
| 163 |
+
):
|
| 164 |
+
if not reverse:
|
| 165 |
+
for flow in self.flows:
|
| 166 |
+
x, _ = flow(x, x_mask, g=g, reverse=reverse)
|
| 167 |
+
else:
|
| 168 |
+
for flow in reversed(self.flows):
|
| 169 |
+
x = flow.forward(x, x_mask, g=g, reverse=reverse)
|
| 170 |
+
return x
|
| 171 |
+
|
| 172 |
+
def remove_weight_norm(self):
|
| 173 |
+
for i in range(self.n_flows):
|
| 174 |
+
self.flows[i * 2].remove_weight_norm()
|
| 175 |
+
|
| 176 |
+
def __prepare_scriptable__(self):
|
| 177 |
+
for i in range(self.n_flows):
|
| 178 |
+
for hook in self.flows[i * 2]._forward_pre_hooks.values():
|
| 179 |
+
if (
|
| 180 |
+
hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
|
| 181 |
+
and hook.__class__.__name__ == "WeightNorm"
|
| 182 |
+
):
|
| 183 |
+
torch.nn.utils.remove_weight_norm(self.flows[i * 2])
|
| 184 |
+
|
| 185 |
+
return self
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
class ResidualCouplingLayer(torch.nn.Module):
|
| 189 |
+
"""
|
| 190 |
+
Residual coupling layer for flow-based models.
|
| 191 |
+
|
| 192 |
+
Args:
|
| 193 |
+
channels (int): Number of channels.
|
| 194 |
+
hidden_channels (int): Number of hidden channels.
|
| 195 |
+
kernel_size (int): Size of the convolutional kernel.
|
| 196 |
+
dilation_rate (int): Dilation rate of the convolution.
|
| 197 |
+
n_layers (int): Number of convolutional layers.
|
| 198 |
+
p_dropout (float, optional): Dropout probability. Defaults to 0.
|
| 199 |
+
gin_channels (int, optional): Number of conditioning channels. Defaults to 0.
|
| 200 |
+
mean_only (bool, optional): Whether to use mean-only coupling. Defaults to False.
|
| 201 |
+
"""
|
| 202 |
+
|
| 203 |
+
def __init__(
|
| 204 |
+
self,
|
| 205 |
+
channels: int,
|
| 206 |
+
hidden_channels: int,
|
| 207 |
+
kernel_size: int,
|
| 208 |
+
dilation_rate: int,
|
| 209 |
+
n_layers: int,
|
| 210 |
+
p_dropout: float = 0,
|
| 211 |
+
gin_channels: int = 0,
|
| 212 |
+
mean_only: bool = False,
|
| 213 |
+
):
|
| 214 |
+
assert channels % 2 == 0, "channels should be divisible by 2"
|
| 215 |
+
super().__init__()
|
| 216 |
+
self.channels = channels
|
| 217 |
+
self.hidden_channels = hidden_channels
|
| 218 |
+
self.kernel_size = kernel_size
|
| 219 |
+
self.dilation_rate = dilation_rate
|
| 220 |
+
self.n_layers = n_layers
|
| 221 |
+
self.half_channels = channels // 2
|
| 222 |
+
self.mean_only = mean_only
|
| 223 |
+
|
| 224 |
+
self.pre = torch.nn.Conv1d(self.half_channels, hidden_channels, 1)
|
| 225 |
+
self.enc = WaveNet(
|
| 226 |
+
hidden_channels,
|
| 227 |
+
kernel_size,
|
| 228 |
+
dilation_rate,
|
| 229 |
+
n_layers,
|
| 230 |
+
p_dropout=p_dropout,
|
| 231 |
+
gin_channels=gin_channels,
|
| 232 |
+
)
|
| 233 |
+
self.post = torch.nn.Conv1d(
|
| 234 |
+
hidden_channels, self.half_channels * (2 - mean_only), 1
|
| 235 |
+
)
|
| 236 |
+
self.post.weight.data.zero_()
|
| 237 |
+
self.post.bias.data.zero_()
|
| 238 |
+
|
| 239 |
+
def forward(
|
| 240 |
+
self,
|
| 241 |
+
x: torch.Tensor,
|
| 242 |
+
x_mask: torch.Tensor,
|
| 243 |
+
g: Optional[torch.Tensor] = None,
|
| 244 |
+
reverse: bool = False,
|
| 245 |
+
):
|
| 246 |
+
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
|
| 247 |
+
h = self.pre(x0) * x_mask
|
| 248 |
+
h = self.enc(h, x_mask, g=g)
|
| 249 |
+
stats = self.post(h) * x_mask
|
| 250 |
+
if not self.mean_only:
|
| 251 |
+
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
|
| 252 |
+
else:
|
| 253 |
+
m = stats
|
| 254 |
+
logs = torch.zeros_like(m)
|
| 255 |
+
|
| 256 |
+
if not reverse:
|
| 257 |
+
x1 = m + x1 * torch.exp(logs) * x_mask
|
| 258 |
+
x = torch.cat([x0, x1], 1)
|
| 259 |
+
logdet = torch.sum(logs, [1, 2])
|
| 260 |
+
return x, logdet
|
| 261 |
+
else:
|
| 262 |
+
x1 = (x1 - m) * torch.exp(-logs) * x_mask
|
| 263 |
+
x = torch.cat([x0, x1], 1)
|
| 264 |
+
return x
|
| 265 |
+
|
| 266 |
+
def remove_weight_norm(self):
|
| 267 |
+
self.enc.remove_weight_norm()
|
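The affine coupling above is what keeps the flow invertible: only half of the channels are transformed, and the shift/scale applied in the forward direction is undone exactly on the reverse pass. A minimal sketch of that identity with plain tensors (the shapes and the m/logs values below are made up for illustration; in the layer they come from the WaveNet encoder):

import torch

x = torch.randn(1, 4, 10)                        # (batch, channels, frames)
x_mask = torch.ones(1, 1, 10)
x0, x1 = torch.split(x, [2, 2], dim=1)           # pass-through half / transformed half
m, logs = torch.randn(1, 2, 10), 0.1 * torch.randn(1, 2, 10)

y1 = (m + x1 * torch.exp(logs)) * x_mask         # forward direction of the coupling
x1_rec = (y1 - m) * torch.exp(-logs) * x_mask    # reverse direction undoes it exactly

print(torch.allclose(x1, x1_rec, atol=1e-6))     # True
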
rvc/lib/algorithm/synthesizers.py
ADDED
|
@@ -0,0 +1,244 @@
import torch
from typing import Optional
from rvc.lib.algorithm.generators.hifigan_mrf import HiFiGANMRFGenerator
from rvc.lib.algorithm.generators.hifigan_nsf import HiFiGANNSFGenerator
from rvc.lib.algorithm.generators.hifigan import HiFiGANGenerator
from rvc.lib.algorithm.generators.refinegan import RefineGANGenerator
from rvc.lib.algorithm.commons import slice_segments, rand_slice_segments
from rvc.lib.algorithm.residuals import ResidualCouplingBlock
from rvc.lib.algorithm.encoders import TextEncoder, PosteriorEncoder


class Synthesizer(torch.nn.Module):
    """
    Base Synthesizer model.

    Args:
        spec_channels (int): Number of channels in the spectrogram.
        segment_size (int): Size of the audio segment.
        inter_channels (int): Number of channels in the intermediate layers.
        hidden_channels (int): Number of channels in the hidden layers.
        filter_channels (int): Number of channels in the filter layers.
        n_heads (int): Number of attention heads.
        n_layers (int): Number of layers in the encoder.
        kernel_size (int): Size of the convolution kernel.
        p_dropout (float): Dropout probability.
        resblock (str): Type of residual block.
        resblock_kernel_sizes (list): Kernel sizes for the residual blocks.
        resblock_dilation_sizes (list): Dilation sizes for the residual blocks.
        upsample_rates (list): Upsampling rates for the decoder.
        upsample_initial_channel (int): Number of channels in the initial upsampling layer.
        upsample_kernel_sizes (list): Kernel sizes for the upsampling layers.
        spk_embed_dim (int): Dimension of the speaker embedding.
        gin_channels (int): Number of channels in the global conditioning vector.
        sr (int): Sampling rate of the audio.
        use_f0 (bool): Whether to use F0 information.
        text_enc_hidden_dim (int): Hidden dimension for the text encoder.
        kwargs: Additional keyword arguments.
    """

    def __init__(
        self,
        spec_channels: int,
        segment_size: int,
        inter_channels: int,
        hidden_channels: int,
        filter_channels: int,
        n_heads: int,
        n_layers: int,
        kernel_size: int,
        p_dropout: float,
        resblock: str,
        resblock_kernel_sizes: list,
        resblock_dilation_sizes: list,
        upsample_rates: list,
        upsample_initial_channel: int,
        upsample_kernel_sizes: list,
        spk_embed_dim: int,
        gin_channels: int,
        sr: int,
        use_f0: bool,
        text_enc_hidden_dim: int = 768,
        vocoder: str = "HiFi-GAN",
        randomized: bool = True,
        checkpointing: bool = False,
        **kwargs,
    ):
        super().__init__()
        self.segment_size = segment_size
        self.use_f0 = use_f0
        self.randomized = randomized

        self.enc_p = TextEncoder(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            text_enc_hidden_dim,
            f0=use_f0,
        )
        print(f"Using {vocoder} vocoder")
        if use_f0:
            if vocoder == "MRF HiFi-GAN":
                self.dec = HiFiGANMRFGenerator(
                    in_channel=inter_channels,
                    upsample_initial_channel=upsample_initial_channel,
                    upsample_rates=upsample_rates,
                    upsample_kernel_sizes=upsample_kernel_sizes,
                    resblock_kernel_sizes=resblock_kernel_sizes,
                    resblock_dilations=resblock_dilation_sizes,
                    gin_channels=gin_channels,
                    sample_rate=sr,
                    harmonic_num=8,
                    checkpointing=checkpointing,
                )
            elif vocoder == "RefineGAN":
                self.dec = RefineGANGenerator(
                    sample_rate=sr,
                    downsample_rates=upsample_rates[::-1],
                    upsample_rates=upsample_rates,
                    start_channels=16,
                    num_mels=inter_channels,
                    checkpointing=checkpointing,
                )
            else:
                self.dec = HiFiGANNSFGenerator(
                    inter_channels,
                    resblock_kernel_sizes,
                    resblock_dilation_sizes,
                    upsample_rates,
                    upsample_initial_channel,
                    upsample_kernel_sizes,
                    gin_channels=gin_channels,
                    sr=sr,
                    checkpointing=checkpointing,
                )
        else:
            if vocoder == "MRF HiFi-GAN":
                print("MRF HiFi-GAN does not support training without pitch guidance.")
                self.dec = None
            elif vocoder == "RefineGAN":
                print("RefineGAN does not support training without pitch guidance.")
                self.dec = None
            else:
                self.dec = HiFiGANGenerator(
                    inter_channels,
                    resblock_kernel_sizes,
                    resblock_dilation_sizes,
                    upsample_rates,
                    upsample_initial_channel,
                    upsample_kernel_sizes,
                    gin_channels=gin_channels,
                    checkpointing=checkpointing,
                )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels,
            hidden_channels,
            5,
            1,
            3,
            gin_channels=gin_channels,
        )
        self.emb_g = torch.nn.Embedding(spk_embed_dim, gin_channels)

    def _remove_weight_norm_from(self, module):
        for hook in module._forward_pre_hooks.values():
            if getattr(hook, "__class__", None).__name__ == "WeightNorm":
                torch.nn.utils.remove_weight_norm(module)

    def remove_weight_norm(self):
        for module in [self.dec, self.flow, self.enc_q]:
            self._remove_weight_norm_from(module)

    def __prepare_scriptable__(self):
        self.remove_weight_norm()
        return self

    def forward(
        self,
        phone: torch.Tensor,
        phone_lengths: torch.Tensor,
        pitch: Optional[torch.Tensor] = None,
        pitchf: Optional[torch.Tensor] = None,
        y: Optional[torch.Tensor] = None,
        y_lengths: Optional[torch.Tensor] = None,
        ds: Optional[torch.Tensor] = None,
    ):
        g = self.emb_g(ds).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)

        if y is not None:
            z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
            z_p = self.flow(z, y_mask, g=g)
            # regular old training method using random slices
            if self.randomized:
                z_slice, ids_slice = rand_slice_segments(
                    z, y_lengths, self.segment_size
                )
                if self.use_f0:
                    pitchf = slice_segments(pitchf, ids_slice, self.segment_size, 2)
                    o = self.dec(z_slice, pitchf, g=g)
                else:
                    o = self.dec(z_slice, g=g)
                return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
            # future use for finetuning using the entire dataset each pass
            else:
                if self.use_f0:
                    o = self.dec(z, pitchf, g=g)
                else:
                    o = self.dec(z, g=g)
                return o, None, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
        else:
            return None, None, x_mask, None, (None, None, m_p, logs_p, None, None)

    @torch.jit.export
    def infer(
        self,
        phone: torch.Tensor,
        phone_lengths: torch.Tensor,
        pitch: Optional[torch.Tensor] = None,
        nsff0: Optional[torch.Tensor] = None,
        sid: torch.Tensor = None,
        rate: Optional[torch.Tensor] = None,
    ):
        """
        Inference of the model.

        Args:
            phone (torch.Tensor): Phoneme sequence.
            phone_lengths (torch.Tensor): Lengths of the phoneme sequences.
            pitch (torch.Tensor, optional): Pitch sequence.
            nsff0 (torch.Tensor, optional): Fine-grained pitch sequence.
            sid (torch.Tensor): Speaker embedding.
            rate (torch.Tensor, optional): Rate for time-stretching.
        """
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask

        if rate is not None:
            head = int(z_p.shape[2] * (1.0 - rate.item()))
            z_p, x_mask = z_p[:, :, head:], x_mask[:, :, head:]
            if self.use_f0 and nsff0 is not None:
                nsff0 = nsff0[:, head:]

        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = (
            self.dec(z * x_mask, nsff0, g=g)
            if self.use_f0
            else self.dec(z * x_mask, g=g)
        )

        return o, x_mask, (z, z_p, m_p, logs_p)
|
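For orientation, a construction-only sketch of the synthesizer. The hyperparameters below are illustrative RVC-style values chosen for this example, not the contents of any particular config in this upload; with use_f0=True and the default vocoder string, the branch above selects the HiFi-GAN NSF decoder:

from rvc.lib.algorithm.synthesizers import Synthesizer

net_g = Synthesizer(
    spec_channels=1025,          # filter_length // 2 + 1
    segment_size=32,             # latent frames sliced per training step (illustrative)
    inter_channels=192,
    hidden_channels=192,
    filter_channels=768,
    n_heads=2,
    n_layers=6,
    kernel_size=3,
    p_dropout=0.0,
    resblock="1",
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[10, 8, 2, 2],
    upsample_initial_channel=512,
    upsample_kernel_sizes=[20, 16, 4, 4],
    spk_embed_dim=109,
    gin_channels=256,
    sr=32000,
    use_f0=True,
    vocoder="HiFi-GAN",
)
net_g.eval()                     # .infer() is the path used at conversion time
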
rvc/lib/predictors/F0Extractor.py
ADDED
|
@@ -0,0 +1,99 @@
import dataclasses
import pathlib
import libf0
import librosa
import numpy as np
import resampy
import torch
import torchcrepe
import torchfcpe
import os

# from tools.anyf0.rmvpe import RMVPE
from rvc.lib.predictors.RMVPE import RMVPE0Predictor
from rvc.configs.config import Config

config = Config()


@dataclasses.dataclass
class F0Extractor:
    wav_path: pathlib.Path
    sample_rate: int = 44100
    hop_length: int = 512
    f0_min: int = 50
    f0_max: int = 1600
    method: str = "rmvpe"
    x: np.ndarray = dataclasses.field(init=False)

    def __post_init__(self):
        self.x, self.sample_rate = librosa.load(self.wav_path, sr=self.sample_rate)

    @property
    def hop_size(self):
        return self.hop_length / self.sample_rate

    @property
    def wav16k(self):
        return resampy.resample(self.x, self.sample_rate, 16000)

    def extract_f0(self):
        f0 = None
        method = self.method
        if method == "crepe":
            wav16k_torch = torch.FloatTensor(self.wav16k).unsqueeze(0).to(config.device)
            f0 = torchcrepe.predict(
                wav16k_torch,
                sample_rate=16000,
                hop_length=160,
                batch_size=512,
                fmin=self.f0_min,
                fmax=self.f0_max,
                device=config.device,
            )
            f0 = f0[0].cpu().numpy()
        elif method == "fcpe":
            audio = librosa.to_mono(self.x)
            audio_length = len(audio)
            f0_target_length = (audio_length // self.hop_length) + 1
            audio = (
                torch.from_numpy(audio)
                .float()
                .unsqueeze(0)
                .unsqueeze(-1)
                .to(config.device)
            )
            model = torchfcpe.spawn_bundled_infer_model(device=config.device)

            f0 = model.infer(
                audio,
                sr=self.sample_rate,
                decoder_mode="local_argmax",
                threshold=0.006,
                f0_min=self.f0_min,
                f0_max=self.f0_max,
                interp_uv=False,
                output_interp_target_length=f0_target_length,
            )
            f0 = f0.squeeze().cpu().numpy()
        elif method == "rmvpe":
            model_rmvpe = RMVPE0Predictor(
                os.path.join("rvc", "models", "predictors", "rmvpe.pt"),
                device=config.device,
                # hop_length=80
            )
            f0 = model_rmvpe.infer_from_audio(self.wav16k, thred=0.03)

        else:
            raise ValueError(f"Unknown method: {self.method}")
        return libf0.hz_to_cents(f0, librosa.midi_to_hz(0))

    def plot_f0(self, f0):
        from matplotlib import pyplot as plt

        plt.figure(figsize=(10, 4))
        plt.plot(f0)
        plt.title(self.method)
        plt.xlabel("Time (frames)")
        plt.ylabel("F0 (cents)")
        plt.show()
|
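A short usage sketch of the extractor (the wav path is a placeholder, and the default "rmvpe" branch expects rvc/models/predictors/rmvpe.pt to be present, as the code above shows):

from rvc.lib.predictors.F0Extractor import F0Extractor

extractor = F0Extractor("some_voice.wav", sample_rate=44100, method="rmvpe")
f0_cents = extractor.extract_f0()    # pitch contour in cents relative to MIDI note 0
extractor.plot_f0(f0_cents)
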
rvc/lib/predictors/FCPE.py
ADDED
|
@@ -0,0 +1,918 @@
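The largest of the new predictor files follows: it bundles a mel front-end (STFT), a conformer-style encoder with Performer and local attention (PCmer), and the FCPE head plus its inference wrappers (FCPEInfer, Wav2Mel, FCPEF0Predictor). One detail worth keeping in mind while reading it: the head works on a cent scale referenced to 10 Hz, so pitch moves between Hz and cents via cent = 1200 * log2(f0 / 10) and f0 = 10 * 2 ** (cent / 1200). A quick worked check of that pair (illustrative only, not part of the file):

import math

f0 = 440.0                               # A4 in Hz
cent = 1200.0 * math.log2(f0 / 10.0)     # about 6551.3 cents above the 10 Hz reference
back = 10.0 * 2 ** (cent / 1200.0)       # about 440.0 Hz; the two mappings are exact inverses
print(round(cent, 1), round(back, 1))
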
| 1 |
+
from typing import Union
|
| 2 |
+
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
from torch.nn.utils.parametrizations import weight_norm
|
| 8 |
+
from torchaudio.transforms import Resample
|
| 9 |
+
import os
|
| 10 |
+
import librosa
|
| 11 |
+
import soundfile as sf
|
| 12 |
+
import torch.utils.data
|
| 13 |
+
from librosa.filters import mel as librosa_mel_fn
|
| 14 |
+
import math
|
| 15 |
+
from functools import partial
|
| 16 |
+
|
| 17 |
+
from einops import rearrange, repeat
|
| 18 |
+
from local_attention import LocalAttention
|
| 19 |
+
from torch import nn
|
| 20 |
+
|
| 21 |
+
os.environ["LRU_CACHE_CAPACITY"] = "3"
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
|
| 25 |
+
"""Loads wav file to torch tensor."""
|
| 26 |
+
try:
|
| 27 |
+
data, sample_rate = sf.read(full_path, always_2d=True)
|
| 28 |
+
except Exception as error:
|
| 29 |
+
print(f"An error occurred loading {full_path}: {error}")
|
| 30 |
+
if return_empty_on_exception:
|
| 31 |
+
return [], sample_rate or target_sr or 48000
|
| 32 |
+
else:
|
| 33 |
+
raise
|
| 34 |
+
|
| 35 |
+
data = data[:, 0] if len(data.shape) > 1 else data
|
| 36 |
+
assert len(data) > 2
|
| 37 |
+
|
| 38 |
+
# Normalize data
|
| 39 |
+
max_mag = (
|
| 40 |
+
-np.iinfo(data.dtype).min
|
| 41 |
+
if np.issubdtype(data.dtype, np.integer)
|
| 42 |
+
else max(np.amax(data), -np.amin(data))
|
| 43 |
+
)
|
| 44 |
+
max_mag = (
|
| 45 |
+
(2**31) + 1 if max_mag > (2**15) else ((2**15) + 1 if max_mag > 1.01 else 1.0)
|
| 46 |
+
)
|
| 47 |
+
data = torch.FloatTensor(data.astype(np.float32)) / max_mag
|
| 48 |
+
|
| 49 |
+
# Handle exceptions and resample
|
| 50 |
+
if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:
|
| 51 |
+
return [], sample_rate or target_sr or 48000
|
| 52 |
+
if target_sr is not None and sample_rate != target_sr:
|
| 53 |
+
data = torch.from_numpy(
|
| 54 |
+
librosa.core.resample(
|
| 55 |
+
data.numpy(), orig_sr=sample_rate, target_sr=target_sr
|
| 56 |
+
)
|
| 57 |
+
)
|
| 58 |
+
sample_rate = target_sr
|
| 59 |
+
|
| 60 |
+
return data, sample_rate
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def dynamic_range_compression(x, C=1, clip_val=1e-5):
|
| 64 |
+
return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def dynamic_range_decompression(x, C=1):
|
| 68 |
+
return np.exp(x) / C
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
|
| 72 |
+
return torch.log(torch.clamp(x, min=clip_val) * C)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def dynamic_range_decompression_torch(x, C=1):
|
| 76 |
+
return torch.exp(x) / C
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class STFT:
|
| 80 |
+
def __init__(
|
| 81 |
+
self,
|
| 82 |
+
sr=22050,
|
| 83 |
+
n_mels=80,
|
| 84 |
+
n_fft=1024,
|
| 85 |
+
win_size=1024,
|
| 86 |
+
hop_length=256,
|
| 87 |
+
fmin=20,
|
| 88 |
+
fmax=11025,
|
| 89 |
+
clip_val=1e-5,
|
| 90 |
+
):
|
| 91 |
+
self.target_sr = sr
|
| 92 |
+
self.n_mels = n_mels
|
| 93 |
+
self.n_fft = n_fft
|
| 94 |
+
self.win_size = win_size
|
| 95 |
+
self.hop_length = hop_length
|
| 96 |
+
self.fmin = fmin
|
| 97 |
+
self.fmax = fmax
|
| 98 |
+
self.clip_val = clip_val
|
| 99 |
+
self.mel_basis = {}
|
| 100 |
+
self.hann_window = {}
|
| 101 |
+
|
| 102 |
+
def get_mel(self, y, keyshift=0, speed=1, center=False, train=False):
|
| 103 |
+
sample_rate = self.target_sr
|
| 104 |
+
n_mels = self.n_mels
|
| 105 |
+
n_fft = self.n_fft
|
| 106 |
+
win_size = self.win_size
|
| 107 |
+
hop_length = self.hop_length
|
| 108 |
+
fmin = self.fmin
|
| 109 |
+
fmax = self.fmax
|
| 110 |
+
clip_val = self.clip_val
|
| 111 |
+
|
| 112 |
+
factor = 2 ** (keyshift / 12)
|
| 113 |
+
n_fft_new = int(np.round(n_fft * factor))
|
| 114 |
+
win_size_new = int(np.round(win_size * factor))
|
| 115 |
+
hop_length_new = int(np.round(hop_length * speed))
|
| 116 |
+
|
| 117 |
+
# Optimize mel_basis and hann_window caching
|
| 118 |
+
mel_basis = self.mel_basis if not train else {}
|
| 119 |
+
hann_window = self.hann_window if not train else {}
|
| 120 |
+
|
| 121 |
+
mel_basis_key = str(fmax) + "_" + str(y.device)
|
| 122 |
+
if mel_basis_key not in mel_basis:
|
| 123 |
+
mel = librosa_mel_fn(
|
| 124 |
+
sr=sample_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax
|
| 125 |
+
)
|
| 126 |
+
mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)
|
| 127 |
+
|
| 128 |
+
keyshift_key = str(keyshift) + "_" + str(y.device)
|
| 129 |
+
if keyshift_key not in hann_window:
|
| 130 |
+
hann_window[keyshift_key] = torch.hann_window(win_size_new).to(y.device)
|
| 131 |
+
|
| 132 |
+
# Padding and STFT
|
| 133 |
+
pad_left = (win_size_new - hop_length_new) // 2
|
| 134 |
+
pad_right = max(
|
| 135 |
+
(win_size_new - hop_length_new + 1) // 2,
|
| 136 |
+
win_size_new - y.size(-1) - pad_left,
|
| 137 |
+
)
|
| 138 |
+
mode = "reflect" if pad_right < y.size(-1) else "constant"
|
| 139 |
+
y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode=mode)
|
| 140 |
+
y = y.squeeze(1)
|
| 141 |
+
|
| 142 |
+
spec = torch.stft(
|
| 143 |
+
y,
|
| 144 |
+
n_fft=n_fft_new,
|
| 145 |
+
hop_length=hop_length_new,
|
| 146 |
+
win_length=win_size_new,
|
| 147 |
+
window=hann_window[keyshift_key],
|
| 148 |
+
center=center,
|
| 149 |
+
pad_mode="reflect",
|
| 150 |
+
normalized=False,
|
| 151 |
+
onesided=True,
|
| 152 |
+
return_complex=True,
|
| 153 |
+
)
|
| 154 |
+
spec = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + (1e-9))
|
| 155 |
+
|
| 156 |
+
# Handle keyshift and mel conversion
|
| 157 |
+
if keyshift != 0:
|
| 158 |
+
size = n_fft // 2 + 1
|
| 159 |
+
resize = spec.size(1)
|
| 160 |
+
spec = (
|
| 161 |
+
F.pad(spec, (0, 0, 0, size - resize))
|
| 162 |
+
if resize < size
|
| 163 |
+
else spec[:, :size, :]
|
| 164 |
+
)
|
| 165 |
+
spec = spec * win_size / win_size_new
|
| 166 |
+
spec = torch.matmul(mel_basis[mel_basis_key], spec)
|
| 167 |
+
spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
|
| 168 |
+
return spec
|
| 169 |
+
|
| 170 |
+
def __call__(self, audiopath):
|
| 171 |
+
audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
|
| 172 |
+
spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
|
| 173 |
+
return spect
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
stft = STFT()
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def softmax_kernel(
|
| 180 |
+
data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device=None
|
| 181 |
+
):
|
| 182 |
+
b, h, *_ = data.shape
|
| 183 |
+
|
| 184 |
+
# Normalize data
|
| 185 |
+
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.0
|
| 186 |
+
|
| 187 |
+
# Project data
|
| 188 |
+
ratio = projection_matrix.shape[0] ** -0.5
|
| 189 |
+
projection = repeat(projection_matrix, "j d -> b h j d", b=b, h=h)
|
| 190 |
+
projection = projection.type_as(data)
|
| 191 |
+
data_dash = torch.einsum("...id,...jd->...ij", (data_normalizer * data), projection)
|
| 192 |
+
|
| 193 |
+
# Calculate diagonal data
|
| 194 |
+
diag_data = data**2
|
| 195 |
+
diag_data = torch.sum(diag_data, dim=-1)
|
| 196 |
+
diag_data = (diag_data / 2.0) * (data_normalizer**2)
|
| 197 |
+
diag_data = diag_data.unsqueeze(dim=-1)
|
| 198 |
+
|
| 199 |
+
# Apply softmax
|
| 200 |
+
if is_query:
|
| 201 |
+
data_dash = ratio * (
|
| 202 |
+
torch.exp(
|
| 203 |
+
data_dash
|
| 204 |
+
- diag_data
|
| 205 |
+
- torch.max(data_dash, dim=-1, keepdim=True).values
|
| 206 |
+
)
|
| 207 |
+
+ eps
|
| 208 |
+
)
|
| 209 |
+
else:
|
| 210 |
+
data_dash = ratio * (torch.exp(data_dash - diag_data + eps))
|
| 211 |
+
|
| 212 |
+
return data_dash.type_as(data)
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def orthogonal_matrix_chunk(cols, qr_uniform_q=False, device=None):
|
| 216 |
+
unstructured_block = torch.randn((cols, cols), device=device)
|
| 217 |
+
q, r = torch.linalg.qr(unstructured_block.cpu(), mode="reduced")
|
| 218 |
+
q, r = map(lambda t: t.to(device), (q, r))
|
| 219 |
+
|
| 220 |
+
if qr_uniform_q:
|
| 221 |
+
d = torch.diag(r, 0)
|
| 222 |
+
q *= d.sign()
|
| 223 |
+
return q.t()
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def exists(val):
|
| 227 |
+
return val is not None
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def empty(tensor):
|
| 231 |
+
return tensor.numel() == 0
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def default(val, d):
|
| 235 |
+
return val if exists(val) else d
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def cast_tuple(val):
|
| 239 |
+
return (val,) if not isinstance(val, tuple) else val
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
class PCmer(nn.Module):
|
| 243 |
+
def __init__(
|
| 244 |
+
self,
|
| 245 |
+
num_layers,
|
| 246 |
+
num_heads,
|
| 247 |
+
dim_model,
|
| 248 |
+
dim_keys,
|
| 249 |
+
dim_values,
|
| 250 |
+
residual_dropout,
|
| 251 |
+
attention_dropout,
|
| 252 |
+
):
|
| 253 |
+
super().__init__()
|
| 254 |
+
self.num_layers = num_layers
|
| 255 |
+
self.num_heads = num_heads
|
| 256 |
+
self.dim_model = dim_model
|
| 257 |
+
self.dim_values = dim_values
|
| 258 |
+
self.dim_keys = dim_keys
|
| 259 |
+
self.residual_dropout = residual_dropout
|
| 260 |
+
self.attention_dropout = attention_dropout
|
| 261 |
+
|
| 262 |
+
self._layers = nn.ModuleList([_EncoderLayer(self) for _ in range(num_layers)])
|
| 263 |
+
|
| 264 |
+
def forward(self, phone, mask=None):
|
| 265 |
+
for layer in self._layers:
|
| 266 |
+
phone = layer(phone, mask)
|
| 267 |
+
return phone
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
class _EncoderLayer(nn.Module):
|
| 271 |
+
def __init__(self, parent: PCmer):
|
| 272 |
+
super().__init__()
|
| 273 |
+
self.conformer = ConformerConvModule(parent.dim_model)
|
| 274 |
+
self.norm = nn.LayerNorm(parent.dim_model)
|
| 275 |
+
self.dropout = nn.Dropout(parent.residual_dropout)
|
| 276 |
+
self.attn = SelfAttention(
|
| 277 |
+
dim=parent.dim_model, heads=parent.num_heads, causal=False
|
| 278 |
+
)
|
| 279 |
+
|
| 280 |
+
def forward(self, phone, mask=None):
|
| 281 |
+
phone = phone + (self.attn(self.norm(phone), mask=mask))
|
| 282 |
+
phone = phone + (self.conformer(phone))
|
| 283 |
+
return phone
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def calc_same_padding(kernel_size):
|
| 287 |
+
pad = kernel_size // 2
|
| 288 |
+
return (pad, pad - (kernel_size + 1) % 2)
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
class Swish(nn.Module):
|
| 292 |
+
def forward(self, x):
|
| 293 |
+
return x * x.sigmoid()
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
class Transpose(nn.Module):
|
| 297 |
+
def __init__(self, dims):
|
| 298 |
+
super().__init__()
|
| 299 |
+
assert len(dims) == 2, "dims must be a tuple of two dimensions"
|
| 300 |
+
self.dims = dims
|
| 301 |
+
|
| 302 |
+
def forward(self, x):
|
| 303 |
+
return x.transpose(*self.dims)
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
class GLU(nn.Module):
|
| 307 |
+
def __init__(self, dim):
|
| 308 |
+
super().__init__()
|
| 309 |
+
self.dim = dim
|
| 310 |
+
|
| 311 |
+
def forward(self, x):
|
| 312 |
+
out, gate = x.chunk(2, dim=self.dim)
|
| 313 |
+
return out * gate.sigmoid()
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
class DepthWiseConv1d(nn.Module):
|
| 317 |
+
def __init__(self, chan_in, chan_out, kernel_size, padding):
|
| 318 |
+
super().__init__()
|
| 319 |
+
self.padding = padding
|
| 320 |
+
self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups=chan_in)
|
| 321 |
+
|
| 322 |
+
def forward(self, x):
|
| 323 |
+
x = F.pad(x, self.padding)
|
| 324 |
+
return self.conv(x)
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
class ConformerConvModule(nn.Module):
|
| 328 |
+
def __init__(
|
| 329 |
+
self, dim, causal=False, expansion_factor=2, kernel_size=31, dropout=0.0
|
| 330 |
+
):
|
| 331 |
+
super().__init__()
|
| 332 |
+
|
| 333 |
+
inner_dim = dim * expansion_factor
|
| 334 |
+
padding = calc_same_padding(kernel_size) if not causal else (kernel_size - 1, 0)
|
| 335 |
+
|
| 336 |
+
self.net = nn.Sequential(
|
| 337 |
+
nn.LayerNorm(dim),
|
| 338 |
+
Transpose((1, 2)),
|
| 339 |
+
nn.Conv1d(dim, inner_dim * 2, 1),
|
| 340 |
+
GLU(dim=1),
|
| 341 |
+
DepthWiseConv1d(
|
| 342 |
+
inner_dim, inner_dim, kernel_size=kernel_size, padding=padding
|
| 343 |
+
),
|
| 344 |
+
Swish(),
|
| 345 |
+
nn.Conv1d(inner_dim, dim, 1),
|
| 346 |
+
Transpose((1, 2)),
|
| 347 |
+
nn.Dropout(dropout),
|
| 348 |
+
)
|
| 349 |
+
|
| 350 |
+
def forward(self, x):
|
| 351 |
+
return self.net(x)
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
def linear_attention(q, k, v):
|
| 355 |
+
if v is None:
|
| 356 |
+
out = torch.einsum("...ed,...nd->...ne", k, q)
|
| 357 |
+
return out
|
| 358 |
+
else:
|
| 359 |
+
k_cumsum = k.sum(dim=-2)
|
| 360 |
+
D_inv = 1.0 / (torch.einsum("...nd,...d->...n", q, k_cumsum.type_as(q)) + 1e-8)
|
| 361 |
+
context = torch.einsum("...nd,...ne->...de", k, v)
|
| 362 |
+
out = torch.einsum("...de,...nd,...n->...ne", context, q, D_inv)
|
| 363 |
+
return out
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def gaussian_orthogonal_random_matrix(
|
| 367 |
+
nb_rows, nb_columns, scaling=0, qr_uniform_q=False, device=None
|
| 368 |
+
):
|
| 369 |
+
nb_full_blocks = int(nb_rows / nb_columns)
|
| 370 |
+
block_list = []
|
| 371 |
+
|
| 372 |
+
for _ in range(nb_full_blocks):
|
| 373 |
+
q = orthogonal_matrix_chunk(
|
| 374 |
+
nb_columns, qr_uniform_q=qr_uniform_q, device=device
|
| 375 |
+
)
|
| 376 |
+
block_list.append(q)
|
| 377 |
+
|
| 378 |
+
remaining_rows = nb_rows - nb_full_blocks * nb_columns
|
| 379 |
+
if remaining_rows > 0:
|
| 380 |
+
q = orthogonal_matrix_chunk(
|
| 381 |
+
nb_columns, qr_uniform_q=qr_uniform_q, device=device
|
| 382 |
+
)
|
| 383 |
+
block_list.append(q[:remaining_rows])
|
| 384 |
+
|
| 385 |
+
final_matrix = torch.cat(block_list)
|
| 386 |
+
|
| 387 |
+
if scaling == 0:
|
| 388 |
+
multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim=1)
|
| 389 |
+
elif scaling == 1:
|
| 390 |
+
multiplier = math.sqrt((float(nb_columns))) * torch.ones(
|
| 391 |
+
(nb_rows,), device=device
|
| 392 |
+
)
|
| 393 |
+
else:
|
| 394 |
+
raise ValueError(f"Invalid scaling {scaling}")
|
| 395 |
+
|
| 396 |
+
return torch.diag(multiplier) @ final_matrix
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
class FastAttention(nn.Module):
|
| 400 |
+
def __init__(
|
| 401 |
+
self,
|
| 402 |
+
dim_heads,
|
| 403 |
+
nb_features=None,
|
| 404 |
+
ortho_scaling=0,
|
| 405 |
+
causal=False,
|
| 406 |
+
generalized_attention=False,
|
| 407 |
+
kernel_fn=nn.ReLU(),
|
| 408 |
+
qr_uniform_q=False,
|
| 409 |
+
no_projection=False,
|
| 410 |
+
):
|
| 411 |
+
super().__init__()
|
| 412 |
+
nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
|
| 413 |
+
|
| 414 |
+
self.dim_heads = dim_heads
|
| 415 |
+
self.nb_features = nb_features
|
| 416 |
+
self.ortho_scaling = ortho_scaling
|
| 417 |
+
|
| 418 |
+
self.create_projection = partial(
|
| 419 |
+
gaussian_orthogonal_random_matrix,
|
| 420 |
+
nb_rows=self.nb_features,
|
| 421 |
+
nb_columns=dim_heads,
|
| 422 |
+
scaling=ortho_scaling,
|
| 423 |
+
qr_uniform_q=qr_uniform_q,
|
| 424 |
+
)
|
| 425 |
+
projection_matrix = self.create_projection()
|
| 426 |
+
self.register_buffer("projection_matrix", projection_matrix)
|
| 427 |
+
|
| 428 |
+
self.generalized_attention = generalized_attention
|
| 429 |
+
self.kernel_fn = kernel_fn
|
| 430 |
+
self.no_projection = no_projection
|
| 431 |
+
self.causal = causal
|
| 432 |
+
|
| 433 |
+
@torch.no_grad()
|
| 434 |
+
def redraw_projection_matrix(self):
|
| 435 |
+
projections = self.create_projection()
|
| 436 |
+
self.projection_matrix.copy_(projections)
|
| 437 |
+
del projections
|
| 438 |
+
|
| 439 |
+
def forward(self, q, k, v):
|
| 440 |
+
device = q.device
|
| 441 |
+
|
| 442 |
+
if self.no_projection:
|
| 443 |
+
q = q.softmax(dim=-1)
|
| 444 |
+
k = torch.exp(k) if self.causal else k.softmax(dim=-2)
|
| 445 |
+
else:
|
| 446 |
+
create_kernel = partial(
|
| 447 |
+
softmax_kernel, projection_matrix=self.projection_matrix, device=device
|
| 448 |
+
)
|
| 449 |
+
q = create_kernel(q, is_query=True)
|
| 450 |
+
k = create_kernel(k, is_query=False)
|
| 451 |
+
|
| 452 |
+
attn_fn = linear_attention if not self.causal else self.causal_linear_fn
|
| 453 |
+
|
| 454 |
+
if v is None:
|
| 455 |
+
out = attn_fn(q, k, None)
|
| 456 |
+
return out
|
| 457 |
+
else:
|
| 458 |
+
out = attn_fn(q, k, v)
|
| 459 |
+
return out
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
class SelfAttention(nn.Module):
|
| 463 |
+
def __init__(
|
| 464 |
+
self,
|
| 465 |
+
dim,
|
| 466 |
+
causal=False,
|
| 467 |
+
heads=8,
|
| 468 |
+
dim_head=64,
|
| 469 |
+
local_heads=0,
|
| 470 |
+
local_window_size=256,
|
| 471 |
+
nb_features=None,
|
| 472 |
+
feature_redraw_interval=1000,
|
| 473 |
+
generalized_attention=False,
|
| 474 |
+
kernel_fn=nn.ReLU(),
|
| 475 |
+
qr_uniform_q=False,
|
| 476 |
+
dropout=0.0,
|
| 477 |
+
no_projection=False,
|
| 478 |
+
):
|
| 479 |
+
super().__init__()
|
| 480 |
+
assert dim % heads == 0, "dimension must be divisible by number of heads"
|
| 481 |
+
dim_head = default(dim_head, dim // heads)
|
| 482 |
+
inner_dim = dim_head * heads
|
| 483 |
+
self.fast_attention = FastAttention(
|
| 484 |
+
dim_head,
|
| 485 |
+
nb_features,
|
| 486 |
+
causal=causal,
|
| 487 |
+
generalized_attention=generalized_attention,
|
| 488 |
+
kernel_fn=kernel_fn,
|
| 489 |
+
qr_uniform_q=qr_uniform_q,
|
| 490 |
+
no_projection=no_projection,
|
| 491 |
+
)
|
| 492 |
+
|
| 493 |
+
self.heads = heads
|
| 494 |
+
self.global_heads = heads - local_heads
|
| 495 |
+
self.local_attn = (
|
| 496 |
+
LocalAttention(
|
| 497 |
+
window_size=local_window_size,
|
| 498 |
+
causal=causal,
|
| 499 |
+
autopad=True,
|
| 500 |
+
dropout=dropout,
|
| 501 |
+
look_forward=int(not causal),
|
| 502 |
+
rel_pos_emb_config=(dim_head, local_heads),
|
| 503 |
+
)
|
| 504 |
+
if local_heads > 0
|
| 505 |
+
else None
|
| 506 |
+
)
|
| 507 |
+
|
| 508 |
+
self.to_q = nn.Linear(dim, inner_dim)
|
| 509 |
+
self.to_k = nn.Linear(dim, inner_dim)
|
| 510 |
+
self.to_v = nn.Linear(dim, inner_dim)
|
| 511 |
+
self.to_out = nn.Linear(inner_dim, dim)
|
| 512 |
+
self.dropout = nn.Dropout(dropout)
|
| 513 |
+
|
| 514 |
+
@torch.no_grad()
|
| 515 |
+
def redraw_projection_matrix(self):
|
| 516 |
+
self.fast_attention.redraw_projection_matrix()
|
| 517 |
+
|
| 518 |
+
def forward(
|
| 519 |
+
self,
|
| 520 |
+
x,
|
| 521 |
+
context=None,
|
| 522 |
+
mask=None,
|
| 523 |
+
context_mask=None,
|
| 524 |
+
name=None,
|
| 525 |
+
inference=False,
|
| 526 |
+
**kwargs,
|
| 527 |
+
):
|
| 528 |
+
_, _, _, h, gh = *x.shape, self.heads, self.global_heads
|
| 529 |
+
|
| 530 |
+
cross_attend = exists(context)
|
| 531 |
+
context = default(context, x)
|
| 532 |
+
context_mask = default(context_mask, mask) if not cross_attend else context_mask
|
| 533 |
+
q, k, v = self.to_q(x), self.to_k(context), self.to_v(context)
|
| 534 |
+
|
| 535 |
+
q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v))
|
| 536 |
+
(q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v))
|
| 537 |
+
|
| 538 |
+
attn_outs = []
|
| 539 |
+
if not empty(q):
|
| 540 |
+
if exists(context_mask):
|
| 541 |
+
global_mask = context_mask[:, None, :, None]
|
| 542 |
+
v.masked_fill_(~global_mask, 0.0)
|
| 543 |
+
if cross_attend:
|
| 544 |
+
pass # TODO: Implement cross-attention
|
| 545 |
+
else:
|
| 546 |
+
out = self.fast_attention(q, k, v)
|
| 547 |
+
attn_outs.append(out)
|
| 548 |
+
|
| 549 |
+
if not empty(lq):
|
| 550 |
+
assert (
|
| 551 |
+
not cross_attend
|
| 552 |
+
), "local attention is not compatible with cross attention"
|
| 553 |
+
out = self.local_attn(lq, lk, lv, input_mask=mask)
|
| 554 |
+
attn_outs.append(out)
|
| 555 |
+
|
| 556 |
+
out = torch.cat(attn_outs, dim=1)
|
| 557 |
+
out = rearrange(out, "b h n d -> b n (h d)")
|
| 558 |
+
out = self.to_out(out)
|
| 559 |
+
return self.dropout(out)
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
def l2_regularization(model, l2_alpha):
|
| 563 |
+
l2_loss = []
|
| 564 |
+
for module in model.modules():
|
| 565 |
+
if type(module) is nn.Conv2d:
|
| 566 |
+
l2_loss.append((module.weight**2).sum() / 2.0)
|
| 567 |
+
return l2_alpha * sum(l2_loss)
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
class FCPE(nn.Module):
|
| 571 |
+
def __init__(
|
| 572 |
+
self,
|
| 573 |
+
input_channel=128,
|
| 574 |
+
out_dims=360,
|
| 575 |
+
n_layers=12,
|
| 576 |
+
n_chans=512,
|
| 577 |
+
use_siren=False,
|
| 578 |
+
use_full=False,
|
| 579 |
+
loss_mse_scale=10,
|
| 580 |
+
loss_l2_regularization=False,
|
| 581 |
+
loss_l2_regularization_scale=1,
|
| 582 |
+
loss_grad1_mse=False,
|
| 583 |
+
loss_grad1_mse_scale=1,
|
| 584 |
+
f0_max=1975.5,
|
| 585 |
+
f0_min=32.70,
|
| 586 |
+
confidence=False,
|
| 587 |
+
threshold=0.05,
|
| 588 |
+
use_input_conv=True,
|
| 589 |
+
):
|
| 590 |
+
super().__init__()
|
| 591 |
+
if use_siren is True:
|
| 592 |
+
raise ValueError("Siren is not supported yet.")
|
| 593 |
+
if use_full is True:
|
| 594 |
+
raise ValueError("Full model is not supported yet.")
|
| 595 |
+
|
| 596 |
+
self.loss_mse_scale = loss_mse_scale if (loss_mse_scale is not None) else 10
|
| 597 |
+
self.loss_l2_regularization = (
|
| 598 |
+
loss_l2_regularization if (loss_l2_regularization is not None) else False
|
| 599 |
+
)
|
| 600 |
+
self.loss_l2_regularization_scale = (
|
| 601 |
+
loss_l2_regularization_scale
|
| 602 |
+
if (loss_l2_regularization_scale is not None)
|
| 603 |
+
else 1
|
| 604 |
+
)
|
| 605 |
+
self.loss_grad1_mse = loss_grad1_mse if (loss_grad1_mse is not None) else False
|
| 606 |
+
self.loss_grad1_mse_scale = (
|
| 607 |
+
loss_grad1_mse_scale if (loss_grad1_mse_scale is not None) else 1
|
| 608 |
+
)
|
| 609 |
+
self.f0_max = f0_max if (f0_max is not None) else 1975.5
|
| 610 |
+
self.f0_min = f0_min if (f0_min is not None) else 32.70
|
| 611 |
+
self.confidence = confidence if (confidence is not None) else False
|
| 612 |
+
self.threshold = threshold if (threshold is not None) else 0.05
|
| 613 |
+
self.use_input_conv = use_input_conv if (use_input_conv is not None) else True
|
| 614 |
+
|
| 615 |
+
self.cent_table_b = torch.Tensor(
|
| 616 |
+
np.linspace(
|
| 617 |
+
self.f0_to_cent(torch.Tensor([f0_min]))[0],
|
| 618 |
+
self.f0_to_cent(torch.Tensor([f0_max]))[0],
|
| 619 |
+
out_dims,
|
| 620 |
+
)
|
| 621 |
+
)
|
| 622 |
+
self.register_buffer("cent_table", self.cent_table_b)
|
| 623 |
+
|
| 624 |
+
# conv in stack
|
| 625 |
+
_leaky = nn.LeakyReLU()
|
| 626 |
+
self.stack = nn.Sequential(
|
| 627 |
+
nn.Conv1d(input_channel, n_chans, 3, 1, 1),
|
| 628 |
+
nn.GroupNorm(4, n_chans),
|
| 629 |
+
_leaky,
|
| 630 |
+
nn.Conv1d(n_chans, n_chans, 3, 1, 1),
|
| 631 |
+
)
|
| 632 |
+
|
| 633 |
+
# transformer
|
| 634 |
+
self.decoder = PCmer(
|
| 635 |
+
num_layers=n_layers,
|
| 636 |
+
num_heads=8,
|
| 637 |
+
dim_model=n_chans,
|
| 638 |
+
dim_keys=n_chans,
|
| 639 |
+
dim_values=n_chans,
|
| 640 |
+
residual_dropout=0.1,
|
| 641 |
+
attention_dropout=0.1,
|
| 642 |
+
)
|
| 643 |
+
self.norm = nn.LayerNorm(n_chans)
|
| 644 |
+
|
| 645 |
+
# out
|
| 646 |
+
self.n_out = out_dims
|
| 647 |
+
self.dense_out = weight_norm(nn.Linear(n_chans, self.n_out))
|
| 648 |
+
|
| 649 |
+
def forward(
|
| 650 |
+
self, mel, infer=True, gt_f0=None, return_hz_f0=False, cdecoder="local_argmax"
|
| 651 |
+
):
|
| 652 |
+
if cdecoder == "argmax":
|
| 653 |
+
self.cdecoder = self.cents_decoder
|
| 654 |
+
elif cdecoder == "local_argmax":
|
| 655 |
+
self.cdecoder = self.cents_local_decoder
|
| 656 |
+
|
| 657 |
+
x = (
|
| 658 |
+
self.stack(mel.transpose(1, 2)).transpose(1, 2)
|
| 659 |
+
if self.use_input_conv
|
| 660 |
+
else mel
|
| 661 |
+
)
|
| 662 |
+
x = self.decoder(x)
|
| 663 |
+
x = self.norm(x)
|
| 664 |
+
x = self.dense_out(x)
|
| 665 |
+
x = torch.sigmoid(x)
|
| 666 |
+
|
| 667 |
+
if not infer:
|
| 668 |
+
gt_cent_f0 = self.f0_to_cent(gt_f0)
|
| 669 |
+
gt_cent_f0 = self.gaussian_blurred_cent(gt_cent_f0)
|
| 670 |
+
loss_all = self.loss_mse_scale * F.binary_cross_entropy(x, gt_cent_f0)
|
| 671 |
+
if self.loss_l2_regularization:
|
| 672 |
+
loss_all = loss_all + l2_regularization(
|
| 673 |
+
model=self, l2_alpha=self.loss_l2_regularization_scale
|
| 674 |
+
)
|
| 675 |
+
x = loss_all
|
| 676 |
+
if infer:
|
| 677 |
+
x = self.cdecoder(x)
|
| 678 |
+
x = self.cent_to_f0(x)
|
| 679 |
+
x = (1 + x / 700).log() if not return_hz_f0 else x
|
| 680 |
+
|
| 681 |
+
return x
|
| 682 |
+
|
| 683 |
+
def cents_decoder(self, y, mask=True):
|
| 684 |
+
B, N, _ = y.size()
|
| 685 |
+
ci = self.cent_table[None, None, :].expand(B, N, -1)
|
| 686 |
+
rtn = torch.sum(ci * y, dim=-1, keepdim=True) / torch.sum(
|
| 687 |
+
y, dim=-1, keepdim=True
|
| 688 |
+
)
|
| 689 |
+
if mask:
|
| 690 |
+
confident = torch.max(y, dim=-1, keepdim=True)[0]
|
| 691 |
+
confident_mask = torch.ones_like(confident)
|
| 692 |
+
confident_mask[confident <= self.threshold] = float("-INF")
|
| 693 |
+
rtn = rtn * confident_mask
|
| 694 |
+
return (rtn, confident) if self.confidence else rtn
|
| 695 |
+
|
| 696 |
+
def cents_local_decoder(self, y, mask=True):
|
| 697 |
+
B, N, _ = y.size()
|
| 698 |
+
ci = self.cent_table[None, None, :].expand(B, N, -1)
|
| 699 |
+
confident, max_index = torch.max(y, dim=-1, keepdim=True)
|
| 700 |
+
local_argmax_index = torch.arange(0, 9).to(max_index.device) + (max_index - 4)
|
| 701 |
+
local_argmax_index = torch.clamp(local_argmax_index, 0, self.n_out - 1)
|
| 702 |
+
ci_l = torch.gather(ci, -1, local_argmax_index)
|
| 703 |
+
y_l = torch.gather(y, -1, local_argmax_index)
|
| 704 |
+
rtn = torch.sum(ci_l * y_l, dim=-1, keepdim=True) / torch.sum(
|
| 705 |
+
y_l, dim=-1, keepdim=True
|
| 706 |
+
)
|
| 707 |
+
if mask:
|
| 708 |
+
confident_mask = torch.ones_like(confident)
|
| 709 |
+
confident_mask[confident <= self.threshold] = float("-INF")
|
| 710 |
+
rtn = rtn * confident_mask
|
| 711 |
+
return (rtn, confident) if self.confidence else rtn
|
| 712 |
+
|
| 713 |
+
def cent_to_f0(self, cent):
|
| 714 |
+
return 10.0 * 2 ** (cent / 1200.0)
|
| 715 |
+
|
| 716 |
+
def f0_to_cent(self, f0):
|
| 717 |
+
return 1200.0 * torch.log2(f0 / 10.0)
|
| 718 |
+
|
| 719 |
+
def gaussian_blurred_cent(self, cents):
|
| 720 |
+
        mask = (cents > 0.1) & (cents < (1200.0 * np.log2(self.f0_max / 10.0)))
        B, N, _ = cents.size()
        ci = self.cent_table[None, None, :].expand(B, N, -1)
        return torch.exp(-torch.square(ci - cents) / 1250) * mask.float()


class FCPEInfer:
    def __init__(self, model_path, device=None, dtype=torch.float32):
        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = device
        ckpt = torch.load(model_path, map_location=torch.device(self.device), weights_only=True)
        self.args = DotDict(ckpt["config"])
        self.dtype = dtype
        model = FCPE(
            input_channel=self.args.model.input_channel,
            out_dims=self.args.model.out_dims,
            n_layers=self.args.model.n_layers,
            n_chans=self.args.model.n_chans,
            use_siren=self.args.model.use_siren,
            use_full=self.args.model.use_full,
            loss_mse_scale=self.args.loss.loss_mse_scale,
            loss_l2_regularization=self.args.loss.loss_l2_regularization,
            loss_l2_regularization_scale=self.args.loss.loss_l2_regularization_scale,
            loss_grad1_mse=self.args.loss.loss_grad1_mse,
            loss_grad1_mse_scale=self.args.loss.loss_grad1_mse_scale,
            f0_max=self.args.model.f0_max,
            f0_min=self.args.model.f0_min,
            confidence=self.args.model.confidence,
        )
        model.to(self.device).to(self.dtype)
        model.load_state_dict(ckpt["model"])
        model.eval()
        self.model = model
        self.wav2mel = Wav2Mel(self.args, dtype=self.dtype, device=self.device)

    @torch.no_grad()
    def __call__(self, audio, sr, threshold=0.05):
        self.model.threshold = threshold
        audio = audio[None, :]
        mel = self.wav2mel(audio=audio, sample_rate=sr).to(self.dtype)
        f0 = self.model(mel=mel, infer=True, return_hz_f0=True)
        return f0


class Wav2Mel:
    def __init__(self, args, device=None, dtype=torch.float32):
        self.sample_rate = args.mel.sampling_rate
        self.hop_size = args.mel.hop_size
        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = device
        self.dtype = dtype
        self.stft = STFT(
            args.mel.sampling_rate,
            args.mel.num_mels,
            args.mel.n_fft,
            args.mel.win_size,
            args.mel.hop_size,
            args.mel.fmin,
            args.mel.fmax,
        )
        self.resample_kernel = {}

    def extract_nvstft(self, audio, keyshift=0, train=False):
        mel = self.stft.get_mel(audio, keyshift=keyshift, train=train).transpose(1, 2)
        return mel

    def extract_mel(self, audio, sample_rate, keyshift=0, train=False):
        audio = audio.to(self.dtype).to(self.device)
        if sample_rate == self.sample_rate:
            audio_res = audio
        else:
            key_str = str(sample_rate)
            if key_str not in self.resample_kernel:
                self.resample_kernel[key_str] = Resample(
                    sample_rate, self.sample_rate, lowpass_filter_width=128
                )
            self.resample_kernel[key_str] = (
                self.resample_kernel[key_str].to(self.dtype).to(self.device)
            )
            audio_res = self.resample_kernel[key_str](audio)

        mel = self.extract_nvstft(
            audio_res, keyshift=keyshift, train=train
        )  # B, n_frames, bins
        n_frames = int(audio.shape[1] // self.hop_size) + 1
        mel = (
            torch.cat((mel, mel[:, -1:, :]), 1) if n_frames > int(mel.shape[1]) else mel
        )
        mel = mel[:, :n_frames, :] if n_frames < int(mel.shape[1]) else mel
        return mel

    def __call__(self, audio, sample_rate, keyshift=0, train=False):
        return self.extract_mel(audio, sample_rate, keyshift=keyshift, train=train)


class DotDict(dict):
    def __getattr__(*args):
        val = dict.get(*args)
        return DotDict(val) if type(val) is dict else val

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__


class F0Predictor(object):
    def compute_f0(self, wav, p_len):
        pass

    def compute_f0_uv(self, wav, p_len):
        pass


class FCPEF0Predictor(F0Predictor):
    def __init__(
        self,
        model_path,
        hop_length=512,
        f0_min=50,
        f0_max=1100,
        dtype=torch.float32,
        device=None,
        sample_rate=44100,
        threshold=0.05,
    ):
        self.fcpe = FCPEInfer(model_path, device=device, dtype=dtype)
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
        self.threshold = threshold
        self.sample_rate = sample_rate
        self.dtype = dtype
        self.name = "fcpe"

    def repeat_expand(
        self,
        content: Union[torch.Tensor, np.ndarray],
        target_len: int,
        mode: str = "nearest",
    ):
        ndim = content.ndim
        content = (
            content[None, None]
            if ndim == 1
            else content[None] if ndim == 2 else content
        )
        assert content.ndim == 3
        is_np = isinstance(content, np.ndarray)
        content = torch.from_numpy(content) if is_np else content
        results = torch.nn.functional.interpolate(content, size=target_len, mode=mode)
        results = results.numpy() if is_np else results
        return results[0, 0] if ndim == 1 else results[0] if ndim == 2 else results

    def post_process(self, x, sample_rate, f0, pad_to):
        f0 = (
            torch.from_numpy(f0).float().to(x.device)
            if isinstance(f0, np.ndarray)
            else f0
        )
        f0 = self.repeat_expand(f0, pad_to) if pad_to is not None else f0

        vuv_vector = torch.zeros_like(f0)
        vuv_vector[f0 > 0.0] = 1.0
        vuv_vector[f0 <= 0.0] = 0.0

        nzindex = torch.nonzero(f0).squeeze()
        f0 = torch.index_select(f0, dim=0, index=nzindex).cpu().numpy()
        time_org = self.hop_length / sample_rate * nzindex.cpu().numpy()
        time_frame = np.arange(pad_to) * self.hop_length / sample_rate

        vuv_vector = F.interpolate(vuv_vector[None, None, :], size=pad_to)[0][0]

        if f0.shape[0] <= 0:
            return np.zeros(pad_to), vuv_vector.cpu().numpy()
        if f0.shape[0] == 1:
            return np.ones(pad_to) * f0[0], vuv_vector.cpu().numpy()

        f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1])
        return f0, vuv_vector.cpu().numpy()

    def compute_f0(self, wav, p_len=None):
        x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
        p_len = x.shape[0] // self.hop_length if p_len is None else p_len
        f0 = self.fcpe(x, sr=self.sample_rate, threshold=self.threshold)[0, :, 0]
        if torch.all(f0 == 0):
            return f0.cpu().numpy() if p_len is None else np.zeros(p_len)
        return self.post_process(x, self.sample_rate, f0, p_len)[0]

    def compute_f0_uv(self, wav, p_len=None):
        x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
        p_len = x.shape[0] // self.hop_length if p_len is None else p_len
        f0 = self.fcpe(x, sr=self.sample_rate, threshold=self.threshold)[0, :, 0]
        if torch.all(f0 == 0):
            return f0.cpu().numpy() if p_len is None else np.zeros(p_len), (
                f0.cpu().numpy() if p_len is None else np.zeros(p_len)
            )
        return self.post_process(x, self.sample_rate, f0, p_len)
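
Editor's note: a minimal usage sketch for the FCPEF0Predictor defined above, not part of the uploaded file. The checkpoint path, hop length, and input filename are assumptions; pick values that match your own pipeline.

# Illustrative sketch: assumes a local FCPE checkpoint and 16 kHz mono audio.
import librosa
import numpy as np
from rvc.lib.predictors.FCPE import FCPEF0Predictor

predictor = FCPEF0Predictor(
    "rvc/models/predictors/fcpe.pt",  # assumed checkpoint location
    hop_length=160,                   # hypothetical hop; 160 samples = 10 ms at 16 kHz
    sample_rate=16000,
    device="cpu",
)
wav, _ = librosa.load("voice.wav", sr=16000, mono=True)
p_len = len(wav) // 160
f0 = predictor.compute_f0(wav, p_len=p_len)              # per-frame F0 in Hz
f0_uv, vuv = predictor.compute_f0_uv(wav, p_len=p_len)   # F0 plus voiced/unvoiced mask
print(np.round(f0[:10], 1), vuv[:10])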
rvc/lib/predictors/RMVPE.py
ADDED
@@ -0,0 +1,537 @@
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from librosa.filters import mel
|
| 7 |
+
from typing import List
|
| 8 |
+
|
| 9 |
+
N_MELS = 128
|
| 10 |
+
N_CLASS = 360
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ConvBlockRes(nn.Module):
|
| 14 |
+
"""
|
| 15 |
+
A convolutional block with residual connection.
|
| 16 |
+
|
| 17 |
+
Args:
|
| 18 |
+
in_channels (int): Number of input channels.
|
| 19 |
+
out_channels (int): Number of output channels.
|
| 20 |
+
momentum (float): Momentum for batch normalization.
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
def __init__(self, in_channels, out_channels, momentum=0.01):
|
| 24 |
+
super(ConvBlockRes, self).__init__()
|
| 25 |
+
self.conv = nn.Sequential(
|
| 26 |
+
nn.Conv2d(
|
| 27 |
+
in_channels=in_channels,
|
| 28 |
+
out_channels=out_channels,
|
| 29 |
+
kernel_size=(3, 3),
|
| 30 |
+
stride=(1, 1),
|
| 31 |
+
padding=(1, 1),
|
| 32 |
+
bias=False,
|
| 33 |
+
),
|
| 34 |
+
nn.BatchNorm2d(out_channels, momentum=momentum),
|
| 35 |
+
nn.ReLU(),
|
| 36 |
+
nn.Conv2d(
|
| 37 |
+
in_channels=out_channels,
|
| 38 |
+
out_channels=out_channels,
|
| 39 |
+
kernel_size=(3, 3),
|
| 40 |
+
stride=(1, 1),
|
| 41 |
+
padding=(1, 1),
|
| 42 |
+
bias=False,
|
| 43 |
+
),
|
| 44 |
+
nn.BatchNorm2d(out_channels, momentum=momentum),
|
| 45 |
+
nn.ReLU(),
|
| 46 |
+
)
|
| 47 |
+
if in_channels != out_channels:
|
| 48 |
+
self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
|
| 49 |
+
self.is_shortcut = True
|
| 50 |
+
else:
|
| 51 |
+
self.is_shortcut = False
|
| 52 |
+
|
| 53 |
+
def forward(self, x):
|
| 54 |
+
if self.is_shortcut:
|
| 55 |
+
return self.conv(x) + self.shortcut(x)
|
| 56 |
+
else:
|
| 57 |
+
return self.conv(x) + x
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class ResEncoderBlock(nn.Module):
|
| 61 |
+
"""
|
| 62 |
+
A residual encoder block.
|
| 63 |
+
|
| 64 |
+
Args:
|
| 65 |
+
in_channels (int): Number of input channels.
|
| 66 |
+
out_channels (int): Number of output channels.
|
| 67 |
+
kernel_size (tuple): Size of the average pooling kernel.
|
| 68 |
+
n_blocks (int): Number of convolutional blocks in the block.
|
| 69 |
+
momentum (float): Momentum for batch normalization.
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
def __init__(
|
| 73 |
+
self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
|
| 74 |
+
):
|
| 75 |
+
super(ResEncoderBlock, self).__init__()
|
| 76 |
+
self.n_blocks = n_blocks
|
| 77 |
+
self.conv = nn.ModuleList()
|
| 78 |
+
self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
|
| 79 |
+
for _ in range(n_blocks - 1):
|
| 80 |
+
self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
|
| 81 |
+
self.kernel_size = kernel_size
|
| 82 |
+
if self.kernel_size is not None:
|
| 83 |
+
self.pool = nn.AvgPool2d(kernel_size=kernel_size)
|
| 84 |
+
|
| 85 |
+
def forward(self, x):
|
| 86 |
+
for i in range(self.n_blocks):
|
| 87 |
+
x = self.conv[i](x)
|
| 88 |
+
if self.kernel_size is not None:
|
| 89 |
+
return x, self.pool(x)
|
| 90 |
+
else:
|
| 91 |
+
return x
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class Encoder(nn.Module):
|
| 95 |
+
"""
|
| 96 |
+
The encoder part of the DeepUnet.
|
| 97 |
+
|
| 98 |
+
Args:
|
| 99 |
+
in_channels (int): Number of input channels.
|
| 100 |
+
in_size (int): Size of the input tensor.
|
| 101 |
+
n_encoders (int): Number of encoder blocks.
|
| 102 |
+
kernel_size (tuple): Size of the average pooling kernel.
|
| 103 |
+
n_blocks (int): Number of convolutional blocks in each encoder block.
|
| 104 |
+
out_channels (int): Number of output channels for the first encoder block.
|
| 105 |
+
momentum (float): Momentum for batch normalization.
|
| 106 |
+
"""
|
| 107 |
+
|
| 108 |
+
def __init__(
|
| 109 |
+
self,
|
| 110 |
+
in_channels,
|
| 111 |
+
in_size,
|
| 112 |
+
n_encoders,
|
| 113 |
+
kernel_size,
|
| 114 |
+
n_blocks,
|
| 115 |
+
out_channels=16,
|
| 116 |
+
momentum=0.01,
|
| 117 |
+
):
|
| 118 |
+
super(Encoder, self).__init__()
|
| 119 |
+
self.n_encoders = n_encoders
|
| 120 |
+
self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
|
| 121 |
+
self.layers = nn.ModuleList()
|
| 122 |
+
self.latent_channels = []
|
| 123 |
+
for i in range(self.n_encoders):
|
| 124 |
+
self.layers.append(
|
| 125 |
+
ResEncoderBlock(
|
| 126 |
+
in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
|
| 127 |
+
)
|
| 128 |
+
)
|
| 129 |
+
self.latent_channels.append([out_channels, in_size])
|
| 130 |
+
in_channels = out_channels
|
| 131 |
+
out_channels *= 2
|
| 132 |
+
in_size //= 2
|
| 133 |
+
self.out_size = in_size
|
| 134 |
+
self.out_channel = out_channels
|
| 135 |
+
|
| 136 |
+
def forward(self, x: torch.Tensor):
|
| 137 |
+
concat_tensors: List[torch.Tensor] = []
|
| 138 |
+
x = self.bn(x)
|
| 139 |
+
for i in range(self.n_encoders):
|
| 140 |
+
t, x = self.layers[i](x)
|
| 141 |
+
concat_tensors.append(t)
|
| 142 |
+
return x, concat_tensors
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
class Intermediate(nn.Module):
|
| 146 |
+
"""
|
| 147 |
+
The intermediate layer of the DeepUnet.
|
| 148 |
+
|
| 149 |
+
Args:
|
| 150 |
+
in_channels (int): Number of input channels.
|
| 151 |
+
out_channels (int): Number of output channels.
|
| 152 |
+
n_inters (int): Number of convolutional blocks in the intermediate layer.
|
| 153 |
+
n_blocks (int): Number of convolutional blocks in each intermediate block.
|
| 154 |
+
momentum (float): Momentum for batch normalization.
|
| 155 |
+
"""
|
| 156 |
+
|
| 157 |
+
def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
|
| 158 |
+
super(Intermediate, self).__init__()
|
| 159 |
+
self.n_inters = n_inters
|
| 160 |
+
self.layers = nn.ModuleList()
|
| 161 |
+
self.layers.append(
|
| 162 |
+
ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
|
| 163 |
+
)
|
| 164 |
+
for _ in range(self.n_inters - 1):
|
| 165 |
+
self.layers.append(
|
| 166 |
+
ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
|
| 167 |
+
)
|
| 168 |
+
|
| 169 |
+
def forward(self, x):
|
| 170 |
+
for i in range(self.n_inters):
|
| 171 |
+
x = self.layers[i](x)
|
| 172 |
+
return x
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
class ResDecoderBlock(nn.Module):
|
| 176 |
+
"""
|
| 177 |
+
A residual decoder block.
|
| 178 |
+
|
| 179 |
+
Args:
|
| 180 |
+
in_channels (int): Number of input channels.
|
| 181 |
+
out_channels (int): Number of output channels.
|
| 182 |
+
stride (tuple): Stride for transposed convolution.
|
| 183 |
+
n_blocks (int): Number of convolutional blocks in the block.
|
| 184 |
+
momentum (float): Momentum for batch normalization.
|
| 185 |
+
"""
|
| 186 |
+
|
| 187 |
+
def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
|
| 188 |
+
super(ResDecoderBlock, self).__init__()
|
| 189 |
+
out_padding = (0, 1) if stride == (1, 2) else (1, 1)
|
| 190 |
+
self.n_blocks = n_blocks
|
| 191 |
+
self.conv1 = nn.Sequential(
|
| 192 |
+
nn.ConvTranspose2d(
|
| 193 |
+
in_channels=in_channels,
|
| 194 |
+
out_channels=out_channels,
|
| 195 |
+
kernel_size=(3, 3),
|
| 196 |
+
stride=stride,
|
| 197 |
+
padding=(1, 1),
|
| 198 |
+
output_padding=out_padding,
|
| 199 |
+
bias=False,
|
| 200 |
+
),
|
| 201 |
+
nn.BatchNorm2d(out_channels, momentum=momentum),
|
| 202 |
+
nn.ReLU(),
|
| 203 |
+
)
|
| 204 |
+
self.conv2 = nn.ModuleList()
|
| 205 |
+
self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
|
| 206 |
+
for _ in range(n_blocks - 1):
|
| 207 |
+
self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
|
| 208 |
+
|
| 209 |
+
def forward(self, x, concat_tensor):
|
| 210 |
+
x = self.conv1(x)
|
| 211 |
+
x = torch.cat((x, concat_tensor), dim=1)
|
| 212 |
+
for i in range(self.n_blocks):
|
| 213 |
+
x = self.conv2[i](x)
|
| 214 |
+
return x
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
class Decoder(nn.Module):
|
| 218 |
+
"""
|
| 219 |
+
The decoder part of the DeepUnet.
|
| 220 |
+
|
| 221 |
+
Args:
|
| 222 |
+
in_channels (int): Number of input channels.
|
| 223 |
+
n_decoders (int): Number of decoder blocks.
|
| 224 |
+
stride (tuple): Stride for transposed convolution.
|
| 225 |
+
n_blocks (int): Number of convolutional blocks in each decoder block.
|
| 226 |
+
momentum (float): Momentum for batch normalization.
|
| 227 |
+
"""
|
| 228 |
+
|
| 229 |
+
def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
|
| 230 |
+
super(Decoder, self).__init__()
|
| 231 |
+
self.layers = nn.ModuleList()
|
| 232 |
+
self.n_decoders = n_decoders
|
| 233 |
+
for _ in range(self.n_decoders):
|
| 234 |
+
out_channels = in_channels // 2
|
| 235 |
+
self.layers.append(
|
| 236 |
+
ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
|
| 237 |
+
)
|
| 238 |
+
in_channels = out_channels
|
| 239 |
+
|
| 240 |
+
def forward(self, x, concat_tensors):
|
| 241 |
+
for i in range(self.n_decoders):
|
| 242 |
+
x = self.layers[i](x, concat_tensors[-1 - i])
|
| 243 |
+
return x
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
class DeepUnet(nn.Module):
|
| 247 |
+
"""
|
| 248 |
+
The DeepUnet architecture.
|
| 249 |
+
|
| 250 |
+
Args:
|
| 251 |
+
kernel_size (tuple): Size of the average pooling kernel.
|
| 252 |
+
n_blocks (int): Number of convolutional blocks in each encoder/decoder block.
|
| 253 |
+
en_de_layers (int): Number of encoder/decoder layers.
|
| 254 |
+
inter_layers (int): Number of convolutional blocks in the intermediate layer.
|
| 255 |
+
in_channels (int): Number of input channels.
|
| 256 |
+
en_out_channels (int): Number of output channels for the first encoder block.
|
| 257 |
+
"""
|
| 258 |
+
|
| 259 |
+
def __init__(
|
| 260 |
+
self,
|
| 261 |
+
kernel_size,
|
| 262 |
+
n_blocks,
|
| 263 |
+
en_de_layers=5,
|
| 264 |
+
inter_layers=4,
|
| 265 |
+
in_channels=1,
|
| 266 |
+
en_out_channels=16,
|
| 267 |
+
):
|
| 268 |
+
super(DeepUnet, self).__init__()
|
| 269 |
+
self.encoder = Encoder(
|
| 270 |
+
in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
|
| 271 |
+
)
|
| 272 |
+
self.intermediate = Intermediate(
|
| 273 |
+
self.encoder.out_channel // 2,
|
| 274 |
+
self.encoder.out_channel,
|
| 275 |
+
inter_layers,
|
| 276 |
+
n_blocks,
|
| 277 |
+
)
|
| 278 |
+
self.decoder = Decoder(
|
| 279 |
+
self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
|
| 280 |
+
)
|
| 281 |
+
|
| 282 |
+
def forward(self, x):
|
| 283 |
+
x, concat_tensors = self.encoder(x)
|
| 284 |
+
x = self.intermediate(x)
|
| 285 |
+
x = self.decoder(x, concat_tensors)
|
| 286 |
+
return x
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
class E2E(nn.Module):
|
| 290 |
+
"""
|
| 291 |
+
The end-to-end model.
|
| 292 |
+
|
| 293 |
+
Args:
|
| 294 |
+
n_blocks (int): Number of convolutional blocks in each encoder/decoder block.
|
| 295 |
+
n_gru (int): Number of GRU layers.
|
| 296 |
+
kernel_size (tuple): Size of the average pooling kernel.
|
| 297 |
+
en_de_layers (int): Number of encoder/decoder layers.
|
| 298 |
+
inter_layers (int): Number of convolutional blocks in the intermediate layer.
|
| 299 |
+
in_channels (int): Number of input channels.
|
| 300 |
+
en_out_channels (int): Number of output channels for the first encoder block.
|
| 301 |
+
"""
|
| 302 |
+
|
| 303 |
+
def __init__(
|
| 304 |
+
self,
|
| 305 |
+
n_blocks,
|
| 306 |
+
n_gru,
|
| 307 |
+
kernel_size,
|
| 308 |
+
en_de_layers=5,
|
| 309 |
+
inter_layers=4,
|
| 310 |
+
in_channels=1,
|
| 311 |
+
en_out_channels=16,
|
| 312 |
+
):
|
| 313 |
+
super(E2E, self).__init__()
|
| 314 |
+
self.unet = DeepUnet(
|
| 315 |
+
kernel_size,
|
| 316 |
+
n_blocks,
|
| 317 |
+
en_de_layers,
|
| 318 |
+
inter_layers,
|
| 319 |
+
in_channels,
|
| 320 |
+
en_out_channels,
|
| 321 |
+
)
|
| 322 |
+
self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
|
| 323 |
+
if n_gru:
|
| 324 |
+
self.fc = nn.Sequential(
|
| 325 |
+
BiGRU(3 * 128, 256, n_gru),
|
| 326 |
+
nn.Linear(512, N_CLASS),
|
| 327 |
+
nn.Dropout(0.25),
|
| 328 |
+
nn.Sigmoid(),
|
| 329 |
+
)
|
| 330 |
+
else:
|
| 331 |
+
self.fc = nn.Sequential(
|
| 332 |
+
nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid()
|
| 333 |
+
)
|
| 334 |
+
|
| 335 |
+
def forward(self, mel):
|
| 336 |
+
mel = mel.transpose(-1, -2).unsqueeze(1)
|
| 337 |
+
x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
|
| 338 |
+
x = self.fc(x)
|
| 339 |
+
return x
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class MelSpectrogram(torch.nn.Module):
|
| 343 |
+
"""
|
| 344 |
+
Extracts Mel-spectrogram features from audio.
|
| 345 |
+
|
| 346 |
+
Args:
|
| 347 |
+
n_mel_channels (int): Number of Mel-frequency bands.
|
| 348 |
+
sample_rate (int): Sampling rate of the audio.
|
| 349 |
+
win_length (int): Length of the window function in samples.
|
| 350 |
+
hop_length (int): Hop size between frames in samples.
|
| 351 |
+
n_fft (int, optional): Length of the FFT window. Defaults to None, which uses win_length.
|
| 352 |
+
mel_fmin (int, optional): Minimum frequency for the Mel filter bank. Defaults to 0.
|
| 353 |
+
mel_fmax (int, optional): Maximum frequency for the Mel filter bank. Defaults to None.
|
| 354 |
+
clamp (float, optional): Minimum value for clamping the Mel-spectrogram. Defaults to 1e-5.
|
| 355 |
+
"""
|
| 356 |
+
|
| 357 |
+
def __init__(
|
| 358 |
+
self,
|
| 359 |
+
n_mel_channels,
|
| 360 |
+
sample_rate,
|
| 361 |
+
win_length,
|
| 362 |
+
hop_length,
|
| 363 |
+
n_fft=None,
|
| 364 |
+
mel_fmin=0,
|
| 365 |
+
mel_fmax=None,
|
| 366 |
+
clamp=1e-5,
|
| 367 |
+
):
|
| 368 |
+
super().__init__()
|
| 369 |
+
n_fft = win_length if n_fft is None else n_fft
|
| 370 |
+
self.hann_window = {}
|
| 371 |
+
mel_basis = mel(
|
| 372 |
+
sr=sample_rate,
|
| 373 |
+
n_fft=n_fft,
|
| 374 |
+
n_mels=n_mel_channels,
|
| 375 |
+
fmin=mel_fmin,
|
| 376 |
+
fmax=mel_fmax,
|
| 377 |
+
htk=True,
|
| 378 |
+
)
|
| 379 |
+
mel_basis = torch.from_numpy(mel_basis).float()
|
| 380 |
+
self.register_buffer("mel_basis", mel_basis)
|
| 381 |
+
self.n_fft = win_length if n_fft is None else n_fft
|
| 382 |
+
self.hop_length = hop_length
|
| 383 |
+
self.win_length = win_length
|
| 384 |
+
self.sample_rate = sample_rate
|
| 385 |
+
self.n_mel_channels = n_mel_channels
|
| 386 |
+
self.clamp = clamp
|
| 387 |
+
|
| 388 |
+
def forward(self, audio, keyshift=0, speed=1, center=True):
|
| 389 |
+
factor = 2 ** (keyshift / 12)
|
| 390 |
+
n_fft_new = int(np.round(self.n_fft * factor))
|
| 391 |
+
win_length_new = int(np.round(self.win_length * factor))
|
| 392 |
+
hop_length_new = int(np.round(self.hop_length * speed))
|
| 393 |
+
keyshift_key = str(keyshift) + "_" + str(audio.device)
|
| 394 |
+
if keyshift_key not in self.hann_window:
|
| 395 |
+
self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
|
| 396 |
+
audio.device
|
| 397 |
+
)
|
| 398 |
+
fft = torch.stft(
|
| 399 |
+
audio,
|
| 400 |
+
n_fft=n_fft_new,
|
| 401 |
+
hop_length=hop_length_new,
|
| 402 |
+
win_length=win_length_new,
|
| 403 |
+
window=self.hann_window[keyshift_key],
|
| 404 |
+
center=center,
|
| 405 |
+
return_complex=True,
|
| 406 |
+
)
|
| 407 |
+
|
| 408 |
+
magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
|
| 409 |
+
if keyshift != 0:
|
| 410 |
+
size = self.n_fft // 2 + 1
|
| 411 |
+
resize = magnitude.size(1)
|
| 412 |
+
if resize < size:
|
| 413 |
+
magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
|
| 414 |
+
magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
|
| 415 |
+
mel_output = torch.matmul(self.mel_basis, magnitude)
|
| 416 |
+
log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
|
| 417 |
+
return log_mel_spec
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
class RMVPE0Predictor:
|
| 421 |
+
"""
|
| 422 |
+
A predictor for fundamental frequency (F0) based on the RMVPE0 model.
|
| 423 |
+
|
| 424 |
+
Args:
|
| 425 |
+
model_path (str): Path to the RMVPE0 model file.
|
| 426 |
+
device (str, optional): Device to use for computation. Defaults to None, which uses CUDA if available.
|
| 427 |
+
"""
|
| 428 |
+
|
| 429 |
+
def __init__(self, model_path, device=None):
|
| 430 |
+
self.resample_kernel = {}
|
| 431 |
+
model = E2E(4, 1, (2, 2))
|
| 432 |
+
ckpt = torch.load(model_path, map_location="cpu", weights_only=True)
|
| 433 |
+
model.load_state_dict(ckpt)
|
| 434 |
+
model.eval()
|
| 435 |
+
self.model = model
|
| 436 |
+
self.resample_kernel = {}
|
| 437 |
+
self.device = device
|
| 438 |
+
self.mel_extractor = MelSpectrogram(
|
| 439 |
+
N_MELS, 16000, 1024, 160, None, 30, 8000
|
| 440 |
+
).to(device)
|
| 441 |
+
self.model = self.model.to(device)
|
| 442 |
+
cents_mapping = 20 * np.arange(N_CLASS) + 1997.3794084376191
|
| 443 |
+
self.cents_mapping = np.pad(cents_mapping, (4, 4))
|
| 444 |
+
|
| 445 |
+
def mel2hidden(self, mel):
|
| 446 |
+
"""
|
| 447 |
+
Converts Mel-spectrogram features to hidden representation.
|
| 448 |
+
|
| 449 |
+
Args:
|
| 450 |
+
mel (torch.Tensor): Mel-spectrogram features.
|
| 451 |
+
"""
|
| 452 |
+
with torch.no_grad():
|
| 453 |
+
n_frames = mel.shape[-1]
|
| 454 |
+
mel = F.pad(
|
| 455 |
+
mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect"
|
| 456 |
+
)
|
| 457 |
+
hidden = self.model(mel)
|
| 458 |
+
return hidden[:, :n_frames]
|
| 459 |
+
|
| 460 |
+
def decode(self, hidden, thred=0.03):
|
| 461 |
+
"""
|
| 462 |
+
Decodes hidden representation to F0.
|
| 463 |
+
|
| 464 |
+
Args:
|
| 465 |
+
hidden (np.ndarray): Hidden representation.
|
| 466 |
+
thred (float, optional): Threshold for salience. Defaults to 0.03.
|
| 467 |
+
"""
|
| 468 |
+
cents_pred = self.to_local_average_cents(hidden, thred=thred)
|
| 469 |
+
f0 = 10 * (2 ** (cents_pred / 1200))
|
| 470 |
+
f0[f0 == 10] = 0
|
| 471 |
+
return f0
|
| 472 |
+
|
| 473 |
+
def infer_from_audio(self, audio, thred=0.03):
|
| 474 |
+
"""
|
| 475 |
+
Infers F0 from audio.
|
| 476 |
+
|
| 477 |
+
Args:
|
| 478 |
+
audio (np.ndarray): Audio signal.
|
| 479 |
+
thred (float, optional): Threshold for salience. Defaults to 0.03.
|
| 480 |
+
"""
|
| 481 |
+
audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
|
| 482 |
+
mel = self.mel_extractor(audio, center=True)
|
| 483 |
+
hidden = self.mel2hidden(mel)
|
| 484 |
+
hidden = hidden.squeeze(0).cpu().numpy()
|
| 485 |
+
f0 = self.decode(hidden, thred=thred)
|
| 486 |
+
return f0
|
| 487 |
+
|
| 488 |
+
def to_local_average_cents(self, salience, thred=0.05):
|
| 489 |
+
"""
|
| 490 |
+
Converts salience to local average cents.
|
| 491 |
+
|
| 492 |
+
Args:
|
| 493 |
+
salience (np.ndarray): Salience values.
|
| 494 |
+
thred (float, optional): Threshold for salience. Defaults to 0.05.
|
| 495 |
+
"""
|
| 496 |
+
center = np.argmax(salience, axis=1)
|
| 497 |
+
salience = np.pad(salience, ((0, 0), (4, 4)))
|
| 498 |
+
center += 4
|
| 499 |
+
todo_salience = []
|
| 500 |
+
todo_cents_mapping = []
|
| 501 |
+
starts = center - 4
|
| 502 |
+
ends = center + 5
|
| 503 |
+
for idx in range(salience.shape[0]):
|
| 504 |
+
todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
|
| 505 |
+
todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
|
| 506 |
+
todo_salience = np.array(todo_salience)
|
| 507 |
+
todo_cents_mapping = np.array(todo_cents_mapping)
|
| 508 |
+
product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
|
| 509 |
+
weight_sum = np.sum(todo_salience, 1)
|
| 510 |
+
devided = product_sum / weight_sum
|
| 511 |
+
maxx = np.max(salience, axis=1)
|
| 512 |
+
devided[maxx <= thred] = 0
|
| 513 |
+
return devided
|
| 514 |
+
|
| 515 |
+
|
| 516 |
+
class BiGRU(nn.Module):
|
| 517 |
+
"""
|
| 518 |
+
A bidirectional GRU layer.
|
| 519 |
+
|
| 520 |
+
Args:
|
| 521 |
+
input_features (int): Number of input features.
|
| 522 |
+
hidden_features (int): Number of hidden features.
|
| 523 |
+
num_layers (int): Number of GRU layers.
|
| 524 |
+
"""
|
| 525 |
+
|
| 526 |
+
def __init__(self, input_features, hidden_features, num_layers):
|
| 527 |
+
super(BiGRU, self).__init__()
|
| 528 |
+
self.gru = nn.GRU(
|
| 529 |
+
input_features,
|
| 530 |
+
hidden_features,
|
| 531 |
+
num_layers=num_layers,
|
| 532 |
+
batch_first=True,
|
| 533 |
+
bidirectional=True,
|
| 534 |
+
)
|
| 535 |
+
|
| 536 |
+
def forward(self, x):
|
| 537 |
+
return self.gru(x)[0]
|
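
Editor's note: a short sketch of how the RMVPE0Predictor above is typically driven, plus the bin-to-Hz arithmetic used by decode. The checkpoint path and input file are assumptions. The MelSpectrogram is built for 16 kHz audio with a 160-sample hop (one frame every 10 ms), and each of the 360 salience bins sits 20 cents apart starting 1997.3794 cents above 10 Hz, so f0 = 10 * 2 ** (cents / 1200).

# Illustrative sketch: assumes rmvpe.pt has been downloaded to rvc/models/predictors/.
import librosa
import numpy as np
from rvc.lib.predictors.RMVPE import RMVPE0Predictor

rmvpe = RMVPE0Predictor("rvc/models/predictors/rmvpe.pt", device="cpu")
audio, _ = librosa.load("voice.wav", sr=16000, mono=True)
f0 = rmvpe.infer_from_audio(audio, thred=0.03)  # one F0 value per 10 ms frame

# Bin 0 corresponds to 1997.3794 cents above 10 Hz:
print(10 * 2 ** (1997.3794 / 1200))  # ~31.7 Hz, the bottom of the detectable range
print(np.round(f0[:10], 1))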
rvc/lib/tools/analyzer.py
ADDED
@@ -0,0 +1,76 @@
import numpy as np
import matplotlib.pyplot as plt
import librosa.display
import librosa


def calculate_features(y, sr):
    stft = np.abs(librosa.stft(y))
    duration = librosa.get_duration(y=y, sr=sr)
    cent = librosa.feature.spectral_centroid(S=stft, sr=sr)[0]
    bw = librosa.feature.spectral_bandwidth(S=stft, sr=sr)[0]
    rolloff = librosa.feature.spectral_rolloff(S=stft, sr=sr)[0]
    return stft, duration, cent, bw, rolloff


def plot_title(title):
    plt.suptitle(title, fontsize=16, fontweight="bold")


def plot_spectrogram(y, sr, stft, duration, cmap="inferno"):
    plt.subplot(3, 1, 1)
    plt.imshow(
        librosa.amplitude_to_db(stft, ref=np.max),
        origin="lower",
        extent=[0, duration, 0, sr / 1000],
        aspect="auto",
        cmap=cmap,  # Change the colormap here
    )
    plt.colorbar(format="%+2.0f dB")
    plt.xlabel("Time (s)")
    plt.ylabel("Frequency (kHz)")
    plt.title("Spectrogram")


def plot_waveform(y, sr, duration):
    plt.subplot(3, 1, 2)
    librosa.display.waveshow(y, sr=sr)
    plt.xlabel("Time (s)")
    plt.ylabel("Amplitude")
    plt.title("Waveform")


def plot_features(times, cent, bw, rolloff, duration):
    plt.subplot(3, 1, 3)
    plt.plot(times, cent, label="Spectral Centroid (kHz)", color="b")
    plt.plot(times, bw, label="Spectral Bandwidth (kHz)", color="g")
    plt.plot(times, rolloff, label="Spectral Rolloff (kHz)", color="r")
    plt.xlabel("Time (s)")
    plt.title("Spectral Features")
    plt.legend()


def analyze_audio(audio_file, save_plot_path="logs/audio_analysis.png"):
    y, sr = librosa.load(audio_file)
    stft, duration, cent, bw, rolloff = calculate_features(y, sr)

    plt.figure(figsize=(12, 10))

    plot_title("Audio Analysis" + " - " + audio_file.split("/")[-1])
    plot_spectrogram(y, sr, stft, duration)
    plot_waveform(y, sr, duration)
    plot_features(librosa.times_like(cent), cent, bw, rolloff, duration)

    plt.tight_layout()

    if save_plot_path:
        plt.savefig(save_plot_path, bbox_inches="tight", dpi=300)
        plt.close()

    audio_info = f"""Sample Rate: {sr}\nDuration: {(
        str(round(duration, 2)) + " seconds"
        if duration < 60
        else str(round(duration / 60, 2)) + " minutes"
    )}\nNumber of Samples: {len(y)}\nBits per Sample: {librosa.get_samplerate(audio_file)}\nChannels: {"Mono (1)" if y.ndim == 1 else "Stereo (2)"}"""

    return audio_info, save_plot_path
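
Editor's note: a short usage sketch for analyze_audio; the input filename is a placeholder. The function saves the three-panel figure and returns a text summary together with the plot path.

from rvc.lib.tools.analyzer import analyze_audio

info, plot_path = analyze_audio("sample.wav", save_plot_path="logs/audio_analysis.png")
print(info)       # sample rate, duration, sample count, channel layout
print(plot_path)  # location of the saved spectrogram/waveform/feature figure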
rvc/lib/tools/gdown.py
ADDED
@@ -0,0 +1,285 @@
| 1 |
+
import os
|
| 2 |
+
import re
|
| 3 |
+
import sys
|
| 4 |
+
import json
|
| 5 |
+
import time
|
| 6 |
+
import shutil
|
| 7 |
+
import tempfile
|
| 8 |
+
import warnings
|
| 9 |
+
from typing import Optional, Union, IO
|
| 10 |
+
import requests
|
| 11 |
+
from urllib.parse import urlparse, unquote
|
| 12 |
+
from tqdm import tqdm
|
| 13 |
+
|
| 14 |
+
CHUNK_SIZE = 512 * 1024
|
| 15 |
+
HOME = os.path.expanduser("~")
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def indent(text: str, prefix: str):
|
| 19 |
+
"""Indent each non-empty line of text with the given prefix."""
|
| 20 |
+
return "".join(
|
| 21 |
+
(prefix + line if line.strip() else line) for line in text.splitlines(True)
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class FileURLRetrievalError(Exception):
|
| 26 |
+
"""Custom exception for issues retrieving file URLs."""
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def _extract_download_url_from_confirmation(contents: str, url_origin: str):
|
| 30 |
+
"""Extract the download URL from a Google Drive confirmation page."""
|
| 31 |
+
patterns = [
|
| 32 |
+
r'href="(\/uc\?export=download[^"]+)',
|
| 33 |
+
r'href="/open\?id=([^"]+)"',
|
| 34 |
+
r'"downloadUrl":"([^"]+)',
|
| 35 |
+
]
|
| 36 |
+
for pattern in patterns:
|
| 37 |
+
match = re.search(pattern, contents)
|
| 38 |
+
if match:
|
| 39 |
+
url = match.group(1)
|
| 40 |
+
if pattern == r'href="/open\?id=([^"]+)"':
|
| 41 |
+
uuid_match = re.search(
|
| 42 |
+
r'<input\s+type="hidden"\s+name="uuid"\s+value="([^"]+)"',
|
| 43 |
+
contents,
|
| 44 |
+
)
|
| 45 |
+
if uuid_match:
|
| 46 |
+
uuid = uuid_match.group(1)
|
| 47 |
+
return (
|
| 48 |
+
"https://drive.usercontent.google.com/download?id="
|
| 49 |
+
+ url
|
| 50 |
+
+ "&confirm=t&uuid="
|
| 51 |
+
+ uuid
|
| 52 |
+
)
|
| 53 |
+
raise FileURLRetrievalError(
|
| 54 |
+
f"Could not find UUID for download from {url_origin}"
|
| 55 |
+
)
|
| 56 |
+
elif pattern == r'"downloadUrl":"([^"]+)':
|
| 57 |
+
return url.replace("\\u003d", "=").replace("\\u0026", "&")
|
| 58 |
+
else:
|
| 59 |
+
return "https://docs.google.com" + url.replace("&", "&")
|
| 60 |
+
|
| 61 |
+
error_match = re.search(r'<p class="uc-error-subcaption">(.*)</p>', contents)
|
| 62 |
+
if error_match:
|
| 63 |
+
error = error_match.group(1)
|
| 64 |
+
raise FileURLRetrievalError(error)
|
| 65 |
+
|
| 66 |
+
raise FileURLRetrievalError(
|
| 67 |
+
"Cannot retrieve the public link of the file. "
|
| 68 |
+
"You may need to change the permission to "
|
| 69 |
+
"'Anyone with the link', or have had many accesses."
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _create_session(
|
| 74 |
+
proxy: Optional[str] = None,
|
| 75 |
+
use_cookies: bool = True,
|
| 76 |
+
return_cookies_file: bool = False,
|
| 77 |
+
):
|
| 78 |
+
"""Create a requests session with optional proxy and cookie handling."""
|
| 79 |
+
sess = requests.session()
|
| 80 |
+
sess.headers.update(
|
| 81 |
+
{"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6)"}
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
if proxy:
|
| 85 |
+
sess.proxies = {"http": proxy, "https": proxy}
|
| 86 |
+
|
| 87 |
+
cookies_file = os.path.join(HOME, ".cache/gdown/cookies.json")
|
| 88 |
+
if os.path.exists(cookies_file) and use_cookies:
|
| 89 |
+
try:
|
| 90 |
+
with open(cookies_file) as f:
|
| 91 |
+
cookies = json.load(f)
|
| 92 |
+
for k, v in cookies:
|
| 93 |
+
sess.cookies[k] = v
|
| 94 |
+
except json.JSONDecodeError:
|
| 95 |
+
warnings.warn("Corrupted Cookies file")
|
| 96 |
+
|
| 97 |
+
return (sess, cookies_file) if return_cookies_file else sess
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def download(
|
| 101 |
+
output: Optional[str] = None,
|
| 102 |
+
quiet: bool = False,
|
| 103 |
+
proxy: Optional[str] = None,
|
| 104 |
+
speed: Optional[float] = None,
|
| 105 |
+
use_cookies: bool = True,
|
| 106 |
+
verify: Union[bool, str] = True,
|
| 107 |
+
id: Optional[str] = None,
|
| 108 |
+
fuzzy: bool = True,
|
| 109 |
+
resume: bool = False,
|
| 110 |
+
format: Optional[str] = None,
|
| 111 |
+
url: Optional[str] = None,
|
| 112 |
+
):
|
| 113 |
+
"""Download a file from a URL, supporting Google Drive links.
|
| 114 |
+
|
| 115 |
+
Args:
|
| 116 |
+
output: Output filepath. Default is basename of URL.
|
| 117 |
+
quiet: Suppress terminal output.
|
| 118 |
+
proxy: HTTP/HTTPS proxy.
|
| 119 |
+
speed: Download speed limit (bytes per second).
|
| 120 |
+
use_cookies: Flag to use cookies.
|
| 121 |
+
verify: Verify TLS certificates.
|
| 122 |
+
id: Google Drive's file ID.
|
| 123 |
+
fuzzy: Fuzzy Google Drive ID extraction.
|
| 124 |
+
resume: Resume download from a tmp file.
|
| 125 |
+
format: Format for Google Docs/Sheets/Slides.
|
| 126 |
+
url: URL to download from.
|
| 127 |
+
|
| 128 |
+
Returns:
|
| 129 |
+
Output filename, or None on error.
|
| 130 |
+
"""
|
| 131 |
+
if not (id is None) ^ (url is None):
|
| 132 |
+
raise ValueError("Either url or id has to be specified")
|
| 133 |
+
|
| 134 |
+
if id is not None:
|
| 135 |
+
url = f"https://drive.google.com/uc?id={id}"
|
| 136 |
+
|
| 137 |
+
url_origin = url
|
| 138 |
+
sess, cookies_file = _create_session(
|
| 139 |
+
proxy=proxy, use_cookies=use_cookies, return_cookies_file=True
|
| 140 |
+
)
|
| 141 |
+
|
| 142 |
+
while True:
|
| 143 |
+
res = sess.get(url, stream=True, verify=verify)
|
| 144 |
+
res.raise_for_status()
|
| 145 |
+
|
| 146 |
+
if url == url_origin and res.status_code == 500:
|
| 147 |
+
url = f"https://drive.google.com/open?id={id}"
|
| 148 |
+
continue
|
| 149 |
+
|
| 150 |
+
if res.headers.get("Content-Type", "").startswith("text/html"):
|
| 151 |
+
title_match = re.search("<title>(.+)</title>", res.text)
|
| 152 |
+
if title_match:
|
| 153 |
+
title = title_match.group(1)
|
| 154 |
+
if title.endswith(" - Google Docs"):
|
| 155 |
+
url = f"https://docs.google.com/document/d/{id}/export?format={'docx' if format is None else format}"
|
| 156 |
+
continue
|
| 157 |
+
if title.endswith(" - Google Sheets"):
|
| 158 |
+
url = f"https://docs.google.com/spreadsheets/d/{id}/export?format={'xlsx' if format is None else format}"
|
| 159 |
+
continue
|
| 160 |
+
if title.endswith(" - Google Slides"):
|
| 161 |
+
url = f"https://docs.google.com/presentation/d/{id}/export?format={'pptx' if format is None else format}"
|
| 162 |
+
continue
|
| 163 |
+
if (
|
| 164 |
+
"Content-Disposition" in res.headers
|
| 165 |
+
and res.headers["Content-Disposition"].endswith("pptx")
|
| 166 |
+
and format not in (None, "pptx")
|
| 167 |
+
):
|
| 168 |
+
url = f"https://docs.google.com/presentation/d/{id}/export?format={'pptx' if format is None else format}"
|
| 169 |
+
continue
|
| 170 |
+
|
| 171 |
+
if use_cookies:
|
| 172 |
+
os.makedirs(os.path.dirname(cookies_file), exist_ok=True)
|
| 173 |
+
cookies = [
|
| 174 |
+
(k, v)
|
| 175 |
+
for k, v in sess.cookies.items()
|
| 176 |
+
if not k.startswith("download_warning_")
|
| 177 |
+
]
|
| 178 |
+
with open(cookies_file, "w") as f:
|
| 179 |
+
json.dump(cookies, f, indent=2)
|
| 180 |
+
|
| 181 |
+
if "Content-Disposition" in res.headers:
|
| 182 |
+
break
|
| 183 |
+
|
| 184 |
+
parsed_url = urlparse(url)
|
| 185 |
+
is_gdrive = parsed_url.hostname in ("drive.google.com", "docs.google.com")
|
| 186 |
+
is_download_link = parsed_url.path.endswith("/uc")
|
| 187 |
+
|
| 188 |
+
if not (is_gdrive and is_download_link and fuzzy):
|
| 189 |
+
break
|
| 190 |
+
|
| 191 |
+
try:
|
| 192 |
+
url = _extract_download_url_from_confirmation(res.text, url_origin)
|
| 193 |
+
except FileURLRetrievalError as e:
|
| 194 |
+
raise FileURLRetrievalError(e)
|
| 195 |
+
|
| 196 |
+
content_disposition = res.headers.get("Content-Disposition", "")
|
| 197 |
+
filename_match = re.search(
|
| 198 |
+
r"filename\*=UTF-8''(.*)", content_disposition
|
| 199 |
+
) or re.search(r'filename=["\']?(.*?)["\']?$', content_disposition)
|
| 200 |
+
filename_from_url = (
|
| 201 |
+
unquote(filename_match.group(1)) if filename_match else os.path.basename(url)
|
| 202 |
+
)
|
| 203 |
+
download_path = output or filename_from_url
|
| 204 |
+
|
| 205 |
+
if isinstance(download_path, str) and download_path.endswith(os.path.sep):
|
| 206 |
+
os.makedirs(download_path, exist_ok=True)
|
| 207 |
+
download_path = os.path.join(download_path, filename_from_url)
|
| 208 |
+
|
| 209 |
+
temp_dir = os.path.dirname(download_path) or "."
|
| 210 |
+
prefix = os.path.basename(download_path)
|
| 211 |
+
|
| 212 |
+
if isinstance(download_path, str):
|
| 213 |
+
existing_tmp_files = [
|
| 214 |
+
os.path.join(temp_dir, file)
|
| 215 |
+
for file in os.listdir(temp_dir)
|
| 216 |
+
if file.startswith(prefix)
|
| 217 |
+
]
|
| 218 |
+
if resume and existing_tmp_files:
|
| 219 |
+
if len(existing_tmp_files) > 1:
|
| 220 |
+
print(
|
| 221 |
+
"There are multiple temporary files to resume:",
|
| 222 |
+
file=sys.stderr,
|
| 223 |
+
)
|
| 224 |
+
for file in existing_tmp_files:
|
| 225 |
+
print(f"\t{file}", file=sys.stderr)
|
| 226 |
+
print(
|
| 227 |
+
"Please remove them except one to resume downloading.",
|
| 228 |
+
file=sys.stderr,
|
| 229 |
+
)
|
| 230 |
+
return None
|
| 231 |
+
temp_file_path = existing_tmp_files[0]
|
| 232 |
+
else:
|
| 233 |
+
resume = False
|
| 234 |
+
temp_file_path = tempfile.mktemp(
|
| 235 |
+
suffix=tempfile.template, prefix=prefix, dir=temp_dir
|
| 236 |
+
)
|
| 237 |
+
|
| 238 |
+
try:
|
| 239 |
+
file_obj: IO = open(temp_file_path, "ab")
|
| 240 |
+
except Exception as e:
|
| 241 |
+
print(
|
| 242 |
+
f"Could not open the temporary file {temp_file_path}: {e}",
|
| 243 |
+
file=sys.stderr,
|
| 244 |
+
)
|
| 245 |
+
return None
|
| 246 |
+
else:
|
| 247 |
+
temp_file_path = None
|
| 248 |
+
file_obj = download_path
|
| 249 |
+
|
| 250 |
+
if temp_file_path is not None and file_obj.tell() != 0:
|
| 251 |
+
headers = {"Range": f"bytes={file_obj.tell()}-"}
|
| 252 |
+
res = sess.get(url, headers=headers, stream=True, verify=verify)
|
| 253 |
+
res.raise_for_status()
|
| 254 |
+
|
| 255 |
+
try:
|
| 256 |
+
total = int(res.headers.get("Content-Length", 0))
|
| 257 |
+
if total > 0:
|
| 258 |
+
if not quiet:
|
| 259 |
+
pbar = tqdm(
|
| 260 |
+
total=total, unit="B", unit_scale=True, desc=filename_from_url
|
| 261 |
+
)
|
| 262 |
+
else:
|
| 263 |
+
if not quiet:
|
| 264 |
+
pbar = tqdm(unit="B", unit_scale=True, desc=filename_from_url)
|
| 265 |
+
|
| 266 |
+
t_start = time.time()
|
| 267 |
+
for chunk in res.iter_content(chunk_size=CHUNK_SIZE):
|
| 268 |
+
file_obj.write(chunk)
|
| 269 |
+
if not quiet:
|
| 270 |
+
pbar.update(len(chunk))
|
| 271 |
+
if speed is not None:
|
| 272 |
+
elapsed_time_expected = 1.0 * pbar.n / speed
|
| 273 |
+
elapsed_time = time.time() - t_start
|
| 274 |
+
if elapsed_time < elapsed_time_expected:
|
| 275 |
+
time.sleep(elapsed_time_expected - elapsed_time)
|
| 276 |
+
if not quiet:
|
| 277 |
+
pbar.close()
|
| 278 |
+
|
| 279 |
+
if temp_file_path:
|
| 280 |
+
file_obj.close()
|
| 281 |
+
shutil.move(temp_file_path, download_path)
|
| 282 |
+
finally:
|
| 283 |
+
sess.close()
|
| 284 |
+
|
| 285 |
+
return download_path
|
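
Editor's note: a hedged usage sketch for the bundled gdown helper; the file id and output folder are placeholders. When output ends with the path separator, the helper keeps the filename reported by Google Drive and writes the file inside that folder.

import os
from rvc.lib.tools.gdown import download

path = download(
    id="1a2b3c4d5e",                    # hypothetical Google Drive file id
    output="logs/zips" + os.path.sep,   # trailing separator: keep the server-side filename
    quiet=False,
    fuzzy=True,
)
print(path)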
rvc/lib/tools/launch_tensorboard.py
ADDED
@@ -0,0 +1,21 @@
import time
import logging
from tensorboard import program

log_path = "logs"


def launch_tensorboard_pipeline():
    logging.getLogger("root").setLevel(logging.WARNING)
    logging.getLogger("tensorboard").setLevel(logging.WARNING)

    tb = program.TensorBoard()
    tb.configure(argv=[None, "--logdir", log_path])
    url = tb.launch()

    print(
        f"Access the tensorboard using the following link:\n{url}?pinnedCards=%5B%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Ftotal%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fd%2Ftotal%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Fkl%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Fmel%22%7D%5D"
    )

    while True:
        time.sleep(600)
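
Editor's note: launch_tensorboard_pipeline never returns; after starting the server it sleeps in a loop to keep TensorBoard alive. A caller that still has other work to do can run it on a daemon thread, as in this illustrative sketch (not part of the uploaded file).

import threading
from rvc.lib.tools.launch_tensorboard import launch_tensorboard_pipeline

threading.Thread(target=launch_tensorboard_pipeline, daemon=True).start()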
rvc/lib/tools/model_download.py
ADDED
@@ -0,0 +1,226 @@
| 1 |
+
import os
|
| 2 |
+
import re
|
| 3 |
+
import sys
|
| 4 |
+
import shutil
|
| 5 |
+
import zipfile
|
| 6 |
+
import requests
|
| 7 |
+
from bs4 import BeautifulSoup
|
| 8 |
+
from urllib.parse import unquote
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
|
| 11 |
+
now_dir = os.getcwd()
|
| 12 |
+
sys.path.append(now_dir)
|
| 13 |
+
|
| 14 |
+
from rvc.lib.utils import format_title
|
| 15 |
+
from rvc.lib.tools import gdown
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
file_path = os.path.join(now_dir, "logs")
|
| 19 |
+
zips_path = os.path.join(file_path, "zips")
|
| 20 |
+
os.makedirs(zips_path, exist_ok=True)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def search_pth_index(folder):
|
| 24 |
+
pth_paths = [
|
| 25 |
+
os.path.join(folder, file)
|
| 26 |
+
for file in os.listdir(folder)
|
| 27 |
+
if os.path.isfile(os.path.join(folder, file)) and file.endswith(".pth")
|
| 28 |
+
]
|
| 29 |
+
index_paths = [
|
| 30 |
+
os.path.join(folder, file)
|
| 31 |
+
for file in os.listdir(folder)
|
| 32 |
+
if os.path.isfile(os.path.join(folder, file)) and file.endswith(".index")
|
| 33 |
+
]
|
| 34 |
+
return pth_paths, index_paths
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def download_from_url(url):
|
| 38 |
+
os.chdir(zips_path)
|
| 39 |
+
|
| 40 |
+
try:
|
| 41 |
+
if "drive.google.com" in url:
|
| 42 |
+
file_id = extract_google_drive_id(url)
|
| 43 |
+
if file_id:
|
| 44 |
+
gdown.download(
|
| 45 |
+
url=f"https://drive.google.com/uc?id={file_id}",
|
| 46 |
+
quiet=False,
|
| 47 |
+
fuzzy=True,
|
| 48 |
+
)
|
| 49 |
+
elif "/blob/" in url or "/resolve/" in url:
|
| 50 |
+
download_blob_or_resolve(url)
|
| 51 |
+
elif "/tree/main" in url:
|
| 52 |
+
download_from_huggingface(url)
|
| 53 |
+
else:
|
| 54 |
+
download_file(url)
|
| 55 |
+
|
| 56 |
+
rename_downloaded_files()
|
| 57 |
+
return "downloaded"
|
| 58 |
+
except Exception as error:
|
| 59 |
+
print(f"An error occurred downloading the file: {error}")
|
| 60 |
+
return None
|
| 61 |
+
finally:
|
| 62 |
+
os.chdir(now_dir)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def extract_google_drive_id(url):
|
| 66 |
+
if "file/d/" in url:
|
| 67 |
+
return url.split("file/d/")[1].split("/")[0]
|
| 68 |
+
if "id=" in url:
|
| 69 |
+
return url.split("id=")[1].split("&")[0]
|
| 70 |
+
return None
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def download_blob_or_resolve(url):
|
| 74 |
+
if "/blob/" in url:
|
| 75 |
+
url = url.replace("/blob/", "/resolve/")
|
| 76 |
+
response = requests.get(url, stream=True)
|
| 77 |
+
if response.status_code == 200:
|
| 78 |
+
save_response_content(response)
|
| 79 |
+
else:
|
| 80 |
+
raise ValueError(
|
| 81 |
+
"Download failed with status code: " + str(response.status_code)
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def save_response_content(response):
|
| 86 |
+
content_disposition = unquote(response.headers.get("Content-Disposition", ""))
|
| 87 |
+
file_name = (
|
| 88 |
+
re.search(r'filename="([^"]+)"', content_disposition)
|
| 89 |
+
.groups()[0]
|
| 90 |
+
.replace(os.path.sep, "_")
|
| 91 |
+
if content_disposition
|
| 92 |
+
else "downloaded_file"
|
| 93 |
+
)
|
| 94 |
+
|
| 95 |
+
total_size = int(response.headers.get("Content-Length", 0))
|
| 96 |
+
chunk_size = 1024
|
| 97 |
+
|
| 98 |
+
with open(os.path.join(zips_path, file_name), "wb") as file, tqdm(
|
| 99 |
+
total=total_size, unit="B", unit_scale=True, desc=file_name
|
| 100 |
+
) as progress_bar:
|
| 101 |
+
for data in response.iter_content(chunk_size):
|
| 102 |
+
file.write(data)
|
| 103 |
+
progress_bar.update(len(data))
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def download_from_huggingface(url):
|
| 107 |
+
response = requests.get(url)
|
| 108 |
+
soup = BeautifulSoup(response.content, "html.parser")
|
| 109 |
+
temp_url = next(
|
| 110 |
+
(
|
| 111 |
+
link["href"]
|
| 112 |
+
for link in soup.find_all("a", href=True)
|
| 113 |
+
if link["href"].endswith(".zip")
|
| 114 |
+
),
|
| 115 |
+
None,
|
| 116 |
+
)
|
| 117 |
+
if temp_url:
|
| 118 |
+
url = temp_url.replace("blob", "resolve")
|
| 119 |
+
if "huggingface.co" not in url:
|
| 120 |
+
url = "https://huggingface.co" + url
|
| 121 |
+
download_file(url)
|
| 122 |
+
else:
|
| 123 |
+
raise ValueError("No zip file found in Huggingface URL")
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def download_file(url):
|
| 127 |
+
response = requests.get(url, stream=True)
|
| 128 |
+
if response.status_code == 200:
|
| 129 |
+
save_response_content(response)
|
| 130 |
+
else:
|
| 131 |
+
raise ValueError(
|
| 132 |
+
"Download failed with status code: " + str(response.status_code)
|
| 133 |
+
)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def rename_downloaded_files():
|
| 137 |
+
for currentPath, _, zipFiles in os.walk(zips_path):
|
| 138 |
+
for file in zipFiles:
|
| 139 |
+
file_name, extension = os.path.splitext(file)
|
| 140 |
+
real_path = os.path.join(currentPath, file)
|
| 141 |
+
os.rename(real_path, file_name.replace(os.path.sep, "_") + extension)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def extract(zipfile_path, unzips_path):
|
| 145 |
+
try:
|
| 146 |
+
with zipfile.ZipFile(zipfile_path, "r") as zip_ref:
|
| 147 |
+
zip_ref.extractall(unzips_path)
|
| 148 |
+
os.remove(zipfile_path)
|
| 149 |
+
return True
|
| 150 |
+
except Exception as error:
|
| 151 |
+
print(f"An error occurred extracting the zip file: {error}")
|
| 152 |
+
return False
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def unzip_file(zip_path, zip_file_name):
|
| 156 |
+
zip_file_path = os.path.join(zip_path, zip_file_name + ".zip")
|
| 157 |
+
extract_path = os.path.join(file_path, zip_file_name)
|
| 158 |
+
with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
|
| 159 |
+
zip_ref.extractall(extract_path)
|
| 160 |
+
os.remove(zip_file_path)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def model_download_pipeline(url: str):
|
| 164 |
+
try:
|
| 165 |
+
result = download_from_url(url)
|
| 166 |
+
if result == "downloaded":
|
| 167 |
+
return handle_extraction_process()
|
| 168 |
+
else:
|
| 169 |
+
return "Error"
|
| 170 |
+
except Exception as error:
|
| 171 |
+
print(f"An unexpected error occurred: {error}")
|
| 172 |
+
return "Error"
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def handle_extraction_process():
|
| 176 |
+
extract_folder_path = ""
|
| 177 |
+
for filename in os.listdir(zips_path):
|
| 178 |
+
if filename.endswith(".zip"):
|
| 179 |
+
zipfile_path = os.path.join(zips_path, filename)
|
| 180 |
+
model_name = format_title(os.path.basename(zipfile_path).split(".zip")[0])
|
| 181 |
+
extract_folder_path = os.path.join("logs", os.path.normpath(model_name))
|
| 182 |
+
success = extract(zipfile_path, extract_folder_path)
|
| 183 |
+
clean_extracted_files(extract_folder_path, model_name)
|
| 184 |
+
|
| 185 |
+
if success:
|
| 186 |
+
print(f"Model {model_name} downloaded!")
|
| 187 |
+
else:
|
| 188 |
+
print(f"Error downloading {model_name}")
|
| 189 |
+
return "Error"
|
| 190 |
+
if not extract_folder_path:
|
| 191 |
+
print("Zip file was not found.")
|
| 192 |
+
return "Error"
|
| 193 |
+
return search_pth_index(extract_folder_path)
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def clean_extracted_files(extract_folder_path, model_name):
|
| 197 |
+
macosx_path = os.path.join(extract_folder_path, "__MACOSX")
|
| 198 |
+
if os.path.exists(macosx_path):
|
| 199 |
+
shutil.rmtree(macosx_path)
|
| 200 |
+
|
| 201 |
+
subfolders = [
|
| 202 |
+
f
|
| 203 |
+
for f in os.listdir(extract_folder_path)
|
| 204 |
+
if os.path.isdir(os.path.join(extract_folder_path, f))
|
| 205 |
+
]
|
| 206 |
+
if len(subfolders) == 1:
|
| 207 |
+
subfolder_path = os.path.join(extract_folder_path, subfolders[0])
|
| 208 |
+
for item in os.listdir(subfolder_path):
|
| 209 |
+
shutil.move(
|
| 210 |
+
os.path.join(subfolder_path, item),
|
| 211 |
+
os.path.join(extract_folder_path, item),
|
| 212 |
+
)
|
| 213 |
+
os.rmdir(subfolder_path)
|
| 214 |
+
|
| 215 |
+
for item in os.listdir(extract_folder_path):
|
| 216 |
+
source_path = os.path.join(extract_folder_path, item)
|
| 217 |
+
if ".pth" in item:
|
| 218 |
+
new_file_name = model_name + ".pth"
|
| 219 |
+
elif ".index" in item:
|
| 220 |
+
new_file_name = model_name + ".index"
|
| 221 |
+
else:
|
| 222 |
+
continue
|
| 223 |
+
|
| 224 |
+
destination_path = os.path.join(extract_folder_path, new_file_name)
|
| 225 |
+
if not os.path.exists(destination_path):
|
| 226 |
+
os.rename(source_path, destination_path)
|
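
Editor's note: a usage sketch for the download pipeline above; the URL is a placeholder. On success the zip is extracted under logs/<model_name>/ and the function returns the discovered .pth and .index paths; on failure it returns the string "Error".

from rvc.lib.tools.model_download import model_download_pipeline

result = model_download_pipeline("https://huggingface.co/user/model/resolve/main/Model.zip")
if result != "Error":
    pth_paths, index_paths = result
    print(pth_paths, index_paths)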
rvc/lib/tools/prerequisites_download.py
ADDED
@@ -0,0 +1,153 @@
| 1 |
+
import os
|
| 2 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
import requests
|
| 5 |
+
|
| 6 |
+
url_base = "https://huggingface.co/IAHispano/Applio/resolve/main/Resources"
|
| 7 |
+
|
| 8 |
+
pretraineds_hifigan_list = [
|
| 9 |
+
(
|
| 10 |
+
"pretrained_v2/",
|
| 11 |
+
[
|
| 12 |
+
"f0D32k.pth",
|
| 13 |
+
"f0D40k.pth",
|
| 14 |
+
"f0D48k.pth",
|
| 15 |
+
"f0G32k.pth",
|
| 16 |
+
"f0G40k.pth",
|
| 17 |
+
"f0G48k.pth",
|
| 18 |
+
],
|
| 19 |
+
)
|
| 20 |
+
]
|
| 21 |
+
models_list = [("predictors/", ["rmvpe.pt", "fcpe.pt"])]
|
| 22 |
+
embedders_list = [("embedders/contentvec/", ["pytorch_model.bin", "config.json"])]
|
| 23 |
+
executables_list = [
|
| 24 |
+
("", ["ffmpeg.exe", "ffprobe.exe"]),
|
| 25 |
+
]
|
| 26 |
+
|
| 27 |
+
folder_mapping_list = {
|
| 28 |
+
"pretrained_v2/": "rvc/models/pretraineds/hifi-gan/",
|
| 29 |
+
"embedders/contentvec/": "rvc/models/embedders/contentvec/",
|
| 30 |
+
"predictors/": "rvc/models/predictors/",
|
| 31 |
+
"formant/": "rvc/models/formant/",
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def get_file_size_if_missing(file_list):
|
| 36 |
+
"""
|
| 37 |
+
Calculate the total size of files to be downloaded only if they do not exist locally.
|
| 38 |
+
"""
|
| 39 |
+
total_size = 0
|
| 40 |
+
for remote_folder, files in file_list:
|
| 41 |
+
local_folder = folder_mapping_list.get(remote_folder, "")
|
| 42 |
+
for file in files:
|
| 43 |
+
destination_path = os.path.join(local_folder, file)
|
| 44 |
+
if not os.path.exists(destination_path):
|
| 45 |
+
url = f"{url_base}/{remote_folder}{file}"
|
| 46 |
+
response = requests.head(url)
|
| 47 |
+
total_size += int(response.headers.get("content-length", 0))
|
| 48 |
+
return total_size
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def download_file(url, destination_path, global_bar):
|
| 52 |
+
"""
|
| 53 |
+
Download a file from the given URL to the specified destination path,
|
| 54 |
+
updating the global progress bar as data is downloaded.
|
| 55 |
+
"""
|
| 56 |
+
|
| 57 |
+
dir_name = os.path.dirname(destination_path)
|
| 58 |
+
if dir_name:
|
| 59 |
+
os.makedirs(dir_name, exist_ok=True)
|
| 60 |
+
response = requests.get(url, stream=True)
|
| 61 |
+
block_size = 1024
|
| 62 |
+
with open(destination_path, "wb") as file:
|
| 63 |
+
for data in response.iter_content(block_size):
|
| 64 |
+
file.write(data)
|
| 65 |
+
global_bar.update(len(data))
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def download_mapping_files(file_mapping_list, global_bar):
|
| 69 |
+
"""
|
| 70 |
+
Download all files in the provided file mapping list using a thread pool executor,
|
| 71 |
+
and update the global progress bar as downloads progress.
|
| 72 |
+
"""
|
| 73 |
+
with ThreadPoolExecutor() as executor:
|
| 74 |
+
futures = []
|
| 75 |
+
for remote_folder, file_list in file_mapping_list:
|
| 76 |
+
local_folder = folder_mapping_list.get(remote_folder, "")
|
| 77 |
+
for file in file_list:
|
| 78 |
+
destination_path = os.path.join(local_folder, file)
|
| 79 |
+
if not os.path.exists(destination_path):
|
| 80 |
+
url = f"{url_base}/{remote_folder}{file}"
|
| 81 |
+
futures.append(
|
| 82 |
+
executor.submit(
|
| 83 |
+
download_file, url, destination_path, global_bar
|
| 84 |
+
)
|
| 85 |
+
)
|
| 86 |
+
for future in futures:
|
| 87 |
+
future.result()
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def split_pretraineds(pretrained_list):
|
| 91 |
+
f0_list = []
|
| 92 |
+
non_f0_list = []
|
| 93 |
+
for folder, files in pretrained_list:
|
| 94 |
+
f0_files = [f for f in files if f.startswith("f0")]
|
| 95 |
+
non_f0_files = [f for f in files if not f.startswith("f0")]
|
| 96 |
+
if f0_files:
|
| 97 |
+
f0_list.append((folder, f0_files))
|
| 98 |
+
if non_f0_files:
|
| 99 |
+
non_f0_list.append((folder, non_f0_files))
|
| 100 |
+
return f0_list, non_f0_list
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
pretraineds_hifigan_list, _ = split_pretraineds(pretraineds_hifigan_list)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def calculate_total_size(
|
| 107 |
+
pretraineds_hifigan,
|
| 108 |
+
models,
|
| 109 |
+
exe,
|
| 110 |
+
):
|
| 111 |
+
"""
|
| 112 |
+
Calculate the total size of all files to be downloaded based on selected categories.
|
| 113 |
+
"""
|
| 114 |
+
total_size = 0
|
| 115 |
+
if models:
|
| 116 |
+
total_size += get_file_size_if_missing(models_list)
|
| 117 |
+
total_size += get_file_size_if_missing(embedders_list)
|
| 118 |
+
if exe and os.name == "nt":
|
| 119 |
+
total_size += get_file_size_if_missing(executables_list)
|
| 120 |
+
total_size += get_file_size_if_missing(pretraineds_hifigan)
|
| 121 |
+
return total_size
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def prequisites_download_pipeline(
|
| 125 |
+
pretraineds_hifigan,
|
| 126 |
+
models,
|
| 127 |
+
exe,
|
| 128 |
+
):
|
| 129 |
+
"""
|
| 130 |
+
Manage the download pipeline for different categories of files.
|
| 131 |
+
"""
|
| 132 |
+
total_size = calculate_total_size(
|
| 133 |
+
pretraineds_hifigan_list if pretraineds_hifigan else [],
|
| 134 |
+
models,
|
| 135 |
+
exe,
|
| 136 |
+
)
|
| 137 |
+
|
| 138 |
+
if total_size > 0:
|
| 139 |
+
with tqdm(
|
| 140 |
+
total=total_size, unit="iB", unit_scale=True, desc="Downloading all files"
|
| 141 |
+
) as global_bar:
|
| 142 |
+
if models:
|
| 143 |
+
download_mapping_files(models_list, global_bar)
|
| 144 |
+
download_mapping_files(embedders_list, global_bar)
|
| 145 |
+
if exe:
|
| 146 |
+
if os.name == "nt":
|
| 147 |
+
download_mapping_files(executables_list, global_bar)
|
| 148 |
+
else:
|
| 149 |
+
print("No executables needed")
|
| 150 |
+
if pretraineds_hifigan:
|
| 151 |
+
download_mapping_files(pretraineds_hifigan_list, global_bar)
|
| 152 |
+
else:
|
| 153 |
+
pass
|
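A minimal sketch of driving the pipeline above (the flags simply mirror the function's parameters; assumes the repository root is on sys.path):

    from rvc.lib.tools.prerequisites_download import prequisites_download_pipeline

    # Fetch predictors + embedders and the HiFi-GAN pretrained weights, but skip the
    # Windows-only ffmpeg/ffprobe executables.
    prequisites_download_pipeline(pretraineds_hifigan=True, models=True, exe=False)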
rvc/lib/tools/pretrained_selector.py
ADDED
|
@@ -0,0 +1,13 @@
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def pretrained_selector(vocoder, sample_rate):
|
| 5 |
+
base_path = os.path.join("rvc", "models", "pretraineds", f"{vocoder.lower()}")
|
| 6 |
+
|
| 7 |
+
path_g = os.path.join(base_path, f"f0G{str(sample_rate)[:2]}k.pth")
|
| 8 |
+
path_d = os.path.join(base_path, f"f0D{str(sample_rate)[:2]}k.pth")
|
| 9 |
+
|
| 10 |
+
if os.path.exists(path_g) and os.path.exists(path_d):
|
| 11 |
+
return path_g, path_d
|
| 12 |
+
else:
|
| 13 |
+
return "", ""
|
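Usage sketch for the selector above (assumes the f0G*/f0D* weights were already placed in rvc/models/pretraineds/hifi-gan/ by the prerequisites pipeline):

    from rvc.lib.tools.pretrained_selector import pretrained_selector

    path_g, path_d = pretrained_selector("HiFi-GAN", 48000)
    if not path_g:
        print("No pretrained generator/discriminator found; training starts from scratch.")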
rvc/lib/tools/split_audio.py
ADDED
|
@@ -0,0 +1,79 @@
| 1 |
+
import numpy as np
|
| 2 |
+
import librosa
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def process_audio(audio, sr=16000, silence_thresh=-60, min_silence_len=250):
|
| 6 |
+
"""
|
| 7 |
+
Splits an audio signal into non-silent segments, detecting silences with a fixed frame size and hop size.
|
| 8 |
+
|
| 9 |
+
Parameters:
|
| 10 |
+
- audio (np.ndarray): The audio signal to split.
|
| 11 |
+
- sr (int): The sample rate of the input audio (default is 16000).
|
| 12 |
+
- silence_thresh (int): Silence threshold in dB (default -60).
|
| 13 |
+
- min_silence_len (int): Minimum silence duration (default 250ms).
|
| 14 |
+
|
| 15 |
+
Returns:
|
| 16 |
+
- list of np.ndarray: A list of audio segments.
|
| 17 |
+
- np.ndarray: The intervals where the audio was split.
|
| 18 |
+
"""
|
| 19 |
+
frame_length = int(min_silence_len / 1000 * sr)
|
| 20 |
+
hop_length = frame_length // 2
|
| 21 |
+
intervals = librosa.effects.split(
|
| 22 |
+
audio, top_db=-silence_thresh, frame_length=frame_length, hop_length=hop_length
|
| 23 |
+
)
|
| 24 |
+
audio_segments = [audio[start:end] for start, end in intervals]
|
| 25 |
+
|
| 26 |
+
return audio_segments, intervals
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def merge_audio(audio_segments_org, audio_segments_new, intervals, sr_orig, sr_new):
|
| 30 |
+
"""
|
| 31 |
+
Merges audio segments back into a single audio signal, filling gaps with silence.
|
| 32 |
+
Assumes audio segments are already at sr_new.
|
| 33 |
+
|
| 34 |
+
Parameters:
|
| 35 |
+
- audio_segments_org (list of np.ndarray): The non-silent audio segments (at sr_orig).
|
| 36 |
+
- audio_segments_new (list of np.ndarray): The non-silent audio segments (at sr_new).
|
| 37 |
+
- intervals (np.ndarray): The intervals used for splitting the original audio.
|
| 38 |
+
- sr_orig (int): The sample rate of the original audio
|
| 39 |
+
- sr_new (int): The sample rate of the model
|
| 40 |
+
Returns:
|
| 41 |
+
- np.ndarray: The merged audio signal with silent gaps restored.
|
| 42 |
+
"""
|
| 43 |
+
merged_audio = np.array([], dtype=audio_segments_new[0].dtype)
|
| 44 |
+
sr_ratio = sr_new / sr_orig
|
| 45 |
+
|
| 46 |
+
for i, (start, end) in enumerate(intervals):
|
| 47 |
+
|
| 48 |
+
start_new = int(start * sr_ratio)
|
| 49 |
+
end_new = int(end * sr_ratio)
|
| 50 |
+
|
| 51 |
+
original_duration = len(audio_segments_org[i]) / sr_orig
|
| 52 |
+
new_duration = len(audio_segments_new[i]) / sr_new
|
| 53 |
+
duration_diff = new_duration - original_duration
|
| 54 |
+
|
| 55 |
+
silence_samples = int(abs(duration_diff) * sr_new)
|
| 56 |
+
silence_compensation = np.zeros(
|
| 57 |
+
silence_samples, dtype=audio_segments_new[0].dtype
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
if i == 0 and start_new > 0:
|
| 61 |
+
initial_silence = np.zeros(start_new, dtype=audio_segments_new[0].dtype)
|
| 62 |
+
merged_audio = np.concatenate((merged_audio, initial_silence))
|
| 63 |
+
|
| 64 |
+
if duration_diff > 0:
|
| 65 |
+
merged_audio = np.concatenate((merged_audio, silence_compensation))
|
| 66 |
+
|
| 67 |
+
merged_audio = np.concatenate((merged_audio, audio_segments_new[i]))
|
| 68 |
+
|
| 69 |
+
if duration_diff < 0:
|
| 70 |
+
merged_audio = np.concatenate((merged_audio, silence_compensation))
|
| 71 |
+
|
| 72 |
+
if i < len(intervals) - 1:
|
| 73 |
+
next_start_new = int(intervals[i + 1][0] * sr_ratio)
|
| 74 |
+
silence_duration = next_start_new - end_new
|
| 75 |
+
if silence_duration > 0:
|
| 76 |
+
silence = np.zeros(silence_duration, dtype=audio_segments_new[0].dtype)
|
| 77 |
+
merged_audio = np.concatenate((merged_audio, silence))
|
| 78 |
+
|
| 79 |
+
return merged_audio
|
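A small round-trip sketch for the two helpers above: split a 16 kHz signal on silence, resample the non-silent segments to the model rate as a stand-in for inference, then merge them back with the silent gaps restored (signal and rates are illustrative only):

    import numpy as np
    import librosa
    from rvc.lib.tools.split_audio import process_audio, merge_audio

    sr_orig, sr_new = 16000, 48000
    t = np.linspace(0, 1, sr_orig, endpoint=False)
    tone = 0.5 * np.sin(2 * np.pi * 220 * t).astype(np.float32)
    silence = np.zeros(sr_orig // 2, dtype=np.float32)
    audio = np.concatenate([silence, tone, silence])

    segments, intervals = process_audio(audio, sr=sr_orig)
    # Stand-in for the voice-conversion step: upsample each segment to the model rate.
    converted = [librosa.resample(seg, orig_sr=sr_orig, target_sr=sr_new) for seg in segments]
    merged = merge_audio(segments, converted, intervals, sr_orig, sr_new)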
rvc/lib/tools/tts.py
ADDED
|
@@ -0,0 +1,29 @@
| 1 |
+
import sys
|
| 2 |
+
import asyncio
|
| 3 |
+
import edge_tts
|
| 4 |
+
import os
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
async def main():
|
| 8 |
+
# Parse command line arguments
|
| 9 |
+
tts_file = str(sys.argv[1])
|
| 10 |
+
text = str(sys.argv[2])
|
| 11 |
+
voice = str(sys.argv[3])
|
| 12 |
+
rate = int(sys.argv[4])
|
| 13 |
+
output_file = str(sys.argv[5])
|
| 14 |
+
|
| 15 |
+
rates = f"+{rate}%" if rate >= 0 else f"{rate}%"
|
| 16 |
+
if tts_file and os.path.exists(tts_file):
|
| 17 |
+
text = ""
|
| 18 |
+
try:
|
| 19 |
+
with open(tts_file, "r", encoding="utf-8") as file:
|
| 20 |
+
text = file.read()
|
| 21 |
+
except UnicodeDecodeError:
|
| 22 |
+
with open(tts_file, "r") as file:
|
| 23 |
+
text = file.read()
|
| 24 |
+
await edge_tts.Communicate(text, voice, rate=rates).save(output_file)
|
| 25 |
+
# print(f"TTS with {voice} completed. Output TTS file: '{output_file}'")
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
if __name__ == "__main__":
|
| 29 |
+
asyncio.run(main())
|
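Since the script above is driven purely by positional sys.argv values, a caller shells out to it roughly like this (voice name and output path are illustrative):

    import subprocess, sys

    subprocess.run([
        sys.executable, "rvc/lib/tools/tts.py",
        "",                     # tts_file: empty, so the text argument is used instead
        "Hello from Applio.",   # text
        "en-US-AriaNeural",     # edge-tts voice
        "0",                    # rate in percent
        "tts_output.mp3",       # output file
    ], check=True)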
rvc/lib/tools/tts_voices.json
ADDED
|
The diff for this file is too large to render.
|
rvc/lib/utils.py
ADDED
|
@@ -0,0 +1,142 @@
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import soxr
|
| 4 |
+
import librosa
|
| 5 |
+
import soundfile as sf
|
| 6 |
+
import numpy as np
|
| 7 |
+
import re
|
| 8 |
+
import unicodedata
|
| 9 |
+
import wget
|
| 10 |
+
from torch import nn
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
from transformers import HubertModel
|
| 14 |
+
import warnings
|
| 15 |
+
|
| 16 |
+
# Remove this to see warnings about transformers models
|
| 17 |
+
warnings.filterwarnings("ignore")
|
| 18 |
+
|
| 19 |
+
logging.getLogger("fairseq").setLevel(logging.ERROR)
|
| 20 |
+
logging.getLogger("faiss.loader").setLevel(logging.ERROR)
|
| 21 |
+
logging.getLogger("transformers").setLevel(logging.ERROR)
|
| 22 |
+
logging.getLogger("torch").setLevel(logging.ERROR)
|
| 23 |
+
|
| 24 |
+
now_dir = os.getcwd()
|
| 25 |
+
sys.path.append(now_dir)
|
| 26 |
+
|
| 27 |
+
base_path = os.path.join(now_dir, "rvc", "models", "formant", "stftpitchshift")
|
| 28 |
+
stft = base_path + ".exe" if sys.platform == "win32" else base_path
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class HubertModelWithFinalProj(HubertModel):
|
| 32 |
+
def __init__(self, config):
|
| 33 |
+
super().__init__(config)
|
| 34 |
+
self.final_proj = nn.Linear(config.hidden_size, config.classifier_proj_size)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def load_audio(file, sample_rate):
|
| 38 |
+
try:
|
| 39 |
+
file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
|
| 40 |
+
audio, sr = sf.read(file)
|
| 41 |
+
if len(audio.shape) > 1:
|
| 42 |
+
audio = librosa.to_mono(audio.T)
|
| 43 |
+
if sr != sample_rate:
|
| 44 |
+
audio = librosa.resample(
|
| 45 |
+
audio, orig_sr=sr, target_sr=sample_rate, res_type="soxr_vhq"
|
| 46 |
+
)
|
| 47 |
+
except Exception as error:
|
| 48 |
+
raise RuntimeError(f"An error occurred loading the audio: {error}")
|
| 49 |
+
|
| 50 |
+
return audio.flatten()
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def load_audio_infer(
|
| 54 |
+
file,
|
| 55 |
+
sample_rate,
|
| 56 |
+
**kwargs,
|
| 57 |
+
):
|
| 58 |
+
formant_shifting = kwargs.get("formant_shifting", False)
|
| 59 |
+
try:
|
| 60 |
+
file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
|
| 61 |
+
if not os.path.isfile(file):
|
| 62 |
+
raise FileNotFoundError(f"File not found: {file}")
|
| 63 |
+
audio, sr = sf.read(file)
|
| 64 |
+
if len(audio.shape) > 1:
|
| 65 |
+
audio = librosa.to_mono(audio.T)
|
| 66 |
+
if sr != sample_rate:
|
| 67 |
+
audio = librosa.resample(
|
| 68 |
+
audio, orig_sr=sr, target_sr=sample_rate, res_type="soxr_vhq"
|
| 69 |
+
)
|
| 70 |
+
if formant_shifting:
|
| 71 |
+
formant_qfrency = kwargs.get("formant_qfrency", 0.8)
|
| 72 |
+
formant_timbre = kwargs.get("formant_timbre", 0.8)
|
| 73 |
+
|
| 74 |
+
from stftpitchshift import StftPitchShift
|
| 75 |
+
|
| 76 |
+
pitchshifter = StftPitchShift(1024, 32, sample_rate)
|
| 77 |
+
audio = pitchshifter.shiftpitch(
|
| 78 |
+
audio,
|
| 79 |
+
factors=1,
|
| 80 |
+
quefrency=formant_qfrency * 1e-3,
|
| 81 |
+
distortion=formant_timbre,
|
| 82 |
+
)
|
| 83 |
+
except Exception as error:
|
| 84 |
+
raise RuntimeError(f"An error occurred loading the audio: {error}")
|
| 85 |
+
return np.array(audio).flatten()
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def format_title(title):
|
| 89 |
+
formatted_title = (
|
| 90 |
+
unicodedata.normalize("NFKD", title).encode("ascii", "ignore").decode("utf-8")
|
| 91 |
+
)
|
| 92 |
+
formatted_title = re.sub(r"[\u2500-\u257F]+", "", formatted_title)
|
| 93 |
+
formatted_title = re.sub(r"[^\w\s.-]", "", formatted_title)
|
| 94 |
+
formatted_title = re.sub(r"\s+", "_", formatted_title)
|
| 95 |
+
return formatted_title
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def load_embedding(embedder_model, custom_embedder=None):
|
| 99 |
+
embedder_root = os.path.join(now_dir, "rvc", "models", "embedders")
|
| 100 |
+
embedding_list = {
|
| 101 |
+
"contentvec": os.path.join(embedder_root, "contentvec"),
|
| 102 |
+
"chinese-hubert-base": os.path.join(embedder_root, "chinese_hubert_base"),
|
| 103 |
+
"japanese-hubert-base": os.path.join(embedder_root, "japanese_hubert_base"),
|
| 104 |
+
"korean-hubert-base": os.path.join(embedder_root, "korean_hubert_base"),
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
online_embedders = {
|
| 108 |
+
"contentvec": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/contentvec/pytorch_model.bin",
|
| 109 |
+
"chinese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/chinese_hubert_base/pytorch_model.bin",
|
| 110 |
+
"japanese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/japanese_hubert_base/pytorch_model.bin",
|
| 111 |
+
"korean-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/korean_hubert_base/pytorch_model.bin",
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
config_files = {
|
| 115 |
+
"contentvec": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/contentvec/config.json",
|
| 116 |
+
"chinese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/chinese_hubert_base/config.json",
|
| 117 |
+
"japanese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/japanese_hubert_base/config.json",
|
| 118 |
+
"korean-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/korean_hubert_base/config.json",
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
if embedder_model == "custom":
|
| 122 |
+
if os.path.exists(custom_embedder):
|
| 123 |
+
model_path = custom_embedder
|
| 124 |
+
else:
|
| 125 |
+
print(f"Custom embedder not found: {custom_embedder}, using contentvec")
|
| 126 |
+
model_path = embedding_list["contentvec"]
|
| 127 |
+
else:
|
| 128 |
+
model_path = embedding_list[embedder_model]
|
| 129 |
+
bin_file = os.path.join(model_path, "pytorch_model.bin")
|
| 130 |
+
json_file = os.path.join(model_path, "config.json")
|
| 131 |
+
os.makedirs(model_path, exist_ok=True)
|
| 132 |
+
if not os.path.exists(bin_file):
|
| 133 |
+
url = online_embedders[embedder_model]
|
| 134 |
+
print(f"Downloading {url} to {model_path}...")
|
| 135 |
+
wget.download(url, out=bin_file)
|
| 136 |
+
if not os.path.exists(json_file):
|
| 137 |
+
url = config_files[embedder_model]
|
| 138 |
+
print(f"Downloading {url} to {model_path}...")
|
| 139 |
+
wget.download(url, out=json_file)
|
| 140 |
+
|
| 141 |
+
models = HubertModelWithFinalProj.from_pretrained(model_path)
|
| 142 |
+
return models
|
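Small usage sketch of the helpers above (the audio file name is hypothetical; the contentvec weights are fetched on first use if missing):

    from rvc.lib.utils import load_audio, format_title, load_embedding

    audio_16k = load_audio("voice.wav", 16000)    # mono float array resampled to 16 kHz
    print(format_title("Mi Canción (demo) #1"))   # -> "Mi_Cancion_demo_1"
    embedder = load_embedding("contentvec")       # HubertModelWithFinalProj instance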
rvc/lib/zluda.py
ADDED
|
@@ -0,0 +1,76 @@
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
if torch.cuda.is_available() and torch.cuda.get_device_name().endswith("[ZLUDA]"):
|
| 4 |
+
|
| 5 |
+
class STFT:
|
| 6 |
+
def __init__(self):
|
| 7 |
+
self.device = "cuda"
|
| 8 |
+
self.fourier_bases = {} # Cache for Fourier bases
|
| 9 |
+
|
| 10 |
+
def _get_fourier_basis(self, n_fft):
|
| 11 |
+
# Check if the basis for this n_fft is already cached
|
| 12 |
+
if n_fft in self.fourier_bases:
|
| 13 |
+
return self.fourier_bases[n_fft]
|
| 14 |
+
fourier_basis = torch.fft.fft(torch.eye(n_fft, device="cpu")).to(
|
| 15 |
+
self.device
|
| 16 |
+
)
|
| 17 |
+
# stack separated real and imaginary components and convert to torch tensor
|
| 18 |
+
cutoff = n_fft // 2 + 1
|
| 19 |
+
fourier_basis = torch.cat(
|
| 20 |
+
[fourier_basis.real[:cutoff], fourier_basis.imag[:cutoff]], dim=0
|
| 21 |
+
)
|
| 22 |
+
# cache the tensor and return
|
| 23 |
+
self.fourier_bases[n_fft] = fourier_basis
|
| 24 |
+
return fourier_basis
|
| 25 |
+
|
| 26 |
+
def transform(self, input, n_fft, hop_length, window):
|
| 27 |
+
# fetch cached Fourier basis
|
| 28 |
+
fourier_basis = self._get_fourier_basis(n_fft)
|
| 29 |
+
# apply hann window to Fourier basis
|
| 30 |
+
fourier_basis = fourier_basis * window
|
| 31 |
+
# pad input to center with reflect
|
| 32 |
+
pad_amount = n_fft // 2
|
| 33 |
+
input = torch.nn.functional.pad(
|
| 34 |
+
input, (pad_amount, pad_amount), mode="reflect"
|
| 35 |
+
)
|
| 36 |
+
# separate input into n_fft-sized frames
|
| 37 |
+
input_frames = input.unfold(1, n_fft, hop_length).permute(0, 2, 1)
|
| 38 |
+
# apply fft to each frame
|
| 39 |
+
fourier_transform = torch.matmul(fourier_basis, input_frames)
|
| 40 |
+
cutoff = n_fft // 2 + 1
|
| 41 |
+
return torch.complex(
|
| 42 |
+
fourier_transform[:, :cutoff, :], fourier_transform[:, cutoff:, :]
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
stft = STFT()
|
| 46 |
+
_torch_stft = torch.stft
|
| 47 |
+
|
| 48 |
+
def z_stft(input: torch.Tensor, window: torch.Tensor, *args, **kwargs):
|
| 49 |
+
# only optimizing a specific call from rvc.train.mel_processing.MultiScaleMelSpectrogramLoss
|
| 50 |
+
if (
|
| 51 |
+
kwargs.get("win_length") == None
|
| 52 |
+
and kwargs.get("center") == None
|
| 53 |
+
and kwargs.get("return_complex") == True
|
| 54 |
+
):
|
| 55 |
+
# use GPU accelerated calculation
|
| 56 |
+
return stft.transform(
|
| 57 |
+
input, kwargs.get("n_fft"), kwargs.get("hop_length"), window
|
| 58 |
+
)
|
| 59 |
+
else:
|
| 60 |
+
# simply do the operation on CPU
|
| 61 |
+
return _torch_stft(
|
| 62 |
+
input=input.cpu(), window=window.cpu(), *args, **kwargs
|
| 63 |
+
).to(input.device)
|
| 64 |
+
|
| 65 |
+
def z_jit(f, *_, **__):
|
| 66 |
+
f.graph = torch._C.Graph()
|
| 67 |
+
return f
|
| 68 |
+
|
| 69 |
+
# hijacks
|
| 70 |
+
torch.stft = z_stft
|
| 71 |
+
torch.jit.script = z_jit
|
| 72 |
+
# disabling unsupported cudnn
|
| 73 |
+
torch.backends.cudnn.enabled = False
|
| 74 |
+
torch.backends.cuda.enable_flash_sdp(False)
|
| 75 |
+
torch.backends.cuda.enable_math_sdp(True)
|
| 76 |
+
torch.backends.cuda.enable_mem_efficient_sdp(False)
|
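Because the module above patches torch in place only when a ZLUDA device is detected, enabling it is just a side-effect import before any torch.stft or torch.jit.script call, which is how extract.py further below uses it:

    import rvc.lib.zluda  # noqa: F401  (side-effect import; no symbols are needed from it)
    import torch

    # On ZLUDA devices, torch.stft calls with return_complex=True now go through the
    # cached-Fourier-basis path above; on other devices torch is left untouched.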
rvc/models/embedders/.gitkeep
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
|
rvc/models/embedders/embedders_custom/.gitkeep
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
|
rvc/models/formant/.gitkeep
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
|
rvc/models/predictors/.gitkeep
ADDED
|
File without changes
|
rvc/models/pretraineds/.gitkeep
ADDED
|
File without changes
|
rvc/models/pretraineds/custom/.gitkeep
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
|
rvc/models/pretraineds/hifi-gan/.gitkeep
ADDED
|
File without changes
|
rvc/train/data_utils.py
ADDED
|
@@ -0,0 +1,379 @@
| 1 |
+
import os
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
import torch.utils.data
|
| 5 |
+
|
| 6 |
+
from mel_processing import spectrogram_torch
|
| 7 |
+
from utils import load_filepaths_and_text, load_wav_to_torch
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TextAudioLoaderMultiNSFsid(torch.utils.data.Dataset):
|
| 11 |
+
"""
|
| 12 |
+
Dataset that loads text and audio pairs.
|
| 13 |
+
|
| 14 |
+
Args:
|
| 15 |
+
hparams: Hyperparameters.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
def __init__(self, hparams):
|
| 19 |
+
self.audiopaths_and_text = load_filepaths_and_text(hparams.training_files)
|
| 20 |
+
self.max_wav_value = hparams.max_wav_value
|
| 21 |
+
self.sample_rate = hparams.sample_rate
|
| 22 |
+
self.filter_length = hparams.filter_length
|
| 23 |
+
self.hop_length = hparams.hop_length
|
| 24 |
+
self.win_length = hparams.win_length
|
| 25 |
+
self.sample_rate = hparams.sample_rate
|
| 26 |
+
self.min_text_len = getattr(hparams, "min_text_len", 1)
|
| 27 |
+
self.max_text_len = getattr(hparams, "max_text_len", 5000)
|
| 28 |
+
self._filter()
|
| 29 |
+
|
| 30 |
+
def _filter(self):
|
| 31 |
+
"""
|
| 32 |
+
Filters audio paths and text pairs based on text length.
|
| 33 |
+
"""
|
| 34 |
+
audiopaths_and_text_new = []
|
| 35 |
+
lengths = []
|
| 36 |
+
for audiopath, text, pitch, pitchf, dv in self.audiopaths_and_text:
|
| 37 |
+
if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
|
| 38 |
+
audiopaths_and_text_new.append([audiopath, text, pitch, pitchf, dv])
|
| 39 |
+
lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length))
|
| 40 |
+
self.audiopaths_and_text = audiopaths_and_text_new
|
| 41 |
+
self.lengths = lengths
|
| 42 |
+
|
| 43 |
+
def get_sid(self, sid):
|
| 44 |
+
"""
|
| 45 |
+
Converts speaker ID to a LongTensor.
|
| 46 |
+
|
| 47 |
+
Args:
|
| 48 |
+
sid (str): Speaker ID.
|
| 49 |
+
"""
|
| 50 |
+
try:
|
| 51 |
+
sid = torch.LongTensor([int(sid)])
|
| 52 |
+
except ValueError as error:
|
| 53 |
+
print(f"Error converting speaker ID '{sid}' to integer. Exception: {error}")
|
| 54 |
+
sid = torch.LongTensor([0])
|
| 55 |
+
return sid
|
| 56 |
+
|
| 57 |
+
def get_audio_text_pair(self, audiopath_and_text):
|
| 58 |
+
"""
|
| 59 |
+
Loads and processes audio and text data for a single pair.
|
| 60 |
+
|
| 61 |
+
Args:
|
| 62 |
+
audiopath_and_text (list): List containing audio path, text, pitch, pitchf, and speaker ID.
|
| 63 |
+
"""
|
| 64 |
+
file = audiopath_and_text[0]
|
| 65 |
+
phone = audiopath_and_text[1]
|
| 66 |
+
pitch = audiopath_and_text[2]
|
| 67 |
+
pitchf = audiopath_and_text[3]
|
| 68 |
+
dv = audiopath_and_text[4]
|
| 69 |
+
|
| 70 |
+
phone, pitch, pitchf = self.get_labels(phone, pitch, pitchf)
|
| 71 |
+
spec, wav = self.get_audio(file)
|
| 72 |
+
dv = self.get_sid(dv)
|
| 73 |
+
|
| 74 |
+
len_phone = phone.size()[0]
|
| 75 |
+
len_spec = spec.size()[-1]
|
| 76 |
+
if len_phone != len_spec:
|
| 77 |
+
len_min = min(len_phone, len_spec)
|
| 78 |
+
len_wav = len_min * self.hop_length
|
| 79 |
+
|
| 80 |
+
spec = spec[:, :len_min]
|
| 81 |
+
wav = wav[:, :len_wav]
|
| 82 |
+
|
| 83 |
+
phone = phone[:len_min, :]
|
| 84 |
+
pitch = pitch[:len_min]
|
| 85 |
+
pitchf = pitchf[:len_min]
|
| 86 |
+
|
| 87 |
+
return (spec, wav, phone, pitch, pitchf, dv)
|
| 88 |
+
|
| 89 |
+
def get_labels(self, phone, pitch, pitchf):
|
| 90 |
+
"""
|
| 91 |
+
Loads and processes phoneme, pitch, and pitchf labels.
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
phone (str): Path to phoneme label file.
|
| 95 |
+
pitch (str): Path to pitch label file.
|
| 96 |
+
pitchf (str): Path to pitchf label file.
|
| 97 |
+
"""
|
| 98 |
+
phone = np.load(phone)
|
| 99 |
+
phone = np.repeat(phone, 2, axis=0)
|
| 100 |
+
pitch = np.load(pitch)
|
| 101 |
+
pitchf = np.load(pitchf)
|
| 102 |
+
n_num = min(phone.shape[0], 900)
|
| 103 |
+
phone = phone[:n_num, :]
|
| 104 |
+
pitch = pitch[:n_num]
|
| 105 |
+
pitchf = pitchf[:n_num]
|
| 106 |
+
phone = torch.FloatTensor(phone)
|
| 107 |
+
pitch = torch.LongTensor(pitch)
|
| 108 |
+
pitchf = torch.FloatTensor(pitchf)
|
| 109 |
+
return phone, pitch, pitchf
|
| 110 |
+
|
| 111 |
+
def get_audio(self, filename):
|
| 112 |
+
"""
|
| 113 |
+
Loads and processes audio data.
|
| 114 |
+
|
| 115 |
+
Args:
|
| 116 |
+
filename (str): Path to audio file.
|
| 117 |
+
"""
|
| 118 |
+
audio, sample_rate = load_wav_to_torch(filename)
|
| 119 |
+
if sample_rate != self.sample_rate:
|
| 120 |
+
raise ValueError(
|
| 121 |
+
f"{sample_rate} SR doesn't match target {self.sample_rate} SR"
|
| 122 |
+
)
|
| 123 |
+
audio_norm = audio
|
| 124 |
+
audio_norm = audio_norm.unsqueeze(0)
|
| 125 |
+
spec_filename = filename.replace(".wav", ".spec.pt")
|
| 126 |
+
if os.path.exists(spec_filename):
|
| 127 |
+
try:
|
| 128 |
+
spec = torch.load(spec_filename, weights_only=True)
|
| 129 |
+
except Exception as error:
|
| 130 |
+
print(f"An error occurred getting spec from {spec_filename}: {error}")
|
| 131 |
+
spec = spectrogram_torch(
|
| 132 |
+
audio_norm,
|
| 133 |
+
self.filter_length,
|
| 134 |
+
self.hop_length,
|
| 135 |
+
self.win_length,
|
| 136 |
+
center=False,
|
| 137 |
+
)
|
| 138 |
+
spec = torch.squeeze(spec, 0)
|
| 139 |
+
torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
|
| 140 |
+
else:
|
| 141 |
+
spec = spectrogram_torch(
|
| 142 |
+
audio_norm,
|
| 143 |
+
self.filter_length,
|
| 144 |
+
self.hop_length,
|
| 145 |
+
self.win_length,
|
| 146 |
+
center=False,
|
| 147 |
+
)
|
| 148 |
+
spec = torch.squeeze(spec, 0)
|
| 149 |
+
torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
|
| 150 |
+
return spec, audio_norm
|
| 151 |
+
|
| 152 |
+
def __getitem__(self, index):
|
| 153 |
+
"""
|
| 154 |
+
Returns a single audio-text pair.
|
| 155 |
+
|
| 156 |
+
Args:
|
| 157 |
+
index (int): Index of the data sample.
|
| 158 |
+
"""
|
| 159 |
+
return self.get_audio_text_pair(self.audiopaths_and_text[index])
|
| 160 |
+
|
| 161 |
+
def __len__(self):
|
| 162 |
+
"""
|
| 163 |
+
Returns the length of the dataset.
|
| 164 |
+
"""
|
| 165 |
+
return len(self.audiopaths_and_text)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
class TextAudioCollateMultiNSFsid:
|
| 169 |
+
"""
|
| 170 |
+
Collates text and audio data for training.
|
| 171 |
+
|
| 172 |
+
Args:
|
| 173 |
+
return_ids (bool, optional): Whether to return sample IDs. Defaults to False.
|
| 174 |
+
"""
|
| 175 |
+
|
| 176 |
+
def __init__(self, return_ids=False):
|
| 177 |
+
self.return_ids = return_ids
|
| 178 |
+
|
| 179 |
+
def __call__(self, batch):
|
| 180 |
+
"""
|
| 181 |
+
Collates a batch of data samples.
|
| 182 |
+
|
| 183 |
+
Args:
|
| 184 |
+
batch (list): List of data samples.
|
| 185 |
+
"""
|
| 186 |
+
_, ids_sorted_decreasing = torch.sort(
|
| 187 |
+
torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
max_spec_len = max([x[0].size(1) for x in batch])
|
| 191 |
+
max_wave_len = max([x[1].size(1) for x in batch])
|
| 192 |
+
spec_lengths = torch.LongTensor(len(batch))
|
| 193 |
+
wave_lengths = torch.LongTensor(len(batch))
|
| 194 |
+
spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len)
|
| 195 |
+
wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len)
|
| 196 |
+
spec_padded.zero_()
|
| 197 |
+
wave_padded.zero_()
|
| 198 |
+
|
| 199 |
+
max_phone_len = max([x[2].size(0) for x in batch])
|
| 200 |
+
phone_lengths = torch.LongTensor(len(batch))
|
| 201 |
+
phone_padded = torch.FloatTensor(
|
| 202 |
+
len(batch), max_phone_len, batch[0][2].shape[1]
|
| 203 |
+
)
|
| 204 |
+
pitch_padded = torch.LongTensor(len(batch), max_phone_len)
|
| 205 |
+
pitchf_padded = torch.FloatTensor(len(batch), max_phone_len)
|
| 206 |
+
phone_padded.zero_()
|
| 207 |
+
pitch_padded.zero_()
|
| 208 |
+
pitchf_padded.zero_()
|
| 209 |
+
sid = torch.LongTensor(len(batch))
|
| 210 |
+
|
| 211 |
+
for i in range(len(ids_sorted_decreasing)):
|
| 212 |
+
row = batch[ids_sorted_decreasing[i]]
|
| 213 |
+
|
| 214 |
+
spec = row[0]
|
| 215 |
+
spec_padded[i, :, : spec.size(1)] = spec
|
| 216 |
+
spec_lengths[i] = spec.size(1)
|
| 217 |
+
|
| 218 |
+
wave = row[1]
|
| 219 |
+
wave_padded[i, :, : wave.size(1)] = wave
|
| 220 |
+
wave_lengths[i] = wave.size(1)
|
| 221 |
+
|
| 222 |
+
phone = row[2]
|
| 223 |
+
phone_padded[i, : phone.size(0), :] = phone
|
| 224 |
+
phone_lengths[i] = phone.size(0)
|
| 225 |
+
|
| 226 |
+
pitch = row[3]
|
| 227 |
+
pitch_padded[i, : pitch.size(0)] = pitch
|
| 228 |
+
pitchf = row[4]
|
| 229 |
+
pitchf_padded[i, : pitchf.size(0)] = pitchf
|
| 230 |
+
|
| 231 |
+
sid[i] = row[5]
|
| 232 |
+
|
| 233 |
+
return (
|
| 234 |
+
phone_padded,
|
| 235 |
+
phone_lengths,
|
| 236 |
+
pitch_padded,
|
| 237 |
+
pitchf_padded,
|
| 238 |
+
spec_padded,
|
| 239 |
+
spec_lengths,
|
| 240 |
+
wave_padded,
|
| 241 |
+
wave_lengths,
|
| 242 |
+
sid,
|
| 243 |
+
)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
|
| 247 |
+
"""
|
| 248 |
+
Distributed sampler that groups data into buckets based on length.
|
| 249 |
+
|
| 250 |
+
Args:
|
| 251 |
+
dataset (torch.utils.data.Dataset): Dataset to sample from.
|
| 252 |
+
batch_size (int): Batch size.
|
| 253 |
+
boundaries (list): List of length boundaries for buckets.
|
| 254 |
+
num_replicas (int, optional): Number of processes participating in distributed training. Defaults to None.
|
| 255 |
+
rank (int, optional): Rank of the current process. Defaults to None.
|
| 256 |
+
shuffle (bool, optional): Whether to shuffle the data. Defaults to True.
|
| 257 |
+
"""
|
| 258 |
+
|
| 259 |
+
def __init__(
|
| 260 |
+
self,
|
| 261 |
+
dataset,
|
| 262 |
+
batch_size,
|
| 263 |
+
boundaries,
|
| 264 |
+
num_replicas=None,
|
| 265 |
+
rank=None,
|
| 266 |
+
shuffle=True,
|
| 267 |
+
):
|
| 268 |
+
super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
|
| 269 |
+
self.lengths = dataset.lengths
|
| 270 |
+
self.batch_size = batch_size
|
| 271 |
+
self.boundaries = boundaries
|
| 272 |
+
|
| 273 |
+
self.buckets, self.num_samples_per_bucket = self._create_buckets()
|
| 274 |
+
self.total_size = sum(self.num_samples_per_bucket)
|
| 275 |
+
self.num_samples = self.total_size // self.num_replicas
|
| 276 |
+
|
| 277 |
+
def _create_buckets(self):
|
| 278 |
+
"""
|
| 279 |
+
Creates buckets of data samples based on length.
|
| 280 |
+
"""
|
| 281 |
+
buckets = [[] for _ in range(len(self.boundaries) - 1)]
|
| 282 |
+
for i in range(len(self.lengths)):
|
| 283 |
+
length = self.lengths[i]
|
| 284 |
+
idx_bucket = self._bisect(length)
|
| 285 |
+
if idx_bucket != -1:
|
| 286 |
+
buckets[idx_bucket].append(i)
|
| 287 |
+
|
| 288 |
+
for i in range(len(buckets) - 1, -1, -1): #
|
| 289 |
+
if len(buckets[i]) == 0:
|
| 290 |
+
buckets.pop(i)
|
| 291 |
+
self.boundaries.pop(i + 1)
|
| 292 |
+
|
| 293 |
+
num_samples_per_bucket = []
|
| 294 |
+
for i in range(len(buckets)):
|
| 295 |
+
len_bucket = len(buckets[i])
|
| 296 |
+
total_batch_size = self.num_replicas * self.batch_size
|
| 297 |
+
rem = (
|
| 298 |
+
total_batch_size - (len_bucket % total_batch_size)
|
| 299 |
+
) % total_batch_size
|
| 300 |
+
num_samples_per_bucket.append(len_bucket + rem)
|
| 301 |
+
return buckets, num_samples_per_bucket
|
| 302 |
+
|
| 303 |
+
def __iter__(self):
|
| 304 |
+
"""
|
| 305 |
+
Iterates over batches of data samples.
|
| 306 |
+
"""
|
| 307 |
+
g = torch.Generator()
|
| 308 |
+
g.manual_seed(self.epoch)
|
| 309 |
+
|
| 310 |
+
indices = []
|
| 311 |
+
if self.shuffle:
|
| 312 |
+
for bucket in self.buckets:
|
| 313 |
+
indices.append(torch.randperm(len(bucket), generator=g).tolist())
|
| 314 |
+
else:
|
| 315 |
+
for bucket in self.buckets:
|
| 316 |
+
indices.append(list(range(len(bucket))))
|
| 317 |
+
|
| 318 |
+
batches = []
|
| 319 |
+
for i in range(len(self.buckets)):
|
| 320 |
+
bucket = self.buckets[i]
|
| 321 |
+
len_bucket = len(bucket)
|
| 322 |
+
ids_bucket = indices[i]
|
| 323 |
+
num_samples_bucket = self.num_samples_per_bucket[i]
|
| 324 |
+
|
| 325 |
+
rem = num_samples_bucket - len_bucket
|
| 326 |
+
ids_bucket = (
|
| 327 |
+
ids_bucket
|
| 328 |
+
+ ids_bucket * (rem // len_bucket)
|
| 329 |
+
+ ids_bucket[: (rem % len_bucket)]
|
| 330 |
+
)
|
| 331 |
+
|
| 332 |
+
ids_bucket = ids_bucket[self.rank :: self.num_replicas]
|
| 333 |
+
|
| 334 |
+
# batching
|
| 335 |
+
for j in range(len(ids_bucket) // self.batch_size):
|
| 336 |
+
batch = [
|
| 337 |
+
bucket[idx]
|
| 338 |
+
for idx in ids_bucket[
|
| 339 |
+
j * self.batch_size : (j + 1) * self.batch_size
|
| 340 |
+
]
|
| 341 |
+
]
|
| 342 |
+
batches.append(batch)
|
| 343 |
+
|
| 344 |
+
if self.shuffle:
|
| 345 |
+
batch_ids = torch.randperm(len(batches), generator=g).tolist()
|
| 346 |
+
batches = [batches[i] for i in batch_ids]
|
| 347 |
+
self.batches = batches
|
| 348 |
+
|
| 349 |
+
assert len(self.batches) * self.batch_size == self.num_samples
|
| 350 |
+
return iter(self.batches)
|
| 351 |
+
|
| 352 |
+
def _bisect(self, x, lo=0, hi=None):
|
| 353 |
+
"""
|
| 354 |
+
Performs binary search to find the bucket index for a given length.
|
| 355 |
+
|
| 356 |
+
Args:
|
| 357 |
+
x (int): Length to find the bucket for.
|
| 358 |
+
lo (int, optional): Lower bound of the search range. Defaults to 0.
|
| 359 |
+
hi (int, optional): Upper bound of the search range. Defaults to None.
|
| 360 |
+
"""
|
| 361 |
+
if hi is None:
|
| 362 |
+
hi = len(self.boundaries) - 1
|
| 363 |
+
|
| 364 |
+
if hi > lo:
|
| 365 |
+
mid = (hi + lo) // 2
|
| 366 |
+
if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
|
| 367 |
+
return mid
|
| 368 |
+
elif x <= self.boundaries[mid]:
|
| 369 |
+
return self._bisect(x, lo, mid)
|
| 370 |
+
else:
|
| 371 |
+
return self._bisect(x, mid + 1, hi)
|
| 372 |
+
else:
|
| 373 |
+
return -1
|
| 374 |
+
|
| 375 |
+
def __len__(self):
|
| 376 |
+
"""
|
| 377 |
+
Returns the length of the sampler.
|
| 378 |
+
"""
|
| 379 |
+
return self.num_samples // self.batch_size
|
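A sketch of wiring the three classes above into a DataLoader. Here data_config stands in for the experiment's hyper-parameter object (the one holding training_files, hop_length, filter_length, and so on), and the bucket boundaries are only illustrative defaults:

    from torch.utils.data import DataLoader

    dataset = TextAudioLoaderMultiNSFsid(data_config)   # data_config is assumed, see above
    sampler = DistributedBucketSampler(
        dataset,
        batch_size=8,
        boundaries=[100, 200, 300, 400, 500, 600, 700, 800, 900],
        num_replicas=1,
        rank=0,
        shuffle=True,
    )
    loader = DataLoader(
        dataset,
        batch_sampler=sampler,          # the sampler already yields whole batches of indices
        collate_fn=TextAudioCollateMultiNSFsid(),
        num_workers=4,
    )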
rvc/train/extract/extract.py
ADDED
|
@@ -0,0 +1,248 @@
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import glob
|
| 4 |
+
import time
|
| 5 |
+
import tqdm
|
| 6 |
+
import torch
|
| 7 |
+
import torchcrepe
|
| 8 |
+
import numpy as np
|
| 9 |
+
import concurrent.futures
|
| 10 |
+
import multiprocessing as mp
|
| 11 |
+
import json
|
| 12 |
+
|
| 13 |
+
now_dir = os.getcwd()
|
| 14 |
+
sys.path.append(os.path.join(now_dir))
|
| 15 |
+
|
| 16 |
+
# Zluda hijack
|
| 17 |
+
import rvc.lib.zluda
|
| 18 |
+
|
| 19 |
+
from rvc.lib.utils import load_audio, load_embedding
|
| 20 |
+
from rvc.train.extract.preparing_files import generate_config, generate_filelist
|
| 21 |
+
from rvc.lib.predictors.RMVPE import RMVPE0Predictor
|
| 22 |
+
from rvc.configs.config import Config
|
| 23 |
+
|
| 24 |
+
# Load config
|
| 25 |
+
config = Config()
|
| 26 |
+
mp.set_start_method("spawn", force=True)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class FeatureInput:
|
| 30 |
+
def __init__(self, sample_rate=16000, hop_size=160, device="cpu"):
|
| 31 |
+
self.fs = sample_rate
|
| 32 |
+
self.hop = hop_size
|
| 33 |
+
self.f0_bin = 256
|
| 34 |
+
self.f0_max = 1100.0
|
| 35 |
+
self.f0_min = 50.0
|
| 36 |
+
self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
|
| 37 |
+
self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
|
| 38 |
+
self.device = device
|
| 39 |
+
self.model_rmvpe = None
|
| 40 |
+
|
| 41 |
+
def compute_f0(self, audio_array, method, hop_length):
|
| 42 |
+
if method == "crepe":
|
| 43 |
+
return self._get_crepe(audio_array, hop_length, type="full")
|
| 44 |
+
elif method == "crepe-tiny":
|
| 45 |
+
return self._get_crepe(audio_array, hop_length, type="tiny")
|
| 46 |
+
elif method == "rmvpe":
|
| 47 |
+
return self.model_rmvpe.infer_from_audio(audio_array, thred=0.03)
|
| 48 |
+
|
| 49 |
+
def _get_crepe(self, x, hop_length, type):
|
| 50 |
+
audio = torch.from_numpy(x.astype(np.float32)).to(self.device)
|
| 51 |
+
audio /= torch.quantile(torch.abs(audio), 0.999)
|
| 52 |
+
audio = audio.unsqueeze(0)
|
| 53 |
+
pitch = torchcrepe.predict(
|
| 54 |
+
audio,
|
| 55 |
+
self.fs,
|
| 56 |
+
hop_length,
|
| 57 |
+
self.f0_min,
|
| 58 |
+
self.f0_max,
|
| 59 |
+
type,
|
| 60 |
+
batch_size=hop_length * 2,
|
| 61 |
+
device=audio.device,
|
| 62 |
+
pad=True,
|
| 63 |
+
)
|
| 64 |
+
source = pitch.squeeze(0).cpu().float().numpy()
|
| 65 |
+
source[source < 0.001] = np.nan
|
| 66 |
+
return np.nan_to_num(
|
| 67 |
+
np.interp(
|
| 68 |
+
np.arange(0, len(source) * (x.size // self.hop), len(source))
|
| 69 |
+
/ (x.size // self.hop),
|
| 70 |
+
np.arange(0, len(source)),
|
| 71 |
+
source,
|
| 72 |
+
)
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
def coarse_f0(self, f0):
|
| 76 |
+
f0_mel = 1127.0 * np.log(1.0 + f0 / 700.0)
|
| 77 |
+
f0_mel = np.clip(
|
| 78 |
+
(f0_mel - self.f0_mel_min)
|
| 79 |
+
* (self.f0_bin - 2)
|
| 80 |
+
/ (self.f0_mel_max - self.f0_mel_min)
|
| 81 |
+
+ 1,
|
| 82 |
+
1,
|
| 83 |
+
self.f0_bin - 1,
|
| 84 |
+
)
|
| 85 |
+
return np.rint(f0_mel).astype(int)
|
| 86 |
+
|
| 87 |
+
def process_file(self, file_info, f0_method, hop_length):
|
| 88 |
+
inp_path, opt_path_coarse, opt_path_full, _ = file_info
|
| 89 |
+
if os.path.exists(opt_path_coarse) and os.path.exists(opt_path_full):
|
| 90 |
+
return
|
| 91 |
+
|
| 92 |
+
try:
|
| 93 |
+
np_arr = load_audio(inp_path, self.fs)
|
| 94 |
+
feature_pit = self.compute_f0(np_arr, f0_method, hop_length)
|
| 95 |
+
np.save(opt_path_full, feature_pit, allow_pickle=False)
|
| 96 |
+
coarse_pit = self.coarse_f0(feature_pit)
|
| 97 |
+
np.save(opt_path_coarse, coarse_pit, allow_pickle=False)
|
| 98 |
+
except Exception as error:
|
| 99 |
+
print(
|
| 100 |
+
f"An error occurred extracting file {inp_path} on {self.device}: {error}"
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
def process_files(self, files, f0_method, hop_length, device, threads):
|
| 104 |
+
self.device = device
|
| 105 |
+
if f0_method == "rmvpe":
|
| 106 |
+
self.model_rmvpe = RMVPE0Predictor(
|
| 107 |
+
os.path.join("rvc", "models", "predictors", "rmvpe.pt"),
|
| 108 |
+
device=device,
|
| 109 |
+
)
|
| 110 |
+
|
| 111 |
+
def worker(file_info):
|
| 112 |
+
self.process_file(file_info, f0_method, hop_length)
|
| 113 |
+
|
| 114 |
+
with tqdm.tqdm(total=len(files), leave=True) as pbar:
|
| 115 |
+
with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
|
| 116 |
+
futures = [executor.submit(worker, f) for f in files]
|
| 117 |
+
for _ in concurrent.futures.as_completed(futures):
|
| 118 |
+
pbar.update(1)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def run_pitch_extraction(files, devices, f0_method, hop_length, threads):
|
| 122 |
+
devices_str = ", ".join(devices)
|
| 123 |
+
print(
|
| 124 |
+
f"Starting pitch extraction with {num_processes} cores on {devices_str} using {f0_method}..."
|
| 125 |
+
)
|
| 126 |
+
start_time = time.time()
|
| 127 |
+
fe = FeatureInput()
|
| 128 |
+
with concurrent.futures.ProcessPoolExecutor(max_workers=len(devices)) as executor:
|
| 129 |
+
tasks = [
|
| 130 |
+
executor.submit(
|
| 131 |
+
fe.process_files,
|
| 132 |
+
files[i :: len(devices)],
|
| 133 |
+
f0_method,
|
| 134 |
+
hop_length,
|
| 135 |
+
devices[i],
|
| 136 |
+
threads // len(devices),
|
| 137 |
+
)
|
| 138 |
+
for i in range(len(devices))
|
| 139 |
+
]
|
| 140 |
+
concurrent.futures.wait(tasks)
|
| 141 |
+
|
| 142 |
+
print(f"Pitch extraction completed in {time.time() - start_time:.2f} seconds.")
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def process_file_embedding(
|
| 146 |
+
files, embedder_model, embedder_model_custom, device_num, device, n_threads
|
| 147 |
+
):
|
| 148 |
+
model = load_embedding(embedder_model, embedder_model_custom).to(device).float()
|
| 149 |
+
model.eval()
|
| 150 |
+
n_threads = max(1, n_threads)
|
| 151 |
+
|
| 152 |
+
def worker(file_info):
|
| 153 |
+
wav_file_path, _, _, out_file_path = file_info
|
| 154 |
+
if os.path.exists(out_file_path):
|
| 155 |
+
return
|
| 156 |
+
feats = torch.from_numpy(load_audio(wav_file_path, 16000)).to(device).float()
|
| 157 |
+
feats = feats.view(1, -1)
|
| 158 |
+
with torch.no_grad():
|
| 159 |
+
result = model(feats)["last_hidden_state"]
|
| 160 |
+
feats_out = result.squeeze(0).float().cpu().numpy()
|
| 161 |
+
if not np.isnan(feats_out).any():
|
| 162 |
+
np.save(out_file_path, feats_out, allow_pickle=False)
|
| 163 |
+
else:
|
| 164 |
+
print(f"{wav_file_path} produced NaN values; skipping.")
|
| 165 |
+
|
| 166 |
+
with tqdm.tqdm(total=len(files), leave=True, position=device_num) as pbar:
|
| 167 |
+
with concurrent.futures.ThreadPoolExecutor(max_workers=n_threads) as executor:
|
| 168 |
+
futures = [executor.submit(worker, f) for f in files]
|
| 169 |
+
for _ in concurrent.futures.as_completed(futures):
|
| 170 |
+
pbar.update(1)
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def run_embedding_extraction(
|
| 174 |
+
files, devices, embedder_model, embedder_model_custom, threads
|
| 175 |
+
):
|
| 176 |
+
devices_str = ", ".join(devices)
|
| 177 |
+
print(
|
| 178 |
+
f"Starting embedding extraction with {num_processes} cores on {devices_str}..."
|
| 179 |
+
)
|
| 180 |
+
start_time = time.time()
|
| 181 |
+
with concurrent.futures.ProcessPoolExecutor(max_workers=len(devices)) as executor:
|
| 182 |
+
tasks = [
|
| 183 |
+
executor.submit(
|
| 184 |
+
process_file_embedding,
|
| 185 |
+
files[i :: len(devices)],
|
| 186 |
+
embedder_model,
|
| 187 |
+
embedder_model_custom,
|
| 188 |
+
i,
|
| 189 |
+
devices[i],
|
| 190 |
+
threads // len(devices),
|
| 191 |
+
)
|
| 192 |
+
for i in range(len(devices))
|
| 193 |
+
]
|
| 194 |
+
concurrent.futures.wait(tasks)
|
| 195 |
+
|
| 196 |
+
print(f"Embedding extraction completed in {time.time() - start_time:.2f} seconds.")
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
if __name__ == "__main__":
|
| 200 |
+
exp_dir = sys.argv[1]
|
| 201 |
+
f0_method = sys.argv[2]
|
| 202 |
+
hop_length = int(sys.argv[3])
|
| 203 |
+
num_processes = int(sys.argv[4])
|
| 204 |
+
gpus = sys.argv[5]
|
| 205 |
+
sample_rate = sys.argv[6]
|
| 206 |
+
embedder_model = sys.argv[7]
|
| 207 |
+
embedder_model_custom = sys.argv[8] if len(sys.argv) > 8 else None
|
| 208 |
+
include_mutes = int(sys.argv[9]) if len(sys.argv) > 9 else 2
|
| 209 |
+
|
| 210 |
+
wav_path = os.path.join(exp_dir, "sliced_audios_16k")
|
| 211 |
+
os.makedirs(os.path.join(exp_dir, "f0"), exist_ok=True)
|
| 212 |
+
os.makedirs(os.path.join(exp_dir, "f0_voiced"), exist_ok=True)
|
| 213 |
+
os.makedirs(os.path.join(exp_dir, "extracted"), exist_ok=True)
|
| 214 |
+
|
| 215 |
+
chosen_embedder_model = (
|
| 216 |
+
embedder_model_custom if embedder_model == "custom" else embedder_model
|
| 217 |
+
)
|
| 218 |
+
file_path = os.path.join(exp_dir, "model_info.json")
|
| 219 |
+
if os.path.exists(file_path):
|
| 220 |
+
with open(file_path, "r") as f:
|
| 221 |
+
data = json.load(f)
|
| 222 |
+
else:
|
| 223 |
+
data = {}
|
| 224 |
+
data["embedder_model"] = chosen_embedder_model
|
| 225 |
+
with open(file_path, "w") as f:
|
| 226 |
+
json.dump(data, f, indent=4)
|
| 227 |
+
|
| 228 |
+
files = []
|
| 229 |
+
for file in glob.glob(os.path.join(wav_path, "*.wav")):
|
| 230 |
+
file_name = os.path.basename(file)
|
| 231 |
+
file_info = [
|
| 232 |
+
file,
|
| 233 |
+
os.path.join(exp_dir, "f0", file_name + ".npy"),
|
| 234 |
+
os.path.join(exp_dir, "f0_voiced", file_name + ".npy"),
|
| 235 |
+
os.path.join(exp_dir, "extracted", file_name.replace("wav", "npy")),
|
| 236 |
+
]
|
| 237 |
+
files.append(file_info)
|
| 238 |
+
|
| 239 |
+
devices = ["cpu"] if gpus == "-" else [f"cuda:{idx}" for idx in gpus.split("-")]
|
| 240 |
+
|
| 241 |
+
run_pitch_extraction(files, devices, f0_method, hop_length, num_processes)
|
| 242 |
+
|
| 243 |
+
run_embedding_extraction(
|
| 244 |
+
files, devices, embedder_model, embedder_model_custom, num_processes
|
| 245 |
+
)
|
| 246 |
+
|
| 247 |
+
generate_config(sample_rate, exp_dir)
|
| 248 |
+
generate_filelist(exp_dir, sample_rate, include_mutes)
|
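extract.py is a positional-argument CLI; a typical invocation for a 48 kHz experiment launched from Python looks like this (experiment folder and GPU index are illustrative):

    import subprocess, sys

    subprocess.run([
        sys.executable, "rvc/train/extract/extract.py",
        "logs/my-experiment",  # exp_dir
        "rmvpe",               # f0_method
        "128",                 # hop_length
        "8",                   # num_processes
        "0",                   # gpus ("-" for CPU, "0-1" for two GPUs)
        "48000",               # sample_rate
        "contentvec",          # embedder_model
    ], check=True)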
rvc/train/extract/preparing_files.py
ADDED
|
@@ -0,0 +1,75 @@
| 1 |
+
import os
|
| 2 |
+
import shutil
|
| 3 |
+
from random import shuffle
|
| 4 |
+
from rvc.configs.config import Config
|
| 5 |
+
import json
|
| 6 |
+
|
| 7 |
+
config = Config()
|
| 8 |
+
current_directory = os.getcwd()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def generate_config(sample_rate: int, model_path: str):
|
| 12 |
+
config_path = os.path.join("rvc", "configs", f"{sample_rate}.json")
|
| 13 |
+
config_save_path = os.path.join(model_path, "config.json")
|
| 14 |
+
if not os.path.exists(config_save_path):
|
| 15 |
+
shutil.copyfile(config_path, config_save_path)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def generate_filelist(model_path: str, sample_rate: int, include_mutes: int = 2):
|
| 19 |
+
gt_wavs_dir = os.path.join(model_path, "sliced_audios")
|
| 20 |
+
feature_dir = os.path.join(model_path, f"extracted")
|
| 21 |
+
|
| 22 |
+
f0_dir, f0nsf_dir = None, None
|
| 23 |
+
f0_dir = os.path.join(model_path, "f0")
|
| 24 |
+
f0nsf_dir = os.path.join(model_path, "f0_voiced")
|
| 25 |
+
|
| 26 |
+
gt_wavs_files = set(name.split(".")[0] for name in os.listdir(gt_wavs_dir))
|
| 27 |
+
feature_files = set(name.split(".")[0] for name in os.listdir(feature_dir))
|
| 28 |
+
|
| 29 |
+
f0_files = set(name.split(".")[0] for name in os.listdir(f0_dir))
|
| 30 |
+
f0nsf_files = set(name.split(".")[0] for name in os.listdir(f0nsf_dir))
|
| 31 |
+
names = gt_wavs_files & feature_files & f0_files & f0nsf_files
|
| 32 |
+
|
| 33 |
+
options = []
|
| 34 |
+
mute_base_path = os.path.join(current_directory, "logs", "mute")
|
| 35 |
+
sids = []
|
| 36 |
+
for name in names:
|
| 37 |
+
sid = name.split("_")[0]
|
| 38 |
+
if sid not in sids:
|
| 39 |
+
sids.append(sid)
|
| 40 |
+
options.append(
|
| 41 |
+
f"{os.path.join(gt_wavs_dir, name)}.wav|{os.path.join(feature_dir, name)}.npy|{os.path.join(f0_dir, name)}.wav.npy|{os.path.join(f0nsf_dir, name)}.wav.npy|{sid}"
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
if include_mutes > 0:
|
| 45 |
+
mute_audio_path = os.path.join(
|
| 46 |
+
mute_base_path, "sliced_audios", f"mute{sample_rate}.wav"
|
| 47 |
+
)
|
| 48 |
+
mute_feature_path = os.path.join(mute_base_path, f"extracted", "mute.npy")
|
| 49 |
+
mute_f0_path = os.path.join(mute_base_path, "f0", "mute.wav.npy")
|
| 50 |
+
mute_f0nsf_path = os.path.join(mute_base_path, "f0_voiced", "mute.wav.npy")
|
| 51 |
+
|
| 52 |
+
# adding x files per sid
|
| 53 |
+
for sid in sids * include_mutes:
|
| 54 |
+
options.append(
|
| 55 |
+
f"{mute_audio_path}|{mute_feature_path}|{mute_f0_path}|{mute_f0nsf_path}|{sid}"
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
file_path = os.path.join(model_path, "model_info.json")
|
| 59 |
+
if os.path.exists(file_path):
|
| 60 |
+
with open(file_path, "r") as f:
|
| 61 |
+
data = json.load(f)
|
| 62 |
+
else:
|
| 63 |
+
data = {}
|
| 64 |
+
data.update(
|
| 65 |
+
{
|
| 66 |
+
"speakers_id": len(sids),
|
| 67 |
+
}
|
| 68 |
+
)
|
| 69 |
+
with open(file_path, "w") as f:
|
| 70 |
+
json.dump(data, f, indent=4)
|
| 71 |
+
|
| 72 |
+
shuffle(options)
|
| 73 |
+
|
| 74 |
+
with open(os.path.join(model_path, "filelist.txt"), "w") as f:
|
| 75 |
+
f.write("\n".join(options))
|
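After feature extraction the two helpers above are called back to back; a sketch with an illustrative experiment folder:

    from rvc.train.extract.preparing_files import generate_config, generate_filelist

    exp_dir = "logs/my-experiment"
    generate_config(48000, exp_dir)
    generate_filelist(exp_dir, 48000, include_mutes=2)
    # Each line of the resulting filelist.txt has the form:
    #   <gt wav>.wav|<features>.npy|<coarse f0>.wav.npy|<float f0>.wav.npy|<speaker id>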
rvc/train/losses.py
ADDED
|
@@ -0,0 +1,132 @@
import torch


def feature_loss(fmap_r, fmap_g):
    """
    Compute the feature loss between reference and generated feature maps.

    Args:
        fmap_r (list of torch.Tensor): List of reference feature maps.
        fmap_g (list of torch.Tensor): List of generated feature maps.
    """
    return 2 * sum(
        torch.mean(torch.abs(rl - gl))
        for dr, dg in zip(fmap_r, fmap_g)
        for rl, gl in zip(dr, dg)
    )


def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    """
    Compute the discriminator loss for real and generated outputs.

    Args:
        disc_real_outputs (list of torch.Tensor): List of discriminator outputs for real samples.
        disc_generated_outputs (list of torch.Tensor): List of discriminator outputs for generated samples.
    """
    loss = 0
    r_losses = []
    g_losses = []
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        r_loss = torch.mean((1 - dr.float()) ** 2)
        g_loss = torch.mean(dg.float() ** 2)

        # r_losses.append(r_loss.item())
        # g_losses.append(g_loss.item())
        loss += r_loss + g_loss

    return loss, r_losses, g_losses


def generator_loss(disc_outputs):
    """
    Compute the generator loss based on discriminator outputs.

    Args:
        disc_outputs (list of torch.Tensor): List of discriminator outputs for generated samples.
    """
    loss = 0
    gen_losses = []
    for dg in disc_outputs:
        l = torch.mean((1 - dg.float()) ** 2)
        # gen_losses.append(l.item())
        loss += l

    return loss, gen_losses


# NOTE: the two *_scaled helpers below are defined twice in this file; the
# documented versions further down shadow these earlier definitions at import time.
def discriminator_loss_scaled(disc_real, disc_fake, scale=1.0):
    loss = 0
    for i, (d_real, d_fake) in enumerate(zip(disc_real, disc_fake)):
        real_loss = torch.mean((1 - d_real) ** 2)
        fake_loss = torch.mean(d_fake**2)
        _loss = real_loss + fake_loss
        loss += _loss if i < len(disc_real) / 2 else scale * _loss
    return loss, None, None


def generator_loss_scaled(disc_outputs, scale=1.0):
    loss = 0
    for i, d_fake in enumerate(disc_outputs):
        d_fake = d_fake.float()
        _loss = torch.mean((1 - d_fake) ** 2)
        loss += _loss if i < len(disc_outputs) / 2 else scale * _loss
    return loss, None, None


def discriminator_loss_scaled(disc_real, disc_fake, scale=1.0):
    """
    Compute the scaled discriminator loss for real and generated outputs.

    Args:
        disc_real (list of torch.Tensor): List of discriminator outputs for real samples.
        disc_fake (list of torch.Tensor): List of discriminator outputs for generated samples.
        scale (float, optional): Scaling factor applied to losses beyond the midpoint. Default is 1.0.
    """
    midpoint = len(disc_real) // 2
    losses = []
    for i, (d_real, d_fake) in enumerate(zip(disc_real, disc_fake)):
        real_loss = (1 - d_real).pow(2).mean()
        fake_loss = d_fake.pow(2).mean()
        total_loss = real_loss + fake_loss
        if i >= midpoint:
            total_loss *= scale
        losses.append(total_loss)
    loss = sum(losses)
    return loss, None, None


def generator_loss_scaled(disc_outputs, scale=1.0):
    """
    Compute the scaled generator loss based on discriminator outputs.

    Args:
        disc_outputs (list of torch.Tensor): List of discriminator outputs for generated samples.
        scale (float, optional): Scaling factor applied to losses beyond the midpoint. Default is 1.0.
    """
    midpoint = len(disc_outputs) // 2
    losses = []
    for i, d_fake in enumerate(disc_outputs):
        loss_value = (1 - d_fake).pow(2).mean()
        if i >= midpoint:
            loss_value *= scale
        losses.append(loss_value)
    loss = sum(losses)
    return loss, None, None


def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
    """
    Compute the Kullback-Leibler divergence loss.

    Args:
        z_p (torch.Tensor): Latent variable z_p [b, h, t_t].
        logs_q (torch.Tensor): Log variance of q [b, h, t_t].
        m_p (torch.Tensor): Mean of p [b, h, t_t].
        logs_p (torch.Tensor): Log variance of p [b, h, t_t].
        z_mask (torch.Tensor): Mask for the latent variables [b, h, t_t].
    """
    kl = logs_p - logs_q - 0.5 + 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2 * logs_p)
    kl = (kl * z_mask).sum()
    loss = kl / z_mask.sum()
    return loss
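As a rough illustration of how these helpers fit together in a GAN vocoder training step, here is a minimal sketch with dummy tensors. It assumes the file is importable as rvc.train.losses; the project's actual training loop is not shown here.

```python
import torch
from rvc.train.losses import discriminator_loss, generator_loss, feature_loss

# Dummy outputs from two sub-discriminators, batch of 4 scores each.
d_real = [torch.rand(4, 1), torch.rand(4, 1)]
d_fake = [torch.rand(4, 1), torch.rand(4, 1)]

loss_d, _, _ = discriminator_loss(d_real, d_fake)  # pushes real -> 1, fake -> 0
loss_g, _ = generator_loss(d_fake)                 # pushes fake -> 1

# feature_loss expects per-discriminator lists of intermediate feature maps.
fmap_r = [[torch.rand(4, 8, 16)], [torch.rand(4, 8, 16)]]
fmap_g = [[torch.rand(4, 8, 16)], [torch.rand(4, 8, 16)]]
loss_fm = feature_loss(fmap_r, fmap_g)

print(loss_d.item(), loss_g.item(), loss_fm.item())
```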
rvc/train/mel_processing.py
ADDED
@@ -0,0 +1,234 @@
import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn


def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """
    Dynamic range compression using the natural log.

    Args:
        x (torch.Tensor): Input tensor.
        C (float, optional): Scaling factor. Defaults to 1.
        clip_val (float, optional): Minimum value for clamping. Defaults to 1e-5.
    """
    return torch.log(torch.clamp(x, min=clip_val) * C)


def dynamic_range_decompression_torch(x, C=1):
    """
    Dynamic range decompression using exp.

    Args:
        x (torch.Tensor): Input tensor.
        C (float, optional): Scaling factor. Defaults to 1.
    """
    return torch.exp(x) / C


def spectral_normalize_torch(magnitudes):
    """
    Spectral normalization using dynamic range compression.

    Args:
        magnitudes (torch.Tensor): Magnitude spectrogram.
    """
    return dynamic_range_compression_torch(magnitudes)


def spectral_de_normalize_torch(magnitudes):
    """
    Spectral de-normalization using dynamic range decompression.

    Args:
        magnitudes (torch.Tensor): Normalized spectrogram.
    """
    return dynamic_range_decompression_torch(magnitudes)


mel_basis = {}
hann_window = {}


def spectrogram_torch(y, n_fft, hop_size, win_size, center=False):
    """
    Compute the spectrogram of a signal using STFT.

    Args:
        y (torch.Tensor): Input signal.
        n_fft (int): FFT window size.
        hop_size (int): Hop size between frames.
        win_size (int): Window size.
        center (bool, optional): Whether to center the window. Defaults to False.
    """
    global hann_window
    dtype_device = str(y.dtype) + "_" + str(y.device)
    wnsize_dtype_device = str(win_size) + "_" + dtype_device
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
            dtype=y.dtype, device=y.device
        )

    y = torch.nn.functional.pad(
        y.unsqueeze(1),
        (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
        mode="reflect",
    )
    y = y.squeeze(1)

    spec = torch.stft(
        y,
        n_fft=n_fft,
        hop_length=hop_size,
        win_length=win_size,
        window=hann_window[wnsize_dtype_device],
        center=center,
        pad_mode="reflect",
        normalized=False,
        onesided=True,
        return_complex=True,
    )

    spec = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + 1e-6)

    return spec


def spec_to_mel_torch(spec, n_fft, num_mels, sample_rate, fmin, fmax):
    """
    Convert a spectrogram to a mel-spectrogram.

    Args:
        spec (torch.Tensor): Magnitude spectrogram.
        n_fft (int): FFT window size.
        num_mels (int): Number of mel frequency bins.
        sample_rate (int): Sampling rate of the audio signal.
        fmin (float): Minimum frequency.
        fmax (float): Maximum frequency.
    """
    global mel_basis
    dtype_device = str(spec.dtype) + "_" + str(spec.device)
    fmax_dtype_device = str(fmax) + "_" + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(
            sr=sample_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax
        )
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
            dtype=spec.dtype, device=spec.device
        )

    melspec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    melspec = spectral_normalize_torch(melspec)
    return melspec


def mel_spectrogram_torch(
    y, n_fft, num_mels, sample_rate, hop_size, win_size, fmin, fmax, center=False
):
    """
    Compute the mel-spectrogram of a signal.

    Args:
        y (torch.Tensor): Input signal.
        n_fft (int): FFT window size.
        num_mels (int): Number of mel frequency bins.
        sample_rate (int): Sampling rate of the audio signal.
        hop_size (int): Hop size between frames.
        win_size (int): Window size.
        fmin (float): Minimum frequency.
        fmax (float): Maximum frequency.
        center (bool, optional): Whether to center the window. Defaults to False.
    """
    spec = spectrogram_torch(y, n_fft, hop_size, win_size, center)

    melspec = spec_to_mel_torch(spec, n_fft, num_mels, sample_rate, fmin, fmax)

    return melspec


def compute_window_length(n_mels: int, sample_rate: int):
    f_min = 0
    f_max = sample_rate / 2
    window_length_seconds = 8 * n_mels / (f_max - f_min)
    window_length = int(window_length_seconds * sample_rate)
    return 2 ** (window_length.bit_length() - 1)


class MultiScaleMelSpectrogramLoss(torch.nn.Module):

    def __init__(
        self,
        sample_rate: int = 24000,
        n_mels: list[int] = [5, 10, 20, 40, 80, 160, 320, 480],
        loss_fn=torch.nn.L1Loss(),
    ):
        super().__init__()
        self.sample_rate = sample_rate
        self.loss_fn = loss_fn
        self.log_base = torch.log(torch.tensor(10.0))
        self.stft_params: list[tuple] = []
        self.hann_window: dict[int, torch.Tensor] = {}
        self.mel_banks: dict[int, torch.Tensor] = {}

        self.stft_params = [
            (mel, compute_window_length(mel, sample_rate), self.sample_rate // 100)
            for mel in n_mels
        ]

    def mel_spectrogram(
        self,
        wav: torch.Tensor,
        n_mels: int,
        window_length: int,
        hop_length: int,
    ):
        # IDs for caching
        dtype_device = str(wav.dtype) + "_" + str(wav.device)
        win_dtype_device = str(window_length) + "_" + dtype_device
        mel_dtype_device = str(n_mels) + "_" + dtype_device
        # caching hann window
        if win_dtype_device not in self.hann_window:
            self.hann_window[win_dtype_device] = torch.hann_window(
                window_length, device=wav.device, dtype=torch.float32
            )

        wav = wav.squeeze(1)  # -> torch(B, T)

        stft = torch.stft(
            wav.float(),
            n_fft=window_length,
            hop_length=hop_length,
            window=self.hann_window[win_dtype_device],
            return_complex=True,
        )  # -> torch (B, window_length // 2 + 1, (T - window_length)/hop_length + 1)

        magnitude = torch.sqrt(stft.real.pow(2) + stft.imag.pow(2) + 1e-6)

        # caching mel filter
        if mel_dtype_device not in self.mel_banks:
            self.mel_banks[mel_dtype_device] = torch.from_numpy(
                librosa_mel_fn(
                    sr=self.sample_rate,
                    n_mels=n_mels,
                    n_fft=window_length,
                    fmin=0,
                    fmax=None,
                )
            ).to(device=wav.device, dtype=torch.float32)

        mel_spectrogram = torch.matmul(
            self.mel_banks[mel_dtype_device], magnitude
        )  # torch(B, n_mels, stft.frames)
        return mel_spectrogram

    def forward(
        self, real: torch.Tensor, fake: torch.Tensor
    ):  # real: torch(B, 1, T) , fake: torch(B, 1, T)
        loss = 0.0
        for p in self.stft_params:
            real_mels = self.mel_spectrogram(real, *p)
            fake_mels = self.mel_spectrogram(fake, *p)
            real_logmels = torch.log(real_mels.clamp(min=1e-5)) / self.log_base
            fake_logmels = torch.log(fake_mels.clamp(min=1e-5)) / self.log_base
            loss += self.loss_fn(real_logmels, fake_logmels)
        return loss
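A short usage sketch for the two main entry points. The parameters below are illustrative only; in practice they come from the JSON configs and the training code, not from this example.

```python
import torch
from rvc.train.mel_processing import mel_spectrogram_torch, MultiScaleMelSpectrogramLoss

wav = torch.randn(2, 32000)  # batch of 2 one-second clips, illustrative 32 kHz rate
mel = mel_spectrogram_torch(
    wav, n_fft=1024, num_mels=80, sample_rate=32000,
    hop_size=320, win_size=1024, fmin=0.0, fmax=None,
)
print(mel.shape)  # torch.Size([2, 80, frames])

# Multi-resolution mel loss between a real and a generated waveform.
msml = MultiScaleMelSpectrogramLoss(sample_rate=32000)
real = torch.randn(2, 1, 32000)
fake = torch.randn(2, 1, 32000)
print(msml(real, fake))  # scalar tensor
```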
rvc/train/preprocess/preprocess.py
ADDED
@@ -0,0 +1,345 @@
import os
import sys
import time
from scipy import signal
from scipy.io import wavfile
import numpy as np
import concurrent.futures
from tqdm import tqdm
import json
from distutils.util import strtobool
import librosa
import multiprocessing
import noisereduce as nr
import soxr

now_directory = os.getcwd()
sys.path.append(now_directory)

from rvc.lib.utils import load_audio
from rvc.train.preprocess.slicer import Slicer

import logging

logging.getLogger("numba.core.byteflow").setLevel(logging.WARNING)
logging.getLogger("numba.core.ssa").setLevel(logging.WARNING)
logging.getLogger("numba.core.interpreter").setLevel(logging.WARNING)

OVERLAP = 0.3
PERCENTAGE = 3.0
MAX_AMPLITUDE = 0.9
ALPHA = 0.75
HIGH_PASS_CUTOFF = 48
SAMPLE_RATE_16K = 16000
RES_TYPE = "soxr_vhq"


class PreProcess:
    def __init__(self, sr: int, exp_dir: str):
        self.slicer = Slicer(
            sr=sr,
            threshold=-42,
            min_length=1500,
            min_interval=400,
            hop_size=15,
            max_sil_kept=500,
        )
        self.sr = sr
        self.b_high, self.a_high = signal.butter(
            N=5, Wn=HIGH_PASS_CUTOFF, btype="high", fs=self.sr
        )
        self.exp_dir = exp_dir
        self.device = "cpu"
        self.gt_wavs_dir = os.path.join(exp_dir, "sliced_audios")
        self.wavs16k_dir = os.path.join(exp_dir, "sliced_audios_16k")
        os.makedirs(self.gt_wavs_dir, exist_ok=True)
        os.makedirs(self.wavs16k_dir, exist_ok=True)

    def _normalize_audio(self, audio: np.ndarray):
        tmp_max = np.abs(audio).max()
        if tmp_max > 2.5:
            return None
        return (audio / tmp_max * (MAX_AMPLITUDE * ALPHA)) + (1 - ALPHA) * audio

    def process_audio_segment(
        self,
        normalized_audio: np.ndarray,
        sid: int,
        idx0: int,
        idx1: int,
    ):
        if normalized_audio is None:
            print(f"{sid}-{idx0}-{idx1}-filtered")
            return
        wavfile.write(
            os.path.join(self.gt_wavs_dir, f"{sid}_{idx0}_{idx1}.wav"),
            self.sr,
            normalized_audio.astype(np.float32),
        )
        audio_16k = librosa.resample(
            normalized_audio,
            orig_sr=self.sr,
            target_sr=SAMPLE_RATE_16K,
            res_type=RES_TYPE,
        )
        wavfile.write(
            os.path.join(self.wavs16k_dir, f"{sid}_{idx0}_{idx1}.wav"),
            SAMPLE_RATE_16K,
            audio_16k.astype(np.float32),
        )

    def simple_cut(
        self,
        audio: np.ndarray,
        sid: int,
        idx0: int,
        chunk_len: float,
        overlap_len: float,
    ):
        chunk_length = int(self.sr * chunk_len)
        overlap_length = int(self.sr * overlap_len)
        i = 0
        while i < len(audio):
            chunk = audio[i : i + chunk_length]
            if len(chunk) == chunk_length:
                # full sample rate for training
                wavfile.write(
                    os.path.join(
                        self.gt_wavs_dir,
                        f"{sid}_{idx0}_{i // (chunk_length - overlap_length)}.wav",
                    ),
                    self.sr,
                    chunk.astype(np.float32),
                )
                # 16 kHz for feature extraction
                chunk_16k = librosa.resample(
                    chunk, orig_sr=self.sr, target_sr=SAMPLE_RATE_16K, res_type=RES_TYPE
                )
                wavfile.write(
                    os.path.join(
                        self.wavs16k_dir,
                        f"{sid}_{idx0}_{i // (chunk_length - overlap_length)}.wav",
                    ),
                    SAMPLE_RATE_16K,
                    chunk_16k.astype(np.float32),
                )
            i += chunk_length - overlap_length

    def process_audio(
        self,
        path: str,
        idx0: int,
        sid: int,
        cut_preprocess: str,
        process_effects: bool,
        noise_reduction: bool,
        reduction_strength: float,
        chunk_len: float,
        overlap_len: float,
    ):
        audio_length = 0
        try:
            audio = load_audio(path, self.sr)
            audio_length = librosa.get_duration(y=audio, sr=self.sr)

            if process_effects:
                audio = signal.lfilter(self.b_high, self.a_high, audio)
                audio = self._normalize_audio(audio)
            if noise_reduction:
                audio = nr.reduce_noise(
                    y=audio, sr=self.sr, prop_decrease=reduction_strength
                )
            if cut_preprocess == "Skip":
                # no cutting
                self.process_audio_segment(
                    audio,
                    sid,
                    idx0,
                    0,
                )
            elif cut_preprocess == "Simple":
                # simple
                self.simple_cut(audio, sid, idx0, chunk_len, overlap_len)
            elif cut_preprocess == "Automatic":
                idx1 = 0
                # legacy
                for audio_segment in self.slicer.slice(audio):
                    i = 0
                    while True:
                        start = int(self.sr * (PERCENTAGE - OVERLAP) * i)
                        i += 1
                        if (
                            len(audio_segment[start:])
                            > (PERCENTAGE + OVERLAP) * self.sr
                        ):
                            tmp_audio = audio_segment[
                                start : start + int(PERCENTAGE * self.sr)
                            ]
                            self.process_audio_segment(
                                tmp_audio,
                                sid,
                                idx0,
                                idx1,
                            )
                            idx1 += 1
                        else:
                            tmp_audio = audio_segment[start:]
                            self.process_audio_segment(
                                tmp_audio,
                                sid,
                                idx0,
                                idx1,
                            )
                            idx1 += 1
                            break

        except Exception as error:
            print(f"Error processing audio: {error}")
        return audio_length


def format_duration(seconds):
    hours = int(seconds // 3600)
    minutes = int((seconds % 3600) // 60)
    seconds = int(seconds % 60)
    return f"{hours:02}:{minutes:02}:{seconds:02}"


def save_dataset_duration(file_path, dataset_duration):
    try:
        with open(file_path, "r") as f:
            data = json.load(f)
    except FileNotFoundError:
        data = {}

    formatted_duration = format_duration(dataset_duration)
    new_data = {
        "total_dataset_duration": formatted_duration,
        "total_seconds": dataset_duration,
    }
    data.update(new_data)

    with open(file_path, "w") as f:
        json.dump(data, f, indent=4)


def process_audio_wrapper(args):
    (
        pp,
        file,
        cut_preprocess,
        process_effects,
        noise_reduction,
        reduction_strength,
        chunk_len,
        overlap_len,
    ) = args
    file_path, idx0, sid = file
    return pp.process_audio(
        file_path,
        idx0,
        sid,
        cut_preprocess,
        process_effects,
        noise_reduction,
        reduction_strength,
        chunk_len,
        overlap_len,
    )


def preprocess_training_set(
    input_root: str,
    sr: int,
    num_processes: int,
    exp_dir: str,
    cut_preprocess: str,
    process_effects: bool,
    noise_reduction: bool,
    reduction_strength: float,
    chunk_len: float,
    overlap_len: float,
):
    start_time = time.time()
    pp = PreProcess(sr, exp_dir)
    print(f"Starting preprocess with {num_processes} processes...")

    files = []
    idx = 0

    for root, _, filenames in os.walk(input_root):
        try:
            sid = 0 if root == input_root else int(os.path.basename(root))
            for f in filenames:
                if f.lower().endswith((".wav", ".mp3", ".flac", ".ogg")):
                    files.append((os.path.join(root, f), idx, sid))
                    idx += 1
        except ValueError:
            print(
                f'Speaker ID folder is expected to be integer, got "{os.path.basename(root)}" instead.'
            )

    # print(f"Number of files: {len(files)}")
    audio_length = []
    with tqdm(total=len(files)) as pbar:
        with concurrent.futures.ProcessPoolExecutor(
            max_workers=num_processes
        ) as executor:
            futures = [
                executor.submit(
                    process_audio_wrapper,
                    (
                        pp,
                        file,
                        cut_preprocess,
                        process_effects,
                        noise_reduction,
                        reduction_strength,
                        chunk_len,
                        overlap_len,
                    ),
                )
                for file in files
            ]
            for future in concurrent.futures.as_completed(futures):
                audio_length.append(future.result())
                pbar.update(1)

    audio_length = sum(audio_length)
    save_dataset_duration(
        os.path.join(exp_dir, "model_info.json"), dataset_duration=audio_length
    )
    elapsed_time = time.time() - start_time
    print(
        f"Preprocess completed in {elapsed_time:.2f} seconds on {format_duration(audio_length)} of audio."
    )


if __name__ == "__main__":
    experiment_directory = str(sys.argv[1])
    input_root = str(sys.argv[2])
    sample_rate = int(sys.argv[3])
    num_processes = sys.argv[4]
    if num_processes.lower() == "none":
        num_processes = multiprocessing.cpu_count()
    else:
        num_processes = int(num_processes)
    cut_preprocess = str(sys.argv[5])
    process_effects = strtobool(sys.argv[6])
    noise_reduction = strtobool(sys.argv[7])
    reduction_strength = float(sys.argv[8])
    chunk_len = float(sys.argv[9])
    overlap_len = float(sys.argv[10])

    preprocess_training_set(
        input_root,
        sample_rate,
        num_processes,
        experiment_directory,
        cut_preprocess,
        process_effects,
        noise_reduction,
        reduction_strength,
        chunk_len,
        overlap_len,
    )
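The __main__ block above expects ten positional arguments. A hypothetical invocation follows; the paths and values are placeholders, but the argument order mirrors the sys.argv parsing.

```python
import subprocess

subprocess.run(
    [
        "python", "rvc/train/preprocess/preprocess.py",
        "logs/my_model",      # experiment_directory (placeholder)
        "datasets/my_voice",  # input_root with per-speaker subfolders (placeholder)
        "40000",              # sample_rate
        "none",               # num_processes -> falls back to multiprocessing.cpu_count()
        "Automatic",          # cut_preprocess: "Skip" | "Simple" | "Automatic"
        "True",               # process_effects (high-pass filter + normalization)
        "False",              # noise_reduction
        "0.7",                # reduction_strength
        "3.0",                # chunk_len in seconds (used by "Simple")
        "0.3",                # overlap_len in seconds (used by "Simple")
    ],
    check=True,
)
```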
rvc/train/preprocess/slicer.py
ADDED
@@ -0,0 +1,235 @@
import numpy as np


class Slicer:
    """
    A class for slicing audio waveforms into segments based on silence detection.

    Attributes:
        sr (int): Sampling rate of the audio waveform.
        threshold (float): RMS threshold for silence detection, in dB.
        min_length (int): Minimum length of a segment, in milliseconds.
        min_interval (int): Minimum interval between segments, in milliseconds.
        hop_size (int): Hop size for RMS calculation, in milliseconds.
        max_sil_kept (int): Maximum length of silence to keep at the beginning or end of a segment, in milliseconds.

    Methods:
        slice(waveform): Slices the given waveform into segments.
    """

    def __init__(
        self,
        sr: int,
        threshold: float = -40.0,
        min_length: int = 5000,
        min_interval: int = 300,
        hop_size: int = 20,
        max_sil_kept: int = 5000,
    ):
        """
        Initializes a Slicer object.

        Args:
            sr (int): Sampling rate of the audio waveform.
            threshold (float, optional): RMS threshold for silence detection, in dB. Defaults to -40.0.
            min_length (int, optional): Minimum length of a segment, in milliseconds. Defaults to 5000.
            min_interval (int, optional): Minimum interval between segments, in milliseconds. Defaults to 300.
            hop_size (int, optional): Hop size for RMS calculation, in milliseconds. Defaults to 20.
            max_sil_kept (int, optional): Maximum length of silence to keep at the beginning or end of a segment, in milliseconds. Defaults to 5000.

        Raises:
            ValueError: If the input parameters are not valid.
        """
        if not min_length >= min_interval >= hop_size:
            raise ValueError("min_length >= min_interval >= hop_size is required")
        if not max_sil_kept >= hop_size:
            raise ValueError("max_sil_kept >= hop_size is required")

        # Convert time-based parameters to sample-based parameters
        min_interval = sr * min_interval / 1000
        self.threshold = 10 ** (threshold / 20.0)
        self.hop_size = round(sr * hop_size / 1000)
        self.win_size = min(round(min_interval), 4 * self.hop_size)
        self.min_length = round(sr * min_length / 1000 / self.hop_size)
        self.min_interval = round(min_interval / self.hop_size)
        self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)

    def _apply_slice(self, waveform, begin, end):
        """
        Applies a slice to the waveform.

        Args:
            waveform (numpy.ndarray): The waveform to slice.
            begin (int): Start frame index.
            end (int): End frame index.
        """
        start_idx = begin * self.hop_size
        if len(waveform.shape) > 1:
            end_idx = min(waveform.shape[1], end * self.hop_size)
            return waveform[:, start_idx:end_idx]
        else:
            end_idx = min(waveform.shape[0], end * self.hop_size)
            return waveform[start_idx:end_idx]

    def slice(self, waveform):
        """
        Slices the given waveform into segments.

        Args:
            waveform (numpy.ndarray): The waveform to slice.
        """
        # Calculate RMS for each frame
        samples = waveform.mean(axis=0) if len(waveform.shape) > 1 else waveform
        if samples.shape[0] <= self.min_length:
            return [waveform]

        rms_list = get_rms(
            y=samples, frame_length=self.win_size, hop_length=self.hop_size
        ).squeeze(0)

        # Detect silence segments and mark them
        sil_tags = []
        silence_start, clip_start = None, 0
        for i, rms in enumerate(rms_list):
            # If current frame is silent
            if rms < self.threshold:
                if silence_start is None:
                    silence_start = i
                continue

            # If current frame is not silent
            if silence_start is None:
                continue

            # Check if current silence segment is leading silence or need to slice
            is_leading_silence = silence_start == 0 and i > self.max_sil_kept
            need_slice_middle = (
                i - silence_start >= self.min_interval
                and i - clip_start >= self.min_length
            )

            # If not leading silence and not need to slice middle
            if not is_leading_silence and not need_slice_middle:
                silence_start = None
                continue

            # Handle different cases of silence segments
            if i - silence_start <= self.max_sil_kept:
                # Short silence
                pos = rms_list[silence_start : i + 1].argmin() + silence_start
                if silence_start == 0:
                    sil_tags.append((0, pos))
                else:
                    sil_tags.append((pos, pos))
                clip_start = pos
            elif i - silence_start <= self.max_sil_kept * 2:
                # Medium silence
                pos = rms_list[
                    i - self.max_sil_kept : silence_start + self.max_sil_kept + 1
                ].argmin()
                pos += i - self.max_sil_kept
                pos_l = (
                    rms_list[
                        silence_start : silence_start + self.max_sil_kept + 1
                    ].argmin()
                    + silence_start
                )
                pos_r = (
                    rms_list[i - self.max_sil_kept : i + 1].argmin()
                    + i
                    - self.max_sil_kept
                )
                if silence_start == 0:
                    sil_tags.append((0, pos_r))
                    clip_start = pos_r
                else:
                    sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
                    clip_start = max(pos_r, pos)
            else:
                # Long silence
                pos_l = (
                    rms_list[
                        silence_start : silence_start + self.max_sil_kept + 1
                    ].argmin()
                    + silence_start
                )
                pos_r = (
                    rms_list[i - self.max_sil_kept : i + 1].argmin()
                    + i
                    - self.max_sil_kept
                )
                if silence_start == 0:
                    sil_tags.append((0, pos_r))
                else:
                    sil_tags.append((pos_l, pos_r))
                clip_start = pos_r
            silence_start = None

        # Handle trailing silence
        total_frames = rms_list.shape[0]
        if (
            silence_start is not None
            and total_frames - silence_start >= self.min_interval
        ):
            silence_end = min(total_frames, silence_start + self.max_sil_kept)
            pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start
            sil_tags.append((pos, total_frames + 1))

        # Extract segments based on silence tags
        if not sil_tags:
            return [waveform]
        else:
            chunks = []
            if sil_tags[0][0] > 0:
                chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0]))

            for i in range(len(sil_tags) - 1):
                chunks.append(
                    self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0])
                )

            if sil_tags[-1][1] < total_frames:
                chunks.append(
                    self._apply_slice(waveform, sil_tags[-1][1], total_frames)
                )

            return chunks


def get_rms(
    y,
    frame_length=2048,
    hop_length=512,
    pad_mode="constant",
):
    """
    Calculates the root mean square (RMS) of a waveform.

    Args:
        y (numpy.ndarray): The waveform.
        frame_length (int, optional): The length of the frame in samples. Defaults to 2048.
        hop_length (int, optional): The hop length between frames in samples. Defaults to 512.
        pad_mode (str, optional): The padding mode used for the waveform. Defaults to "constant".
    """
    padding = (int(frame_length // 2), int(frame_length // 2))
    y = np.pad(y, padding, mode=pad_mode)

    axis = -1
    out_strides = y.strides + tuple([y.strides[axis]])
    x_shape_trimmed = list(y.shape)
    x_shape_trimmed[axis] -= frame_length - 1
    out_shape = tuple(x_shape_trimmed) + tuple([frame_length])
    xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides)

    if axis < 0:
        target_axis = axis - 1
    else:
        target_axis = axis + 1

    xw = np.moveaxis(xw, -1, target_axis)
    slices = [slice(None)] * xw.ndim
    slices[axis] = slice(0, None, hop_length)
    x = xw[tuple(slices)]

    power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True)
    return np.sqrt(power)
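A minimal sketch of the slicer on synthetic audio, using the same parameters preprocess.py passes to it above:

```python
import numpy as np
from rvc.train.preprocess.slicer import Slicer

sr = 40000
# Three repetitions of half a second of noise followed by one second of silence.
audio = np.concatenate([np.random.randn(sr // 2) * 0.5, np.zeros(sr)] * 3).astype(np.float32)

slicer = Slicer(
    sr=sr, threshold=-42, min_length=1500, min_interval=400, hop_size=15, max_sil_kept=500
)
segments = slicer.slice(audio)
print(len(segments), [round(len(s) / sr, 2) for s in segments])  # count and durations in seconds
```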
rvc/train/process/change_info.py
ADDED
@@ -0,0 +1,22 @@
import os
import torch


def change_info(path, info, name):
    try:
        ckpt = torch.load(path, map_location="cpu", weights_only=True)
        ckpt["info"] = info

        if not name:
            name = os.path.splitext(os.path.basename(path))[0]

        target_dir = os.path.join("logs", name)
        os.makedirs(target_dir, exist_ok=True)

        torch.save(ckpt, os.path.join(target_dir, f"{name}.pth"))

        return "Success."

    except Exception as error:
        print(f"An error occurred while changing the info: {error}")
        return f"Error: {error}"
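And a hypothetical call, assuming a checkpoint already exists at the given path:

```python
from rvc.train.process.change_info import change_info

result = change_info(
    "logs/my_model/my_model.pth",               # existing checkpoint (placeholder path)
    "Trained for 300 epochs on 40 kHz data",    # note stored under ckpt["info"]
    "my_model_annotated",                       # saved to logs/my_model_annotated/my_model_annotated.pth
)
print(result)  # "Success." or "Error: ..."
```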