Commit · 8b5626c
1 Parent(s): d9dff52

Add voices pickle file

Files changed:
- compute.py +138 -0
- voices.pkl +3 -0
compute.py
ADDED
@@ -0,0 +1,138 @@
from cached_path import cached_path

# from dp.phonemizer import Phonemizer
print("NLTK")
import nltk
nltk.download('punkt')
print("SCIPY")
from scipy.io.wavfile import write
print("TORCH STUFF")
import torch
print("START")
torch.manual_seed(0)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

import random
random.seed(0)

import numpy as np
np.random.seed(0)

# load packages
import time
import random
import yaml
from munch import Munch
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torchaudio
import librosa
from nltk.tokenize import word_tokenize

from models import *
from utils import *
from text_utils import TextCleaner
textcleaner = TextCleaner()


to_mel = torchaudio.transforms.MelSpectrogram(
    n_mels=80, n_fft=2048, win_length=1200, hop_length=300)
mean, std = -4, 4  # log-mel normalization constants

def length_to_mask(lengths):
    # boolean mask that is True at padded positions, given a batch of lengths
    mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
    mask = torch.gt(mask+1, lengths.unsqueeze(1))
    return mask

def preprocess(wave):
    # waveform -> normalized log-mel spectrogram
    wave_tensor = torch.from_numpy(wave).float()
    mel_tensor = to_mel(wave_tensor)
    mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - mean) / std
    return mel_tensor

def compute_style(path):
    # encode a reference wav into a style vector (acoustic and prosodic halves concatenated)
    wave, sr = librosa.load(path, sr=24000)
    audio, index = librosa.effects.trim(wave, top_db=30)
    if sr != 24000:
        # effectively unreachable: librosa.load above already resamples to 24 kHz
        audio = librosa.resample(audio, orig_sr=sr, target_sr=24000)
    mel_tensor = preprocess(audio).to(device)

    with torch.no_grad():
        ref_s = model.style_encoder(mel_tensor.unsqueeze(1))
        ref_p = model.predictor_encoder(mel_tensor.unsqueeze(1))

    return torch.cat([ref_s, ref_p], dim=1)

device = 'cpu'
if torch.cuda.is_available():
    device = 'cuda'
elif torch.backends.mps.is_available():
    print("MPS would be available but cannot be used rn")
    # device = 'mps'


# config = yaml.safe_load(open("Models/LibriTTS/config.yml"))
config = yaml.safe_load(open(str(cached_path("hf://yl4579/StyleTTS2-LibriTTS/Models/LibriTTS/config.yml"))))

# load pretrained ASR model
ASR_config = config.get('ASR_config', False)
ASR_path = config.get('ASR_path', False)
text_aligner = load_ASR_models(ASR_path, ASR_config)

# load pretrained F0 model
F0_path = config.get('F0_path', False)
pitch_extractor = load_F0_models(F0_path)

# load BERT model
from Utils.PLBERT.util import load_plbert
BERT_path = config.get('PLBERT_dir', False)
plbert = load_plbert(BERT_path)

model_params = recursive_munch(config['model_params'])
model = build_model(model_params, text_aligner, pitch_extractor, plbert)
_ = [model[key].eval() for key in model]
_ = [model[key].to(device) for key in model]

# params_whole = torch.load("Models/LibriTTS/epochs_2nd_00020.pth", map_location='cpu')
params_whole = torch.load(str(cached_path("hf://yl4579/StyleTTS2-LibriTTS/Models/LibriTTS/epochs_2nd_00020.pth")), map_location='cpu')
params = params_whole['net']

for key in model:
    if key in params:
        print('%s loaded' % key)
        try:
            model[key].load_state_dict(params[key])
        except RuntimeError:
            # weights saved from DataParallel carry a `module.` prefix; strip it and retry
            from collections import OrderedDict
            state_dict = params[key]
            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                name = k[7:]  # remove `module.`
                new_state_dict[name] = v
            # load params
            model[key].load_state_dict(new_state_dict, strict=False)
            # except:
            #     _load(params[key], model[key])
_ = [model[key].eval() for key in model]

from Modules.diffusion.sampler import DiffusionSampler, ADPM2Sampler, KarrasSchedule

sampler = DiffusionSampler(
    model.diffusion.diffusion,
    sampler=ADPM2Sampler(),
    sigma_schedule=KarrasSchedule(sigma_min=0.0001, sigma_max=3.0, rho=9.0),  # empirical parameters
    clamp=False
)
voicelist = ['f-us-1', 'f-us-2', 'f-us-3', 'f-us-4', 'm-us-1', 'm-us-2', 'm-us-3', 'm-us-4']
voices = {}
# todo: cache computed style, load using pickle
for v in voicelist:
    print(f"Loading voice {v}")
    voices[v] = compute_style(f'voices/{v}.wav')
import pickle
with open('voices.pkl', 'wb') as f:
    pickle.dump(voices, f)
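The todo above says the computed styles should be cached and loaded back with pickle; the script only does the dumping half. A minimal loading sketch (not part of this commit; the voice names come from voicelist above):

    import pickle

    with open('voices.pkl', 'rb') as f:
        voices = pickle.load(f)  # dict: voice name -> style tensor
    ref = voices['f-us-1']

Note that the style tensors are pickled on whatever device was active when compute_style ran; if they were computed on CUDA, unpickling on a CPU-only machine will fail, so moving them to CPU before dumping is the safer choice.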
voices.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:58e11e1d6726c8992f5325aca8b381ad37facbd7380ebb5f5e04d77a017b4ee3
size 10739
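voices.pkl is tracked with Git LFS, so the diff records only the pointer file above. After pulling the real pickle, its contents can be checked against the pointer's hash and size, e.g.:

    import hashlib

    with open('voices.pkl', 'rb') as f:
        data = f.read()
    assert len(data) == 10739
    assert hashlib.sha256(data).hexdigest() == '58e11e1d6726c8992f5325aca8b381ad37facbd7380ebb5f5e04d77a017b4ee3'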