# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import OrderedDict
from statistics import mode
from typing import Dict, Optional
import torch
from nemo.collections.asr.parts.utils.offline_clustering import get_argmin_mat
from nemo.collections.asr.parts.utils.speaker_utils import convert_rttm_line, prepare_split_data
from nemo.collections.common.parts.preprocessing.collections import DiarizationSpeechLabel
from nemo.core.classes import Dataset
from nemo.core.neural_types import AudioSignal, EncodedRepresentation, LengthsType, NeuralType, ProbsType, SpectrogramType
def get_scale_mapping_list(uniq_timestamps):
"""
Call get_argmin_mat function to find the index of the non-base-scale segment that is closest to the
given base-scale segment. For each scale and each segment, a base-scale segment is assigned.
Args:
uniq_timestamps: (dict)
The dictionary containing embeddings, timestamps and multiscale weights.
If uniq_timestamps contains only one scale, single scale diarization is performed.
Returns:
scale_mapping_argmat (torch.tensor):
The element at the m-th row and the n-th column of the scale mapping matrix indicates the (m+1)-th scale
segment index which has the closest center distance to the (n+1)-th segment in the base scale.
- Example:
`scale_mapping_argmat[2][101] = 85`
In the above example, the 86th segment in the 3rd scale (python index is 2) is mapped to the 102nd
segment in the base scale. Thus, longer segments are bound to have more repeating indices, since
multiple base-scale segments (the base scale has the shortest segment length) fall into the range of
a longer segment. At the same time, each row contains N indices, where N is the number of segments
in the base scale (i.e., the finest scale).
"""
timestamps_in_scales = []
for key, val in uniq_timestamps['scale_dict'].items():
timestamps_in_scales.append(torch.tensor(val['time_stamps']))
session_scale_mapping_list = get_argmin_mat(timestamps_in_scales)
scale_mapping_argmat = [[] for _ in range(len(uniq_timestamps['scale_dict'].keys()))]
for scale_idx in range(len(session_scale_mapping_list)):
scale_mapping_argmat[scale_idx] = session_scale_mapping_list[scale_idx]
scale_mapping_argmat = torch.stack(scale_mapping_argmat)
return scale_mapping_argmat
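# Minimal illustrative sketch of the argmin-based mapping that `get_scale_mapping_list` above relies on:
# for every base-scale segment, pick the coarser-scale segment whose center is closest. The helper name
# `_demo_scale_mapping_sketch` and the toy timestamps are assumptions for illustration only, not NeMo APIs.
def _demo_scale_mapping_sketch():
    import torch

    # Toy timestamps in seconds: one coarse scale (1.5 s segments) and one base scale (0.5 s segments).
    coarse = torch.tensor([[0.0, 1.5], [0.75, 2.25], [1.5, 3.0]])
    base = torch.tensor([[0.0, 0.5], [0.5, 1.0], [1.0, 1.5], [1.5, 2.0], [2.0, 2.5], [2.5, 3.0]])

    # Segment centers for each scale.
    coarse_centers = coarse.mean(dim=1)  # shape: (num_coarse_segments,)
    base_centers = base.mean(dim=1)      # shape: (num_base_segments,)

    # For each base-scale segment, find the index of the coarse segment with the closest center.
    dist = torch.abs(coarse_centers.unsqueeze(1) - base_centers.unsqueeze(0))
    mapping = torch.argmin(dist, dim=0)  # shape: (num_base_segments,)
    return mapping  # tensor([0, 0, 1, 1, 2, 2]) for the toy values above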
def extract_seg_info_from_rttm(uniq_id, rttm_lines, mapping_dict=None, target_spks=None):
"""
Get RTTM lines containing speaker labels, start time and end time. target_spks contains two targeted
speaker indices for creating groundtruth label files. Only speakers in target_spks variable will be
included in the output lists.
Args:
uniq_id (str):
Unique file ID that refers to an input audio file and corresponding RTTM (Annotation) file.
rttm_lines (list):
List containing RTTM lines in str format.
mapping_dict (dict):
Mapping between the estimated speakers and the speakers in the ground-truth annotation.
`mapping_dict` variable is only provided when the inference mode is running in sequence-eval mode.
Sequence eval mode uses the mapping between the estimated speakers and the speakers in ground-truth annotation.
Returns:
rttm_tup (tuple):
Tuple containing lists of start time, end time and speaker labels.
"""
stt_list, end_list, speaker_list, pairwise_infer_spks = [], [], [], []
if target_spks:
inv_map = {v: k for k, v in mapping_dict.items()}
for spk_idx in target_spks:
spk_str = f'speaker_{spk_idx}'
if spk_str in inv_map:
pairwise_infer_spks.append(inv_map[spk_str])
for rttm_line in rttm_lines:
start, end, speaker = convert_rttm_line(rttm_line)
if target_spks is None or speaker in pairwise_infer_spks:
end_list.append(end)
stt_list.append(start)
speaker_list.append(speaker)
rttm_tup = (stt_list, end_list, speaker_list)
return rttm_tup
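# Small illustration of the RTTM fields consumed by `extract_seg_info_from_rttm` above. The parsing below
# is a simplified sketch of what a helper such as `convert_rttm_line` returns (start, end, speaker); it is
# an assumption for illustration, not the NeMo implementation.
def _demo_parse_rttm_line(rttm_line: str = "SPEAKER session0 1 12.34 2.50 <NA> <NA> speaker_0 <NA> <NA>"):
    fields = rttm_line.strip().split()
    start = float(fields[3])        # segment start time in seconds
    end = start + float(fields[4])  # RTTM stores duration, so end = start + duration
    speaker = fields[7]             # speaker label, e.g. "speaker_0"
    return start, end, speaker      # -> (12.34, 14.84, "speaker_0")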
def assign_frame_level_spk_vector(rttm_timestamps, round_digits, frame_per_sec, target_spks, min_spks=2):
"""
Create a multi-dimensional vector sequence containing speaker timestamp information in RTTM.
The unit-length is the frame shift length of the acoustic feature. The feature-level annotations
`fr_level_target` will later be converted to base-segment level diarization label.
Args:
rttm_timestamps (list):
List containing start and end time for each speaker segment label.
stt_list, end_list and speaker_list are contained.
frame_per_sec (int):
Number of feature frames per second. This quantity is determined by window_stride variable in preprocessing module.
target_spks (tuple):
Speaker indices that are generated from combinations. If there are only one or two speakers,
only a single target_spks variable is generated.
Returns:
fr_level_target (torch.tensor):
Tensor containing label for each feature level frame.
"""
stt_list, end_list, speaker_list = rttm_timestamps
if len(speaker_list) == 0:
return None
else:
sorted_speakers = sorted(list(set(speaker_list)))
total_fr_len = int(max(end_list) * (10 ** round_digits))
spk_num = max(len(sorted_speakers), min_spks)
speaker_mapping_dict = {rttm_key: x_int for x_int, rttm_key in enumerate(sorted_speakers)}
fr_level_target = torch.zeros(total_fr_len, spk_num)
# If RTTM is not provided, then there is no speaker mapping dict in target_spks.
# Thus, return a zero-filled tensor as a placeholder.
for count, (stt, end, spk_rttm_key) in enumerate(zip(stt_list, end_list, speaker_list)):
stt, end = round(stt, round_digits), round(end, round_digits)
spk = speaker_mapping_dict[spk_rttm_key]
stt_fr, end_fr = int(round(stt, 2) * frame_per_sec), int(round(end, round_digits) * frame_per_sec)
fr_level_target[stt_fr:end_fr, spk] = 1
return fr_level_target
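# Minimal sketch of the frame-level target construction performed by `assign_frame_level_spk_vector` above:
# speech regions from the RTTM are painted as 1s on a (num_frames, num_speakers) grid. The frame rate of
# 100 frames/sec (10 ms hop) and the toy timestamps are illustrative assumptions.
def _demo_frame_level_target():
    import torch

    frame_per_sec = 100  # assumed 10 ms feature hop
    stt_list, end_list, speaker_list = [0.0, 0.4], [0.5, 0.9], ['speaker_0', 'speaker_1']
    speakers = sorted(set(speaker_list))
    spk_to_idx = {spk: idx for idx, spk in enumerate(speakers)}

    total_frames = int(max(end_list) * frame_per_sec)
    target = torch.zeros(total_frames, len(speakers))
    for stt, end, spk in zip(stt_list, end_list, speaker_list):
        target[int(stt * frame_per_sec): int(end * frame_per_sec), spk_to_idx[spk]] = 1
    return target  # frames 40-49 have both speakers active (overlapped speech)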
class _AudioMSDDTrainDataset(Dataset):
"""
Dataset class that loads a json file containing paths to audio files,
RTTM files and number of speakers. This Dataset class is designed for
training or fine-tuning speaker embedding extractor and diarization decoder
at the same time.
Example:
{"audio_filepath": "/path/to/audio_0.wav", "num_speakers": 2,
"rttm_filepath": "/path/to/diar_label_0.rttm}
...
{"audio_filepath": "/path/to/audio_n.wav", "num_speakers": 2,
"rttm_filepath": "/path/to/diar_label_n.rttm}
Args:
manifest_filepath (str):
Path to input manifest json files.
multiscale_args_dict (dict):
Dictionary containing the parameters for multiscale segmentation and clustering.
emb_dir (str):
Path to a temporary folder where segmentation information for embedding extraction is saved.
soft_label_thres (float):
Threshold that determines the label of each segment based on RTTM file information.
featurizer:
Featurizer instance for generating features from the raw waveform.
window_stride (float):
Window stride for acoustic feature. This value is used for calculating the numbers of feature-level frames.
emb_batch_size (int):
Number of embedding vectors that are trained with attached computational graphs.
pairwise_infer (bool):
This variable should be True if dataloader is created for an inference task.
random_flip (bool):
If True, the two speaker labels and the corresponding input signals are randomly flipped every epoch during training.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports."""
output_types = {
"features": NeuralType(('B', 'T'), AudioSignal()),
"feature_length": NeuralType(('B'), LengthsType()),
"ms_seg_timestamps": NeuralType(('B', 'C', 'T', 'D'), LengthsType()),
"ms_seg_counts": NeuralType(('B', 'C'), LengthsType()),
"clus_label_index": NeuralType(('B', 'T'), LengthsType()),
"scale_mapping": NeuralType(('B', 'C', 'T'), LengthsType()),
"targets": NeuralType(('B', 'T', 'C'), ProbsType()),
}
return output_types
def __init__(
self,
*,
manifest_filepath: str,
multiscale_args_dict: Dict,
emb_dir: str,
soft_label_thres: float,
featurizer,
window_stride,
emb_batch_size,
pairwise_infer: bool,
random_flip: bool = True,
global_rank: int = 0,
):
super().__init__()
self.collection = DiarizationSpeechLabel(
manifests_files=manifest_filepath.split(','),
emb_dict=None,
clus_label_dict=None,
pairwise_infer=pairwise_infer,
)
self.featurizer = featurizer
self.multiscale_args_dict = multiscale_args_dict
self.emb_dir = emb_dir
self.round_digits = 2
self.decim = 10 ** self.round_digits
self.soft_label_thres = soft_label_thres
self.pairwise_infer = pairwise_infer
self.max_spks = 2
self.frame_per_sec = int(1 / window_stride)
self.emb_batch_size = emb_batch_size
self.random_flip = random_flip
self.global_rank = global_rank
self.manifest_filepath = manifest_filepath
self.multiscale_timestamp_dict = prepare_split_data(
self.manifest_filepath, self.emb_dir, self.multiscale_args_dict, self.global_rank,
)
def __len__(self):
return len(self.collection)
def assign_labels_to_longer_segs(self, uniq_id, base_scale_clus_label):
"""
Assign the generated speaker labels from the base scale (the finest scale) to the longer scales.
This process is needed to get the cluster labels for each scale. The cluster labels are needed to
calculate the cluster-average speaker embedding for each scale.
Args:
uniq_id (str):
Unique sample ID for training.
base_scale_clus_label (torch.tensor):
Tensor variable containing the speaker labels for the base-scale segments.
Returns:
per_scale_clus_label (torch.tensor):
Tensor variable containing the speaker labels for each segment in each scale.
Note that the total length of the speaker label sequence differs over scale since
each scale has a different number of segments for the same session.
scale_mapping (torch.tensor):
Matrix containing the segment indices of each scale. scale_mapping is necessary for reshaping the
multiscale embeddings to form an input matrix for the MSDD model.
"""
per_scale_clus_label = []
self.scale_n = len(self.multiscale_timestamp_dict[uniq_id]['scale_dict'])
uniq_scale_mapping = get_scale_mapping_list(self.multiscale_timestamp_dict[uniq_id])
for scale_index in range(self.scale_n):
new_clus_label = []
scale_seq_len = len(self.multiscale_timestamp_dict[uniq_id]["scale_dict"][scale_index]["time_stamps"])
for seg_idx in range(scale_seq_len):
if seg_idx in uniq_scale_mapping[scale_index]:
seg_clus_label = mode(base_scale_clus_label[uniq_scale_mapping[scale_index] == seg_idx])
else:
seg_clus_label = 0 if len(new_clus_label) == 0 else new_clus_label[-1]
new_clus_label.append(seg_clus_label)
per_scale_clus_label.extend(new_clus_label)
per_scale_clus_label = torch.tensor(per_scale_clus_label)
return per_scale_clus_label, uniq_scale_mapping
def get_diar_target_labels(self, uniq_id, sample, fr_level_target):
"""
Convert the frame-level diarization target variable into a segment-level target variable. Since the granularity is reduced
from frame level (10ms) to segment level (100ms~500ms), a threshold value `soft_label_thres` is needed to determine
the label of each segment based on the overlap between a segment range (start and end time) and the frame-level target
variable. A minimal sketch of this thresholding step is given after this class.
Args:
uniq_id (str):
Unique file ID that refers to an input audio file and corresponding RTTM (Annotation) file.
sample:
`DiarizationSpeechLabel` instance containing sample information such as audio filepath and RTTM filepath.
fr_level_target (torch.tensor):
Tensor containing label for each feature-level frame.
Returns:
seg_target (torch.tensor):
Tensor containing binary speaker labels for base-scale segments.
base_clus_label (torch.tensor):
Representative speaker label for each segment. This variable only has one speaker label for each base-scale segment.
-1 means that there is no corresponding speaker in the target_spks tuple.
"""
seg_target_list, base_clus_label = [], []
self.scale_n = len(self.multiscale_timestamp_dict[uniq_id]['scale_dict'])
subseg_time_stamp_list = self.multiscale_timestamp_dict[uniq_id]["scale_dict"][self.scale_n - 1]["time_stamps"]
for (seg_stt, seg_end) in subseg_time_stamp_list:
seg_stt_fr, seg_end_fr = int(seg_stt * self.frame_per_sec), int(seg_end * self.frame_per_sec)
soft_label_vec_sess = torch.sum(fr_level_target[seg_stt_fr:seg_end_fr, :], axis=0) / (
seg_end_fr - seg_stt_fr
)
label_int_sess = torch.argmax(soft_label_vec_sess)
soft_label_vec = soft_label_vec_sess.unsqueeze(0)[:, sample.target_spks].squeeze()
if label_int_sess in sample.target_spks and torch.sum(soft_label_vec_sess) > 0:
label_int = sample.target_spks.index(label_int_sess)
else:
label_int = -1
label_vec = (soft_label_vec > self.soft_label_thres).float()
seg_target_list.append(label_vec.detach())
base_clus_label.append(label_int)
seg_target = torch.stack(seg_target_list)
base_clus_label = torch.tensor(base_clus_label)
return seg_target, base_clus_label
def parse_rttm_for_ms_targets(self, sample):
"""
Generate target tensor variable by extracting groundtruth diarization labels from an RTTM file.
This function converts (start, end, speaker_id) format into base-scale (the finest scale) segment level
diarization label in a matrix form.
Example of seg_target:
[[0., 1.], [0., 1.], [1., 1.], [1., 0.], [1., 0.], ..., [0., 1.]]
Args:
sample:
`DiarizationSpeechLabel` instance containing sample information such as audio filepath and RTTM filepath.
target_spks (tuple):
Speaker indices that are generated from combinations. If there are only one or two speakers,
only a single target_spks tuple is generated.
Returns:
clus_label_index (torch.tensor):
Groundtruth clustering label (cluster index for each segment) from RTTM files for training purpose.
seg_target (torch.tensor):
Tensor variable containing hard-labels of speaker activity in each base-scale segment.
scale_mapping (torch.tensor):
Matrix containing the segment indices of each scale. scale_mapping is necessary for reshaping the
multiscale embeddings to form an input matrix for the MSDD model.
"""
rttm_lines = open(sample.rttm_file).readlines()
uniq_id = self.get_uniq_id_with_range(sample)
rttm_timestamps = extract_seg_info_from_rttm(uniq_id, rttm_lines)
fr_level_target = assign_frame_level_spk_vector(
rttm_timestamps, self.round_digits, self.frame_per_sec, target_spks=sample.target_spks
)
seg_target, base_clus_label = self.get_diar_target_labels(uniq_id, sample, fr_level_target)
clus_label_index, scale_mapping = self.assign_labels_to_longer_segs(uniq_id, base_clus_label)
return clus_label_index, seg_target, scale_mapping
def get_uniq_id_with_range(self, sample, deci=3):
"""
Generate a unique training sample ID from the unique file ID, offset and duration. The unique ID with the appended
start and end times is required for identifying the sample, since multiple short audio samples are generated from a
single audio file. The start time and end time of the audio stream use millisecond units if `deci=3`.
Args:
sample:
`DiarizationSpeechLabel` instance from collections.
Returns:
uniq_id (str):
Unique sample ID which includes start and end time of the audio stream.
Example: abc1001_3122_6458
"""
bare_uniq_id = os.path.splitext(os.path.basename(sample.rttm_file))[0]
offset = str(int(round(sample.offset, deci) * pow(10, deci)))
endtime = str(int(round(sample.offset + sample.duration, deci) * pow(10, deci)))
uniq_id = f"{bare_uniq_id}_{offset}_{endtime}"
return uniq_id
def get_ms_seg_timestamps(self, sample):
"""
Get start and end time of segments in each scale.
Args:
sample:
`DiarizationSpeechLabel` instance from preprocessing.collections
Returns:
ms_seg_timestamps (torch.tensor):
Tensor containing Multiscale segment timestamps.
ms_seg_counts (torch.tensor):
Number of segments for each scale. This information is used for reshaping embedding batch
during forward propagation.
"""
uniq_id = self.get_uniq_id_with_range(sample)
ms_seg_timestamps_list = []
max_seq_len = len(self.multiscale_timestamp_dict[uniq_id]["scale_dict"][self.scale_n - 1]["time_stamps"])
ms_seg_counts = [0 for _ in range(self.scale_n)]
for scale_idx in range(self.scale_n):
scale_ts_list = []
for k, (seg_stt, seg_end) in enumerate(
self.multiscale_timestamp_dict[uniq_id]["scale_dict"][scale_idx]["time_stamps"]
):
stt, end = (
int((seg_stt - sample.offset) * self.frame_per_sec),
int((seg_end - sample.offset) * self.frame_per_sec),
)
scale_ts_list.append(torch.tensor([stt, end]).detach())
ms_seg_counts[scale_idx] = len(
self.multiscale_timestamp_dict[uniq_id]["scale_dict"][scale_idx]["time_stamps"]
)
scale_ts = torch.stack(scale_ts_list)
scale_ts_padded = torch.cat([scale_ts, torch.zeros(max_seq_len - len(scale_ts_list), 2)], dim=0)
ms_seg_timestamps_list.append(scale_ts_padded.detach())
ms_seg_timestamps = torch.stack(ms_seg_timestamps_list)
ms_seg_counts = torch.tensor(ms_seg_counts)
return ms_seg_timestamps, ms_seg_counts
def __getitem__(self, index):
sample = self.collection[index]
if sample.offset is None:
sample.offset = 0
clus_label_index, targets, scale_mapping = self.parse_rttm_for_ms_targets(sample)
features = self.featurizer.process(sample.audio_file, offset=sample.offset, duration=sample.duration)
feature_length = torch.tensor(features.shape[0]).long()
ms_seg_timestamps, ms_seg_counts = self.get_ms_seg_timestamps(sample)
if self.random_flip:
torch.manual_seed(index)
flip = torch.cat([torch.randperm(self.max_spks), torch.tensor(-1).unsqueeze(0)])
clus_label_index, targets = flip[clus_label_index], targets[:, flip[: self.max_spks]]
return features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets
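# Minimal sketch of the segment-level thresholding described in `_AudioMSDDTrainDataset.get_diar_target_labels`:
# the frame-level target is averaged over each base-scale segment and compared against `soft_label_thres`.
# The threshold of 0.5 and the toy frame-level target below are illustrative assumptions.
def _demo_soft_label_threshold():
    import torch

    frame_per_sec = 100
    soft_label_thres = 0.5
    # Toy frame-level target: speaker 0 active for the first 0.3 s, speaker 1 for the last 0.7 s.
    fr_level_target = torch.zeros(100, 2)
    fr_level_target[:30, 0] = 1
    fr_level_target[30:, 1] = 1

    seg_targets = []
    for seg_stt, seg_end in [(0.0, 0.5), (0.5, 1.0)]:  # two base-scale segments of 0.5 s each
        stt_fr, end_fr = int(seg_stt * frame_per_sec), int(seg_end * frame_per_sec)
        soft_label_vec = fr_level_target[stt_fr:end_fr].sum(dim=0) / (end_fr - stt_fr)
        seg_targets.append((soft_label_vec > soft_label_thres).float())
    return torch.stack(seg_targets)  # tensor([[1., 0.], [0., 1.]])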
class _AudioMSDDInferDataset(Dataset):
"""
Dataset class that loads a json file containing paths to audio files,
RTTM files and number of speakers. This Dataset class is built for diarization inference and
evaluation. Speaker embedding sequences, segment timestamps, cluster-average speaker embeddings
are loaded from memory and fed into the dataloader.
Example:
{"audio_filepath": "/path/to/audio_0.wav", "num_speakers": 2,
"rttm_filepath": "/path/to/diar_label_0.rttm}
...
{"audio_filepath": "/path/to/audio_n.wav", "num_speakers": 2,
"rttm_filepath": "/path/to/diar_label_n.rttm}
Args:
manifest_filepath (str):
Path to input manifest json files.
emb_dict (dict):
Dictionary containing cluster-average embeddings and speaker mapping information.
emb_seq (dict):
Dictionary containing multiscale speaker embedding sequence, scale mapping and corresponding segment timestamps.
clus_label_dict (dict):
Subsegment-level (from base-scale) speaker labels from clustering results.
soft_label_thres (float):
A threshold that determines the label of each segment based on RTTM file information.
featurizer:
Featurizer instance for generating features from raw waveform.
seq_eval_mode (bool):
If True, F1 score will be calculated for each speaker pair during inference mode.
window_stride (float):
Window stride for acoustic feature. This value is used for calculating the numbers of feature-level frames.
use_single_scale_clus (bool):
Use only one scale for clustering instead of using multiple scales of embeddings for clustering.
pairwise_infer (bool):
This variable should be True if dataloader is created for an inference task.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports."""
output_types = OrderedDict(
{
"ms_emb_seq": NeuralType(('B', 'T', 'C', 'D'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
"ms_avg_embs": NeuralType(('B', 'C', 'D', 'C'), EncodedRepresentation()),
"targets": NeuralType(('B', 'T', 'C'), ProbsType()),
}
)
return output_types
def __init__(
self,
*,
manifest_filepath: str,
emb_dict: Dict,
emb_seq: Dict,
clus_label_dict: Dict,
soft_label_thres: float,
seq_eval_mode: bool,
window_stride: float,
use_single_scale_clus: bool,
pairwise_infer: bool,
):
super().__init__()
self.collection = DiarizationSpeechLabel(
manifests_files=manifest_filepath.split(','),
emb_dict=emb_dict,
clus_label_dict=clus_label_dict,
seq_eval_mode=seq_eval_mode,
pairwise_infer=pairwise_infer,
)
self.emb_dict = emb_dict
self.emb_seq = emb_seq
self.clus_label_dict = clus_label_dict
self.round_digits = 2
self.decim = 10 ** self.round_digits
self.frame_per_sec = int(1 / window_stride)
self.soft_label_thres = soft_label_thres
self.pairwise_infer = pairwise_infer
self.max_spks = 2
self.use_single_scale_clus = use_single_scale_clus
self.seq_eval_mode = seq_eval_mode
def __len__(self):
return len(self.collection)
def parse_rttm_multiscale(self, sample):
"""
Generate target tensor variable by extracting groundtruth diarization labels from an RTTM file.
This function is only used when ``self.seq_eval_mode=True`` and RTTM files are provided. This function converts
(start, end, speaker_id) format into base-scale (the finest scale) segment level diarization label in a matrix
form to create target matrix.
Args:
sample:
DiarizationSpeechLabel instance containing sample information such as audio filepath and RTTM filepath.
target_spks (tuple):
Two Indices of targeted speakers for evaluation.
Example of target_spks: (2, 3)
Returns:
seg_target (torch.tensor):
Tensor variable containing hard-labels of speaker activity in each base-scale segment.
"""
if sample.rttm_file is None:
raise ValueError(f"RTTM file is not provided for this sample {sample}")
rttm_lines = open(sample.rttm_file).readlines()
uniq_id = os.path.splitext(os.path.basename(sample.rttm_file))[0]
mapping_dict = self.emb_dict[max(self.emb_dict.keys())][uniq_id]['mapping']
rttm_timestamps = extract_seg_info_from_rttm(uniq_id, rttm_lines, mapping_dict, sample.target_spks)
fr_level_target = assign_frame_level_spk_vector(
rttm_timestamps, self.round_digits, self.frame_per_sec, sample.target_spks
)
seg_target = self.get_diar_target_labels_from_fr_target(uniq_id, fr_level_target)
return seg_target
def get_diar_target_labels_from_fr_target(self, uniq_id, fr_level_target):
"""
Generate a base-scale level binary diarization label from the frame-level target matrix. For the given frame-level
speaker target matrix `fr_level_target`, the number of frames belonging to each speaker is counted and converted
into per-speaker ratios stored in the `soft_label_vec` variable. Finally, `soft_label_vec` is compared with
`soft_label_thres` to determine whether the label vector should contain 0 or 1 for each speaker bin. Note that
the `seg_target` variable has shape (number of base-scale segments, 2).
Example of seg_target:
[[0., 1.], [0., 1.], [1., 1.], [1., 0.], [1., 0.], ..., [0., 1.]]
Args:
uniq_id (str):
Unique file ID that refers to an input audio file and corresponding RTTM (Annotation) file.
fr_level_target (torch.tensor):
frame-level binary speaker annotation (1: exist 0: non-exist) generated from RTTM file.
Returns:
seg_target (torch.tensor):
Tensor variable containing binary hard-labels of speaker activity in each base-scale segment.
"""
if fr_level_target is None:
return None
else:
seg_target_list = []
for (seg_stt, seg_end, label_int) in self.clus_label_dict[uniq_id]:
seg_stt_fr, seg_end_fr = int(seg_stt * self.frame_per_sec), int(seg_end * self.frame_per_sec)
soft_label_vec = torch.sum(fr_level_target[seg_stt_fr:seg_end_fr, :], axis=0) / (
seg_end_fr - seg_stt_fr
)
label_vec = (soft_label_vec > self.soft_label_thres).int()
seg_target_list.append(label_vec)
seg_target = torch.stack(seg_target_list)
return seg_target
def __getitem__(self, index):
sample = self.collection[index]
if sample.offset is None:
sample.offset = 0
uniq_id = os.path.splitext(os.path.basename(sample.audio_file))[0]
scale_n = len(self.emb_dict.keys())
_avg_embs = torch.stack([self.emb_dict[scale_index][uniq_id]['avg_embs'] for scale_index in range(scale_n)])
if self.pairwise_infer:
avg_embs = _avg_embs[:, :, self.collection[index].target_spks]
else:
avg_embs = _avg_embs
if avg_embs.shape[2] > self.max_spks:
raise ValueError(
f" avg_embs.shape[2] {avg_embs.shape[2]} should be less than or equal to self.max_num_speakers {self.max_spks}"
)
feats = []
for scale_index in range(scale_n):
repeat_mat = self.emb_seq["session_scale_mapping"][uniq_id][scale_index]
feats.append(self.emb_seq[scale_index][uniq_id][repeat_mat, :])
feats_out = torch.stack(feats).permute(1, 0, 2)
feats_len = feats_out.shape[0]
if self.seq_eval_mode:
targets = self.parse_rttm_multiscale(sample)
else:
targets = torch.zeros(feats_len, 2).float()
return feats_out, feats_len, targets, avg_embs
def _msdd_train_collate_fn(self, batch):
"""
Collate batch of variables that are needed for raw waveform to diarization label training.
The following variables are included in training/validation batch:
Args:
batch (tuple):
Batch tuple containing the variables for the diarization training.
Returns:
features (torch.tensor):
Raw waveform samples (time series) loaded from the audio_filepath in the input manifest file.
feature_length (torch.tensor):
Lengths of the raw waveform samples.
ms_seg_timestamps (torch.tensor):
Matrix containing the start time and end time (timestamps) for each segment and each scale.
ms_seg_timestamps is needed for extracting acoustic features from raw waveforms.
ms_seg_counts (torch.tensor):
Matrix containing the number of segments for each scale. ms_seg_counts is necessary for reshaping
the input matrix for the MSDD model.
clus_label_index (torch.tensor):
Groundtruth clustering label (cluster index for each segment) from RTTM files for training purposes.
clus_label_index is necessary for calculating the cluster-average embedding.
scale_mapping (torch.tensor):
Matrix containing the segment indices of each scale. scale_mapping is necessary for reshaping the
multiscale embeddings to form an input matrix for the MSDD model.
targets (torch.tensor):
Groundtruth speaker labels for the given input embedding sequence. A minimal sketch of the
zero-padding performed in this function is given right after it.
"""
packed_batch = list(zip(*batch))
features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets = packed_batch
features_list, feature_length_list = [], []
ms_seg_timestamps_list, ms_seg_counts_list, scale_clus_label_list, scale_mapping_list, targets_list = (
[],
[],
[],
[],
[],
)
max_raw_feat_len = max([x.shape[0] for x in features])
max_target_len = max([x.shape[0] for x in targets])
max_total_seg_len = max([x.shape[0] for x in clus_label_index])
for feat, feat_len, ms_seg_ts, ms_seg_ct, scale_clus, scl_map, tgt in batch:
seq_len = tgt.shape[0]
pad_feat = (0, max_raw_feat_len - feat_len)
pad_tgt = (0, 0, 0, max_target_len - seq_len)
pad_sm = (0, max_target_len - seq_len)
pad_ts = (0, 0, 0, max_target_len - seq_len)
pad_sc = (0, max_total_seg_len - scale_clus.shape[0])
padded_feat = torch.nn.functional.pad(feat, pad_feat)
padded_tgt = torch.nn.functional.pad(tgt, pad_tgt)
padded_sm = torch.nn.functional.pad(scl_map, pad_sm)
padded_ms_seg_ts = torch.nn.functional.pad(ms_seg_ts, pad_ts)
padded_scale_clus = torch.nn.functional.pad(scale_clus, pad_sc)
features_list.append(padded_feat)
feature_length_list.append(feat_len.clone().detach())
ms_seg_timestamps_list.append(padded_ms_seg_ts)
ms_seg_counts_list.append(ms_seg_ct.clone().detach())
scale_clus_label_list.append(padded_scale_clus)
scale_mapping_list.append(padded_sm)
targets_list.append(padded_tgt)
features = torch.stack(features_list)
feature_length = torch.stack(feature_length_list)
ms_seg_timestamps = torch.stack(ms_seg_timestamps_list)
clus_label_index = torch.stack(scale_clus_label_list)
ms_seg_counts = torch.stack(ms_seg_counts_list)
scale_mapping = torch.stack(scale_mapping_list)
targets = torch.stack(targets_list)
return features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets
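# Minimal sketch of the zero-padding used in `_msdd_train_collate_fn` above: variable-length 1D waveforms
# are right-padded to the longest example so that they can be stacked into a single batch tensor.
# The toy lengths below are illustrative assumptions.
def _demo_pad_and_stack():
    import torch

    features = [torch.randn(16000), torch.randn(12000), torch.randn(8000)]  # raw waveforms
    max_len = max(feat.shape[0] for feat in features)
    padded = [torch.nn.functional.pad(feat, (0, max_len - feat.shape[0])) for feat in features]
    batch = torch.stack(padded)  # shape: (3, 16000)
    lengths = torch.tensor([feat.shape[0] for feat in features])  # original lengths: 16000, 12000, 8000
    return batch, lengths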
def _msdd_infer_collate_fn(self, batch):
"""
Collate batch of feats (speaker embeddings), feature lengths, target label sequences and cluster-average embeddings.
Args:
batch (tuple):
Batch tuple containing feats, feats_len, targets and ms_avg_embs.
Returns:
feats (torch.tensor):
Collated speaker embedding with unified length.
feats_len (torch.tensor):
The actual length of each embedding sequence without zero padding.
targets (torch.tensor):
Groundtruth Speaker label for the given input embedding sequence.
ms_avg_embs (torch.tensor):
Cluster-average speaker embedding vectors.
"""
packed_batch = list(zip(*batch))
feats, feats_len, targets, ms_avg_embs = packed_batch
feats_list, flen_list, targets_list, ms_avg_embs_list = [], [], [], []
max_audio_len = max(feats_len)
max_target_len = max([x.shape[0] for x in targets])
for feature, feat_len, target, ivector in batch:
flen_list.append(feat_len)
ms_avg_embs_list.append(ivector)
if feat_len < max_audio_len:
pad_a = (0, 0, 0, 0, 0, max_audio_len - feat_len)
pad_t = (0, 0, 0, max_target_len - target.shape[0])
padded_feature = torch.nn.functional.pad(feature, pad_a)
padded_target = torch.nn.functional.pad(target, pad_t)
feats_list.append(padded_feature)
targets_list.append(padded_target)
else:
targets_list.append(target.clone().detach())
feats_list.append(feature.clone().detach())
feats = torch.stack(feats_list)
feats_len = torch.tensor(flen_list)
targets = torch.stack(targets_list)
ms_avg_embs = torch.stack(ms_avg_embs_list)
return feats, feats_len, targets, ms_avg_embs
class AudioToSpeechMSDDTrainDataset(_AudioMSDDTrainDataset):
"""
Dataset class that loads a json file containing paths to audio files,
rttm files and number of speakers. This Dataset class is designed for
training or fine-tuning speaker embedding extractor and diarization decoder
at the same time.
Example:
{"audio_filepath": "/path/to/audio_0.wav", "num_speakers": 2,
"rttm_filepath": "/path/to/diar_label_0.rttm}
...
{"audio_filepath": "/path/to/audio_n.wav", "num_speakers": 2,
"rttm_filepath": "/path/to/diar_label_n.rttm}
Args:
manifest_filepath (str):
Path to input manifest json files.
multiscale_args_dict (dict):
Dictionary containing the parameters for multiscale segmentation and clustering.
emb_dir (str):
Path to a temporary folder where segmentation information for embedding extraction is saved.
soft_label_thres (float):
A threshold that determines the label of each segment based on RTTM file information.
featurizer:
Featurizer instance for generating features from the raw waveform.
window_stride (float):
Window stride for acoustic feature. This value is used for calculating the numbers of feature-level frames.
emb_batch_size (int):
Number of embedding vectors that are trained with attached computational graphs.
pairwise_infer (bool):
This variable should be True if dataloader is created for an inference task.
"""
def __init__(
self,
*,
manifest_filepath: str,
multiscale_args_dict: Dict,
emb_dir: str,
soft_label_thres: float,
featurizer,
window_stride,
emb_batch_size,
pairwise_infer: bool,
global_rank: int,
):
super().__init__(
manifest_filepath=manifest_filepath,
multiscale_args_dict=multiscale_args_dict,
emb_dir=emb_dir,
soft_label_thres=soft_label_thres,
featurizer=featurizer,
window_stride=window_stride,
emb_batch_size=emb_batch_size,
pairwise_infer=pairwise_infer,
global_rank=global_rank,
)
def msdd_train_collate_fn(self, batch):
return _msdd_train_collate_fn(self, batch)
class AudioToSpeechMSDDInferDataset(_AudioMSDDInferDataset):
"""
Dataset class that loads a json file containing paths to audio files,
rttm files and number of speakers. The created labels are used for diarization inference.
Example:
{"audio_filepath": "/path/to/audio_0.wav", "num_speakers": 2,
"rttm_filepath": "/path/to/diar_label_0.rttm}
...
{"audio_filepath": "/path/to/audio_n.wav", "num_speakers": 2,
"rttm_filepath": "/path/to/diar_label_n.rttm}
Args:
manifest_filepath (str):
Path to input manifest json files.
emb_dict (dict):
Dictionary containing cluster-average embeddings and speaker mapping information.
emb_seq (dict):
Dictionary containing multiscale speaker embedding sequence, scale mapping and corresponding segment timestamps.
clus_label_dict (dict):
Subsegment-level (from base-scale) speaker labels from clustering results.
soft_label_thres (float):
Threshold that determines speaker labels of segments depending on the overlap with groundtruth speaker timestamps.
featurizer:
Featurizer instance for generating features from raw waveform.
use_single_scale_clus (bool):
Use only one scale for clustering instead of using multiple scales of embeddings for clustering.
seq_eval_mode (bool):
If True, F1 score will be calculated for each speaker pair during inference mode.
window_stride (float):
Window stride for acoustic feature. This value is used for calculating the numbers of feature-level frames.
pairwise_infer (bool):
If True, this Dataset class operates in inference mode. In inference mode, a set of speakers in the input audio
is split into multiple pairs of speakers and speaker tuples (e.g. 3 speakers: [(0,1), (1,2), (0,2)]) and then
fed into the MSDD to merge the individual results.
"""
def __init__(
self,
*,
manifest_filepath: str,
emb_dict: Dict,
emb_seq: Dict,
clus_label_dict: Dict,
soft_label_thres: float,
use_single_scale_clus: bool,
seq_eval_mode: bool,
window_stride: float,
pairwise_infer: bool,
):
super().__init__(
manifest_filepath=manifest_filepath,
emb_dict=emb_dict,
emb_seq=emb_seq,
clus_label_dict=clus_label_dict,
soft_label_thres=soft_label_thres,
use_single_scale_clus=use_single_scale_clus,
window_stride=window_stride,
seq_eval_mode=seq_eval_mode,
pairwise_infer=pairwise_infer,
)
def msdd_infer_collate_fn(self, batch):
return _msdd_infer_collate_fn(self, batch)
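# Minimal sketch of the pair enumeration described in the `pairwise_infer` argument above: for a session
# with more than two speakers, diarization decoding is run on every speaker pair and the pairwise results
# are merged afterwards. The merging step itself is omitted here.
def _demo_speaker_pairs(num_speakers: int = 3):
    from itertools import combinations

    return list(combinations(range(num_speakers), 2))  # e.g., [(0, 1), (0, 2), (1, 2)] for 3 speakers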
# Source: repo NeMo-main, file nemo/collections/asr/data/audio_to_diar_label.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import math
import random
from collections import OrderedDict, namedtuple
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Tuple, Type, Union
import librosa
import numpy as np
import torch
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from nemo.collections.asr.parts.utils.audio_utils import ChannelSelectorType
from nemo.collections.common.parts.preprocessing import collections
from nemo.collections.common.parts.utils import flatten
from nemo.core.classes import Dataset
from nemo.core.neural_types import AudioSignal, EncodedRepresentation, LengthsType, NeuralType
from nemo.utils import logging
__all__ = [
'AudioToTargetDataset',
'AudioToTargetWithReferenceDataset',
'AudioToTargetWithEmbeddingDataset',
]
def _audio_collate_fn(batch: List[dict]) -> Tuple[torch.Tensor]:
"""Collate a batch of items returned by __getitem__.
Examples for each signal are zero padded to the same length
(batch_length), which is determined by the longest example.
Lengths of the original signals are returned in the output.
Args:
batch: List of dictionaries. Each element of the list
has the following format
```
{
'signal_0': 1D or 2D tensor,
'signal_1': 1D or 2D tensor,
...
'signal_N': 1D or 2D tensor,
}
```
1D tensors have shape (num_samples,) and 2D tensors
have shape (num_channels, num_samples)
Returns:
A tuple containing signal tensor and signal length tensor (in samples)
for each signal.
The output has the following format:
```
(signal_0, signal_0_length, signal_1, signal_1_length, ..., signal_N, signal_N_length)
```
Note that the output format is obtained by interleaving signals and their length.
"""
signals = batch[0].keys()
batched = tuple()
for signal in signals:
signal_length = [b[signal].shape[-1] for b in batch]
# Batch length is determined by the longest signal in the batch
batch_length = max(signal_length)
b_signal = []
for s_len, b in zip(signal_length, batch):
# check if padding is necessary
if s_len < batch_length:
if b[signal].ndim == 1:
# single-channel signal
pad = (0, batch_length - s_len)
elif b[signal].ndim == 2:
# multi-channel signal
pad = (0, batch_length - s_len, 0, 0)
else:
raise RuntimeError(
f'Signal {signal} has unsupported shape {b[signal].shape}. Currently, only 1D and 2D arrays are supported.'
)
b[signal] = torch.nn.functional.pad(b[signal], pad)
# append the current padded signal
b_signal.append(b[signal])
# (signal_batched, signal_length)
batched += (torch.stack(b_signal), torch.tensor(signal_length, dtype=torch.int32))
# Currently, outputs are expected to be in a tuple, where each element must correspond
# to the output type in the OrderedDict returned by output_types.
#
# Therefore, we return batched signals by interleaving signals and their length:
# (signal_0, signal_0_length, signal_1, signal_1_length, ...)
return batched
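# Minimal sketch of the interleaved output format produced by `_audio_collate_fn` above: for each signal
# key, the batch contains the padded signal tensor followed by a tensor of original lengths. The toy batch
# below uses single-channel signals of different lengths and is an illustrative assumption.
def _demo_audio_collate():
    import torch

    batch = [
        {'input_signal': torch.randn(8000), 'target_signal': torch.randn(8000)},
        {'input_signal': torch.randn(6000), 'target_signal': torch.randn(6000)},
    ]
    out = _audio_collate_fn(batch)
    # out = (input_signal [2, 8000], input_length [8000, 6000], target_signal [2, 8000], target_length [8000, 6000])
    return out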
@dataclass
class SignalSetup:
signals: List[str] # signal names
duration: Optional[Union[float, list]] = None # duration for each signal
channel_selectors: Optional[List[ChannelSelectorType]] = None # channel selector for loading each signal
class ASRAudioProcessor:
"""Class that processes an example from Audio collection and returns
a dictionary with prepared signals.
For example, the output dictionary may be the following
```
{
'input_signal': input_signal_tensor,
'target_signal': target_signal_tensor,
'reference_signal': reference_signal_tensor,
'embedding_vector': embedding_vector
}
```
Keys in the output dictionary are ordered with synchronous signals given first,
followed by asynchronous signals and embedding.
Args:
sample_rate: sample rate used for all audio signals
random_offset: If `True`, offset will be randomized when loading a subsegment
from a file.
"""
def __init__(
self, sample_rate: float, random_offset: bool,
):
self.sample_rate = sample_rate
self.random_offset = random_offset
self.sync_setup = None
self.async_setup = None
self.embedding_setup = None
@property
def sample_rate(self) -> float:
return self._sample_rate
@sample_rate.setter
def sample_rate(self, value: float):
if value <= 0:
raise ValueError(f'Sample rate must be positive, received {value}')
self._sample_rate = value
@property
def random_offset(self) -> bool:
return self._random_offset
@random_offset.setter
def random_offset(self, value: bool):
self._random_offset = value
@property
def sync_setup(self) -> SignalSetup:
"""Return the current setup for synchronous signals.
Returns:
A dataclass containing the list of signals, their
duration and channel selectors.
"""
return self._sync_setup
@sync_setup.setter
def sync_setup(self, value: Optional[SignalSetup]):
"""Setup signals to be loaded synchronously.
Args:
value: An instance of SignalSetup with the following fields
- signals: list of signals (keys of example.audio_signals) which will be loaded
synchronously with the same start time and duration.
- duration: Duration for each signal to be loaded.
If duration is set to None, the whole file will be loaded.
- channel_selectors: A list of channel selector for each signal. If channel selector
is None, all channels in the audio file will be loaded.
"""
if value is None or isinstance(value, SignalSetup):
self._sync_setup = value
else:
raise ValueError(f'Unexpected type {type(value)} for value {value}.')
@property
def async_setup(self) -> SignalSetup:
"""Return the current setup for asynchronous signals.
Returns:
A dataclass containing the list of signals, their
duration and channel selectors.
"""
return self._async_setup
@async_setup.setter
def async_setup(self, value: Optional[SignalSetup]):
"""Setup signals to be loaded asynchronously.
Args:
value: An instance of SignalSetup with the following fields
- signals: list of signals (keys of example.audio_signals) which will be loaded
asynchronously with signals possibly having different start and duration
- duration: Duration for each signal to be loaded.
If duration is set to None, the whole file will be loaded.
- channel_selectors: A list of channel selector for each signal. If channel selector
is None, all channels in the audio file will be loaded.
"""
if value is None or isinstance(value, SignalSetup):
self._async_setup = value
else:
raise ValueError(f'Unexpected type {type(value)} for value {value}.')
@property
def embedding_setup(self) -> SignalSetup:
"""Setup signals corresponding to an embedding vector.
"""
return self._embedding_setup
@embedding_setup.setter
def embedding_setup(self, value: SignalSetup):
"""Setup signals corresponding to an embedding vector.
Args:
value: An instance of SignalSetup with the following fields
- signals: list of signals (keys of example.audio_signals) which will be loaded
as embedding vectors.
"""
if value is None or isinstance(value, SignalSetup):
self._embedding_setup = value
else:
raise ValueError(f'Unexpected type {type(value)} for value {value}.')
def process(self, example: collections.Audio.OUTPUT_TYPE) -> Dict[str, torch.Tensor]:
"""Process an example from a collection of audio examples.
Args:
example: an example from Audio collection.
Returns:
An ordered dictionary of signals and their tensors.
For example, the output dictionary may be the following
```
{
'input_signal': input_signal_tensor,
'target_signal': target_signal_tensor,
'reference_signal': reference_signal_tensor,
'embedding_vector': embedding_vector
}
```
Keys in the output dictionary are ordered with synchronous signals given first,
followed by asynchronous signals and embedding.
"""
audio = self.load_audio(example=example)
audio = self.process_audio(audio=audio)
return audio
def load_audio(self, example: collections.Audio.OUTPUT_TYPE) -> Dict[str, torch.Tensor]:
"""Given an example, load audio from `example.audio_files` and prepare
the output dictionary.
Args:
example: An example from an audio collection
Returns:
An ordered dictionary of signals and their tensors.
For example, the output dictionary may be the following
```
{
'input_signal': input_signal_tensor,
'target_signal': target_signal_tensor,
'reference_signal': reference_signal_tensor,
'embedding_vector': embedding_vector
}
```
Keys in the output dictionary are ordered with synchronous signals given first,
followed by asynchronous signals and embedding.
"""
output = OrderedDict()
if self.sync_setup is not None:
# Load all signals with the same start and duration
sync_signals = self.load_sync_signals(example)
output.update(sync_signals)
if self.async_setup is not None:
# Load each signal independently
async_signals = self.load_async_signals(example)
output.update(async_signals)
# Load embedding vector
if self.embedding_setup is not None:
embedding = self.load_embedding(example)
output.update(embedding)
if not output:
raise RuntimeError('Output dictionary is empty. Please use `_setup` methods to setup signals to be loaded')
return output
def process_audio(self, audio: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Process audio signals available in the input dictionary.
Args:
audio: A dictionary containing loaded signals `signal: tensor`
Returns:
An ordered dictionary of signals and their tensors.
"""
# Currently, not doing any processing of the loaded signals.
return audio
def load_sync_signals(self, example: collections.Audio.OUTPUT_TYPE) -> Dict[str, torch.Tensor]:
"""Load signals with the same start and duration.
Args:
example: an example from audio collection
Returns:
An ordered dictionary of signals and their tensors.
"""
output = OrderedDict()
sync_audio_files = [example.audio_files[s] for s in self.sync_setup.signals]
sync_samples = self.get_samples_synchronized(
audio_files=sync_audio_files,
channel_selectors=self.sync_setup.channel_selectors,
sample_rate=self.sample_rate,
duration=self.sync_setup.duration,
fixed_offset=example.offset,
random_offset=self.random_offset,
)
for signal, samples in zip(self.sync_setup.signals, sync_samples):
output[signal] = torch.tensor(samples)
return output
def load_async_signals(self, example: collections.Audio.OUTPUT_TYPE) -> Dict[str, torch.Tensor]:
"""Load each async signal independently, no constraints on starting
from the same time.
Args:
example: an example from audio collection
Returns:
An ordered dictionary of signals and their tensors.
"""
output = OrderedDict()
for idx, signal in enumerate(self.async_setup.signals):
samples = self.get_samples(
audio_file=example.audio_files[signal],
sample_rate=self.sample_rate,
duration=self.async_setup.duration[idx],
channel_selector=self.async_setup.channel_selectors[idx],
fixed_offset=example.offset,
random_offset=self.random_offset,
)
output[signal] = torch.tensor(samples)
return output
@classmethod
def get_samples(
cls,
audio_file: str,
sample_rate: int,
duration: Optional[float] = None,
channel_selector: ChannelSelectorType = None,
fixed_offset: float = 0,
random_offset: bool = False,
) -> np.ndarray:
"""Get samples from an audio file.
For a single-channel signal, the output is shape (num_samples,).
For a multi-channel signal, the output is shape (num_samples, num_channels).
Args:
audio_file: path to an audio file
sample_rate: desired sample rate for output samples
duration: Optional desired duration of output samples.
If `None`, the complete file will be loaded.
If set, a segment of `duration` seconds will be loaded.
channel_selector: Optional channel selector, for selecting a subset of channels.
fixed_offset: Optional fixed offset when loading samples.
random_offset: If `True`, offset will be randomized when loading a short segment
from a file. The value is randomized between fixed_offset and
max_offset (set depending on the duration and fixed_offset).
Returns:
Numpy array with samples from audio file.
The array has shape (num_samples,) for a single-channel signal
or (num_channels, num_samples) for a multi-channel signal.
"""
output = cls.get_samples_synchronized(
audio_files=[audio_file],
sample_rate=sample_rate,
duration=duration,
channel_selectors=[channel_selector],
fixed_offset=fixed_offset,
random_offset=random_offset,
)
return output[0]
@classmethod
def get_samples_synchronized(
cls,
audio_files: List[str],
sample_rate: int,
duration: Optional[float] = None,
channel_selectors: Optional[List[ChannelSelectorType]] = None,
fixed_offset: float = 0,
random_offset: bool = False,
) -> List[np.ndarray]:
"""Get samples from multiple files with the same start and end point.
Args:
audio_files: list of paths to audio files
sample_rate: desired sample rate for output samples
duration: Optional desired duration of output samples.
If `None`, the complete files will be loaded.
If set, a segment of `duration` seconds will be loaded from
all files. Segment is synchronized across files, so that
start and end points are the same.
channel_selectors: Optional channel selector for each signal, for selecting
a subset of channels.
fixed_offset: Optional fixed offset when loading samples.
random_offset: If `True`, offset will be randomized when loading a short segment
from a file. The value is randomized between fixed_offset and
max_offset (set depending on the duration and fixed_offset).
Returns:
List with the same size as `audio_files` but containing numpy arrays
with samples from each audio file.
Each array has shape (num_samples,) or (num_channels, num_samples), for single-
or multi-channel signal, respectively.
For example, if `audio_files = [path/to/file_1.wav, path/to/file_2.wav]`,
the output will be a list `output = [samples_file_1, samples_file_2]`.
"""
if channel_selectors is None:
channel_selectors = [None] * len(audio_files)
if duration is None:
# Load complete files starting from a fixed offset
offset = fixed_offset # fixed offset
num_samples = None  # no constraint on the number of samples
else:
# Fixed duration of the output
audio_durations = cls.get_duration(audio_files)
min_audio_duration = min(audio_durations)
available_duration = min_audio_duration - fixed_offset
if available_duration <= 0:
raise ValueError(f'Fixed offset {fixed_offset}s is larger than shortest file {min_audio_duration}s.')
if duration + fixed_offset > min_audio_duration:
# The shortest file is shorter than the requested duration
logging.debug(
f'Shortest file ({min_audio_duration}s) is less than the desired duration {duration}s + fixed offset {fixed_offset}s. Returned signals will be shortened to {available_duration} seconds.'
)
offset = fixed_offset
duration = available_duration
elif random_offset:
# Randomize offset based on the shortest file
max_offset = min_audio_duration - duration
offset = random.uniform(fixed_offset, max_offset)
else:
# Fixed offset
offset = fixed_offset
# Fixed number of samples
num_samples = math.floor(duration * sample_rate)
output = []
# Prepare segments
for idx, audio_file in enumerate(audio_files):
segment_samples = cls.get_samples_from_file(
audio_file=audio_file,
sample_rate=sample_rate,
offset=offset,
num_samples=num_samples,
channel_selector=channel_selectors[idx],
)
output.append(segment_samples)
return output
@classmethod
def get_samples_from_file(
cls,
audio_file: Union[str, List[str]],
sample_rate: int,
offset: float,
num_samples: Optional[int] = None,
channel_selector: Optional[ChannelSelectorType] = None,
) -> np.ndarray:
"""Get samples from a single or multiple files.
If loading samples from multiple files, they will
be concatenated along the channel dimension.
Args:
audio_file: path or a list of paths.
sample_rate: sample rate of the loaded samples
offset: fixed offset in seconds
num_samples: Optional, number of samples to load.
If `None`, all available samples will be loaded.
channel_selector: Select a subset of available channels.
Returns:
An array with shape (samples,) or (channels, samples)
"""
if isinstance(audio_file, str):
# Load samples from a single file
segment_samples = cls.get_segment_from_file(
audio_file=audio_file,
sample_rate=sample_rate,
offset=offset,
num_samples=num_samples,
channel_selector=channel_selector,
)
elif isinstance(audio_file, list):
# Load samples from multiple files and form a multi-channel signal
segment_samples = []
for a_file in audio_file:
a_file_samples = cls.get_segment_from_file(
audio_file=a_file,
sample_rate=sample_rate,
offset=offset,
num_samples=num_samples,
channel_selector=channel_selector,
)
segment_samples.append(a_file_samples)
segment_samples = cls.list_to_multichannel(segment_samples)
elif audio_file is None:
# Support for inference, when the target signal is `None`
segment_samples = []
else:
raise RuntimeError(f'Unexpected audio_file type {type(audio_file)}')
return segment_samples
@staticmethod
def get_segment_from_file(
audio_file: str,
sample_rate: int,
offset: float,
num_samples: Optional[int] = None,
channel_selector: Optional[ChannelSelectorType] = None,
) -> np.ndarray:
"""Get a segment of samples from a single audio file.
Args:
audio_file: path to an audio file
sample_rate: sample rate of the loaded samples
offset: fixed offset in seconds
num_samples: Optional, number of samples to load.
If `None`, all available samples will be loaded.
channel_selector: Select a subset of available channels.
Returns:
An array with shape (samples,) or (channels, samples)
"""
if num_samples is None:
segment = AudioSegment.from_file(
audio_file=audio_file, target_sr=sample_rate, offset=offset, channel_selector=channel_selector,
)
else:
segment = AudioSegment.segment_from_file(
audio_file=audio_file,
target_sr=sample_rate,
n_segments=num_samples,
offset=offset,
channel_selector=channel_selector,
)
if segment.samples.ndim == 1:
# Single-channel signal
return segment.samples
elif segment.samples.ndim == 2:
# Use multi-channel format as (channels, samples)
return segment.samples.T
else:
raise RuntimeError(f'Unexpected samples shape: {segment.samples.shape}')
@staticmethod
def list_to_multichannel(signal: Union[np.ndarray, List[np.ndarray]]) -> np.ndarray:
"""Convert a list of signals into a multi-channel signal by concatenating
the elements of the list along the channel dimension.
If input is not a list, it is returned unmodified.
Args:
signal: list of arrays
Returns:
Numpy array obtained by concatenating the elements of the list
along the channel dimension (axis=0).
"""
if not isinstance(signal, list):
# Nothing to do there
return signal
elif len(signal) == 0:
# Nothing to do, return as is
return signal
elif len(signal) == 1:
# Nothing to concatenate, return the original format
return signal[0]
# If multiple signals are provided in a list, we concatenate them along the channel dimension
if signal[0].ndim == 1:
# Single-channel individual files
mc_signal = np.stack(signal, axis=0)
elif signal[0].ndim == 2:
# Multi-channel individual files
mc_signal = np.concatenate(signal, axis=0)
else:
raise RuntimeError(f'Unexpected target with {signal[0].ndim} dimensions.')
return mc_signal
@staticmethod
def get_duration(audio_files: List[str]) -> List[float]:
"""Get duration for each audio file in `audio_files`.
Args:
audio_files: list of paths to audio files
Returns:
List of durations in seconds.
"""
duration = [librosa.get_duration(filename=f) for f in flatten(audio_files)]
return duration
def load_embedding(self, example: collections.Audio.OUTPUT_TYPE) -> Dict[str, torch.Tensor]:
"""Given an example, load embedding from `example.audio_files[embedding]`
and return it in a dictionary.
Args:
example: An example from audio collection
Returns:
A dictionary of embedding keys and their tensors.
"""
output = OrderedDict()
for idx, signal in enumerate(self.embedding_setup.signals):
embedding_file = example.audio_files[signal]
embedding = self.load_embedding_vector(embedding_file)
output[signal] = torch.tensor(embedding)
return output
@staticmethod
def load_embedding_vector(filepath: str) -> np.ndarray:
"""Load an embedding vector from a file.
Args:
filepath: path to a file storing a vector.
Currently, it is assumed the file is a npy file.
Returns:
Array loaded from filepath.
"""
if filepath.endswith('.npy'):
with open(filepath, 'rb') as f:
embedding = np.load(f)
else:
raise RuntimeError(f'Unknown embedding file format in file: {filepath}')
return embedding
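# Brief usage sketch showing how `ASRAudioProcessor` is configured before it can process examples,
# mirroring the setup performed in `AudioToTargetDataset.__init__` below. The 16 kHz sample rate and
# 4.0 s duration are illustrative values, not defaults.
def _demo_audio_processor_setup():
    processor = ASRAudioProcessor(sample_rate=16000, random_offset=True)
    # Load input and target synchronously: same start offset and the same 4.0 s duration for both.
    processor.sync_setup = SignalSetup(
        signals=['input_signal', 'target_signal'],
        duration=4.0,
        channel_selectors=[None, None],  # None -> load all channels of each file
    )
    return processor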
class BaseAudioDataset(Dataset):
"""Base class of audio datasets, providing common functionality
for other audio datasets.
Args:
collection: Collection of audio examples prepared from manifest files.
audio_processor: Used to process every example from the collection.
A callable with `process` method. For reference,
please check ASRAudioProcessor.
"""
@property
@abc.abstractmethod
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
def __init__(self, collection: collections.Audio, audio_processor: Callable, output_type: Type[namedtuple]):
"""Instantiates an audio dataset.
"""
super().__init__()
self.collection = collection
self.audio_processor = audio_processor
self.output_type = output_type
def num_channels(self, signal_key) -> int:
"""Returns the number of channels for a particular signal in
items prepared by this dataset.
More specifically, this will get the tensor from the first
item in the dataset, check if it's a one- or two-dimensional
tensor, and return the number of channels based on the size
of the first axis (shape[0]).
NOTE:
This assumes that all examples have the same number of channels.
Args:
signal_key: string, used to select a signal from the dictionary
output by __getitem__
Returns:
Number of channels for the selected signal.
"""
# Assumption: whole dataset has the same number of channels
item = self.__getitem__(0)
if item[signal_key].ndim == 1:
return 1
elif item[signal_key].ndim == 2:
return item[signal_key].shape[0]
else:
raise RuntimeError(
f'Unexpected number of dimensions for signal {signal_key} with shape {item[signal_key].shape}'
)
def __getitem__(self, index: int) -> Dict[str, torch.Tensor]:
"""Return a single example from the dataset.
Args:
index: integer index of an example in the collection
Returns:
Dictionary providing mapping from signal to its tensor.
For example:
```
{
'input_signal': input_signal_tensor,
'target_signal': target_signal_tensor,
}
```
"""
example = self.collection[index]
output = self.audio_processor.process(example=example)
return output
def __len__(self) -> int:
"""Return the number of examples in the dataset.
"""
return len(self.collection)
def _collate_fn(self, batch) -> Tuple[torch.Tensor]:
"""Collate items in a batch.
"""
return self.output_type(*_audio_collate_fn(batch))
AudioToTargetExample = namedtuple(
typename='AudioToTargetExample', field_names='input_signal input_length target_signal target_length'
)
class AudioToTargetDataset(BaseAudioDataset):
"""A dataset for audio-to-audio tasks where the goal is to use
an input signal to recover the corresponding target signal.
Each line of the manifest file is expected to have the following format
```
{
'input_key': 'path/to/input.wav',
'target_key': 'path/to/path_to_target.wav',
'duration': duration_of_input,
}
```
Additionally, multiple audio files may be provided for each key in the manifest, for example,
```
{
'input_key': 'path/to/input.wav',
'target_key': ['path/to/path_to_target_ch0.wav', 'path/to/path_to_target_ch1.wav'],
'duration': duration_of_input,
}
```
Keys for input and target signals can be configured in the constructor (`input_key` and `target_key`).
Args:
manifest_filepath: Path to manifest file in a format described above.
sample_rate: Sample rate for loaded audio signals.
input_key: Key pointing to input audio files in the manifest
target_key: Key pointing to target audio files in manifest
audio_duration: Optional duration of each item returned by __getitem__.
If `None`, complete audio will be loaded.
If set, a random subsegment will be loaded synchronously from
the input and target audio, i.e., with the same start and end point.
random_offset: If `True`, offset will be randomized when loading a subsegment
from a file.
max_duration: If audio exceeds this length, do not include in dataset.
min_duration: If audio is less than this length, do not include in dataset.
max_utts: Limit number of utterances.
input_channel_selector: Optional, select subset of channels from each input audio file.
If `None`, all channels will be loaded.
target_channel_selector: Optional, select subset of channels from each target audio file.
If `None`, all channels will be loaded.
"""
def __init__(
self,
manifest_filepath: str,
sample_rate: int,
input_key: str,
target_key: str,
audio_duration: Optional[float] = None,
random_offset: bool = False,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
max_utts: Optional[int] = None,
input_channel_selector: Optional[int] = None,
target_channel_selector: Optional[int] = None,
):
audio_to_manifest_key = {
'input_signal': input_key,
'target_signal': target_key,
}
collection = collections.AudioCollection(
manifest_files=manifest_filepath,
audio_to_manifest_key=audio_to_manifest_key,
min_duration=min_duration,
max_duration=max_duration,
max_number=max_utts,
)
audio_processor = ASRAudioProcessor(sample_rate=sample_rate, random_offset=random_offset,)
audio_processor.sync_setup = SignalSetup(
signals=['input_signal', 'target_signal'],
duration=audio_duration,
channel_selectors=[input_channel_selector, target_channel_selector],
)
super().__init__(collection=collection, audio_processor=audio_processor, output_type=AudioToTargetExample)
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
Returns:
Ordered dictionary in the following form:
```
{
'input_signal': batched single- or multi-channel format,
'input_length': batched original length of each input signal
'target_signal': batched single- or multi-channel format,
'target_length': batched original length of each target signal
}
```
"""
sc_audio_type = NeuralType(('B', 'T'), AudioSignal())
mc_audio_type = NeuralType(('B', 'C', 'T'), AudioSignal())
return OrderedDict(
input_signal=sc_audio_type if self.num_channels('input_signal') == 1 else mc_audio_type,
input_length=NeuralType(('B',), LengthsType()),
target_signal=sc_audio_type if self.num_channels('target_signal') == 1 else mc_audio_type,
target_length=NeuralType(('B',), LengthsType()),
)
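# Illustrative usage sketch: how a manifest line of the format documented above maps onto
# the AudioToTargetDataset constructor. The helper, manifest keys, file paths and parameter
# values are hypothetical; real audio files are required before the dataset can be iterated.
def _example_build_audio_to_target_dataset(manifest_path: str = 'train_manifest.json'):
    import json

    entry = {
        'input_filepath': 'path/to/noisy.wav',
        'target_filepath': 'path/to/clean.wav',
        'duration': 3.0,
    }
    with open(manifest_path, 'w') as f:
        f.write(json.dumps(entry) + '\n')
    return AudioToTargetDataset(
        manifest_filepath=manifest_path,
        sample_rate=16000,
        input_key='input_filepath',
        target_key='target_filepath',
        audio_duration=2.0,  # random 2-second subsegments, cut synchronously from input and target
        random_offset=True,
    )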
AudioToTargetWithReferenceExample = namedtuple(
typename='AudioToTargetWithReferenceExample',
field_names='input_signal input_length target_signal target_length reference_signal reference_length',
)
class AudioToTargetWithReferenceDataset(BaseAudioDataset):
"""A dataset for audio-to-audio tasks where the goal is to use
an input signal to recover the corresponding target signal, while an
additional reference signal is also available.
This can be used, for example, when a reference signal is
available from
- enrollment utterance for the target signal
- echo reference from playback
- reference from another sensor that correlates with the target signal
Each line of the manifest file is expected to have the following format
```
{
'input_key': 'path/to/input.wav',
'target_key': 'path/to/path_to_target.wav',
'reference_key': 'path/to/path_to_reference.wav',
'duration': duration_of_input,
}
```
Keys for input, target and reference signals can be configured in the constructor.
Args:
manifest_filepath: Path to manifest file in a format described above.
sample_rate: Sample rate for loaded audio signals.
input_key: Key pointing to input audio files in the manifest
target_key: Key pointing to target audio files in manifest
reference_key: Key pointing to reference audio files in manifest
audio_duration: Optional duration of each item returned by __getitem__.
If `None`, complete audio will be loaded.
If set, a random subsegment will be loaded synchronously from
the input and target audio, i.e., with the same start and end point.
random_offset: If `True`, offset will be randomized when loading a subsegment
from a file.
max_duration: If audio exceeds this length, do not include in dataset.
min_duration: If audio is less than this length, do not include in dataset.
max_utts: Limit number of utterances.
input_channel_selector: Optional, select subset of channels from each input audio file.
If `None`, all channels will be loaded.
target_channel_selector: Optional, select subset of channels from each target audio file.
If `None`, all channels will be loaded.
reference_channel_selector: Optional, select subset of channels from each reference audio file.
If `None`, all channels will be loaded.
reference_is_synchronized: If True, it is assumed that the reference signal is synchronized
with the input signal, so the same subsegment will be loaded as for
input and target. If False, reference signal will be loaded independently
from input and target.
reference_duration: Optional, can be used to set a fixed duration of the reference utterance. If `None`,
complete audio file will be loaded.
"""
def __init__(
self,
manifest_filepath: str,
sample_rate: int,
input_key: str,
target_key: str,
reference_key: str,
audio_duration: Optional[float] = None,
random_offset: bool = False,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
max_utts: Optional[int] = None,
input_channel_selector: Optional[int] = None,
target_channel_selector: Optional[int] = None,
reference_channel_selector: Optional[int] = None,
reference_is_synchronized: bool = True,
reference_duration: Optional[float] = None,
):
audio_to_manifest_key = {
'input_signal': input_key,
'target_signal': target_key,
'reference_signal': reference_key,
}
collection = collections.AudioCollection(
manifest_files=manifest_filepath,
audio_to_manifest_key=audio_to_manifest_key,
min_duration=min_duration,
max_duration=max_duration,
max_number=max_utts,
)
audio_processor = ASRAudioProcessor(sample_rate=sample_rate, random_offset=random_offset,)
if reference_is_synchronized:
audio_processor.sync_setup = SignalSetup(
signals=['input_signal', 'target_signal', 'reference_signal'],
duration=audio_duration,
channel_selectors=[input_channel_selector, target_channel_selector, reference_channel_selector],
)
else:
audio_processor.sync_setup = SignalSetup(
signals=['input_signal', 'target_signal'],
duration=audio_duration,
channel_selectors=[input_channel_selector, target_channel_selector],
)
audio_processor.async_setup = SignalSetup(
signals=['reference_signal'],
duration=[reference_duration],
channel_selectors=[reference_channel_selector],
)
super().__init__(
collection=collection, audio_processor=audio_processor, output_type=AudioToTargetWithReferenceExample
)
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
Returns:
Ordered dictionary in the following form:
```
{
'input_signal': batched single- or multi-channel format,
'input_length': batched original length of each input signal
'target_signal': batched single- or multi-channel format,
'target_length': batched original length of each target signal
'reference_signal': batched single- or multi-channel format,
'reference_length': batched original length of each reference signal
}
```
"""
sc_audio_type = NeuralType(('B', 'T'), AudioSignal())
mc_audio_type = NeuralType(('B', 'C', 'T'), AudioSignal())
return OrderedDict(
input_signal=sc_audio_type if self.num_channels('input_signal') == 1 else mc_audio_type,
input_length=NeuralType(('B',), LengthsType()),
target_signal=sc_audio_type if self.num_channels('target_signal') == 1 else mc_audio_type,
target_length=NeuralType(('B',), LengthsType()),
reference_signal=sc_audio_type if self.num_channels('reference_signal') == 1 else mc_audio_type,
reference_length=NeuralType(('B',), LengthsType()),
)
AudioToTargetWithEmbeddingExample = namedtuple(
typename='AudioToTargetWithEmbeddingExample',
field_names='input_signal input_length target_signal target_length embedding_vector embedding_length',
)
class AudioToTargetWithEmbeddingDataset(BaseAudioDataset):
"""A dataset for audio-to-audio tasks where the goal is to use
an input signal to recover the corresponding target signal, while an
additional embedding signal is also available. It is assumed that the
embedding is in the form of a vector.
Each line of the manifest file is expected to have the following format
```
{
input_key: 'path/to/input.wav',
target_key: 'path/to/path_to_target.wav',
embedding_key: 'path/to/path_to_reference.npy',
'duration': duration_of_input,
}
```
Keys for input, target and embedding signals can be configured in the constructor.
Args:
manifest_filepath: Path to manifest file in a format described above.
sample_rate: Sample rate for loaded audio signals.
input_key: Key pointing to input audio files in the manifest
target_key: Key pointing to target audio files in manifest
embedding_key: Key pointing to embedding files in manifest
audio_duration: Optional duration of each item returned by __getitem__.
If `None`, complete audio will be loaded.
If set, a random subsegment will be loaded synchronously from
the input and target audio, i.e., with the same start and end point.
random_offset: If `True`, offset will be randomized when loading a subsegment
from a file.
max_duration: If audio exceeds this length, do not include in dataset.
min_duration: If audio is less than this length, do not include in dataset.
max_utts: Limit number of utterances.
input_channel_selector: Optional, select subset of channels from each input audio file.
If `None`, all channels will be loaded.
target_channel_selector: Optional, select subset of channels from each target audio file.
If `None`, all channels will be loaded.
"""
def __init__(
self,
manifest_filepath: str,
sample_rate: int,
input_key: str,
target_key: str,
embedding_key: str,
audio_duration: Optional[float] = None,
random_offset: bool = False,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
max_utts: Optional[int] = None,
input_channel_selector: Optional[int] = None,
target_channel_selector: Optional[int] = None,
):
audio_to_manifest_key = {
'input_signal': input_key,
'target_signal': target_key,
'embedding_vector': embedding_key,
}
collection = collections.AudioCollection(
manifest_files=manifest_filepath,
audio_to_manifest_key=audio_to_manifest_key,
min_duration=min_duration,
max_duration=max_duration,
max_number=max_utts,
)
audio_processor = ASRAudioProcessor(sample_rate=sample_rate, random_offset=random_offset,)
audio_processor.sync_setup = SignalSetup(
signals=['input_signal', 'target_signal'],
duration=audio_duration,
channel_selectors=[input_channel_selector, target_channel_selector],
)
audio_processor.embedding_setup = SignalSetup(signals=['embedding_vector'])
super().__init__(
collection=collection, audio_processor=audio_processor, output_type=AudioToTargetWithEmbeddingExample
)
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
Returns:
Ordered dictionary in the following form:
```
{
'input_signal': batched single- or multi-channel format,
'input_length': batched original length of each input signal
'target_signal': batched single- or multi-channel format,
'target_length': batched original length of each target signal
'embedding_vector': batched embedded vector format,
'embedding_length': batched original length of each embedding vector
}
```
"""
sc_audio_type = NeuralType(('B', 'T'), AudioSignal())
mc_audio_type = NeuralType(('B', 'C', 'T'), AudioSignal())
return OrderedDict(
input_signal=sc_audio_type if self.num_channels('input_signal') == 1 else mc_audio_type,
input_length=NeuralType(('B',), LengthsType()),
target_signal=sc_audio_type if self.num_channels('target_signal') == 1 else mc_audio_type,
target_length=NeuralType(('B',), LengthsType()),
embedding_vector=NeuralType(('B', 'D'), EncodedRepresentation()),
embedding_length=NeuralType(('B',), LengthsType()),
)
| NeMo-main | nemo/collections/asr/data/audio_to_audio.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.tts import data, losses, models, modules
from nemo.package_info import __version__
# Set collection version equal to NeMo version.
__version = __version__
# Authorship.
__author__ = "NVIDIA Corporation"
# Set collection name.
__description__ = "Text to Speech collection"
| NeMo-main | nemo/collections/tts/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# BSD 3-Clause License
#
# Copyright (c) 2021, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn.functional as F
from nemo.collections.tts.modules.transformer import mask_from_lens
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types.elements import (
LengthsType,
LossType,
MelSpectrogramType,
RegressionValuesType,
TokenDurationType,
TokenLogDurationType,
)
from nemo.core.neural_types.neural_type import NeuralType
class DurationLoss(Loss):
def __init__(self, loss_scale=0.1):
super().__init__()
self.loss_scale = loss_scale
@property
def input_types(self):
return {
"log_durs_predicted": NeuralType(('B', 'T'), TokenLogDurationType()),
"durs_tgt": NeuralType(('B', 'T'), TokenDurationType()),
"len": NeuralType(('B'), LengthsType()),
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, log_durs_predicted, durs_tgt, len):
dur_mask = mask_from_lens(len, max_len=durs_tgt.size(1))
log_durs_tgt = torch.log(durs_tgt.float() + 1)
loss_fn = F.mse_loss
dur_loss = loss_fn(log_durs_predicted, log_durs_tgt, reduction='none')
dur_loss = (dur_loss * dur_mask).sum() / dur_mask.sum()
dur_loss *= self.loss_scale
return dur_loss
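# Minimal sketch of the masked log-duration MSE above on a toy batch; the helper and all
# values are hypothetical. Padded positions beyond `len` are excluded by the mask.
def _example_duration_loss():
    lens = torch.tensor([3, 2])                      # number of valid tokens per example
    durs_tgt = torch.tensor([[2, 1, 3], [4, 2, 0]])  # ground-truth durations in frames
    log_durs_pred = torch.log(durs_tgt.float() + 1)  # a perfect prediction in log space
    # A perfect prediction yields a (numerically) zero loss.
    return DurationLoss()(log_durs_predicted=log_durs_pred, durs_tgt=durs_tgt, len=lens)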
class PitchLoss(Loss):
def __init__(self, loss_scale=0.1):
super().__init__()
self.loss_scale = loss_scale
@property
def input_types(self):
return {
"pitch_predicted": NeuralType(('B', 'T'), RegressionValuesType()),
"pitch_tgt": NeuralType(('B', 'T'), RegressionValuesType()),
"len": NeuralType(('B'), LengthsType()),
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, pitch_predicted, pitch_tgt, len):
dur_mask = mask_from_lens(len, max_len=pitch_tgt.size(1))
ldiff = pitch_tgt.size(1) - pitch_predicted.size(1)
pitch_predicted = F.pad(pitch_predicted, (0, ldiff, 0, 0), value=0.0)
pitch_loss = F.mse_loss(pitch_tgt, pitch_predicted, reduction='none')
pitch_loss = (pitch_loss * dur_mask).sum() / dur_mask.sum()
pitch_loss *= self.loss_scale
return pitch_loss
class EnergyLoss(Loss):
def __init__(self, loss_scale=0.1):
super().__init__()
self.loss_scale = loss_scale
@property
def input_types(self):
return {
"energy_predicted": NeuralType(('B', 'T'), RegressionValuesType()),
"energy_tgt": NeuralType(('B', 'T'), RegressionValuesType()),
"length": NeuralType(('B'), LengthsType()),
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, energy_predicted, energy_tgt, length):
if energy_tgt is None:
return 0.0
dur_mask = mask_from_lens(length, max_len=energy_tgt.size(1))
energy_loss = F.mse_loss(energy_tgt, energy_predicted, reduction='none')
energy_loss = (energy_loss * dur_mask).sum() / dur_mask.sum()
energy_loss *= self.loss_scale
return energy_loss
class MelLoss(Loss):
@property
def input_types(self):
return {
"spect_predicted": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"spect_tgt": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, spect_predicted, spect_tgt):
spect_tgt.requires_grad = False
spect_tgt = spect_tgt.transpose(1, 2) # (B, T, H)
spect_predicted = spect_predicted.transpose(1, 2) # (B, T, H)
ldiff = spect_tgt.size(1) - spect_predicted.size(1)
spect_predicted = F.pad(spect_predicted, (0, 0, 0, ldiff, 0, 0), value=0.0)
mel_mask = spect_tgt.ne(0).float()
loss_fn = F.mse_loss
mel_loss = loss_fn(spect_predicted, spect_tgt, reduction='none')
mel_loss = (mel_loss * mel_mask).sum() / mel_mask.sum()
return mel_loss
| NeMo-main | nemo/collections/tts/losses/fastpitchloss.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import torch
import torch.nn.functional as F
from einops import rearrange
from nemo.collections.asr.parts.preprocessing.features import FilterbankFeatures
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types import (
AudioSignal,
LengthsType,
LossType,
NeuralType,
PredictionsType,
RegressionValuesType,
VoidType,
)
class MaskedLoss(Loss):
def __init__(self, loss_fn, loss_scale: float = 1.0):
super(MaskedLoss, self).__init__()
self.loss_scale = loss_scale
self.loss_fn = loss_fn
@property
def input_types(self):
return {
"target": NeuralType(('B', 'D', 'T'), RegressionValuesType()),
"predicted": NeuralType(('B', 'D', 'T'), PredictionsType()),
"target_len": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, predicted, target, target_len):
assert target.shape[2] == predicted.shape[2]
# [B, D, T]
loss = self.loss_fn(input=predicted, target=target)
# [B, T]
loss = torch.mean(loss, dim=1)
# [B]
loss = torch.sum(loss, dim=1) / torch.clamp(target_len, min=1.0)
# [1]
loss = torch.mean(loss)
loss = self.loss_scale * loss
return loss
class MaskedMAELoss(MaskedLoss):
def __init__(self, loss_scale: float = 1.0):
loss_fn = torch.nn.L1Loss(reduction='none')
super(MaskedMAELoss, self).__init__(loss_fn=loss_fn, loss_scale=loss_scale)
class MaskedMSELoss(MaskedLoss):
def __init__(self, loss_scale: float = 1.0):
loss_fn = torch.nn.MSELoss(reduction='none')
super(MaskedMSELoss, self).__init__(loss_fn=loss_fn, loss_scale=loss_scale)
class TimeDomainLoss(Loss):
def __init__(self):
super(TimeDomainLoss, self).__init__()
self.loss_fn = MaskedMAELoss()
@property
def input_types(self):
return {
"audio_real": NeuralType(('B', 'T'), AudioSignal()),
"audio_gen": NeuralType(('B', 'T'), AudioSignal()),
"audio_len": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {
"loss": [NeuralType(elements_type=LossType())],
}
@typecheck()
def forward(self, audio_real, audio_gen, audio_len):
audio_real = rearrange(audio_real, "B T -> B 1 T")
audio_gen = rearrange(audio_gen, "B T -> B 1 T")
loss = self.loss_fn(target=audio_real, predicted=audio_gen, target_len=audio_len)
return loss
class MultiResolutionMelLoss(Loss):
def __init__(self, sample_rate: int, mel_dim: int, resolutions: List[List], l1_scale: float = 1.0):
super(MultiResolutionMelLoss, self).__init__()
self.l1_loss_fn = MaskedMAELoss(loss_scale=l1_scale)
self.l2_loss_fn = MaskedMSELoss()
self.mel_features = torch.nn.ModuleList()
for n_fft, hop_len, win_len in resolutions:
mel_feature = FilterbankFeatures(
sample_rate=sample_rate,
nfilt=mel_dim,
n_window_size=win_len,
n_window_stride=hop_len,
n_fft=n_fft,
pad_to=1,
mag_power=1.0,
log_zero_guard_type="add",
log_zero_guard_value=1.0,
mel_norm=None,
normalize=None,
preemph=None,
dither=0.0,
use_grads=True,
)
self.mel_features.append(mel_feature)
@property
def input_types(self):
return {
"audio_real": NeuralType(('B', 'T'), AudioSignal()),
"audio_gen": NeuralType(('B', 'T'), AudioSignal()),
"audio_len": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {
"loss": [NeuralType(elements_type=LossType())],
}
@typecheck()
def forward(self, audio_real, audio_gen, audio_len):
loss = 0.0
for mel_feature in self.mel_features:
mel_real, mel_real_len = mel_feature(x=audio_real, seq_len=audio_len)
mel_gen, _ = mel_feature(x=audio_gen, seq_len=audio_len)
loss += self.l1_loss_fn(predicted=mel_gen, target=mel_real, target_len=mel_real_len)
loss += self.l2_loss_fn(predicted=mel_gen, target=mel_real, target_len=mel_real_len)
loss /= len(self.mel_features)
return loss
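# Illustrative sketch of a typical MultiResolutionMelLoss configuration. The helper,
# the [n_fft, hop, win] triples and the 22.05 kHz sample rate are assumptions chosen
# for illustration, not values prescribed by this module.
def _example_multi_resolution_mel_loss():
    loss_fn = MultiResolutionMelLoss(
        sample_rate=22050,
        mel_dim=64,
        resolutions=[[512, 128, 512], [1024, 256, 1024], [2048, 512, 2048]],
        l1_scale=1.0,
    )
    audio_real = torch.randn(2, 22050)  # one second of reference audio per example
    audio_gen = torch.randn(2, 22050)   # generated audio of the same length
    audio_len = torch.tensor([22050, 22050])
    return loss_fn(audio_real=audio_real, audio_gen=audio_gen, audio_len=audio_len)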
class RelativeFeatureMatchingLoss(Loss):
def __init__(self, div_guard=1e-3):
super(RelativeFeatureMatchingLoss, self).__init__()
self.div_guard = div_guard
@property
def input_types(self):
return {
"fmaps_real": [[NeuralType(elements_type=VoidType())]],
"fmaps_gen": [[NeuralType(elements_type=VoidType())]],
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, fmaps_real, fmaps_gen):
loss = 0.0
for fmap_real, fmap_gen in zip(fmaps_real, fmaps_gen):
# [B, ..., time]
for feat_real, feat_gen in zip(fmap_real, fmap_gen):
# [B, ...]
feat_mean = torch.mean(torch.abs(feat_real), dim=-1)
diff = torch.mean(torch.abs(feat_real - feat_gen), dim=-1)
feat_loss = diff / (feat_mean + self.div_guard)
# [1]
feat_loss = torch.mean(feat_loss) / len(fmap_real)
loss += feat_loss
loss /= len(fmaps_real)
return loss
class GeneratorHingedLoss(Loss):
@property
def input_types(self):
return {
"disc_scores_gen": [NeuralType(('B', 'C', 'T'), VoidType())],
}
@property
def output_types(self):
return {"loss": NeuralType(elements_type=LossType())}
@typecheck()
def forward(self, disc_scores_gen):
loss = 0.0
for disc_score_gen in disc_scores_gen:
loss += torch.mean(F.relu(1 - disc_score_gen))
loss /= len(disc_scores_gen)
return loss
class GeneratorSquaredLoss(Loss):
@property
def input_types(self):
return {
"disc_scores_gen": [NeuralType(('B', 'C', 'T'), VoidType())],
}
@property
def output_types(self):
return {"loss": NeuralType(elements_type=LossType())}
@typecheck()
def forward(self, disc_scores_gen):
loss = 0.0
for disc_score_gen in disc_scores_gen:
loss += torch.mean((1 - disc_score_gen) ** 2)
loss /= len(disc_scores_gen)
return loss
class DiscriminatorHingedLoss(Loss):
@property
def input_types(self):
return {
"disc_scores_real": [NeuralType(('B', 'C', 'T'), VoidType())],
"disc_scores_gen": [NeuralType(('B', 'C', 'T'), VoidType())],
}
@property
def output_types(self):
return {"loss": NeuralType(elements_type=LossType())}
@typecheck()
def forward(self, disc_scores_real, disc_scores_gen):
loss = 0.0
for disc_score_real, disc_score_gen in zip(disc_scores_real, disc_scores_gen):
loss_real = torch.mean(F.relu(1 - disc_score_real))
loss_gen = torch.mean(F.relu(1 + disc_score_gen))
loss += (loss_real + loss_gen) / 2
loss /= len(disc_scores_real)
return loss
class DiscriminatorSquaredLoss(Loss):
@property
def input_types(self):
return {
"disc_scores_real": [NeuralType(('B', 'C', 'T'), VoidType())],
"disc_scores_gen": [NeuralType(('B', 'C', 'T'), VoidType())],
}
@property
def output_types(self):
return {"loss": NeuralType(elements_type=LossType())}
@typecheck()
def forward(self, disc_scores_real, disc_scores_gen):
loss = 0.0
for disc_score_real, disc_score_gen in zip(disc_scores_real, disc_scores_gen):
loss_real = torch.mean((1 - disc_score_real) ** 2)
loss_gen = torch.mean(disc_score_gen ** 2)
loss += (loss_real + loss_gen) / 2
loss /= len(disc_scores_real)
return loss
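# Minimal sketch: the hinged and squared GAN losses above consume lists of per-discriminator
# score tensors shaped (B, C, T). The helper and the shapes below are arbitrary examples.
def _example_squared_gan_losses():
    disc_scores_real = [torch.rand(2, 1, 50), torch.rand(2, 1, 25)]
    disc_scores_gen = [torch.rand(2, 1, 50), torch.rand(2, 1, 25)]
    gen_loss = GeneratorSquaredLoss()(disc_scores_gen=disc_scores_gen)
    disc_loss = DiscriminatorSquaredLoss()(disc_scores_real=disc_scores_real, disc_scores_gen=disc_scores_gen)
    return gen_loss, disc_loss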
| NeMo-main | nemo/collections/tts/losses/audio_codec_loss.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The forward functions of the following classes are based on code from https://github.com/jik876/hifi-gan:
# FeatureMatchingLoss, DiscriminatorLoss, GeneratorLoss
import torch
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types.elements import LossType, VoidType
from nemo.core.neural_types.neural_type import NeuralType
class FeatureMatchingLoss(Loss):
"""Feature Matching Loss module"""
@property
def input_types(self):
return {
"fmap_r": [[NeuralType(elements_type=VoidType())]],
"fmap_g": [[NeuralType(elements_type=VoidType())]],
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss * 2
class DiscriminatorLoss(Loss):
"""Discriminator Loss module"""
@property
def input_types(self):
return {
"disc_real_outputs": [NeuralType(('B', 'T'), VoidType())],
"disc_generated_outputs": [NeuralType(('B', 'T'), VoidType())],
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
"real_losses": [NeuralType(elements_type=LossType())],
"fake_losses": [NeuralType(elements_type=LossType())],
}
@typecheck()
def forward(self, disc_real_outputs, disc_generated_outputs):
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1 - dr) ** 2)
g_loss = torch.mean(dg ** 2)
loss += r_loss + g_loss
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
class GeneratorLoss(Loss):
"""Generator Loss module"""
@property
def input_types(self):
return {
"disc_outputs": [NeuralType(('B', 'T'), VoidType())],
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
"fake_losses": [NeuralType(elements_type=LossType())],
}
@typecheck()
def forward(self, disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1 - dg) ** 2)
gen_losses.append(l)
loss += l
return loss, gen_losses
| NeMo-main | nemo/collections/tts/losses/hifigan_losses.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import torch
from torch.nn import functional as F
from nemo.collections.tts.losses.aligner_loss import ForwardSumLoss
from nemo.collections.tts.parts.utils.helpers import get_mask_from_lengths
from nemo.core.classes import Loss
def compute_flow_loss(z, log_det_W_list, log_s_list, n_elements, n_dims, mask, sigma=1.0):
log_det_W_total = 0.0
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s * mask)
if len(log_det_W_list):
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s * mask)
if len(log_det_W_list):
log_det_W_total += log_det_W_list[i]
if len(log_det_W_list):
log_det_W_total *= n_elements
z = z * mask
prior_NLL = torch.sum(z * z) / (2 * sigma * sigma)
loss = prior_NLL - log_s_total - log_det_W_total
denom = n_elements * n_dims
loss = loss / denom
loss_prior = prior_NLL / denom
return loss, loss_prior
def compute_regression_loss(x_hat, x, mask, name=False):
x = x[:, None] if len(x.shape) == 2 else x # add channel dim
mask = mask[:, None] if len(mask.shape) == 2 else mask # add channel dim
assert len(x.shape) == len(mask.shape)
x = x * mask
x_hat = x_hat * mask
if name == 'vpred':
loss = F.binary_cross_entropy_with_logits(x_hat, x, reduction='sum')
else:
loss = F.mse_loss(x_hat, x, reduction='sum')
loss = loss / mask.sum()
loss_dict = {"loss_{}".format(name): loss}
return loss_dict
class AttributePredictionLoss(torch.nn.Module):
def __init__(self, name, model_config, loss_weight, sigma=1.0):
super(AttributePredictionLoss, self).__init__()
self.name = name
self.sigma = sigma
self.model_name = model_config['name']
self.loss_weight = loss_weight
self.n_group_size = 1
if 'n_group_size' in model_config['hparams']:
self.n_group_size = model_config['hparams']['n_group_size']
def forward(self, model_output, lens):
mask = get_mask_from_lengths(lens // self.n_group_size)
mask = mask[:, None].float()
loss_dict = {}
if 'z' in model_output:
n_elements = lens.sum() // self.n_group_size
n_dims = model_output['z'].size(1)
loss, loss_prior = compute_flow_loss(
model_output['z'],
model_output['log_det_W_list'],
model_output['log_s_list'],
n_elements,
n_dims,
mask,
self.sigma,
)
loss_dict = {
"loss_{}".format(self.name): (loss, self.loss_weight),
"loss_prior_{}".format(self.name): (loss_prior, 0.0),
}
elif 'x_hat' in model_output:
loss_dict = compute_regression_loss(model_output['x_hat'], model_output['x'], mask, self.name)
for k, v in loss_dict.items():
loss_dict[k] = (v, self.loss_weight)
if len(loss_dict) == 0:
raise Exception("loss not supported")
return loss_dict
class AttentionBinarizationLoss(torch.nn.Module):
def __init__(self):
super(AttentionBinarizationLoss, self).__init__()
def forward(self, hard_attention, soft_attention):
log_sum = torch.log(soft_attention[hard_attention == 1]).sum()
return -log_sum / hard_attention.sum()
class RADTTSLoss(Loss):
def __init__(
self,
sigma=1.0,
n_group_size=1,
dur_model_config=None,
f0_model_config=None,
energy_model_config=None,
vpred_model_config=None,
loss_weights=None,
):
super(RADTTSLoss, self).__init__()
self.sigma = sigma
self.n_group_size = n_group_size
self.loss_weights = loss_weights
self.attn_ctc_loss = ForwardSumLoss()
self.loss_weights = loss_weights
self.loss_fns = {}
if dur_model_config is not None:
self.loss_fns['duration_model_outputs'] = AttributePredictionLoss(
'duration', dur_model_config, loss_weights['dur_loss_weight']
)
if f0_model_config is not None:
self.loss_fns['f0_model_outputs'] = AttributePredictionLoss(
'f0', f0_model_config, loss_weights['f0_loss_weight'], sigma=1.0
)
if energy_model_config is not None:
self.loss_fns['energy_model_outputs'] = AttributePredictionLoss(
'energy', energy_model_config, loss_weights['energy_loss_weight']
)
if vpred_model_config is not None:
self.loss_fns['vpred_model_outputs'] = AttributePredictionLoss(
'vpred', vpred_model_config, loss_weights['vpred_loss_weight']
)
def forward(self, model_output, in_lens, out_lens):
loss_dict = {}
if len(model_output['z_mel']):
n_elements = out_lens.sum() // self.n_group_size
mask = get_mask_from_lengths(out_lens // self.n_group_size)
mask = mask[:, None].float()
n_dims = model_output['z_mel'].size(1)
loss_mel, loss_prior_mel = compute_flow_loss(
model_output['z_mel'],
model_output['log_det_W_list'],
model_output['log_s_list'],
n_elements,
n_dims,
mask,
self.sigma,
)
loss_dict['loss_mel'] = (loss_mel, 1.0) # loss, weight
loss_dict['loss_prior_mel'] = (loss_prior_mel, 0.0)
ctc_cost = self.attn_ctc_loss(attn_logprob=model_output['attn_logprob'], in_lens=in_lens, out_lens=out_lens)
loss_dict['loss_ctc'] = (ctc_cost, self.loss_weights['ctc_loss_weight'])
for k in model_output:
if k in self.loss_fns:
if model_output[k] is not None and len(model_output[k]) > 0:
t_lens = in_lens if 'dur' in k else out_lens
mout = model_output[k]
for loss_name, v in self.loss_fns[k](mout, t_lens).items():
loss_dict[loss_name] = v
return loss_dict
| NeMo-main | nemo/collections/tts/losses/radttsloss.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nemo.collections.tts.losses.tacotron2loss
import nemo.collections.tts.losses.waveglowloss
| NeMo-main | nemo/collections/tts/losses/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License
#
# Copyright (c) 2019 Tomoki Hayashi
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# The following functions/classes were based on:
# https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/losses/stft_loss.py
# stft
# SpectralConvergenceLoss
# LogSTFTMagnitudeLoss
# STFTLoss
# MultiResolutionSTFTLoss
import torch
import torch.nn.functional as F
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types.elements import AudioSignal, LengthsType, LossType, SpectrogramType
from nemo.core.neural_types.neural_type import NeuralType
def stft(x, fft_size, hop_size, win_length, window):
"""Perform STFT and convert to magnitude spectrogram.
Args:
x (Tensor): Input signal tensor (B, T).
fft_size (int): FFT size.
hop_size (int): Hop size.
win_length (int): Window length.
window (str): Window function type.
Returns:
Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).
"""
x_stft = torch.view_as_real(torch.stft(x, fft_size, hop_size, win_length, window, return_complex=True))
real = x_stft[..., 0]
imag = x_stft[..., 1]
# NOTE(kan-bayashi): clamp is needed to avoid nan or inf
return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)
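# Minimal usage sketch of the helper above; the function name and sizes are illustrative.
# The returned magnitude spectrogram has shape (B, #frames, fft_size // 2 + 1).
def _example_stft_shapes():
    signal = torch.randn(2, 16000)  # (B, T), e.g. one second at 16 kHz
    window = torch.hann_window(600)
    mag = stft(signal, fft_size=1024, hop_size=120, win_length=600, window=window)
    assert mag.shape[0] == 2 and mag.shape[-1] == 1024 // 2 + 1
    return mag.shape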
class SpectralConvergenceLoss(Loss):
"""Spectral convergence loss module."""
@property
def input_types(self):
return {
"x_mag": NeuralType(('B', 'T', 'D'), SpectrogramType()),
"y_mag": NeuralType(('B', 'T', 'D'), SpectrogramType()),
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, *, x_mag, y_mag):
"""Calculate forward propagation. It is assumed that x_mag and y_mag were padded to fit the maximum batch
sequence length with silence, hence the norm of these extra padded values is assumed to be 0. Therefore,
input_lengths is not an argument, unlike in LogSTFTMagnitudeLoss.
Args:
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
Tensor: Spectral convergence loss value.
"""
# Mean across time and freq_bins first
loss = torch.norm(y_mag - x_mag, p="fro", dim=(1, 2)) / torch.norm(y_mag, p="fro", dim=(1, 2))
# Mean across batches
loss = torch.mean(loss)
return loss
class LogSTFTMagnitudeLoss(Loss):
"""Log STFT magnitude loss module."""
@property
def input_types(self):
return {
"x_mag": NeuralType(('B', 'T', 'D'), SpectrogramType()),
"y_mag": NeuralType(('B', 'T', 'D'), SpectrogramType()),
"input_lengths": NeuralType(('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, *, x_mag, y_mag, input_lengths=None):
"""Calculate forward propagation.
Args:
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
input_lengths (Tensor): Length of groundtruth sample in samples (B).
Returns:
Tensor: Log STFT magnitude loss value.
"""
if input_lengths is None:
# During training, we used fixed sequence length, so just average across all dimensions
return F.l1_loss(torch.log(y_mag), torch.log(x_mag))
loss = F.l1_loss(torch.log(y_mag), torch.log(x_mag), reduction='none')
# First sum and average across time and freq bins
loss = loss / loss.shape[2]
loss = torch.sum(loss, dim=[1, 2])
loss = loss / input_lengths
# Last average across batch
return torch.sum(loss) / loss.shape[0]
class STFTLoss(Loss):
"""STFT loss module."""
def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"):
"""Initialize STFT loss module."""
super(STFTLoss, self).__init__()
self.fft_size = fft_size
self.shift_size = shift_size
self.win_length = win_length
self.window = getattr(torch, window)(win_length)
self.spectral_convergence_loss = SpectralConvergenceLoss()
self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()
@property
def input_types(self):
return {
"x": NeuralType(('B', 'T'), AudioSignal()),
"y": NeuralType(('B', 'T'), AudioSignal()),
"input_lengths": NeuralType(('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
return {
"sc_loss": NeuralType(elements_type=LossType()),
"mag_loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, *, x, y, input_lengths=None):
"""Calculate forward propagation.
Args:
x (Tensor): Predicted signal (B, T).
y (Tensor): Groundtruth signal (B, T).
input_lengths (Tensor): Length of groundtruth sample in samples (B).
Returns:
Tensor: Spectral convergence loss value.
Tensor: Log STFT magnitude loss value.
"""
if self.window.device != x.device:
self.window = self.window.to(x.device)
x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window)
y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window)
sc_loss = self.spectral_convergence_loss(x_mag=x_mag, y_mag=y_mag)
if input_lengths is not None:
input_lengths = torch.floor(input_lengths / float(self.shift_size)) + 1
assert max(input_lengths) == x_mag.shape[1], f"{max(input_lengths)} != {x_mag.shape[1]}"
mag_loss = self.log_stft_magnitude_loss(x_mag=x_mag, y_mag=y_mag, input_lengths=input_lengths)
return sc_loss, mag_loss
class MultiResolutionSTFTLoss(Loss):
"""Multi resolution STFT loss module."""
def __init__(self, fft_sizes, hop_sizes, win_lengths, window="hann_window"):
"""Initialize Multi resolution STFT loss module.
Args:
fft_sizes (list): List of FFT sizes.
hop_sizes (list): List of hop sizes.
win_lengths (list): List of window lengths.
window (str): Window function type.
"""
super(MultiResolutionSTFTLoss, self).__init__()
assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
self.stft_losses = torch.nn.ModuleList()
for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths):
self.stft_losses += [STFTLoss(fs, ss, wl, window)]
@property
def input_types(self):
return {
"x": NeuralType(('B', 'T'), AudioSignal()),
"y": NeuralType(('B', 'T'), AudioSignal()),
"input_lengths": NeuralType(('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
return {
"sc_loss": [NeuralType(elements_type=LossType())],
"mag_loss": [NeuralType(elements_type=LossType())],
}
@typecheck()
def forward(self, *, x, y, input_lengths=None):
"""Calculate forward propagation.
Args:
x (Tensor): Predicted signal (B, T).
y (Tensor): Groundtruth signal (B, T).
input_lengths (Tensor): Length of groundtruth sample in samples (B).
Returns:
List[Tensor]: Multi resolution spectral convergence loss value.
List[Tensor]: Multi resolution log STFT magnitude loss value.
"""
sc_loss = [0.0] * len(self.stft_losses)
mag_loss = [0.0] * len(self.stft_losses)
for i, f in enumerate(self.stft_losses):
sc_l, mag_l = f(x=x, y=y, input_lengths=input_lengths)
sc_loss[i] = sc_l
mag_loss[i] = mag_l
return sc_loss, mag_loss
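# Illustrative sketch: the resolution lists below are values commonly paired with
# multi-resolution STFT losses; they are assumptions for illustration, not values
# mandated by this module.
def _example_multi_resolution_stft_loss():
    loss_fn = MultiResolutionSTFTLoss(
        fft_sizes=[1024, 2048, 512], hop_sizes=[120, 240, 50], win_lengths=[600, 1200, 240]
    )
    x = torch.randn(2, 16000)  # predicted audio
    y = torch.randn(2, 16000)  # groundtruth audio
    sc_losses, mag_losses = loss_fn(x=x, y=y)
    # Each output is a list with one loss value per resolution.
    return sum(sc_losses) / len(sc_losses), sum(mag_losses) / len(mag_losses)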
| NeMo-main | nemo/collections/tts/losses/stftlosses.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types.elements import LossType, NormalDistributionSamplesType, VoidType
from nemo.core.neural_types.neural_type import NeuralType
class WaveGlowLoss(Loss):
""" A Loss module that computes loss for WaveGlow
"""
@property
def input_types(self):
return {
"z": NeuralType(('B', 'flowgroup', 'T'), NormalDistributionSamplesType()),
"log_s_list": [NeuralType(('B', 'flowgroup', 'T'), VoidType())], # TODO: Figure out a good typing
"log_det_W_list": [NeuralType(elements_type=VoidType())], # TODO: Figure out a good typing
"sigma": NeuralType(optional=True),
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, *, z, log_s_list, log_det_W_list, sigma=1.0):
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(z * z) / (2 * sigma * sigma) - log_s_total - log_det_W_total
return loss / (z.size(0) * z.size(1) * z.size(2))
| NeMo-main | nemo/collections/tts/losses/waveglowloss.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2021 Jaehyeon Kim
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The forward functions of the following classes are based on code from https://github.com/jaywalnut310/vits:
# KlLoss
import torch
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types.elements import LossType, VoidType
from nemo.core.neural_types.neural_type import NeuralType
class KlLoss(Loss):
@property
def input_types(self):
return {
"z_p": [NeuralType(('B', 'D', 'T'), VoidType())],
"logs_q": [NeuralType(('B', 'D', 'T'), VoidType())],
"m_p": [NeuralType(('B', 'D', 'T'), VoidType())],
"logs_p": [NeuralType(('B', 'D', 'T'), VoidType())],
"z_mask": [NeuralType(('B', 'D', 'T'), VoidType())],
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, z_p, logs_q, m_p, logs_p, z_mask):
"""
Monte-Carlo estimate of KL(q || p), averaged over positions selected by z_mask.
z_p: Sample from the posterior distribution
logs_q: Log standard deviation of the posterior distribution
m_p: Mean of the prior distribution
logs_p: Log standard deviation of the prior distribution
z_mask: Mask selecting valid positions
"""
z_p = z_p.float()
logs_q = logs_q.float()
m_p = m_p.float()
logs_p = logs_p.float()
z_mask = z_mask.float()
kl = logs_p - logs_q - 0.5
kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
kl = torch.sum(kl * z_mask)
l = kl / torch.sum(z_mask)
return l
class FeatureMatchingLoss(Loss):
"""VITS Feature Matching Loss module"""
@property
def input_types(self):
return {
"fmap_r": [[NeuralType(elements_type=VoidType())]],
"fmap_g": [[NeuralType(elements_type=VoidType())]],
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, fmap_r, fmap_g):
"""
fmap_r, fmap_g: List[List[Tensor]]
"""
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
rl = rl.float().detach()
gl = gl.float()
loss += torch.mean(torch.abs(rl - gl))
return loss * 2
class DiscriminatorLoss(Loss):
"""Discriminator Loss module"""
@property
def input_types(self):
return {
"disc_real_outputs": [NeuralType(('B', 'T'), VoidType())],
"disc_generated_outputs": [NeuralType(('B', 'T'), VoidType())],
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
"real_losses": [NeuralType(elements_type=LossType())],
"fake_losses": [NeuralType(elements_type=LossType())],
}
@typecheck()
def forward(self, disc_real_outputs, disc_generated_outputs):
r_losses = []
g_losses = []
loss = 0
for i, (dr, dg) in enumerate(zip(disc_real_outputs, disc_generated_outputs)):
dr = dr.float()
dg = dg.float()
r_loss = torch.mean((1 - dr) ** 2)
g_loss = torch.mean(dg ** 2)
loss += r_loss + g_loss
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
class GeneratorLoss(Loss):
"""Generator Loss module"""
@property
def input_types(self):
return {
"disc_outputs": [NeuralType(('B', 'T'), VoidType())],
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
"fake_losses": [NeuralType(elements_type=LossType())],
}
@typecheck()
def forward(self, disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
dg = dg.float()
l = torch.mean((1 - dg) ** 2)
gen_losses.append(l)
loss += l
return loss, gen_losses
| NeMo-main | nemo/collections/tts/losses/vits_losses.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types.elements import LengthsType, LogprobsType, LossType, ProbsType
from nemo.core.neural_types.neural_type import NeuralType
class ForwardSumLoss(Loss):
def __init__(self, blank_logprob=-1, loss_scale=1.0):
super().__init__()
self.log_softmax = torch.nn.LogSoftmax(dim=-1)
self.ctc_loss = torch.nn.CTCLoss(zero_infinity=True)
self.blank_logprob = blank_logprob
self.loss_scale = loss_scale
@property
def input_types(self):
return {
"attn_logprob": NeuralType(('B', 'S', 'T_spec', 'T_text'), LogprobsType()),
"in_lens": NeuralType(tuple('B'), LengthsType()),
"out_lens": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {
"forward_sum_loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, attn_logprob, in_lens, out_lens):
key_lens = in_lens
query_lens = out_lens
max_key_len = attn_logprob.size(-1)
# Reorder input to [query_len, batch_size, key_len]
attn_logprob = attn_logprob.squeeze(1)
attn_logprob = attn_logprob.permute(1, 0, 2)
# Add blank label
attn_logprob = F.pad(input=attn_logprob, pad=(1, 0, 0, 0, 0, 0), value=self.blank_logprob)
# Convert to log probabilities
# Note: Mask out probs beyond key_len
key_inds = torch.arange(max_key_len + 1, device=attn_logprob.device, dtype=torch.long)
attn_logprob.masked_fill_(key_inds.view(1, 1, -1) > key_lens.view(1, -1, 1), -1e15) # key_inds >= key_lens+1
attn_logprob = self.log_softmax(attn_logprob)
# Target sequences
target_seqs = key_inds[1:].unsqueeze(0)
target_seqs = target_seqs.repeat(key_lens.numel(), 1)
# Evaluate CTC loss
cost = self.ctc_loss(attn_logprob, target_seqs, input_lengths=query_lens, target_lengths=key_lens)
cost *= self.loss_scale
return cost
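# Minimal sketch of the expected tensor layout for the alignment loss above: attn_logprob
# is (B, S, T_spec, T_text) with a singleton attention-head axis S=1. The helper and all
# sizes below are arbitrary examples.
def _example_forward_sum_loss():
    attn_logprob = torch.randn(2, 1, 12, 5)  # batch of 2, 12 spectrogram frames, 5 text tokens
    in_lens = torch.tensor([5, 4])           # number of valid text tokens per example
    out_lens = torch.tensor([12, 10])        # number of valid spectrogram frames per example
    return ForwardSumLoss()(attn_logprob=attn_logprob, in_lens=in_lens, out_lens=out_lens)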
class BinLoss(Loss):
def __init__(self, loss_scale=1.0):
super().__init__()
self.loss_scale = loss_scale
@property
def input_types(self):
return {
"hard_attention": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
"soft_attention": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
}
@property
def output_types(self):
return {
"bin_loss": NeuralType(elements_type=LossType()),
}
@typecheck()
def forward(self, hard_attention, soft_attention):
log_sum = torch.log(torch.clamp(soft_attention[hard_attention == 1], min=1e-12)).sum()
loss = -log_sum / hard_attention.sum()
loss *= self.loss_scale
return loss
| NeMo-main | nemo/collections/tts/losses/aligner_loss.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Phil Wang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following is largely based on code from https://github.com/lucidrains/stylegan2-pytorch
import torch
import torch.nn.functional as F
from einops import rearrange
from torch.autograd import grad as torch_grad
from nemo.collections.tts.parts.utils.helpers import mask_sequence_tensor
class GradientPenaltyLoss(torch.nn.Module):
"""
    R1 loss from [1], used following [2].
    [1] Mescheder et al. - Which Training Methods for GANs do actually Converge? 2018, https://arxiv.org/abs/1801.04406
    [2] Karras et al. - A Style-Based Generator Architecture for Generative Adversarial Networks, 2018 (https://arxiv.org/abs/1812.04948)
"""
def __init__(self, weight: float = 10.0):
super().__init__()
self.weight = weight
def __call__(self, images, output):
batch_size, *_ = images.shape
gradients = torch_grad(
outputs=output,
inputs=images,
grad_outputs=torch.ones(output.size(), device=images.device),
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradients = gradients.reshape(batch_size, -1)
return self.weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
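# Minimal usage sketch for the R1 penalty above (the discriminator name is an assumption):
# the real images must have requires_grad=True before the discriminator forward pass,
# otherwise torch.autograd.grad cannot differentiate the logits w.r.t. the images.
#   images.requires_grad_()
#   real_logits = discriminator(images)
#   r1_penalty = GradientPenaltyLoss(weight=10.0)(images, real_logits)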
class GeneratorLoss(torch.nn.Module):
def __call__(self, fake_logits):
return fake_logits.mean()
class HingeLoss(torch.nn.Module):
def __call__(self, real_logits, fake_logits):
return (F.relu(1 + real_logits) + F.relu(1 - fake_logits)).mean()
class ConsistencyLoss(torch.nn.Module):
"""
Loss to keep SpectrogramEnhancer from generating extra sounds.
L1 distance on x0.25 Mel scale (20 bins for typical 80-bin scale)
"""
def __init__(self, weight: float = 10):
super().__init__()
self.weight = weight
def __call__(self, condition, output, lengths):
*_, w, h = condition.shape
w, h = w // 4, h
condition = F.interpolate(condition, size=(w, h), mode="bilinear", antialias=True)
output = F.interpolate(output, size=(w, h), mode="bilinear", antialias=True)
dist = (condition - output).abs()
dist = mask_sequence_tensor(dist, lengths)
return (dist / rearrange(lengths, "b -> b 1 1 1")).sum(dim=-1).mean() * self.weight
| NeMo-main | nemo/collections/tts/losses/spectrogram_enhancer_losses.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.tts.parts.utils.helpers import get_mask_from_lengths
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types.elements import LengthsType, LogitsType, LossType, MelSpectrogramType
from nemo.core.neural_types.neural_type import NeuralType
class Tacotron2Loss(Loss):
"""A Loss module that computes loss for Tacotron2"""
@property
def input_types(self):
return {
"spec_pred_dec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"spec_pred_postnet": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"gate_pred": NeuralType(('B', 'T'), LogitsType()),
"spec_target": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"spec_target_len": NeuralType(('B'), LengthsType()),
"pad_value": NeuralType(),
}
@property
def output_types(self):
return {
"loss": NeuralType(elements_type=LossType()),
"gate_target": NeuralType(('B', 'T'), LogitsType()), # Used for evaluation
}
@typecheck()
def forward(self, *, spec_pred_dec, spec_pred_postnet, gate_pred, spec_target, spec_target_len, pad_value):
# Make the gate target
max_len = spec_target.shape[2]
gate_target = torch.zeros(spec_target_len.shape[0], max_len)
gate_target = gate_target.type_as(gate_pred)
for i, length in enumerate(spec_target_len):
gate_target[i, length.data - 1 :] = 1
spec_target.requires_grad = False
gate_target.requires_grad = False
gate_target = gate_target.view(-1, 1)
max_len = spec_target.shape[2]
if max_len < spec_pred_dec.shape[2]:
# Predicted len is larger than reference
# Need to slice
spec_pred_dec = spec_pred_dec.narrow(2, 0, max_len)
spec_pred_postnet = spec_pred_postnet.narrow(2, 0, max_len)
gate_pred = gate_pred.narrow(1, 0, max_len).contiguous()
elif max_len > spec_pred_dec.shape[2]:
# Need to do padding
pad_amount = max_len - spec_pred_dec.shape[2]
spec_pred_dec = torch.nn.functional.pad(spec_pred_dec, (0, pad_amount), value=pad_value)
spec_pred_postnet = torch.nn.functional.pad(spec_pred_postnet, (0, pad_amount), value=pad_value)
gate_pred = torch.nn.functional.pad(gate_pred, (0, pad_amount), value=1e3)
mask = ~get_mask_from_lengths(spec_target_len, spec_pred_dec)
mask = mask.expand(spec_target.shape[1], mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
spec_pred_dec.data.masked_fill_(mask, pad_value)
spec_pred_postnet.data.masked_fill_(mask, pad_value)
gate_pred.data.masked_fill_(mask[:, 0, :], 1e3)
gate_pred = gate_pred.view(-1, 1)
rnn_mel_loss = torch.nn.functional.mse_loss(spec_pred_dec, spec_target)
postnet_mel_loss = torch.nn.functional.mse_loss(spec_pred_postnet, spec_target)
gate_loss = torch.nn.functional.binary_cross_entropy_with_logits(gate_pred, gate_target)
return rnn_mel_loss + postnet_mel_loss + gate_loss, gate_target
| NeMo-main | nemo/collections/tts/losses/tacotron2loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/tts/g2p/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import re
import string
from typing import Dict, List, Union
__all__ = [
"read_wordids",
"set_grapheme_case",
"GRAPHEME_CASE_UPPER",
"GRAPHEME_CASE_LOWER",
"GRAPHEME_CASE_MIXED",
"get_heteronym_spans",
]
# define grapheme cases.
GRAPHEME_CASE_UPPER = "upper"
GRAPHEME_CASE_LOWER = "lower"
GRAPHEME_CASE_MIXED = "mixed"
def read_wordids(wordid_map: str):
"""
Reads wordid file from WikiHomograph dataset,
https://github.com/google-research-datasets/WikipediaHomographData/blob/master/data/wordids.tsv
Args:
wordid_map: path to wordids.tsv
Returns:
data_dict: a dictionary of graphemes with corresponding word_id - ipa_form pairs
wordid_to_idx: word id to label id mapping
"""
if not os.path.exists(wordid_map):
raise ValueError(f"{wordid_map} not found")
data_dict = {}
wordid_to_idx = {}
with open(wordid_map, "r", encoding="utf-8") as f:
tsv_file = csv.reader(f, delimiter="\t")
for i, line in enumerate(tsv_file):
if i == 0:
continue
grapheme = line[0]
word_id = line[1]
ipa_form = line[3]
wordid_to_idx[word_id] = len(wordid_to_idx)
if grapheme not in data_dict:
data_dict[grapheme] = {}
data_dict[grapheme][word_id] = ipa_form
return data_dict, wordid_to_idx
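# Illustrative sketch of the wordids.tsv layout consumed above; the column names and the example
# row are assumptions for illustration (only columns 0, 1 and 3 are read, and the header is skipped):
#   homograph<TAB>wordid<TAB>...<TAB>ipa
#   bass<TAB>bass_fish<TAB>...<TAB>'bæs
# read_wordids("wordids.tsv") would then return
#   data_dict = {"bass": {"bass_fish": "'bæs", ...}, ...}
#   wordid_to_idx = {"bass_fish": 0, ...}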
def get_wordid_to_phonemes(wordid_to_phonemes_file: str, to_lower: bool = True):
"""
    WikiHomograph and NeMo use slightly different phoneme sets; this function reads the mapping from WikiHomograph
    word_ids to NeMo IPA heteronyms.
Args:
wordid_to_phonemes_file: Path to a file with mapping from wordid predicted by the model to phonemes, e.g.,
NeMo/scripts/tts_dataset_files/wordid_to_ipa-0.7b_nv22.10.tsv
to_lower: set to True to lower case wordid
"""
if not os.path.exists(wordid_to_phonemes_file):
raise ValueError(f"{wordid_to_phonemes_file} not found")
wordid_to_nemo_cmu = {}
with open(wordid_to_phonemes_file, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
if to_lower:
line = line.lower()
line = line.strip().split(" ")
wordid_to_nemo_cmu[line[0]] = line[1]
return wordid_to_nemo_cmu
def remove_punctuation(text: str, remove_spaces: bool = False, do_lower: bool = False, exclude: List[str] = None):
"""
    Remove punctuation marks from text
Args:
text: input text
remove_spaces: set to True to remove spaces
do_lower: set to True to lower case the text
        exclude: specify a list of punctuation marks to keep in the output, e.g., exclude=["'", "."]
Return:
processed text with punctuation marks removed
"""
all_punct_marks = string.punctuation
if exclude is not None:
for p in exclude:
all_punct_marks = all_punct_marks.replace(p, "")
text = re.sub("[" + all_punct_marks + "]", " ", text)
text = re.sub(r" +", " ", text)
if remove_spaces:
text = text.replace(" ", "").replace("\u00A0", "").strip()
if do_lower:
text = text.lower()
return text.strip()
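# Illustrative examples (hypothetical inputs) for the helper above:
#   remove_punctuation("Hello, world!")                  -> "Hello world"
#   remove_punctuation("Hello, world!", do_lower=True)   -> "hello world"
#   remove_punctuation("it's fine.", exclude=["'"])      -> "it's fine"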
def get_heteronym_spans(sentences: List[str], supported_heteronyms: Union[Dict, List]):
"""
    Finds heteronyms in sentences and returns their span indices
Args:
sentences: sentences to find heteronyms in
supported_heteronyms: heteronyms to look for
Return:
start_end: List[Tuple[int]] - start-end indices that indicate location of found heteronym in the sentence
heteronyms: List[List[str]] - heteronyms found in sentences, each sentence can contain more than one heteronym
"""
start_end = []
heteronyms = []
for sent in sentences:
cur_start_end = []
cur_heteronyms = []
start_idx = 0
for word in sent.lower().split():
word_by_hyphen = word.split("-")
for sub_word in word_by_hyphen:
no_punct_word = remove_punctuation(sub_word, do_lower=True, remove_spaces=False)
if no_punct_word in supported_heteronyms:
start_idx = sent.lower().index(no_punct_word, start_idx)
end_idx = start_idx + len(no_punct_word)
cur_start_end.append((start_idx, end_idx))
cur_heteronyms.append(no_punct_word)
start_idx = end_idx
else:
start_idx += len(sub_word) + 1
heteronyms.append(cur_heteronyms)
start_end.append(cur_start_end)
return start_end, heteronyms
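# Illustrative example (hypothetical sentence and heteronym set) of the span search above:
#   spans, found = get_heteronym_spans(["I will read the book"], {"read"})
#   spans -> [[(7, 11)]]   # character offsets of "read" in the lower-cased sentence
#   found -> [["read"]]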
def set_grapheme_case(text: str, case: str = "upper") -> str:
if case == "upper":
text_new = text.upper()
elif case == "lower":
text_new = text.lower()
elif case == "mixed": # keep as-is, mix-cases
text_new = text
else:
raise ValueError(f"Case <{case}> is not supported. Please specify either 'upper', 'lower', or 'mixed'.")
return text_new
| NeMo-main | nemo/collections/tts/g2p/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.tts.g2p.models.en_us_arpabet import EnglishG2p
from nemo.collections.tts.g2p.models.i18n_ipa import IpaG2p as IPAG2P
# TODO @xueyang: This file is kept for backward-compatibility purposes since all older NGC models that were trained on
# and before NeMo 1.16.0 used this import path. We will remove this file soon; `IPAG2P` will be also renamed as
# `IpaG2p`. Please start using new import path and the new `IpaG2p` name from NeMo 1.16.0.
from nemo.collections.tts.g2p.models.zh_cn_pinyin import ChineseG2p
| NeMo-main | nemo/collections/tts/g2p/modules.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from transformers import AutoTokenizer, T5ForConditionalGeneration
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.tts.g2p.data.t5 import T5G2PDataset
from nemo.collections.tts.models.base import G2PModel
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types import LabelsType, LossType, MaskType, NeuralType, TokenIndex
from nemo.utils import logging
__all__ = ['T5G2PModel']
@dataclass
class T5G2PConfig:
train_ds: Optional[Dict[Any, Any]] = None
validation_ds: Optional[Dict[Any, Any]] = None
test_ds: Optional[Dict[Any, Any]] = None
class T5G2PModel(G2PModel):
"""
T5-based grapheme-to-phoneme model.
"""
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"input_ids": NeuralType(('B', 'T'), TokenIndex()),
"attention_mask": NeuralType(('B', 'T'), MaskType(), optional=True),
"labels": NeuralType(('B', 'T'), LabelsType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"loss": NeuralType((), LossType())}
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.world_size = 1
if trainer is not None:
self.world_size = trainer.num_nodes * trainer.num_devices
# Load appropriate tokenizer from HuggingFace
self.model_name = cfg.model_name
self._tokenizer = AutoTokenizer.from_pretrained(self.model_name)
self.max_source_len = cfg.get("max_source_len", self._tokenizer.model_max_length)
self.max_target_len = cfg.get("max_target_len", self._tokenizer.model_max_length)
self.do_lower = cfg.get("do_lower", False)
# Ensure passed cfg is compliant with schema
schema = OmegaConf.structured(T5G2PConfig)
# ModelPT ensures that cfg is a DictConfig, but do this second check in case ModelPT changes
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
elif not isinstance(cfg, DictConfig):
raise ValueError(f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig")
OmegaConf.merge(cfg, schema)
super().__init__(cfg, trainer)
# Load pretrained T5 model from HuggingFace
self.model = T5ForConditionalGeneration.from_pretrained(self.model_name)
@typecheck()
def forward(self, input_ids, attention_mask, labels):
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
return outputs.loss
# ===== Training Functions ===== #
def training_step(self, batch, batch_idx):
input_ids, attention_mask, labels = batch
train_loss = self.forward(input_ids=input_ids, attention_mask=attention_mask, labels=labels,)
self.log('train_loss', train_loss)
return train_loss
def on_train_epoch_end(self):
return super().on_train_epoch_end()
def _setup_infer_dataloader(self, cfg) -> 'torch.utils.data.DataLoader':
"""
        Setup function for an inference data loader.
Returns:
A pytorch DataLoader.
"""
dataset = T5G2PDataset(
manifest_filepath=cfg.manifest_filepath,
tokenizer=self._tokenizer,
max_source_len=self._tokenizer.model_max_length,
max_target_len=-1,
do_lower=self.do_lower,
grapheme_field=cfg.get("grapheme_field", "text_graphemes"),
with_labels=False,
)
return torch.utils.data.DataLoader(
dataset,
collate_fn=dataset.collate_fn,
batch_size=cfg.batch_size,
shuffle=False,
num_workers=cfg.num_workers,
drop_last=False,
)
# Functions for inference
@torch.no_grad()
    def _infer(self, config: DictConfig,) -> List[str]:
"""
Runs model inference.
Args:
            config: configuration used to set up the DataLoader
Returns:
all_preds: model predictions
"""
# store predictions for all queries in a single list
all_preds = []
mode = self.training
try:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Switch model to evaluation mode
self.eval()
self.to(device)
infer_datalayer = self._setup_infer_dataloader(DictConfig(config))
for batch in infer_datalayer:
input_ids, _ = batch
generated_str, _, _ = self._generate_predictions(input_ids=input_ids.to(device))
all_preds.extend(generated_str)
del batch
finally:
# set mode back to its original value
self.train(mode=mode)
return all_preds
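    # Illustrative sketch of the inference config consumed by _infer above; the field values
    # below are assumptions for illustration, not defaults shipped with NeMo:
    #   config = {"manifest_filepath": "g2p_test.json", "batch_size": 4,
    #             "num_workers": 0, "grapheme_field": "text_graphemes"}
    #   preds = model._infer(DictConfig(config))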
# ===== Validation Functions ===== #
def validation_step(self, batch, batch_idx, dataloader_idx=0, split="val"):
input_ids, attention_mask, labels = batch
# Get loss from forward step
val_loss = self.forward(input_ids=input_ids, attention_mask=attention_mask, labels=labels,)
# Get preds from generate function and calculate PER
labels_str = self._tokenizer.batch_decode(
# Need to do the following to zero out the -100s (ignore_index).
torch.ones_like(labels) * ((labels == -100) * 100) + labels,
skip_special_tokens=True,
)
generated_str, _, _ = self._generate_predictions(input_ids=input_ids, model_max_target_len=self.max_target_len)
per = word_error_rate(hypotheses=generated_str, references=labels_str, use_cer=True)
return {f"{split}_loss": val_loss, 'per': per}
def test_step(self, batch, batch_idx, dataloader_idx=0):
"""
Lightning calls this inside the test loop with the data from the test dataloader
passed in as `batch`.
"""
return self.validation_step(batch, batch_idx, dataloader_idx, split="test")
def multi_validation_epoch_end(self, outputs, dataloader_idx=0, split="val"):
"""
Called at the end of validation to aggregate outputs (reduces across batches, not workers).
"""
avg_loss = torch.stack([x[f"{split}_loss"] for x in outputs]).mean()
self.log(f"{split}_loss", avg_loss, sync_dist=True)
if split == "test":
dataloader_name = self._test_names[dataloader_idx].upper()
else:
dataloader_name = self._validation_names[dataloader_idx].upper()
avg_per = sum([x['per'] for x in outputs]) / len(outputs)
self.log(f"{split}_per", avg_per)
# to save all PER values for each dataset in WANDB
self.log(f"{split}_per_{dataloader_name}", avg_per)
logging.info(f"PER: {round(avg_per * 100, 2)}% {dataloader_name}, {len(outputs)}examples")
return {'loss': avg_loss}
def multi_test_epoch_end(self, outputs, dataloader_idx=0):
self.multi_validation_epoch_end(outputs, dataloader_idx, split="test")
@torch.no_grad()
def _generate_predictions(self, input_ids: torch.Tensor, model_max_target_len: int = 512):
"""
Generates predictions and converts IDs to text.
"""
outputs = self.model.generate(
input_ids, output_scores=True, return_dict_in_generate=True, max_length=model_max_target_len
)
generated_ids, sequence_toks_scores = outputs['sequences'], outputs['scores']
generated_texts = self._tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
return generated_texts, generated_ids, sequence_toks_scores
# ===== Dataset Setup Functions ===== #
def _setup_dataloader_from_config(self, cfg, name):
if "dataloader_params" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):
raise ValueError(f"No dataloader_params for {name}")
dataset = instantiate(
cfg.dataset,
manifest_filepath=cfg.manifest_filepath,
tokenizer=self._tokenizer,
max_source_len=self.max_source_len,
max_target_len=self.max_target_len,
do_lower=self.do_lower,
grapheme_field=cfg.get("grapheme_field", "text_graphemes"),
phoneme_field=cfg.get("phoneme_field", "text"),
with_labels=True,
)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
def setup_training_data(self, cfg):
self._train_dl = self._setup_dataloader_from_config(cfg, name="train")
def setup_validation_data(self, cfg):
self._validation_dl = self._setup_dataloader_from_config(cfg, name="validation")
def setup_test_data(self, cfg):
self._test_dl = self._setup_dataloader_from_config(cfg, name="test")
def setup_multiple_validation_data(self, val_data_config: Union[DictConfig, Dict] = None):
if not val_data_config or val_data_config.manifest_filepath is None:
self._validation_dl = None
return
return super().setup_multiple_validation_data(val_data_config)
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict] = None):
if not test_data_config or test_data_config.manifest_filepath is None:
self._test_dl = None
return
return super().setup_multiple_test_data(test_data_config)
# ===== List Available Models - N/A =====$
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
return []
| NeMo-main | nemo/collections/tts/g2p/models/t5.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig, ListConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from transformers import AutoConfig, AutoModel, AutoTokenizer
from nemo.collections.tts.g2p.data.ctc import CTCG2PBPEDataset
from nemo.collections.tts.models.base import G2PModel
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
try:
from nemo.collections.asr.losses.ctc import CTCLoss
from nemo.collections.asr.metrics.wer_bpe import WERBPE, CTCBPEDecoding, CTCBPEDecodingConfig
from nemo.collections.asr.models import EncDecCTCModel
from nemo.collections.asr.parts.mixins import ASRBPEMixin
ASR_AVAILABLE = True
except (ModuleNotFoundError, ImportError) as e:
ASR_AVAILABLE = False
__all__ = ['CTCG2PModel']
@dataclass
class CTCG2PConfig:
train_ds: Optional[Dict[Any, Any]] = None
validation_ds: Optional[Dict[Any, Any]] = None
class CTCG2PModel(G2PModel, ASRBPEMixin):
"""
CTC-based grapheme-to-phoneme model.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.world_size = 1
if trainer is not None:
self.world_size = trainer.num_nodes * trainer.num_devices
self.mode = cfg.model_name.lower()
self.supported_modes = ["byt5", "conformer_bpe"]
if self.mode not in self.supported_modes:
raise ValueError(f"{self.mode} is not supported, choose from {self.supported_modes}")
# Setup phoneme tokenizer
self._setup_tokenizer(cfg.tokenizer)
# Setup grapheme tokenizer
self.tokenizer_grapheme = self.setup_grapheme_tokenizer(cfg)
# Initialize vocabulary
vocabulary = self.tokenizer.tokenizer.get_vocab()
cfg.decoder.vocabulary = ListConfig(list(vocabulary.keys()))
self.vocabulary = cfg.decoder.vocabulary
self.labels_tkn2id = {l: i for i, l in enumerate(self.vocabulary)}
self.labels_id2tkn = {i: l for i, l in enumerate(self.vocabulary)}
super().__init__(cfg, trainer)
self._setup_encoder()
self.decoder = EncDecCTCModel.from_config_dict(self._cfg.decoder)
self.loss = CTCLoss(
num_classes=self.decoder.num_classes_with_blank - 1,
zero_infinity=True,
reduction=self._cfg.get("ctc_reduction", "mean_batch"),
)
# Setup decoding objects
decoding_cfg = self.cfg.get('decoding', None)
# In case decoding config not found, use default config
if decoding_cfg is None:
decoding_cfg = OmegaConf.structured(CTCBPEDecodingConfig)
with open_dict(self.cfg):
self.cfg.decoding = decoding_cfg
self.decoding = CTCBPEDecoding(self.cfg.decoding, tokenizer=self.tokenizer)
self._wer = WERBPE(decoding=self.decoding, use_cer=False, log_prediction=False, dist_sync_on_step=True,)
self._per = WERBPE(decoding=self.decoding, use_cer=True, log_prediction=False, dist_sync_on_step=True,)
def setup_grapheme_tokenizer(self, cfg):
""" Initialized grapheme tokenizer """
if self.mode == "byt5":
# Load appropriate tokenizer from HuggingFace
grapheme_tokenizer = AutoTokenizer.from_pretrained(cfg.tokenizer_grapheme.pretrained)
self.max_source_len = cfg.get("max_source_len", grapheme_tokenizer.model_max_length)
self.max_target_len = cfg.get("max_target_len", grapheme_tokenizer.model_max_length)
# TODO store byt5 vocab file
elif self.mode == "conformer_bpe":
grapheme_unk_token = (
cfg.tokenizer_grapheme.unk_token if cfg.tokenizer_grapheme.unk_token is not None else ""
)
chars = string.ascii_lowercase + grapheme_unk_token + " " + "'"
if not cfg.tokenizer_grapheme.do_lower:
chars += string.ascii_uppercase
if cfg.tokenizer_grapheme.add_punctuation:
punctuation_marks = string.punctuation.replace('"', "").replace("\\", "").replace("'", "")
chars += punctuation_marks
vocab_file = "/tmp/char_vocab.txt"
with open(vocab_file, "w") as f:
[f.write(f'"{ch}"\n') for ch in chars]
f.write('"\\""\n') # add " to the vocab
self.register_artifact("tokenizer_grapheme.vocab_file", vocab_file)
grapheme_tokenizer = instantiate(cfg.tokenizer_grapheme.dataset, vocab_file=vocab_file)
self.max_source_len = cfg.get("max_source_len", 512)
self.max_target_len = cfg.get("max_target_len", 512)
else:
raise ValueError(f"{self.mode} is not supported. Choose from {self.supported_modes}")
return grapheme_tokenizer
def _setup_encoder(self):
if self.mode == "byt5":
config = AutoConfig.from_pretrained(self._cfg.tokenizer_grapheme.pretrained)
if self._cfg.encoder.dropout is not None:
config.dropout_rate = self._cfg.encoder.dropout
print(f"\nDROPOUT: {config.dropout_rate}")
self.encoder = AutoModel.from_pretrained(self._cfg.encoder.transformer, config=config).encoder
# add encoder hidden dim size to the config
if self.cfg.decoder.feat_in is None:
self._cfg.decoder.feat_in = self.encoder.config.d_model
elif self.mode == "conformer_bpe":
self.embedding = torch.nn.Embedding(
embedding_dim=self._cfg.embedding.d_model, num_embeddings=self.tokenizer.vocab_size, padding_idx=0
)
self.encoder = EncDecCTCModel.from_config_dict(self._cfg.encoder)
with open_dict(self._cfg):
if "feat_in" not in self._cfg.decoder or (
not self._cfg.decoder.feat_in and hasattr(self.encoder, '_feat_out')
):
self._cfg.decoder.feat_in = self.encoder._feat_out
if "feat_in" not in self._cfg.decoder or not self._cfg.decoder.feat_in:
raise ValueError("param feat_in of the decoder's config is not set!")
else:
raise ValueError(f"{self.mode} is not supported. Choose from {self.supported_modes}")
# @typecheck()
def forward(self, input_ids, attention_mask, input_len):
if self.mode == "byt5":
encoded_input = self.encoder(input_ids=input_ids, attention_mask=attention_mask)[0]
encoded_len = input_len
# encoded_input = [B, seq_len, hid_dim]
# swap seq_len and hid_dim dimensions to get [B, hid_dim, seq_len]
encoded_input = encoded_input.transpose(1, 2)
elif self.mode == "conformer_bpe":
input_embedding = self.embedding(input_ids)
input_embedding = input_embedding.transpose(1, 2)
encoded_input, encoded_len = self.encoder(audio_signal=input_embedding, length=input_len)
else:
raise ValueError(f"{self.mode} is not supported. Choose from {self.supported_modes}")
log_probs = self.decoder(encoder_output=encoded_input)
greedy_predictions = log_probs.argmax(dim=-1, keepdim=False)
return log_probs, greedy_predictions, encoded_len
# ===== Training Functions ===== #
def training_step(self, batch, batch_idx):
input_ids, attention_mask, input_len, targets, target_lengths = batch
log_probs, predictions, encoded_len = self.forward(
input_ids=input_ids, attention_mask=attention_mask, input_len=input_len
)
loss = self.loss(
log_probs=log_probs, targets=targets, input_lengths=encoded_len, target_lengths=target_lengths
)
self.log("train_loss", loss)
return loss
def on_train_epoch_end(self):
return super().on_train_epoch_end()
# ===== Validation Functions ===== #
def validation_step(self, batch, batch_idx, dataloader_idx=0, split="val"):
input_ids, attention_mask, input_len, targets, target_lengths = batch
log_probs, greedy_predictions, encoded_len = self.forward(
input_ids=input_ids, attention_mask=attention_mask, input_len=input_len
)
val_loss = self.loss(
log_probs=log_probs, targets=targets, input_lengths=encoded_len, target_lengths=target_lengths
)
self._wer.update(
predictions=log_probs, targets=targets, target_lengths=target_lengths, predictions_lengths=encoded_len
)
wer, wer_num, wer_denom = self._wer.compute()
self._wer.reset()
self._per.update(
predictions=log_probs, targets=targets, target_lengths=target_lengths, predictions_lengths=encoded_len
)
per, per_num, per_denom = self._per.compute()
self._per.reset()
self.log(f"{split}_loss", val_loss)
loss = {
f"{split}_loss": val_loss,
f"{split}_wer_num": wer_num,
f"{split}_wer_denom": wer_denom,
f"{split}_wer": wer,
f"{split}_per_num": per_num,
f"{split}_per_denom": per_denom,
f"{split}_per": per,
}
if split == 'val':
if type(self.trainer.val_dataloaders) == list and len(self.trainer.val_dataloaders) > 1:
self.validation_step_outputs[dataloader_idx].append(loss)
else:
self.validation_step_outputs.append(loss)
elif split == 'test':
if type(self.trainer.test_dataloaders) == list and len(self.trainer.test_dataloaders) > 1:
self.test_step_outputs[dataloader_idx].append(loss)
else:
self.test_step_outputs.append(loss)
return loss
def test_step(self, batch, batch_idx, dataloader_idx=0):
"""
Lightning calls this inside the test loop with the data from the test dataloader
passed in as `batch`.
"""
return self.validation_step(batch, batch_idx, dataloader_idx, split="test")
def multi_validation_epoch_end(self, outputs, dataloader_idx=0, split="val"):
"""
Called at the end of validation to aggregate outputs (reduces across batches, not workers).
"""
avg_loss = torch.stack([x[f"{split}_loss"] for x in outputs]).mean()
self.log(f"{split}_loss", avg_loss, prog_bar=True)
wer_num = torch.stack([x[f"{split}_wer_num"] for x in outputs]).sum()
wer_denom = torch.stack([x[f"{split}_wer_denom"] for x in outputs]).sum()
wer = wer_num / wer_denom
per_num = torch.stack([x[f"{split}_per_num"] for x in outputs]).sum()
per_denom = torch.stack([x[f"{split}_per_denom"] for x in outputs]).sum()
per = per_num / per_denom
if split == "test":
dataloader_name = self._test_names[dataloader_idx].upper()
else:
dataloader_name = self._validation_names[dataloader_idx].upper()
self.log(f"{split}_wer", wer)
self.log(f"{split}_per", per)
self.log(f"{split}_per", per)
# to save all PER values for each dataset in WANDB
self.log(f"{split}_per_{dataloader_name}", per)
logging.info(f"PER: {per * 100}% {dataloader_name}")
logging.info(f"WER: {wer * 100}% {dataloader_name}")
def multi_test_epoch_end(self, outputs, dataloader_idx=0):
self.multi_validation_epoch_end(outputs, dataloader_idx, split="test")
def _setup_infer_dataloader(self, cfg: DictConfig) -> 'torch.utils.data.DataLoader':
"""
        Setup function for an inference data loader.
Returns:
A pytorch DataLoader.
"""
dataset = CTCG2PBPEDataset(
manifest_filepath=cfg.manifest_filepath,
grapheme_field=cfg.grapheme_field,
tokenizer_graphemes=self.tokenizer_grapheme,
tokenizer_phonemes=self.tokenizer,
do_lower=self._cfg.tokenizer_grapheme.do_lower,
labels=self.vocabulary,
max_source_len=self._cfg.max_source_len,
with_labels=False,
)
return torch.utils.data.DataLoader(
dataset,
collate_fn=dataset.collate_fn,
batch_size=cfg.batch_size,
shuffle=False,
num_workers=cfg.num_workers,
drop_last=False,
)
@torch.no_grad()
    def _infer(self, config: DictConfig,) -> List[str]:
"""
Runs model inference.
Args:
            config: configuration used to set up the DataLoader
Returns:
all_preds: model predictions
"""
# store predictions for all queries in a single list
all_preds = []
mode = self.training
try:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Switch model to evaluation mode
self.eval()
self.to(device)
infer_datalayer = self._setup_infer_dataloader(config)
for batch in infer_datalayer:
input_ids, attention_mask, input_len = batch
log_probs, greedy_predictions, encoded_len = self.forward(
input_ids=input_ids.to(device),
attention_mask=attention_mask if attention_mask is None else attention_mask.to(device),
input_len=input_len.to(device),
)
preds_str, _ = self.decoding.ctc_decoder_predictions_tensor(
log_probs, decoder_lengths=encoded_len, return_hypotheses=False
)
all_preds.extend(preds_str)
del greedy_predictions
del log_probs
del batch
del input_len
finally:
# set mode back to its original value
self.train(mode=mode)
return all_preds
# ===== Dataset Setup Functions ===== #
def _setup_dataloader_from_config(self, cfg: DictConfig, name: str):
if "dataloader_params" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):
raise ValueError(f"No dataloader_params for {name}")
if not os.path.exists(cfg.manifest_filepath):
raise ValueError(f"{cfg.dataset.manifest_filepath} not found")
dataset = instantiate(
cfg.dataset,
manifest_filepath=cfg.manifest_filepath,
phoneme_field=cfg.dataset.phoneme_field,
grapheme_field=cfg.dataset.grapheme_field,
tokenizer_graphemes=self.tokenizer_grapheme,
do_lower=self._cfg.tokenizer_grapheme.do_lower,
tokenizer_phonemes=self.tokenizer,
labels=self.vocabulary,
max_source_len=self.max_source_len,
with_labels=True,
)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
def setup_training_data(self, cfg: DictConfig):
if not cfg or cfg.manifest_filepath is None:
logging.info(
f"Dataloader config or file_path for the train is missing, so no data loader for train is created!"
)
self._train_dl = None
return
self._train_dl = self._setup_dataloader_from_config(cfg, name="train")
def setup_multiple_validation_data(self, val_data_config: Union[DictConfig, Dict] = None):
if not val_data_config or val_data_config.manifest_filepath is None:
self._validation_dl = None
return
super().setup_multiple_validation_data(val_data_config)
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict] = None):
if not test_data_config or test_data_config.manifest_filepath is None:
self._test_dl = None
return
super().setup_multiple_test_data(test_data_config)
def setup_validation_data(self, cfg: Optional[DictConfig]):
if not cfg or cfg.manifest_filepath is None:
logging.info(
f"Dataloader config or file_path for the validation is missing, so no data loader for validation is created!"
)
self._validation_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(cfg, name="val")
def setup_test_data(self, cfg: Optional[DictConfig]):
if not cfg or cfg.manifest_filepath is None:
logging.info(
f"Dataloader config or file_path for the test is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(cfg, name="test")
# ===== List Available Models - N/A =====$
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
return []
| NeMo-main | nemo/collections/tts/g2p/models/ctc.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/tts/g2p/models/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import List, Optional
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.common.losses import CrossEntropyLoss
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.modules.common import TokenClassifier
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.collections.tts.g2p.data.heteronym_classification import HeteronymClassificationDataset
from nemo.collections.tts.g2p.utils import get_heteronym_spans, get_wordid_to_phonemes, read_wordids
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
try:
from nemo.collections.nlp.models.nlp_model import NLPModel
NLP_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
NLP_AVAILABLE = False
__all__ = ['HeteronymClassificationModel']
class HeteronymClassificationModel(NLPModel):
"""
This is a classification model that selects the best heteronym option out of possible dictionary entries.
Supports only heteronyms, no OOV.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.max_seq_length = cfg.max_seq_length
self.wordids = self.register_artifact("wordids", cfg.wordids)
self.heteronym_dict, self.wordid_to_idx = read_wordids(self.wordids)
self.idx_to_wordid = {v: k for k, v in self.wordid_to_idx.items()}
self.supported_heteronyms = list(self.heteronym_dict.keys())
if cfg.class_labels.class_labels_file is None:
label_ids_file = "/tmp/label_ids.csv"
with open(label_ids_file, 'w') as f:
for idx in range(len(self.idx_to_wordid)):
f.write(self.idx_to_wordid[idx] + "\n")
self.register_artifact("class_labels.class_labels_file", label_ids_file)
super().__init__(cfg=cfg, trainer=trainer)
self.lang = self._cfg.get('lang', None)
num_classes = len(self.wordid_to_idx)
self.classifier = TokenClassifier(
hidden_size=self.hidden_size,
num_classes=num_classes,
num_layers=self._cfg.head.num_fc_layers,
activation=self._cfg.head.activation,
log_softmax=False,
dropout=self._cfg.head.fc_dropout,
use_transformer_init=self._cfg.head.use_transformer_init,
)
# Loss Functions
self.loss = CrossEntropyLoss(logits_ndim=3)
# setup to track metrics
self.classification_report = ClassificationReport(
num_classes=num_classes, mode='macro', dist_sync_on_step=True, label_ids=self.wordid_to_idx
)
# used for inference to convert predicted wordids to phonemes
self.wordid_to_phonemes_file = None
self.wordid_to_phonemes = None
def forward(self, input_ids, attention_mask, token_type_ids):
hidden_states = self.bert_model(
input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
logits = self.classifier(hidden_states=hidden_states)
return logits
def make_step(self, batch):
logits = self.forward(
input_ids=batch["input_ids"],
attention_mask=batch["attention_mask"],
token_type_ids=torch.zeros_like(batch["input_ids"]),
)
if "targets" in batch:
loss = self.loss(logits=logits, labels=batch["targets"])
else:
# skip loss calculation for inference
loss = None
return loss, logits
# Training
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
loss, logits = self.make_step(batch)
self.log('train_loss', loss)
return loss
def on_train_epoch_end(self):
return super().on_train_epoch_end()
# Validation and Testing
def validation_step(self, batch, batch_idx, split="val"):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
val_loss, logits = self.make_step(batch)
subtokens_mask = batch["subtokens_mask"]
targets = batch["targets"]
targets = targets[targets != -100]
self.log(f"{split}_loss", val_loss)
tag_preds = torch.argmax(logits, axis=-1)[subtokens_mask > 0]
tp, fn, fp, _ = self.classification_report(tag_preds, targets)
loss = {f'{split}_loss': val_loss, 'tp': tp, 'fn': fn, 'fp': fp}
if split == 'val':
self.validation_step_outputs.append(loss)
elif split == 'test':
self.test_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
split = "test" if self.trainer.testing else "val"
if split == 'val':
avg_loss = torch.stack([x[f'{split}_loss'] for x in self.validation_step_outputs]).mean()
elif split == 'test':
avg_loss = torch.stack([x[f'{split}_loss'] for x in self.test_step_outputs]).mean()
# calculate metrics and classification report
precision, recall, f1, report = self.classification_report.compute()
# remove examples with support=0
report = "\n".join(
[
x
for x in report.split("\n")
if not x.endswith(" 0") and "100.00 100.00 100.00" not in x
]
)
logging.info(f"{split}_report: {report}")
logging.info(f"{split}_f1: {f1:.2f}%")
self.log(f"{split}_loss", avg_loss, prog_bar=True)
self.log(f"{split}_precision", precision)
self.log(f"{split}_f1", f1)
self.log(f"{split}_recall", recall)
f1_macro = report[report.index("macro") :].split("\n")[0].replace("macro avg", "").strip().split()[-2]
f1_micro = report[report.index("micro") :].split("\n")[0].replace("micro avg", "").strip().split()[-2]
self.log(f"{split}_f1_macro", torch.Tensor([float(f1_macro)]))
self.log(f"{split}_f1_micro", torch.Tensor([float(f1_micro)]))
self.classification_report.reset()
if split == 'val':
self.validation_step_outputs.clear() # free memory
elif split == 'test':
self.test_step_outputs.clear()
def test_step(self, batch, batch_idx):
"""
Lightning calls this inside the test loop with the data from the test dataloader passed in as `batch`.
"""
return self.validation_step(batch, batch_idx, "test")
def on_test_epoch_end(self):
"""
        Called at the end of the test run to aggregate outputs.
"""
return self.on_validation_epoch_end()
def set_wordid_to_phonemes(self, wordid_to_phonemes_file: str):
if wordid_to_phonemes_file is None or not os.path.exists(wordid_to_phonemes_file):
logging.warning(f"{wordid_to_phonemes_file} not found, skip setting wordid_to_phonemes.")
else:
self.wordid_to_phonemes_file = wordid_to_phonemes_file
self.wordid_to_phonemes = get_wordid_to_phonemes(self.wordid_to_phonemes_file)
logging.info(f"Wordid to phonemes file is set to {wordid_to_phonemes_file}")
# Functions for inference
def _process_sentence(self, text: str, start_end: List[List[int]], predictions: List[str]):
text_with_heteronym_replaced = ""
last_idx = 0
for heteronym_idx, cur_start_end in enumerate(start_end):
cur_start, cur_end = cur_start_end
cur_pred = predictions[heteronym_idx]
if self.wordid_to_phonemes is None or cur_pred not in self.wordid_to_phonemes:
cur_pred = f"[{cur_pred}]"
else:
cur_pred = self.wordid_to_phonemes[cur_pred]
# to use mixed grapheme format as an input for a TTS model, we need to have vertical bars around phonemes
cur_pred = "".join([f"|{p}|" for p in cur_pred])
text_with_heteronym_replaced += text[last_idx:cur_start] + cur_pred
last_idx = cur_end
if last_idx < len(text):
text_with_heteronym_replaced += text[last_idx:]
return text_with_heteronym_replaced
@torch.no_grad()
def disambiguate(
self,
sentences: List[str],
batch_size: int = 4,
num_workers: int = 0,
wordid_to_phonemes_file: Optional[str] = None,
):
"""
        Replaces heteronyms supported by the model with their phoneme form (if wordid_to_phonemes_file is provided)
        or with the predicted wordids.
Args:
sentences: Sentences to use for inference
batch_size: batch size to use during inference.
Bigger will result in better throughput performance but would use more memory.
num_workers: number of workers for DataLoader
wordid_to_phonemes_file: (Optional) file with mapping between wordid predicted by the model to phonemes
Returns:
preds: model predictions
output: sentences with heteronym replaced with phonemes (if wordid_to_phonemes_file specified)
"""
if isinstance(sentences, str):
sentences = [sentences]
batch_size = min(batch_size, len(sentences))
start_end, heteronyms = get_heteronym_spans(sentences, self.heteronym_dict)
        if not (len(sentences) == len(start_end) == len(heteronyms)):
raise ValueError(
f"Number of sentences should match the lengths of provided start-end indices, {len(sentences)} != {len(start_end)}"
)
tmp_manifest = "/tmp/manifest.json"
with open(tmp_manifest, "w") as f:
for cur_sentence, cur_start_ends, cur_heteronyms in zip(sentences, start_end, heteronyms):
item = {"text_graphemes": cur_sentence, "start_end": cur_start_ends, "heteronym_span": cur_heteronyms}
f.write(json.dumps(item, ensure_ascii=False) + '\n')
all_preds = self._disambiguate(manifest=tmp_manifest, batch_size=batch_size, num_workers=num_workers,)
if wordid_to_phonemes_file is not None:
self.set_wordid_to_phonemes(wordid_to_phonemes_file)
output = []
for sent_idx, sent_start_end in enumerate(start_end):
output.append(
self._process_sentence(
text=sentences[sent_idx], start_end=sent_start_end, predictions=all_preds[sent_idx]
),
)
return all_preds, output
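    # Minimal usage sketch for disambiguate(); the checkpoint path and sentence are hypothetical:
    #   model = HeteronymClassificationModel.restore_from("heteronym_classification.nemo")
    #   preds, output = model.disambiguate(["The bass was caught in the lake."], batch_size=2)
    #   # preds  -> predicted wordids per sentence; output -> sentences with heteronyms replaced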
@torch.no_grad()
def _disambiguate(self, manifest: str, batch_size: int, num_workers: int = 0, grapheme_field="text_graphemes"):
# store predictions for all queries in a single list
all_preds = []
mode = self.training
try:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Switch model to evaluation mode
self.eval()
self.to(device)
infer_datalayer = self._setup_infer_dataloader(
manifest, grapheme_field=grapheme_field, batch_size=batch_size, num_workers=num_workers
)
for batch in infer_datalayer:
subtokens_mask = batch["subtokens_mask"]
batch = {
"input_ids": batch["input_ids"].to(device),
"attention_mask": batch["attention_mask"].to(device),
}
_, logits = self.make_step(batch)
preds = tensor2list(torch.argmax(logits, axis=-1)[subtokens_mask > 0])
# preds are flatten for all the samples, we need to separate predictions per sample
preds_num = [len([p_ for p_ in p if p_ == 1]) for p in tensor2list(subtokens_mask)]
last_idx = 0
for num in preds_num:
preds_ = preds[last_idx : last_idx + num]
preds_ = [self.idx_to_wordid[p] for p in preds_]
all_preds.append(preds_)
last_idx += num
finally:
# set mode back to its original value
self.train(mode=mode)
return all_preds
@torch.no_grad()
def disambiguate_manifest(
self,
manifest,
output_manifest: str,
grapheme_field: str = "text_graphemes",
batch_size: int = 4,
num_workers: int = 0,
wordid_to_phonemes_file: Optional[str] = None,
):
all_preds = self._disambiguate(
manifest=manifest, batch_size=batch_size, num_workers=num_workers, grapheme_field=grapheme_field
)
self.set_wordid_to_phonemes(wordid_to_phonemes_file)
with open(manifest, "r", encoding="utf-8") as f_in, open(output_manifest, "w", encoding="utf-8") as f_preds:
for idx, line in enumerate(f_in):
line = json.loads(line)
start_end = line["start_end"]
if len(start_end) > 0 and isinstance(start_end[0], int):
start_end = [start_end]
text_with_heteronym_replaced = self._process_sentence(
text=line[grapheme_field], start_end=start_end, predictions=all_preds[idx]
)
line["pred_text"] = text_with_heteronym_replaced
line["pred_wordid"] = all_preds[idx]
f_preds.write(json.dumps(line, ensure_ascii=False) + '\n')
logging.info(f"Predictions save at {output_manifest}")
return all_preds
# Functions for processing data
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config or train_data_config.dataset.manifest is None:
logging.info(
f"Dataloader config or file_path for the train is missing, so no data loader for train is created!"
)
self._train_dl = None
return
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, data_split="train")
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config or val_data_config.dataset.manifest is None:
logging.info(
f"Dataloader config or file_path for the validation is missing, so no data loader for validation is created!"
)
self._validation_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, data_split="val")
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config or test_data_config.dataset.manifest is None:
logging.info(
f"Dataloader config or file_path for the test is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, data_split="test")
def _setup_dataloader_from_config(self, cfg: DictConfig, data_split: str):
if "dataset" not in cfg or not isinstance(cfg.dataset, DictConfig):
raise ValueError(f"No dataset for {data_split}")
if "dataloader_params" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):
raise ValueError(f"No dataloader_params for {data_split}")
dataset = instantiate(
cfg.dataset,
manifest=cfg.dataset.manifest,
grapheme_field=cfg.dataset.grapheme_field,
tokenizer=self.tokenizer,
wordid_to_idx=self.wordid_to_idx,
heteronym_dict=self.heteronym_dict,
max_seq_len=self.max_seq_length,
with_labels=True,
)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
def _setup_infer_dataloader(
self, manifest: str, grapheme_field: str, batch_size: int, num_workers: int
) -> 'torch.utils.data.DataLoader':
dataset = HeteronymClassificationDataset(
manifest=manifest,
grapheme_field=grapheme_field,
tokenizer=self.tokenizer,
wordid_to_idx=self.wordid_to_idx,
heteronym_dict=self.heteronym_dict,
max_seq_len=self.tokenizer.tokenizer.model_max_length,
with_labels=False,
)
return torch.utils.data.DataLoader(
dataset,
collate_fn=dataset.collate_fn,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
drop_last=False,
)
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
return []
| NeMo-main | nemo/collections/tts/g2p/models/heteronym_classification.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from collections import defaultdict
from typing import Dict, List, Optional, Union
from nemo.collections.common.tokenizers.text_to_speech.ipa_lexicon import get_grapheme_character_set
from nemo.collections.tts.g2p.models.base import BaseG2p
from nemo.collections.tts.g2p.utils import set_grapheme_case
from nemo.utils import logging
class ChineseG2p(BaseG2p):
def __init__(
self,
phoneme_dict: Union[str, pathlib.Path, Dict[str, List[str]]],
phoneme_prefix: str = "#",
phoneme_case: str = "upper",
tone_prefix: str = "#",
ascii_letter_prefix: str = "",
ascii_letter_case: str = "lower",
word_tokenize_func=None,
apply_to_oov_word=None,
mapping_file: Optional[str] = None,
word_segmenter: Optional[str] = None,
):
"""
Chinese G2P module. This module first converts Chinese characters into pinyin sequences using pypinyin, then
pinyin sequences would be further converted into phoneme sequences by looking them up in the `phoneme_dict`.
This G2P module also works with Chinese/English bilingual sentences where English words would be converted
into letters. It is advised to attach prefix symbols for Chinese phonemes and tones to discriminate them
from English letters to avoid any potential symbol set overlaps.
Args:
phoneme_dict (str, Path, Dict): Path to pinyin_dict_nv_22.10.txt dict file or a dict object.
phoneme_prefix (str): Prepend a special symbol to any phonemes in order to distinguish phonemes from
graphemes because there may be overlaps between the two sets. Phoneme dictionary typically applies
uppercase initials and finals. It is suggested to choose a prefix that
is not used or preserved somewhere else. Default to "#".
phoneme_case (str): Specify the case chosen from `"lower"`, `"upper"`, or `"mixed"`, and process the
cases of Chinese phonemes. Default to `"upper"`.
tone_prefix (str): Prepend a special symbol to any tone digits. Default to "#".
ascii_letter_prefix (str): Prepend a special symbol to any ASCII letters. Default to "".
ascii_letter_case (str): Specify the case chosen from `"lower"`, `"upper"`, or `"mixed"`, and process the
cases of non-Chinese words. Default to `"lower"`.
word_tokenize_func: Function for tokenizing text to words.
It has to return List[Tuple[Union[str, List[str]], bool]] where every tuple denotes word representation
and flag whether to leave unchanged or not.
It is expected that unchangeable word representation will be represented as List[str], other cases are
represented as str.
It is useful to mark word as unchangeable which is already in phoneme representation.
apply_to_oov_word: Function that will be applied to out of phoneme_dict word.
word_segmenter: method that will be applied to segment utterances into words for better polyphone disambiguation.
"""
assert phoneme_dict is not None, "Please set the phoneme_dict path."
assert word_segmenter in [
None,
'jieba',
], f"{word_segmenter} is not supported now. Please choose correct word_segmenter."
if phoneme_prefix is None:
phoneme_prefix = ""
if tone_prefix is None:
tone_prefix = ""
if ascii_letter_prefix is None:
ascii_letter_prefix = ""
# phonemes
phoneme_dict = (
self._parse_as_pinyin_dict(phoneme_dict, phoneme_prefix, phoneme_case)
if isinstance(phoneme_dict, str) or isinstance(phoneme_dict, pathlib.Path)
else phoneme_dict
)
self.phoneme_list = sorted({pron for prons in phoneme_dict.values() for pron in prons})
# tones
self.tone_dict = {str(x): tone_prefix + str(x) for x in range(1, 6)}
self.tone_list = sorted(self.tone_dict.values())
# ascii letters
self.ascii_letter_dict = {
x: ascii_letter_prefix + x for x in get_grapheme_character_set(locale="en-US", case=ascii_letter_case)
}
self.ascii_letter_list = sorted(self.ascii_letter_dict)
self.ascii_letter_case = ascii_letter_case
if apply_to_oov_word is None:
logging.warning(
"apply_to_oov_word=None, This means that some of words will remain unchanged "
"if they are not handled by any of the rules in self.parse_one_word(). "
"This may be intended if phonemes and chars are both valid inputs, otherwise, "
"you may see unexpected deletions in your input."
)
super().__init__(
phoneme_dict=phoneme_dict,
word_tokenize_func=word_tokenize_func,
apply_to_oov_word=apply_to_oov_word,
mapping_file=mapping_file,
)
if word_segmenter == "jieba":
try:
import jieba
except ImportError as e:
logging.error(e)
# Cut sentences into words to improve polyphone disambiguation
self.word_segmenter = jieba.cut
else:
self.word_segmenter = lambda x: [x]
try:
from pypinyin import Style, lazy_pinyin
from pypinyin_dict.pinyin_data import cc_cedict
except ImportError as e:
logging.error(e)
# replace pypinyin default dict with cc_cedict.txt for polyphone disambiguation
cc_cedict.load()
self._lazy_pinyin = lazy_pinyin
self._Style = Style
@staticmethod
def _parse_as_pinyin_dict(
phoneme_dict_path: Union[str, pathlib.Path], phoneme_prefix: str, phoneme_case: str
) -> Dict[str, List[str]]:
"""Loads pinyin dict file, and generates a set of all valid symbols."""
g2p_dict = defaultdict(list)
with open(phoneme_dict_path, 'r') as file:
for line in file:
# skip empty lines and comment lines starting with `;;;`.
if line.startswith(";;;") or len(line.strip()) == 0:
continue
parts = line.split('\t')
# Convert the cases of Chinese syllables loaded from the dictionary to lowercase to match the lowercase
# Chinese syllable outputs generated by the function `pypinyin.lazy_pinyin`. Note that the function
# `pypinyin.lazy_pinyin` preserves the cases of ASCII letters.
syllable = parts[0].lower()
pronunciation = set_grapheme_case(parts[1], case=phoneme_case).split()
# add a prefix to distinguish phoneme symbols from non-phoneme symbols.
pronunciation_with_prefix = [phoneme_prefix + pron for pron in pronunciation]
g2p_dict[syllable] = pronunciation_with_prefix
return g2p_dict
def __call__(self, text: str) -> List[str]:
"""
This forward pass function translates Chinese characters into pinyin sequences and then converts the pinyin
into phonemes. It is primarily designed to process text containing Chinese characters, but we have
extended its support to handle text that includes both Chinese and English. This extension was mainly
necessitated by the limited availability of bilingual datasets. The `errors` argument passed to the
`pypinyin.lazy_pinyin` function below is used to process non-Chinese words, where each English word is split
into letters.
For example, the text "我今天去了Apple Store, 买了一个iPhone。" would be converted to the list
`['wo3', 'jin1', 'tian1', 'qu4', 'le5', 'A', 'p', 'p', 'l', 'e', ' ', 'S', 't', 'o', 'r', 'e', ',', ' ', 'mai3',
'le5', 'yi2', 'ge4', 'i', 'P', 'h', 'o', 'n', 'e', '。']`
"""
text = set_grapheme_case(text, case=self.ascii_letter_case)
pinyin_seq = []
words_list = self.word_segmenter(text)
# TODO @xueyang: add a g2p process for non-pinyin words by customizing a function for `errors` argument. For
# example, add a dict look up for English words.
for word in words_list:
pinyin_seq += self._lazy_pinyin(
word,
style=self._Style.TONE3,
neutral_tone_with_five=True,
errors=lambda en_words: [letter for letter in en_words],
)
phoneme_seq = []
for pinyin in pinyin_seq:
# only pinyin has tones while non-pinyin doesn't.
tone_hyp = pinyin[-1]
if tone_hyp in self.tone_dict:
syllable = pinyin[:-1]
assert syllable in self.phoneme_dict, f"Syllable <{syllable}> does not exist in the dictionary."
phoneme_seq += self.phoneme_dict[syllable]
phoneme_seq.append(self.tone_dict[tone_hyp])
# Every pinyin syllable ends with a digit in 1-5 that represents its tone.
# Symbols that are not pinyin, such as English letters and Chinese punctuation marks, are used
# directly as inputs.
elif tone_hyp in self.ascii_letter_dict:
phoneme_seq.append(self.ascii_letter_dict[tone_hyp])
else:
phoneme_seq.append(pinyin)
return phoneme_seq
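# Usage sketch (illustrative): this G2P front end can also be fed an in-memory pinyin dictionary
# instead of a dictionary file. The syllable keys and placeholder phoneme symbols ("N", "I", "H", "AO")
# below are assumptions for demonstration only, not the symbols of NeMo's shipped dictionaries, and
# running it requires the optional `pypinyin`/`pypinyin_dict` packages.
#
#     toy_pinyin_dict = {"ni": ["N", "I"], "hao": ["H", "AO"]}  # hypothetical symbol set
#     g2p = ChineseG2p(phoneme_dict=toy_pinyin_dict, word_segmenter=None)
#     print(g2p("你好"))  # expected shape: ["N", "I", "#3", "H", "AO", "#3"] with the default "#" tone prefix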
| NeMo-main | nemo/collections/tts/g2p/models/zh_cn_pinyin.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import random
import re
from collections import defaultdict
from typing import Callable, Dict, List, Optional, Set, Tuple, Union
from nemo.collections.common.tokenizers.text_to_speech.ipa_lexicon import validate_locale
from nemo.collections.common.tokenizers.text_to_speech.tokenizer_utils import (
LATIN_CHARS_ALL,
any_locale_word_tokenize,
english_word_tokenize,
normalize_unicode_text,
)
from nemo.collections.tts.g2p.models.base import BaseG2p
from nemo.collections.tts.g2p.utils import GRAPHEME_CASE_MIXED, GRAPHEME_CASE_UPPER, set_grapheme_case
from nemo.utils import logging
from nemo.utils.decorators import experimental
@experimental
class IpaG2p(BaseG2p):
# fmt: off
STRESS_SYMBOLS = ["ˈ", "ˌ"]
# Regex for roman characters, accented characters, and locale-agnostic numbers/digits
CHAR_REGEX = re.compile(fr"[{LATIN_CHARS_ALL}\d]")
PUNCT_REGEX = re.compile(fr"[^{LATIN_CHARS_ALL}\d]")
# fmt: on
def __init__(
self,
phoneme_dict: Union[str, pathlib.Path, Dict[str, List[List[str]]]],
locale: str = "en-US",
apply_to_oov_word: Optional[Callable[[str], str]] = None,
ignore_ambiguous_words: bool = True,
heteronyms: Optional[Union[str, pathlib.Path, List[str]]] = None,
use_chars: bool = False,
phoneme_probability: Optional[float] = None,
use_stresses: Optional[bool] = True,
grapheme_case: Optional[str] = GRAPHEME_CASE_UPPER,
grapheme_prefix: Optional[str] = "",
mapping_file: Optional[str] = None,
) -> None:
"""
Generic IPA G2P module. This module converts words from graphemes to International Phonetic Alphabet
representations. Optionally, it can ignore heteronyms, ambiguous words, or words marked as unchangeable
by `word_tokenize_func` (see code for details). Ignored words are left unchanged or passed through
`apply_to_oov_word` for handling.
Args:
phoneme_dict (str, Path, or Dict): Path to file in CMUdict format or an IPA dict object with CMUdict-like
entries. For example,
a dictionary file: scripts/tts_dataset_files/ipa_cmudict-0.7b_nv22.06.txt;
a dictionary object: {..., "Wire": [["ˈ", "w", "a", "ɪ", "ɚ"], ["ˈ", "w", "a", "ɪ", "ɹ"]], ...}.
locale (str): Locale used to determine a locale-specific tokenization logic. Currently, it supports "en-US",
"de-DE", and "es-ES". Defaults to "en-US". Specify None if implementing custom logic for a new locale.
apply_to_oov_word (Callable): Function that deals with the out-of-vocabulary (OOV) words that do not exist
in the `phoneme_dict`.
ignore_ambiguous_words (bool): Whether to skip handling words via phoneme_dict when they have ambiguous
(multiple) phoneme sequences; such words are kept as graphemes. Defaults to True.
heteronyms (str, Path, List[str]): Path to file that includes heteronyms (one word entry per line), or a
list of words.
use_chars (bool): Whether to include chars/graphemes in the token list. It is True if `phoneme_probability`
is not None or if `apply_to_oov_word` function ever returns graphemes.
phoneme_probability (Optional[float]): The probability (0.0 <= ε <= 1.0) that a word in a sentence is
transliterated into a sequence of phonemes rather than kept as a sequence of graphemes. If a random
number drawn for a word is greater than ε, the word is kept as graphemes; otherwise, it is
transliterated into phonemes. Defaults to None, which is equivalent to 1.0, i.e. always transliterating
the word into phonemes. Note that this code path only runs when the word can be transliterated: a word
without an entry in the g2p dict is kept as graphemes, and a word with multiple pronunciations in the
g2p dict is also kept as graphemes when `ignore_ambiguous_words` is True.
use_stresses (Optional[bool]): Whether to include the stress symbols (ˈ and ˌ).
grapheme_case (Optional[str]): Convert all graphemes to uppercase or lowercase, or keep their original
mixed cases. You may want to use this feature to distinguish the grapheme set from the phoneme set
if the two overlap. Defaults to `upper` because the phoneme set only uses lowercase symbols. You could
also explicitly prepend `grapheme_prefix` to distinguish them.
grapheme_prefix (Optional[str]): Prepend a special symbol to any graphemes in order to distinguish graphemes
from phonemes, because there may be overlaps between the two sets. It is suggested to choose a prefix
that is not used or reserved anywhere else; "#" could be a good candidate. Defaults to "".
TODO @borisfom: add docstring for newly added `mapping_file` argument.
"""
self.use_stresses = use_stresses
self.grapheme_case = grapheme_case
self.grapheme_prefix = grapheme_prefix
self.phoneme_probability = phoneme_probability
self.locale = locale
self._rng = random.Random()
if locale is not None:
validate_locale(locale)
if not use_chars and self.phoneme_probability is not None:
self.use_chars = True
logging.warning(
"phoneme_probability was not None, characters will be enabled even though "
"use_chars was set to False."
)
else:
self.use_chars = use_chars
phoneme_dict_obj = self._parse_phoneme_dict(phoneme_dict)
# verify if phoneme dict obj is empty
if phoneme_dict_obj:
_phoneme_dict, self.symbols = self._normalize_dict(phoneme_dict_obj)
else:
raise ValueError(f"{phoneme_dict} contains no entries!")
if apply_to_oov_word is None:
logging.warning(
"apply_to_oov_word=None, This means that some of words will remain unchanged "
"if they are not handled by any of the rules in self.parse_one_word(). "
"This may be intended if phonemes and chars are both valid inputs, otherwise, "
"you may see unexpected deletions in your input."
)
# word_tokenize_func returns a List[Tuple[List[str], bool]] where every tuple denotes
# a word representation (a list tokens) and a flag indicating whether to process the word or
# leave it unchanged.
if locale == "en-US":
word_tokenize_func = english_word_tokenize
else:
word_tokenize_func = any_locale_word_tokenize
super().__init__(
phoneme_dict=_phoneme_dict,
word_tokenize_func=word_tokenize_func,
apply_to_oov_word=apply_to_oov_word,
mapping_file=mapping_file,
)
self.ignore_ambiguous_words = ignore_ambiguous_words
if isinstance(heteronyms, str) or isinstance(heteronyms, pathlib.Path):
self.heteronyms = set(self._parse_file_by_lines(heteronyms))
elif isinstance(heteronyms, list) and all(isinstance(het, str) for het in heteronyms):
self.heteronyms = set(heteronyms)
else:
self.heteronyms = None
if self.heteronyms:
self.heteronyms = {set_grapheme_case(het, case=self.grapheme_case) for het in self.heteronyms}
@staticmethod
def _parse_phoneme_dict(
phoneme_dict: Union[str, pathlib.Path, Dict[str, List[List[str]]]]
) -> Dict[str, List[List[str]]]:
"""
parse an input IPA dictionary and save it as a dict object.
Args:
phoneme_dict (Union[str, pathlib.Path, dict]): Path to file in CMUdict format or an IPA dict object with
CMUdict-like entries. For example,
a dictionary file: scripts/tts_dataset_files/ipa_cmudict-0.7b_nv22.06.txt;
a dictionary object: {..., "Wire": [["ˈ", "w", "a", "ɪ", "ɚ"], ["ˈ", "w", "a", "ɪ", "ɹ"]], ...}.
Returns: a dict object (Dict[str, List[List[str]]]).
"""
if isinstance(phoneme_dict, str) or isinstance(phoneme_dict, pathlib.Path):
# load the dictionary file where there may exist a digit suffix after a word, e.g. "Word(2)", which
# represents the pronunciation variant of that word.
phoneme_dict_obj = defaultdict(list)
_alt_re = re.compile(r"\([0-9]+\)")
with open(phoneme_dict, "r", encoding="utf-8") as fdict:
for line in fdict:
# skip the empty lines
if len(line) == 0:
continue
# Note that latin character pattern should be consistent with
# nemo.collections.tts.g2p.data.data_utils.LATIN_CHARS_ALL. It is advised to extend its character
# coverage if adding the support of new languages.
# TODO @xueyang: unify hardcoded range of characters with LATIN_CHARS_ALL to avoid duplicates.
line = normalize_unicode_text(line)
if (
'A' <= line[0] <= 'Z'
or 'a' <= line[0] <= 'z'
or 'À' <= line[0] <= 'Ö'
or 'Ø' <= line[0] <= 'ö'
or 'ø' <= line[0] <= 'ÿ'
or line[0] == "'"
):
parts = line.strip().split(maxsplit=1)
word = re.sub(_alt_re, "", parts[0])
prons = re.sub(r"\s+", "", parts[1])
phoneme_dict_obj[word].append(list(prons))
else:
# Load phoneme_dict as dictionary object
logging.info("Loading phoneme_dict as a Dict object, and validating its entry format.")
phoneme_dict_obj = {}
for word, prons in phoneme_dict.items():
# validate dict entry format
assert isinstance(
prons, list
), f"Pronunciation type <{type(prons)}> is not supported. Please convert to <list>."
# normalize word with NFC form
word = normalize_unicode_text(word)
# normalize phonemes with NFC form
prons = [[normalize_unicode_text(p) for p in pron] for pron in prons]
phoneme_dict_obj.update({word: prons})
return phoneme_dict_obj
def replace_dict(self, phoneme_dict: Union[str, pathlib.Path, Dict[str, List[List[str]]]]):
"""
Replace model's phoneme dictionary with a custom one
"""
self.phoneme_dict = self._parse_phoneme_dict(phoneme_dict)
@staticmethod
def _parse_file_by_lines(p: Union[str, pathlib.Path]) -> List[str]:
with open(p, 'r') as f:
return [line.rstrip() for line in f.readlines()]
def _prepend_prefix_for_one_word(self, word: str) -> List[str]:
return [f"{self.grapheme_prefix}{character}" for character in word]
def _normalize_dict(self, phoneme_dict_obj: Dict[str, List[List[str]]]) -> Tuple[Dict[str, List[List[str]]], Set]:
"""
Parse a python dict object according to the decision on word cases and removal of lexical stress markers.
Args:
phoneme_dict_obj (Dict[str, List[List[str]]]): a dictionary object.
e.g. {..., "Wire": [["ˈ", "w", "a", "ɪ", "ɚ"], ["ˈ", "w", "a", "ɪ", "ɹ"]], ...}
Returns:
g2p_dict (dict): processed dict.
symbols (set): an IPA phoneme set, or its union with the grapheme set.
"""
g2p_dict = defaultdict(list)
symbols = set()
for word, prons in phoneme_dict_obj.items():
# process word
# update word cases.
word_new = set_grapheme_case(word, case=self.grapheme_case)
# add grapheme symbols if `use_chars=True`.
if self.use_chars:
# remove punctuations within a word. Punctuations can exist at the start, middle, and end of a word.
word_no_punct = self.PUNCT_REGEX.sub('', word_new)
# add prefix to distinguish graphemes from phonemes.
symbols.update(self._prepend_prefix_for_one_word(word_no_punct))
# process IPA pronunciations
# update phoneme symbols by removing lexical stress markers if `use_stresses=False`.
prons_new = list()
if not self.use_stresses:
for pron in prons:
prons_new.append([symbol for symbol in pron if symbol not in self.STRESS_SYMBOLS])
else:
prons_new = prons
# update symbols
for pron in prons_new:
symbols.update(pron) # This will insert each char individually
# update dict entry
g2p_dict[word_new] = prons_new
# duplicate word entries if grapheme_case is mixed. Even though grapheme_case is set to mixed, the words in
# the original input text and the g2p_dict remain unchanged, so they could still be lowercase, uppercase,
# or mixed-case as defined in the `set_grapheme_case` func. When mapping an uppercase word, e.g. "HELLO",
# into phonemes using a g2p_dict with {"Hello": [["həˈɫoʊ"]]}, "HELLO" cannot find its pronunciations
# because of the case mismatch between the words. Augmenting the g2p_dict with the uppercase word entry,
# e.g. {"Hello": [["həˈɫoʊ"]], "HELLO": [["həˈɫoʊ"]]}, makes it possible to find "HELLO"'s pronunciations
# rather than directly considering it as an OOV.
if self.grapheme_case == GRAPHEME_CASE_MIXED and not word_new.isupper():
g2p_dict[word_new.upper()] = prons_new
return g2p_dict, symbols
def replace_symbols(self, symbols, keep_alternate=True):
"""Replaces the vocabulary of symbols with the one given.
Also filters out any entries with illegal graphemes or phonemes according to the new vocab.
Args:
symbols (List, Set): User-provided set of valid symbols, both graphemes and phonemes
keep_alternate (bool): Whether to keep the other pronunciation(s) of a word if not all contain
illegal phonemes (and the word doesn't contain illegal graphemes).
Warning: this may change a word from being ambiguous to having only one valid pronunciation.
Defaults to True.
"""
new_symbols = set(symbols)
# Keep track of what will need to be deleted or (if keep_alternate=True) replaced
deletion_words = []
replacement_dict = {}
for word, prons in self.phoneme_dict.items():
# Check for illegal grapheme in the word itself
word_graphemes = set(self._prepend_prefix_for_one_word(set_grapheme_case(word, self.grapheme_case)))
word_diff = word_graphemes - new_symbols
if word_diff:
deletion_words.append(word)
continue
# Check for illegal phonemes in the pronunciation(s)
legal_prons = []
for pron in prons:
pron_diff = set(pron) - new_symbols
if not pron_diff:
legal_prons.append(pron)
# Check if at least one pronunciation was illegal
if len(legal_prons) != len(prons):
if not keep_alternate: # Remove the word and entry fully
deletion_words.append(word)
else: # Need to check if all prons were illegal
if not legal_prons:
deletion_words.append(word)
else:
replacement_dict[word] = legal_prons
# Update pronunciation dictionary as needed
for del_word in deletion_words:
del self.phoneme_dict[del_word]
if keep_alternate:
self.phoneme_dict.update(replacement_dict)
self.symbols = new_symbols
def is_unique_in_phoneme_dict(self, word: str) -> bool:
return len(self.phoneme_dict[word]) == 1
def parse_one_word(self, word: str) -> Tuple[List[str], bool]:
"""Returns parsed `word` and `status` (bool: False if word wasn't handled, True otherwise).
"""
word = set_grapheme_case(word, case=self.grapheme_case)
# Punctuation (assumes other chars have been stripped)
if self.CHAR_REGEX.search(word) is None:
return list(word), True
# Keep graphemes of a word with a probability.
if self.phoneme_probability is not None and self._rng.random() > self.phoneme_probability:
return self._prepend_prefix_for_one_word(word), True
# Heteronyms
if self.heteronyms and word in self.heteronyms:
return self._prepend_prefix_for_one_word(word), True
# special cases for en-US when transliterating a word into a list of phonemes.
# TODO @xueyang: add special cases for any other languages upon new findings.
if self.locale == "en-US":
# `'s` suffix (with apostrophe) - not in phoneme dict
if len(word) > 2 and (word.endswith("'s") or word.endswith("'S")):
word_found = None
if (word not in self.phoneme_dict) and (word.upper() not in self.phoneme_dict):
if word[:-2] in self.phoneme_dict:
word_found = word[:-2]
elif word[:-2].upper() in self.phoneme_dict:
word_found = word[:-2].upper()
if word_found is not None and (
not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word_found)
):
if word_found[-1] in ['T', 't']:
# for example, "airport's" doesn't exist in the dict while "airport" exists. So append a phoneme
# /s/ at the end of "airport"'s first pronunciation.
return self.phoneme_dict[word_found][0] + ["s"], True
elif word_found[-1] in ['S', 's']:
# for example, "jones's" doesn't exist in the dict while "jones" exists. So append two phonemes,
# /ɪ/ and /z/ at the end of "jones"'s first pronunciation.
return self.phoneme_dict[word_found][0] + ["ɪ", "z"], True
else:
return self.phoneme_dict[word_found][0] + ["z"], True
# `s` suffix (without apostrophe) - not in phoneme dict
if len(word) > 1 and (word.endswith("s") or word.endswith("S")):
word_found = None
if (word not in self.phoneme_dict) and (word.upper() not in self.phoneme_dict):
if word[:-1] in self.phoneme_dict:
word_found = word[:-1]
elif word[:-1].upper() in self.phoneme_dict:
word_found = word[:-1].upper()
if word_found is not None and (
not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word_found)
):
if word_found[-1] in ['T', 't']:
# for example, "airports" doesn't exist in the dict while "airport" exists. So append a phoneme
# /s/ at the end of "airport"'s first pronunciation.
return self.phoneme_dict[word_found][0] + ["s"], True
else:
return self.phoneme_dict[word_found][0] + ["z"], True
# For the words that have a single pronunciation, directly look it up in the phoneme_dict; for the
# words that have multiple pronunciation variants, if we don't want to ignore them, then directly choose their
# first pronunciation variant as the target phonemes.
# TODO @xueyang: this is a temporary solution, but it is not optimal to always choose the first pronunciation
# variant as the target when a word has multiple pronunciation variants. We need to explore a better approach
# for selecting the pronunciation variant that aligns with the reference audio.
if word in self.phoneme_dict and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word)):
return self.phoneme_dict[word][0], True
if (
self.grapheme_case == GRAPHEME_CASE_MIXED
and word not in self.phoneme_dict
and word.upper() in self.phoneme_dict
):
word = word.upper()
if not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word):
return self.phoneme_dict[word][0], True
if self.apply_to_oov_word is not None:
return self.apply_to_oov_word(word), True
else:
return self._prepend_prefix_for_one_word(word), False
def __call__(self, text: str) -> List[str]:
text = normalize_unicode_text(text)
if self.heteronym_model is not None:
try:
text = self.heteronym_model.disambiguate(sentences=[text])[1][0]
except Exception as e:
logging.warning(f"Heteronym model failed {e}, skipping")
words_list_of_tuple = self.word_tokenize_func(text)
prons = []
for words, without_changes in words_list_of_tuple:
if without_changes:
# for example: (["NVIDIA", "unchanged"], True). "NVIDIA" is considered as a single token.
prons.extend([f"{self.grapheme_prefix}{word}" for word in words])
else:
assert (
len(words) == 1
), f"{words} should only have a single item when `without_changes` is False, but found {len(words)}."
word = words[0]
pron, is_handled = self.parse_one_word(word)
# If `is_handled` is False, then the only possible case is that the word is an OOV. The OOV may contain a
# hyphen, which is why it doesn't show up in the g2p dictionary. We need to split it into sub-words by the
# hyphen and parse the sub-words again in case any sub-word exists in the g2p dictionary.
if not is_handled:
subwords_by_hyphen = word.split("-")
if len(subwords_by_hyphen) > 1:
pron = [] # reset the previous pron
for sub_word in subwords_by_hyphen:
p, _ = self.parse_one_word(sub_word)
pron.extend(p)
pron.append("-")
pron.pop() # remove the redundant hyphen that is previously appended at the end of the word.
prons.extend(pron)
return prons
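# Usage sketch (illustrative): IpaG2p accepts an in-memory IPA dictionary in place of a CMUdict-style
# file. The tiny dictionary below is an assumption for demonstration; real setups typically load
# scripts/tts_dataset_files/ipa_cmudict-0.7b_nv22.06.txt. OOV words ("world" here) come back as
# graphemes because apply_to_oov_word is left as None.
#
#     ipa_dict = {"hello": [["h", "ə", "ˈ", "ɫ", "o", "ʊ"]]}
#     g2p = IpaG2p(phoneme_dict=ipa_dict, locale="en-US", use_chars=True)
#     print(g2p("hello world"))
#     # -> phonemes for "hello" followed by the graphemes of "WORLD" (grapheme_case defaults to upper)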
| NeMo-main | nemo/collections/tts/g2p/models/i18n_ipa.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Optional
from nemo.utils import logging
class BaseG2p(ABC):
def __init__(
self,
phoneme_dict=None,
word_tokenize_func=lambda x: x,
apply_to_oov_word=None,
mapping_file: Optional[str] = None,
):
"""Abstract class for creating an arbitrary module to convert grapheme words
to phoneme sequences, leave unchanged, or use apply_to_oov_word.
Args:
phoneme_dict: Arbitrary representation of dictionary (phoneme -> grapheme) for known words.
word_tokenize_func: Function for tokenizing text to words.
apply_to_oov_word: Function that will be applied to out of phoneme_dict word.
"""
self.phoneme_dict = phoneme_dict
self.word_tokenize_func = word_tokenize_func
self.apply_to_oov_word = apply_to_oov_word
self.mapping_file = mapping_file
self.heteronym_model = None # heteronym classification model
@abstractmethod
def __call__(self, text: str) -> str:
pass
# TODO @xueyang: replace `wordid_to_phonemes_file` default variable with a global variable defined in util file.
def setup_heteronym_model(
self,
heteronym_model,
wordid_to_phonemes_file: str = "../../../scripts/tts_dataset_files/wordid_to_ipa-0.7b_nv22.10.tsv",
):
"""
Add heteronym classification model to TTS preprocessing pipeline to disambiguate heteronyms.
Heteronym model has a list of supported heteronyms but only heteronyms specified in
wordid_to_phonemes_file will be converted to phoneme form during heteronym model inference;
the rest will be left in grapheme form.
Args:
heteronym_model: Initialized HeteronymClassificationModel
wordid_to_phonemes_file: Path to a file with mapping from wordid predicted by heteronym model to phonemes
"""
try:
from nemo.collections.tts.g2p.models.heteronym_classification import HeteronymClassificationModel
self.heteronym_model = heteronym_model
self.heteronym_model.set_wordid_to_phonemes(wordid_to_phonemes_file)
except ImportError as e:
logging.warning("Heteronym model setup will be skipped")
logging.error(e)
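# Usage sketch (illustrative): BaseG2p is abstract, so a concrete subclass only needs to implement
# __call__. The dictionary-lookup subclass below is hypothetical and exists only for demonstration.
if __name__ == "__main__":
    class DictLookupG2p(BaseG2p):
        def __call__(self, text: str) -> str:
            # look up known words, fall back to the raw word for anything not in phoneme_dict
            return " ".join(self.phoneme_dict.get(w, w) for w in text.split())
    demo_g2p = DictLookupG2p(phoneme_dict={"nemo": "N EH M OW"})
    print(demo_g2p("nemo toolkit"))  # -> "N EH M OW toolkit"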
| NeMo-main | nemo/collections/tts/g2p/models/base.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import random
import re
import time
from typing import Optional
import nltk
import torch
from nemo.collections.common.tokenizers.text_to_speech.tokenizer_utils import english_word_tokenize
from nemo.collections.tts.g2p.models.base import BaseG2p
from nemo.utils import logging
from nemo.utils.get_rank import is_global_rank_zero
class EnglishG2p(BaseG2p):
def __init__(
self,
phoneme_dict=None,
word_tokenize_func=english_word_tokenize,
apply_to_oov_word=None,
ignore_ambiguous_words=True,
heteronyms=None,
encoding='latin-1',
phoneme_probability: Optional[float] = None,
mapping_file: Optional[str] = None,
):
"""English G2P module. This module converts words from grapheme to phoneme representation using phoneme_dict in CMU dict format.
Optionally, it can ignore words which are heteronyms, ambiguous or marked as unchangeable by word_tokenize_func (see code for details).
Ignored words are left unchanged or passed through apply_to_oov_word for handling.
Args:
phoneme_dict (str, Path, Dict): Path to file in CMUdict format or dictionary of CMUdict-like entries.
word_tokenize_func: Function for tokenizing text to words.
It must return List[Tuple[Union[str, List[str]], bool]], where each tuple holds a word representation and a flag indicating whether to leave the word unchanged.
Word representations that must stay unchanged are expected to be List[str]; all other cases are represented as str.
This is useful for marking words that are already in phoneme representation as unchangeable.
apply_to_oov_word: Function applied to words that are not found in phoneme_dict (OOV words).
ignore_ambiguous_words: Whether to skip handling words via phoneme_dict when they have ambiguous (multiple) phoneme sequences. Defaults to True.
heteronyms (str, Path, List): Path to a file with heteronyms (one word per line) or a list of words.
encoding: Encoding type.
phoneme_probability (Optional[float]): The probability (0.0 < p < 1.0) that each word is phonemized. Defaults to None, which is the same as 1.0.
Note that this code path is only run if the word can be phonemized. For example, if the word does not have an entry in the g2p dict, it will be returned
as characters, and if the word has multiple entries and ignore_ambiguous_words is True, it will also be returned as characters.
"""
phoneme_dict = (
self._parse_as_cmu_dict(phoneme_dict, encoding)
if isinstance(phoneme_dict, str) or isinstance(phoneme_dict, pathlib.Path) or phoneme_dict is None
else phoneme_dict
)
if apply_to_oov_word is None:
logging.warning(
"apply_to_oov_word=None, This means that some of words will remain unchanged "
"if they are not handled by any of the rules in self.parse_one_word(). "
"This may be intended if phonemes and chars are both valid inputs, otherwise, "
"you may see unexpected deletions in your input."
)
super().__init__(
phoneme_dict=phoneme_dict,
word_tokenize_func=word_tokenize_func,
apply_to_oov_word=apply_to_oov_word,
mapping_file=mapping_file,
)
self.ignore_ambiguous_words = ignore_ambiguous_words
self.heteronyms = (
set(self._parse_file_by_lines(heteronyms, encoding))
if isinstance(heteronyms, str) or isinstance(heteronyms, pathlib.Path)
else heteronyms
)
self.phoneme_probability = phoneme_probability
self._rng = random.Random()
@staticmethod
def _parse_as_cmu_dict(phoneme_dict_path=None, encoding='latin-1'):
if phoneme_dict_path is None:
# this part of the code downloads a file, but it is not rank-zero guarded by itself.
# Check whether torch distributed is available; if not, have global rank zero download the corpora and
# make all other ranks sleep for a minute.
if torch.distributed.is_available() and torch.distributed.is_initialized():
group = torch.distributed.group.WORLD
if is_global_rank_zero():
try:
nltk.data.find('corpora/cmudict.zip')
except LookupError:
nltk.download('cmudict', quiet=True)
torch.distributed.barrier(group=group)
elif is_global_rank_zero():
logging.error(
f"Torch distributed needs to be initialized before you initialized EnglishG2p. This class is prone to "
"data access race conditions. Now downloading corpora from global rank 0. If other ranks pass this "
"before rank 0, errors might result."
)
try:
nltk.data.find('corpora/cmudict.zip')
except LookupError:
nltk.download('cmudict', quiet=True)
else:
logging.error(
f"Torch distributed needs to be initialized before you initialized EnglishG2p. This class is prone to "
"data access race conditions. This process is not rank 0, and now going to sleep for 1 min. If this "
"rank wakes from sleep prior to rank 0 finishing downloading, errors might result."
)
time.sleep(60)
logging.warning(
f"English g2p_dict will be used from nltk.corpus.cmudict.dict(), because phoneme_dict_path=None. "
"Note that nltk.corpus.cmudict.dict() has old version (0.6) of CMUDict. "
"You can use the latest official version of CMUDict (0.7b) with additional changes from NVIDIA directly from NeMo "
"using the path scripts/tts_dataset_files/cmudict-0.7b_nv22.10."
)
return nltk.corpus.cmudict.dict()
_alt_re = re.compile(r'\([0-9]+\)')
g2p_dict = {}
with open(phoneme_dict_path, encoding=encoding) as file:
for line in file:
if len(line) and ('A' <= line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
word = word.lower()
pronunciation = parts[1].strip().split(" ")
if word in g2p_dict:
g2p_dict[word].append(pronunciation)
else:
g2p_dict[word] = [pronunciation]
return g2p_dict
@staticmethod
def _parse_file_by_lines(p, encoding):
with open(p, encoding=encoding) as f:
return [l.rstrip() for l in f.readlines()]
def is_unique_in_phoneme_dict(self, word):
return len(self.phoneme_dict[word]) == 1
def parse_one_word(self, word: str):
"""
Returns parsed `word` and `status` as bool.
`status` will be `False` if word wasn't handled, `True` otherwise.
"""
if self.phoneme_probability is not None and self._rng.random() > self.phoneme_probability:
return word, True
# punctuation or whitespace.
if re.search(r"[a-zA-ZÀ-ÿ\d]", word) is None:
return list(word), True
# heteronyms
if self.heteronyms is not None and word in self.heteronyms:
return word, True
# `'s` suffix
if (
len(word) > 2
and word.endswith("'s")
and (word not in self.phoneme_dict)
and (word[:-2] in self.phoneme_dict)
and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word[:-2]))
):
return self.phoneme_dict[word[:-2]][0] + ["Z"], True
# `s` suffix
if (
len(word) > 1
and word.endswith("s")
and (word not in self.phoneme_dict)
and (word[:-1] in self.phoneme_dict)
and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word[:-1]))
):
return self.phoneme_dict[word[:-1]][0] + ["Z"], True
# phoneme dict
if word in self.phoneme_dict and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word)):
return self.phoneme_dict[word][0], True
if self.apply_to_oov_word is not None:
return self.apply_to_oov_word(word), True
else:
return word, False
def __call__(self, text):
words = self.word_tokenize_func(text)
prons = []
for word, without_changes in words:
if without_changes:
prons.extend(word)
continue
word_str = word[0]
word_by_hyphen = word_str.split("-")
pron, is_handled = self.parse_one_word(word_str)
if not is_handled and len(word_by_hyphen) > 1:
pron = []
for sub_word in word_by_hyphen:
p, _ = self.parse_one_word(sub_word)
pron.extend(p)
pron.extend(["-"])
pron.pop()
prons.extend(pron)
return prons
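# Usage sketch (illustrative): EnglishG2p also accepts an in-memory CMUdict-style dictionary, which
# avoids the nltk download path above. The two-entry dictionary is an assumption for demonstration;
# production configs usually point phoneme_dict at scripts/tts_dataset_files/cmudict-0.7b_nv22.10.
#
#     cmu_dict = {"hello": [["HH", "AH0", "L", "OW1"]], "world": [["W", "ER1", "L", "D"]]}
#     g2p = EnglishG2p(phoneme_dict=cmu_dict, heteronyms=None)
#     print(g2p("hello world"))
#     # -> ARPABET phonemes for both words, with whitespace/punctuation tokens passed through unchanged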
| NeMo-main | nemo/collections/tts/g2p/models/en_us_arpabet.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import torch
from transformers import PreTrainedTokenizerBase
from nemo.core.classes import Dataset
from nemo.utils import logging
__all__ = ["T5G2PDataset"]
class T5G2PDataset(Dataset):
"""
Creates a dataset to train a T5G2P model.
"""
def __init__(
self,
manifest_filepath: str,
tokenizer: PreTrainedTokenizerBase,
max_source_len: int = 512,
max_target_len: int = 512,
do_lower: bool = False,
grapheme_field: str = "text_graphemes",
phoneme_field: str = "text",
with_labels: bool = True,
):
"""
Dataset to train T5-based G2P generative model.
Args:
manifest_filepath: path to a .json manifest that contains "phoneme_field" and "grapheme_field"
tokenizer: pretrained T5 tokenizer
max_source_len: max length of the grapheme input sequence (examples exceeding len will be dropped)
max_target_len: max length of the phoneme sequence (examples exceeding len will be dropped)
do_lower: a flag that indicates whether to lower case input grapheme sequence
phoneme_field: name of the field in manifest_filepath for ground truth phonemes
grapheme_field: name of the field in manifest_filepath for input grapheme text
with_labels: set to True for training and False for inference
"""
super().__init__()
if not os.path.exists(manifest_filepath):
raise ValueError(f"{manifest_filepath} not found")
self.tokenizer = tokenizer
self.max_source_len = max_source_len
self.max_target_len = max_target_len
self.do_lower = do_lower
self.with_labels = with_labels
self.data = []
num_filtered = 0
# Load grapheme/phoneme sequence pairs into self.data
with open(manifest_filepath, 'r') as f_in:
logging.info(f"Loading dataset from: {manifest_filepath}")
for line in f_in:
item = json.loads(line)
graphemes = item[grapheme_field]
if do_lower:
graphemes = graphemes.lower()
if with_labels:
graphemes_len = len(self.tokenizer.tokenize(graphemes))
if graphemes_len > max_source_len:
num_filtered += 1
logging.debug(f"dropping {graphemes_len} longer max_source_len")
continue
target_len = len(self.tokenizer.tokenize(item[phoneme_field]))
if max_target_len > 0 and target_len > max_target_len:
num_filtered += 1
logging.debug(f"dropping {target_len} longer max_target_len")
continue
self.data.append({"graphemes": graphemes, "phonemes": item[phoneme_field]})
else:
# truncate input graphemes for inference if the length exceeds max_source_len
graphemes_tokenized = self.tokenizer(graphemes)["input_ids"]
if len(graphemes_tokenized) > max_source_len:
# -1 for the special end-of-sequence token </s> appended by the tokenizer
graphemes_tokenized_truncated = graphemes_tokenized[: max_source_len - 1]
graphemes = self.tokenizer.batch_decode([graphemes_tokenized_truncated])[0]
self.data.append({"graphemes": graphemes})
logging.info(f"Filtered {num_filtered} too long entries from {manifest_filepath}.")
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def _collate_fn(self, batch):
graphemes_batch = [entry["graphemes"] for entry in batch]
# Encode inputs (graphemes)
input_encoding = self.tokenizer(
graphemes_batch, padding='longest', max_length=self.max_source_len, truncation=True, return_tensors='pt',
)
input_ids, attention_mask = input_encoding.input_ids, input_encoding.attention_mask
output = (input_ids, attention_mask)
# labels are available during training and evaluation but not inference
if self.with_labels:
# Encode targets (phonemes)
phonemes_batch = [entry["phonemes"] for entry in batch]
target_encoding = self.tokenizer(
phonemes_batch, padding='longest', max_length=self.max_target_len, truncation=True,
)
labels = target_encoding.input_ids
# Need to replace padding tokens w/ -100 for loss to ignore them
labels = [
[(label if label != self.tokenizer.pad_token_id else -100) for label in labels_example]
for labels_example in labels
]
labels = torch.tensor(labels)
output = (input_ids, attention_mask, labels) # grapheme IDs, attention mask, phoneme IDs
return output
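# Usage sketch (illustrative): building this dataset needs a .json manifest (one JSON object per line)
# and a pretrained T5 tokenizer. The manifest path and the "t5-small" checkpoint are assumptions for
# demonstration; the checkpoint is downloaded from Hugging Face on first use.
#
#     # manifest line example: {"text_graphemes": "hello", "text": "həˈɫoʊ"}
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("t5-small")
#     ds = T5G2PDataset(manifest_filepath="train_manifest.json", tokenizer=tokenizer, with_labels=True)
#     input_ids, attention_mask, labels = ds._collate_fn([ds[0], ds[1]])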
| NeMo-main | nemo/collections/tts/g2p/data/t5.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import List
import torch
from transformers import PreTrainedTokenizerBase
from nemo.core.classes import Dataset
from nemo.utils import logging
__all__ = ["CTCG2PBPEDataset"]
class CTCG2PBPEDataset(Dataset):
def __init__(
self,
manifest_filepath: str,
tokenizer_graphemes: PreTrainedTokenizerBase,
tokenizer_phonemes: PreTrainedTokenizerBase,
do_lower: bool = True,
labels: List[str] = None,
max_source_len: int = 512,
phoneme_field: str = "text",
grapheme_field: str = "text_graphemes",
with_labels: bool = True,
):
"""
Creates a dataset to train CTC-based G2P models.
Args:
manifest_filepath: path to a .json manifest that contains "phoneme_field" and "grapheme_field"
tokenizer_graphemes: tokenizer for graphemes
tokenizer_phonemes: tokenizer for phonemes
do_lower: set to True to lower case input graphemes
labels: output labels (tokenizer_phonemes vocabulary)
max_source_len: max length of the grapheme input sequence (examples exceeding len will be dropped)
phoneme_field: name of the field in manifest_filepath for ground truth phonemes
grapheme_field: name of the field in manifest_filepath for input grapheme text
with_labels: set to True for training and False for inference
"""
super().__init__()
if not os.path.exists(manifest_filepath):
raise ValueError(f"{manifest_filepath} not found")
self.manifest = manifest_filepath
self.tokenizer_graphemes = tokenizer_graphemes
self.tokenizer_phonemes = tokenizer_phonemes
self.max_source_len = max_source_len
self.labels = labels
self.labels_tkn2id = {l: i for i, l in enumerate(labels)}
self.data = []
self.pad_token = 0
self.with_labels = with_labels
removed_ctc_max = 0
removed_source_max = 0
with open(manifest_filepath, "r") as f_in:
logging.debug(f"Loading dataset from: {manifest_filepath}")
for i, line in enumerate(f_in):
item = json.loads(line)
if do_lower:
item[grapheme_field] = item[grapheme_field].lower()
if isinstance(self.tokenizer_graphemes, PreTrainedTokenizerBase):
grapheme_tokens = self.tokenizer_graphemes(item[grapheme_field])
grapheme_tokens_len = len(grapheme_tokens["input_ids"])
else:
grapheme_tokens = self.tokenizer_graphemes.text_to_ids(item[grapheme_field])
grapheme_tokens_len = len(grapheme_tokens)
if with_labels:
target_tokens = self.tokenizer_phonemes.text_to_ids(item[phoneme_field])
target_len = len(target_tokens)
if target_len > grapheme_tokens_len:
removed_ctc_max += 1
continue
if grapheme_tokens_len > max_source_len:
removed_source_max += 1
continue
self.data.append(
{
"graphemes": item[grapheme_field],
"phonemes": item[phoneme_field],
"target": target_tokens,
"target_len": target_len,
}
)
else:
if len(grapheme_tokens) > max_source_len:
item[grapheme_field] = item[grapheme_field][:max_source_len]
removed_source_max += 1
self.data.append(
{"graphemes": item[grapheme_field],}
)
logging.info(
f"Removed {removed_ctc_max} examples on CTC constraint, {removed_source_max} examples based on max_source_len from {manifest_filepath}"
)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def map(self, text: str) -> List[int]:
""" Maps target label text to a list of label ids."""
tokens = []
for word in text.split():
tokens.append(self.labels_tkn2id[word])
return tokens
def _collate_fn(self, batch):
graphemes_batch = [entry["graphemes"] for entry in batch]
# Encode inputs (graphemes)
# for ByT5 encoder
if isinstance(self.tokenizer_graphemes, PreTrainedTokenizerBase):
input_encoding = self.tokenizer_graphemes(
graphemes_batch,
padding='longest',
max_length=self.max_source_len,
truncation=True,
return_tensors='pt',
)
input_ids, attention_mask = input_encoding.input_ids, input_encoding.attention_mask
input_len = torch.sum(attention_mask, 1) - 1
else:
# for Conformer encoder
input_ids = [self.tokenizer_graphemes.text_to_ids(sentence) for sentence in graphemes_batch]
input_len = [len(entry) for entry in input_ids]
max_len = max(input_len)
input_ids = [entry + [0] * (max_len - entry_len) for entry, entry_len in zip(input_ids, input_len)]
attention_mask = None # not used with Conformer encoder
input_ids = torch.tensor(input_ids)
input_len = torch.tensor(input_len)
# inference
if not self.with_labels:
output = (input_ids, attention_mask, input_len)
# Encode targets (phonemes)
else:
targets = [torch.tensor(entry["target"]) for entry in batch]
target_lengths = [torch.tensor(entry["target_len"]) for entry in batch]
max_target_len = max(target_lengths)
padded_targets = []
for target, target_len in zip(targets, target_lengths):
pad = (0, max_target_len - target_len)
target_pad = torch.nn.functional.pad(target, pad, value=len(self.labels))
padded_targets.append(target_pad)
padded_targets = torch.stack(padded_targets)
target_lengths = torch.stack(target_lengths)
output = (input_ids, attention_mask, input_len, padded_targets, target_lengths)
return output
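# Usage sketch (illustrative): the grapheme side can use either a Hugging Face tokenizer (the ByT5
# branch of _collate_fn) or a NeMo tokenizer exposing text_to_ids (the Conformer branch). The manifest
# path, the checkpoint name, and the phoneme tokenizer variable below are all assumptions.
#
#     from transformers import AutoTokenizer
#     tokenizer_graphemes = AutoTokenizer.from_pretrained("google/byt5-small")
#     # tokenizer_phonemes must expose text_to_ids(), e.g. a NeMo tokenizer built over the phoneme labels
#     ds = CTCG2PBPEDataset(
#         manifest_filepath="train_manifest.json",
#         tokenizer_graphemes=tokenizer_graphemes,
#         tokenizer_phonemes=tokenizer_phonemes,
#         labels=phoneme_labels,  # List[str] vocabulary of tokenizer_phonemes
#         with_labels=True,
#     )
#     input_ids, attention_mask, input_len, targets, target_lengths = ds._collate_fn([ds[0]])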
| NeMo-main | nemo/collections/tts/g2p/data/ctc.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/tts/g2p/data/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.core.classes import Dataset
from nemo.utils import logging
__all__ = ["HeteronymClassificationDataset"]
class HeteronymClassificationDataset(Dataset):
def __init__(
self,
manifest: str,
tokenizer: TokenizerSpec,
heteronym_dict: Dict[str, Dict[str, str]],
wordid_to_idx: Dict[str, int],
max_seq_len: int = 512,
grapheme_field: str = "text_graphemes",
with_labels: bool = True,
):
"""
Creates a dataset for training and running inference with G2PClassificationModel.
Processes WikiHomograph raw data files:
https://github.com/google-research-datasets/WikipediaHomographData/tree/master/data
Args:
manifest: path to manifest with "heteronym_span", "start_end", "text_graphemes"
and (optional) "word_id" fields. "word_id" is required for model training.
tokenizer: pretrained tokenizer
heteronym_dict: a dictionary where each grapheme contains word_id to ipa_form mappings, e.g.,
{'use': {'use_nou': "'juːs", 'use_vrb': "'juːz"}}
wordid_to_idx: mapping from word id to index
max_seq_len: maximum input sequence length
grapheme_field: name of the field in the .json manifest with grapheme input
with_labels: indicates whether labels are provided in the manifest. False for inference, True for training
"""
super().__init__()
if not os.path.exists(manifest):
raise ValueError(f"{manifest} not found")
self.tokenizer = tokenizer
self.max_seq_len = max_seq_len
self.data = []
self.pad_token = 0
self.with_labels = with_labels
self.heteronym_dict = heteronym_dict
self.wordid_to_idx = wordid_to_idx
self.LOSS_PAD_TOKEN = -100
self.PAD_TOKEN = 0
num_skipped = 0
with open(manifest, "r") as f:
for line in f:
line = json.loads(line)
cur_start_end, cur_heteronyms = (line["start_end"], line["heteronym_span"])
# during inference word_id is not present in the manifest
if "word_id" in line:
cur_word_ids = line["word_id"]
else:
if isinstance(cur_heteronyms, str):
cur_word_ids = None
else:
cur_word_ids = [None] * len(cur_heteronyms)
if isinstance(cur_heteronyms, str):
cur_start_end, cur_heteronyms, cur_word_ids = [cur_start_end], [cur_heteronyms], [cur_word_ids]
example = self._prepare_sample(line[grapheme_field], cur_start_end, cur_heteronyms, cur_word_ids)
if example is None:
num_skipped += 1
else:
example_dict = {
"input_ids": example[0],
"subtokens_mask": example[1],
"target": example[2], # None if self.with_labels is False
}
self.data.append(example_dict)
logging.info(f"Number of samples in {manifest}: {len(self.data)}, remove {num_skipped} lines")
def _prepare_sample(
self,
sentence: str,
start_end: List[Tuple[int, int]],
heteronyms: List[str],
word_ids: Optional[List[str]] = None,
):
"""
Prepares a single training sample
Args:
sentence: input sentence in grapheme form
start_end: start and end indices of the heteronym spans, start_end indices should be in increasing order
heteronyms: heteronyms present in the sentence
word_ids: [Optional] target word_ids, use None for inference, e.g. ['diffuse_adj']
"""
# drop example where sequence length exceeds max sequence length, +2 for special tokens
length = len(self.tokenizer.text_to_tokens(sentence)) + 2
if length > self.max_seq_len:
logging.debug(f"Sequence length exceeds max sequence length ({self.max_seq_len}): {sentence}.")
return None
# check the correctness on start-end indices
for heteronym_, start_end_ in zip(heteronyms, start_end):
if heteronym_.lower() != sentence[start_end_[0] : start_end_[1]].lower():
logging.debug(f"Span for {heteronym_} is incorrect. Skipping example.")
return None
input_ids, subtokens_mask, target_word_ids = [], [], []
# add bos token
if hasattr(self.tokenizer, "bos_id"):
input_ids.append(self.tokenizer.bos_id)
subtokens_mask.append(
self.PAD_TOKEN
) # the first tokens of heteronym spans are 1s, the rest of the tokens are 0s
if self.with_labels:
target_word_ids.append(self.LOSS_PAD_TOKEN) # -100 to pad plain tokens
else:
target_word_ids = None # for inference when labels are not available
heteronym_span_idx = 0
# split sentence by space and keep track of word boundaries
# we assume heteronym is a standalone word
matches = [(m.group(0), (m.start(), m.end() - 1)) for m in re.finditer(r'\S+', sentence)]
for match in matches:
word, word_start_end = match
# check if the start of the next heteronym span is within the word indices
if (
heteronym_span_idx < len(start_end)
and word_start_end[0] <= start_end[heteronym_span_idx][0] < word_start_end[1]
):
heteronym_start_end = start_end[heteronym_span_idx]
prefix = ""
prefix_ids = []
# for cases when word also includes punctuation marks at the beginning or a prefix,
# e.g. "diffuse" vs. diffuse vs. pre-diffuse for heteronym {diffuse}
if word_start_end[0] < heteronym_start_end[0]:
prefix = sentence[word_start_end[0] : heteronym_start_end[0]]
prefix_ids = self.tokenizer.text_to_ids(prefix)
subtokens_mask.extend([self.PAD_TOKEN] * len(prefix_ids))
word = word[word.index(prefix) + len(prefix) :]
word_input_ids = self.tokenizer.text_to_ids(word)
input_ids.extend(prefix_ids + word_input_ids)
subtokens_mask.extend([1] + [self.PAD_TOKEN] * (len(word_input_ids) - 1))
if self.with_labels:
cur_target_word_id = self.wordid_to_idx[word_ids[heteronym_span_idx]]
target_word_ids.extend(
[self.LOSS_PAD_TOKEN] * len(prefix_ids)
+ [cur_target_word_id]
+ [self.LOSS_PAD_TOKEN] * (len(word_input_ids) - 1)
)
heteronym = sentence.lower()[heteronym_start_end[0] : heteronym_start_end[1]]
if heteronym not in self.heteronym_dict:
logging.debug(f"{heteronym} is not supported. Skipping example.")
return None
heteronym_span_idx += 1
else:
ids = self.tokenizer.text_to_ids(word)
input_ids.extend(ids)
subtokens_mask.extend([self.PAD_TOKEN] * len(ids))
if self.with_labels:
target_word_ids.extend([self.LOSS_PAD_TOKEN] * len(ids))
if heteronym_span_idx < len(start_end):
logging.info("Not all heteronym spans were processed. Skipping example.")
return None
# add eos token
if hasattr(self.tokenizer, "eos_id"):
input_ids.append(self.tokenizer.eos_id)
subtokens_mask.append(self.PAD_TOKEN)
if self.with_labels:
target_word_ids.append(self.LOSS_PAD_TOKEN)
# target_word_ids are None for inference when labels are not available
return input_ids, subtokens_mask, target_word_ids
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def _collate_fn(self, batch):
"""
Args:
batch: A list of tuples of (input_ids, subtokens_mask, [Optional] target_word_ids).
"""
max_length = max([len(entry["input_ids"]) for entry in batch])
padded_input_ids = []
padded_subtokens_mask = []
padded_attention_mask = []
if self.with_labels:
padded_targets = []
for item in batch:
input_ids = item["input_ids"]
if len(input_ids) < max_length:
pad_width = max_length - len(input_ids)
padded_attention_mask.append([1] * len(input_ids) + [0] * pad_width)
padded_input_ids.append(np.pad(input_ids, pad_width=[0, pad_width], constant_values=self.PAD_TOKEN))
padded_subtokens_mask.append(
np.pad(item["subtokens_mask"], pad_width=[0, pad_width], constant_values=self.PAD_TOKEN)
)
if self.with_labels:
padded_targets.append(
np.pad(item["target"], pad_width=[0, pad_width], constant_values=self.LOSS_PAD_TOKEN)
)
else:
padded_attention_mask.append([1] * len(input_ids))
padded_input_ids.append(input_ids)
padded_subtokens_mask.append(item["subtokens_mask"])
if self.with_labels:
padded_targets.append(item["target"])
output = {
"input_ids": torch.LongTensor(np.array(padded_input_ids)),
"attention_mask": torch.LongTensor(np.array(padded_attention_mask)),
"subtokens_mask": torch.LongTensor(np.array(padded_subtokens_mask)),
}
if self.with_labels:
output["targets"] = torch.LongTensor(padded_targets)
return output
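# Usage sketch (illustrative): a manifest line and the two lookup tables this dataset expects could
# look as follows. The word ids, IPA forms, tokenizer variable, and file path are assumptions based
# on the docstring above, not values shipped with NeMo.
#
#     # manifest line: {"text_graphemes": "I read the book.", "start_end": [2, 6],
#     #                 "heteronym_span": "read", "word_id": "read_vrb"}
#     heteronym_dict = {"read": {"read_vrb": "ˈɹɛd", "read_prs": "ˈɹiːd"}}
#     wordid_to_idx = {"read_vrb": 0, "read_prs": 1}
#     ds = HeteronymClassificationDataset(
#         manifest="train_manifest.json",
#         tokenizer=tokenizer,  # any NeMo TokenizerSpec, e.g. a wrapped BERT tokenizer
#         heteronym_dict=heteronym_dict,
#         wordid_to_idx=wordid_to_idx,
#         with_labels=True,
#     )
#     batch = ds._collate_fn([ds[0]])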
| NeMo-main | nemo/collections/tts/g2p/data/heteronym_classification.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO @xueyang: This file is kept for backward-compatibility purposes, since all older NGC models trained on
# NeMo 1.16.0 and earlier used this import path. We will remove this file soon; `IPAG2P` will also be renamed
# to `IpaG2p`. Please switch to the new import path and the new `IpaG2p` name starting from NeMo 1.16.0.
from nemo.collections.tts.g2p.models.en_us_arpabet import EnglishG2p
from nemo.collections.tts.g2p.models.i18n_ipa import IpaG2p as IPAG2P
from nemo.collections.tts.g2p.models.zh_cn_pinyin import ChineseG2p
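# Until this shim is removed, both import paths resolve to the same class, e.g.:
#     from nemo.collections.tts.torch.g2ps import IPAG2P                # legacy path and legacy name
#     from nemo.collections.tts.g2p.models.i18n_ipa import IpaG2p       # current path and current name
#     assert IPAG2P is IpaG2p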
| NeMo-main | nemo/collections/tts/torch/g2ps.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/tts/torch/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO @xueyang: deprecate this file, since no other module imports from here anymore. However, all checkpoints
# uploaded to NGC used this path, so all NGC checkpoint paths need to be updated as well before removal.
from nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers import (
BaseCharsTokenizer,
BaseTokenizer,
EnglishCharsTokenizer,
EnglishPhonemesTokenizer,
GermanCharsTokenizer,
GermanPhonemesTokenizer,
IPATokenizer,
)
| NeMo-main | nemo/collections/tts/torch/tts_tokenizers.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TTSDataType:
"""Represent TTSDataType."""
name = None
class WithLens:
"""Represent that this data type also returns lengths for data."""
class Audio(TTSDataType, WithLens):
name = "audio"
class Text(TTSDataType, WithLens):
name = "text"
class LogMel(TTSDataType, WithLens):
name = "log_mel"
class Durations(TTSDataType):
name = "durations"
class AlignPriorMatrix(TTSDataType):
name = "align_prior_matrix"
class Pitch(TTSDataType, WithLens):
name = "pitch"
class Energy(TTSDataType, WithLens):
name = "energy"
class SpeakerID(TTSDataType):
name = "speaker_id"
class Voiced_mask(TTSDataType):
name = "voiced_mask"
class P_voiced(TTSDataType):
name = "p_voiced"
class LMTokens(TTSDataType):
name = "lm_tokens"
class ReferenceAudio(TTSDataType, WithLens):
name = "reference_audio"
MAIN_DATA_TYPES = [Audio, Text]
VALID_SUPPLEMENTARY_DATA_TYPES = [
LogMel,
Durations,
AlignPriorMatrix,
Pitch,
Energy,
SpeakerID,
LMTokens,
Voiced_mask,
P_voiced,
ReferenceAudio,
]
DATA_STR2DATA_CLASS = {d.name: d for d in MAIN_DATA_TYPES + VALID_SUPPLEMENTARY_DATA_TYPES}
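# Usage sketch (illustrative): DATA_STR2DATA_CLASS resolves config strings to these marker classes,
# and subclassing WithLens signals that the dataset also returns a length tensor for that field.
if __name__ == "__main__":
    data_cls = DATA_STR2DATA_CLASS["pitch"]
    print(data_cls.name)                   # -> "pitch"
    print(issubclass(data_cls, WithLens))  # -> True: pitch is returned together with its lengths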
| NeMo-main | nemo/collections/tts/torch/tts_data_types.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from nemo.collections.common.parts.preprocessing import parsers
from nemo.collections.tts.losses.aligner_loss import BinLoss, ForwardSumLoss
from nemo.collections.tts.losses.fastpitchloss import DurationLoss, EnergyLoss, MelLoss, PitchLoss
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.modules.fastpitch import FastPitchModule
from nemo.collections.tts.parts.mixins import FastPitchAdapterModelMixin
from nemo.collections.tts.parts.utils.callbacks import LoggingCallback
from nemo.collections.tts.parts.utils.helpers import (
batch_from_ragged,
g2p_backward_compatible_support,
plot_alignment_to_numpy,
plot_spectrogram_to_numpy,
process_batch,
sample_tts_input,
)
from nemo.core.classes import Exportable
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import (
Index,
LengthsType,
MelSpectrogramType,
ProbsType,
RegressionValuesType,
TokenDurationType,
TokenIndex,
TokenLogDurationType,
)
from nemo.core.neural_types.neural_type import NeuralType
from nemo.utils import logging, model_utils
@dataclass
class G2PConfig:
_target_: str = "nemo.collections.tts.g2p.models.en_us_arpabet.EnglishG2p"
phoneme_dict: str = "scripts/tts_dataset_files/cmudict-0.7b_nv22.10"
heteronyms: str = "scripts/tts_dataset_files/heteronyms-052722"
phoneme_probability: float = 0.5
@dataclass
class TextTokenizer:
_target_: str = "nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers.EnglishPhonemesTokenizer"
punct: bool = True
stresses: bool = True
chars: bool = True
apostrophe: bool = True
pad_with_space: bool = True
add_blank_at: bool = True
g2p: G2PConfig = G2PConfig()
@dataclass
class TextTokenizerConfig:
text_tokenizer: TextTokenizer = TextTokenizer()
class FastPitchModel(SpectrogramGenerator, Exportable, FastPitchAdapterModelMixin):
"""FastPitch model (https://arxiv.org/abs/2006.06873) that is used to generate mel spectrogram from text."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
# Setup normalizer
self.normalizer = None
self.text_normalizer_call = None
self.text_normalizer_call_kwargs = {}
self._setup_normalizer(cfg)
self.learn_alignment = cfg.get("learn_alignment", False)
# Setup vocabulary (=tokenizer) and input_fft_kwargs (supported only with self.learn_alignment=True)
input_fft_kwargs = {}
if self.learn_alignment:
self.vocab = None
self.ds_class = cfg.train_ds.dataset._target_
self.ds_class_name = self.ds_class.split(".")[-1]
            if self.ds_class not in [
"nemo.collections.tts.data.dataset.TTSDataset",
"nemo.collections.tts.data.text_to_speech_dataset.TextToSpeechDataset",
"nemo.collections.tts.torch.data.TTSDataset",
]:
raise ValueError(f"Unknown dataset class: {self.ds_class}.")
self._setup_tokenizer(cfg)
assert self.vocab is not None
input_fft_kwargs["n_embed"] = len(self.vocab.tokens)
input_fft_kwargs["padding_idx"] = self.vocab.pad
self._parser = None
self._tb_logger = None
super().__init__(cfg=cfg, trainer=trainer)
self.bin_loss_warmup_epochs = cfg.get("bin_loss_warmup_epochs", 100)
self.log_images = cfg.get("log_images", False)
self.log_train_images = False
default_prosody_loss_scale = 0.1 if self.learn_alignment else 1.0
dur_loss_scale = cfg.get("dur_loss_scale", default_prosody_loss_scale)
pitch_loss_scale = cfg.get("pitch_loss_scale", default_prosody_loss_scale)
energy_loss_scale = cfg.get("energy_loss_scale", default_prosody_loss_scale)
self.mel_loss_fn = MelLoss()
self.pitch_loss_fn = PitchLoss(loss_scale=pitch_loss_scale)
self.duration_loss_fn = DurationLoss(loss_scale=dur_loss_scale)
self.energy_loss_fn = EnergyLoss(loss_scale=energy_loss_scale)
self.aligner = None
if self.learn_alignment:
aligner_loss_scale = cfg.get("aligner_loss_scale", 1.0)
self.aligner = instantiate(self._cfg.alignment_module)
self.forward_sum_loss_fn = ForwardSumLoss(loss_scale=aligner_loss_scale)
self.bin_loss_fn = BinLoss(loss_scale=aligner_loss_scale)
self.preprocessor = instantiate(self._cfg.preprocessor)
input_fft = instantiate(self._cfg.input_fft, **input_fft_kwargs)
output_fft = instantiate(self._cfg.output_fft)
duration_predictor = instantiate(self._cfg.duration_predictor)
pitch_predictor = instantiate(self._cfg.pitch_predictor)
speaker_encoder = instantiate(self._cfg.get("speaker_encoder", None))
energy_embedding_kernel_size = cfg.get("energy_embedding_kernel_size", 0)
energy_predictor = instantiate(self._cfg.get("energy_predictor", None))
# [TODO] may remove if we change the pre-trained config
# cfg: condition_types = [ "add" ]
n_speakers = cfg.get("n_speakers", 0)
speaker_emb_condition_prosody = cfg.get("speaker_emb_condition_prosody", False)
speaker_emb_condition_decoder = cfg.get("speaker_emb_condition_decoder", False)
speaker_emb_condition_aligner = cfg.get("speaker_emb_condition_aligner", False)
min_token_duration = cfg.get("min_token_duration", 0)
use_log_energy = cfg.get("use_log_energy", True)
if n_speakers > 1 and "add" not in input_fft.cond_input.condition_types:
input_fft.cond_input.condition_types.append("add")
if speaker_emb_condition_prosody:
duration_predictor.cond_input.condition_types.append("add")
pitch_predictor.cond_input.condition_types.append("add")
if speaker_emb_condition_decoder:
output_fft.cond_input.condition_types.append("add")
if speaker_emb_condition_aligner and self.aligner is not None:
self.aligner.cond_input.condition_types.append("add")
self.fastpitch = FastPitchModule(
input_fft,
output_fft,
duration_predictor,
pitch_predictor,
energy_predictor,
self.aligner,
speaker_encoder,
n_speakers,
cfg.symbols_embedding_dim,
cfg.pitch_embedding_kernel_size,
energy_embedding_kernel_size,
cfg.n_mel_channels,
min_token_duration,
cfg.max_token_duration,
use_log_energy,
)
self._input_types = self._output_types = None
self.export_config = {
"emb_range": (0, self.fastpitch.encoder.word_emb.num_embeddings),
"enable_volume": False,
"enable_ragged_batches": False,
}
if self.fastpitch.speaker_emb is not None:
self.export_config["num_speakers"] = cfg.n_speakers
self.log_config = cfg.get("log_config", None)
# Adapter modules setup (from FastPitchAdapterModelMixin)
self.setup_adapters()
def _get_default_text_tokenizer_conf(self):
text_tokenizer: TextTokenizerConfig = TextTokenizerConfig()
return OmegaConf.create(OmegaConf.to_yaml(text_tokenizer))
def _setup_normalizer(self, cfg):
if "text_normalizer" in cfg:
normalizer_kwargs = {}
if "whitelist" in cfg.text_normalizer:
normalizer_kwargs["whitelist"] = self.register_artifact(
'text_normalizer.whitelist', cfg.text_normalizer.whitelist
)
try:
import nemo_text_processing
self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs)
except Exception as e:
logging.error(e)
raise ImportError(
"`nemo_text_processing` not installed, see https://github.com/NVIDIA/NeMo-text-processing for more details"
)
self.text_normalizer_call = self.normalizer.normalize
if "text_normalizer_call_kwargs" in cfg:
self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs
def _setup_tokenizer(self, cfg):
text_tokenizer_kwargs = {}
if "g2p" in cfg.text_tokenizer:
# for backward compatibility
if (
self._is_model_being_restored()
and (cfg.text_tokenizer.g2p.get('_target_', None) is not None)
and cfg.text_tokenizer.g2p["_target_"].startswith("nemo_text_processing.g2p")
):
cfg.text_tokenizer.g2p["_target_"] = g2p_backward_compatible_support(
cfg.text_tokenizer.g2p["_target_"]
)
g2p_kwargs = {}
if "phoneme_dict" in cfg.text_tokenizer.g2p:
g2p_kwargs["phoneme_dict"] = self.register_artifact(
'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict,
)
if "heteronyms" in cfg.text_tokenizer.g2p:
g2p_kwargs["heteronyms"] = self.register_artifact(
'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms,
)
            # for backward compatibility
text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs)
# TODO @xueyang: rename the instance of tokenizer because vocab is misleading.
self.vocab = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs)
@property
def tb_logger(self):
if self._tb_logger is None:
            if self.logger is None or self.logger.experiment is None:
return None
tb_logger = self.logger.experiment
for logger in self.trainer.loggers:
if isinstance(logger, TensorBoardLogger):
tb_logger = logger.experiment
break
self._tb_logger = tb_logger
return self._tb_logger
@property
def parser(self):
if self._parser is not None:
return self._parser
if self.learn_alignment:
self._parser = self.vocab.encode
else:
self._parser = parsers.make_parser(
labels=self._cfg.labels,
name='en',
unk_id=-1,
blank_id=-1,
do_normalize=True,
abbreviation_version="fastpitch",
make_table=False,
)
return self._parser
def parse(self, str_input: str, normalize=True) -> torch.tensor:
if self.training:
logging.warning("parse() is meant to be called in eval mode.")
if normalize and self.text_normalizer_call is not None:
str_input = self.text_normalizer_call(str_input, **self.text_normalizer_call_kwargs)
if self.learn_alignment:
eval_phon_mode = contextlib.nullcontext()
if hasattr(self.vocab, "set_phone_prob"):
eval_phon_mode = self.vocab.set_phone_prob(prob=1.0)
# Disable mixed g2p representation if necessary
with eval_phon_mode:
tokens = self.parser(str_input)
else:
tokens = self.parser(str_input)
x = torch.tensor(tokens).unsqueeze_(0).long().to(self.device)
return x
@typecheck(
input_types={
"text": NeuralType(('B', 'T_text'), TokenIndex()),
"durs": NeuralType(('B', 'T_text'), TokenDurationType()),
"pitch": NeuralType(('B', 'T_audio'), RegressionValuesType()),
"energy": NeuralType(('B', 'T_audio'), RegressionValuesType(), optional=True),
"speaker": NeuralType(('B'), Index(), optional=True),
"pace": NeuralType(optional=True),
"spec": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType(), optional=True),
"attn_prior": NeuralType(('B', 'T_spec', 'T_text'), ProbsType(), optional=True),
"mel_lens": NeuralType(('B'), LengthsType(), optional=True),
"input_lens": NeuralType(('B'), LengthsType(), optional=True),
# reference_* data is used for multi-speaker FastPitch training
"reference_spec": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType(), optional=True),
"reference_spec_lens": NeuralType(('B'), LengthsType(), optional=True),
}
)
def forward(
self,
*,
text,
durs=None,
pitch=None,
energy=None,
speaker=None,
pace=1.0,
spec=None,
attn_prior=None,
mel_lens=None,
input_lens=None,
reference_spec=None,
reference_spec_lens=None,
):
return self.fastpitch(
text=text,
durs=durs,
pitch=pitch,
energy=energy,
speaker=speaker,
pace=pace,
spec=spec,
attn_prior=attn_prior,
mel_lens=mel_lens,
input_lens=input_lens,
reference_spec=reference_spec,
reference_spec_lens=reference_spec_lens,
)
@typecheck(output_types={"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType())})
def generate_spectrogram(
self,
tokens: 'torch.tensor',
speaker: Optional[int] = None,
pace: float = 1.0,
reference_spec: Optional['torch.tensor'] = None,
reference_spec_lens: Optional['torch.tensor'] = None,
) -> torch.tensor:
if self.training:
logging.warning("generate_spectrogram() is meant to be called in eval mode.")
if isinstance(speaker, int):
speaker = torch.tensor([speaker]).to(self.device)
spect, *_ = self(
text=tokens,
durs=None,
pitch=None,
speaker=speaker,
pace=pace,
reference_spec=reference_spec,
reference_spec_lens=reference_spec_lens,
)
return spect
def training_step(self, batch, batch_idx):
attn_prior, durs, speaker, energy, reference_audio, reference_audio_len = (
None,
None,
None,
None,
None,
None,
)
if self.learn_alignment:
if self.ds_class == "nemo.collections.tts.data.text_to_speech_dataset.TextToSpeechDataset":
batch_dict = batch
else:
batch_dict = process_batch(batch, self._train_dl.dataset.sup_data_types_set)
audio = batch_dict.get("audio")
audio_lens = batch_dict.get("audio_lens")
text = batch_dict.get("text")
text_lens = batch_dict.get("text_lens")
attn_prior = batch_dict.get("align_prior_matrix", None)
pitch = batch_dict.get("pitch", None)
energy = batch_dict.get("energy", None)
speaker = batch_dict.get("speaker_id", None)
reference_audio = batch_dict.get("reference_audio", None)
reference_audio_len = batch_dict.get("reference_audio_lens", None)
else:
audio, audio_lens, text, text_lens, durs, pitch, speaker = batch
mels, spec_len = self.preprocessor(input_signal=audio, length=audio_lens)
reference_spec, reference_spec_len = None, None
if reference_audio is not None:
reference_spec, reference_spec_len = self.preprocessor(
input_signal=reference_audio, length=reference_audio_len
)
(
mels_pred,
_,
_,
log_durs_pred,
pitch_pred,
attn_soft,
attn_logprob,
attn_hard,
attn_hard_dur,
pitch,
energy_pred,
energy_tgt,
) = self(
text=text,
durs=durs,
pitch=pitch,
energy=energy,
speaker=speaker,
pace=1.0,
spec=mels if self.learn_alignment else None,
reference_spec=reference_spec,
reference_spec_lens=reference_spec_len,
attn_prior=attn_prior,
mel_lens=spec_len,
input_lens=text_lens,
)
if durs is None:
durs = attn_hard_dur
mel_loss = self.mel_loss_fn(spect_predicted=mels_pred, spect_tgt=mels)
dur_loss = self.duration_loss_fn(log_durs_predicted=log_durs_pred, durs_tgt=durs, len=text_lens)
loss = mel_loss + dur_loss
if self.learn_alignment:
ctc_loss = self.forward_sum_loss_fn(attn_logprob=attn_logprob, in_lens=text_lens, out_lens=spec_len)
bin_loss_weight = min(self.current_epoch / self.bin_loss_warmup_epochs, 1.0) * 1.0
bin_loss = self.bin_loss_fn(hard_attention=attn_hard, soft_attention=attn_soft) * bin_loss_weight
loss += ctc_loss + bin_loss
pitch_loss = self.pitch_loss_fn(pitch_predicted=pitch_pred, pitch_tgt=pitch, len=text_lens)
energy_loss = self.energy_loss_fn(energy_predicted=energy_pred, energy_tgt=energy_tgt, length=text_lens)
loss += pitch_loss + energy_loss
self.log("t_loss", loss)
self.log("t_mel_loss", mel_loss)
self.log("t_dur_loss", dur_loss)
self.log("t_pitch_loss", pitch_loss)
if energy_tgt is not None:
self.log("t_energy_loss", energy_loss)
if self.learn_alignment:
self.log("t_ctc_loss", ctc_loss)
self.log("t_bin_loss", bin_loss)
# Log images to tensorboard
if self.log_images and self.log_train_images and isinstance(self.logger, TensorBoardLogger):
self.log_train_images = False
self.tb_logger.add_image(
"train_mel_target",
plot_spectrogram_to_numpy(mels[0].data.cpu().float().numpy()),
self.global_step,
dataformats="HWC",
)
spec_predict = mels_pred[0].data.cpu().float().numpy()
self.tb_logger.add_image(
"train_mel_predicted", plot_spectrogram_to_numpy(spec_predict), self.global_step, dataformats="HWC",
)
if self.learn_alignment:
attn = attn_hard[0].data.cpu().float().numpy().squeeze()
self.tb_logger.add_image(
"train_attn", plot_alignment_to_numpy(attn.T), self.global_step, dataformats="HWC",
)
soft_attn = attn_soft[0].data.cpu().float().numpy().squeeze()
self.tb_logger.add_image(
"train_soft_attn", plot_alignment_to_numpy(soft_attn.T), self.global_step, dataformats="HWC",
)
return loss
def validation_step(self, batch, batch_idx):
attn_prior, durs, speaker, energy, reference_audio, reference_audio_len = (
None,
None,
None,
None,
None,
None,
)
if self.learn_alignment:
if self.ds_class == "nemo.collections.tts.data.text_to_speech_dataset.TextToSpeechDataset":
batch_dict = batch
else:
batch_dict = process_batch(batch, self._train_dl.dataset.sup_data_types_set)
audio = batch_dict.get("audio")
audio_lens = batch_dict.get("audio_lens")
text = batch_dict.get("text")
text_lens = batch_dict.get("text_lens")
attn_prior = batch_dict.get("align_prior_matrix", None)
pitch = batch_dict.get("pitch", None)
energy = batch_dict.get("energy", None)
speaker = batch_dict.get("speaker_id", None)
reference_audio = batch_dict.get("reference_audio", None)
reference_audio_len = batch_dict.get("reference_audio_lens", None)
else:
audio, audio_lens, text, text_lens, durs, pitch, speaker = batch
mels, mel_lens = self.preprocessor(input_signal=audio, length=audio_lens)
reference_spec, reference_spec_len = None, None
if reference_audio is not None:
reference_spec, reference_spec_len = self.preprocessor(
input_signal=reference_audio, length=reference_audio_len
)
# Calculate val loss on ground truth durations to better align L2 loss in time
(mels_pred, _, _, log_durs_pred, pitch_pred, _, _, _, attn_hard_dur, pitch, energy_pred, energy_tgt,) = self(
text=text,
durs=durs,
pitch=pitch,
energy=energy,
speaker=speaker,
pace=1.0,
spec=mels if self.learn_alignment else None,
reference_spec=reference_spec,
reference_spec_lens=reference_spec_len,
attn_prior=attn_prior,
mel_lens=mel_lens,
input_lens=text_lens,
)
if durs is None:
durs = attn_hard_dur
mel_loss = self.mel_loss_fn(spect_predicted=mels_pred, spect_tgt=mels)
dur_loss = self.duration_loss_fn(log_durs_predicted=log_durs_pred, durs_tgt=durs, len=text_lens)
pitch_loss = self.pitch_loss_fn(pitch_predicted=pitch_pred, pitch_tgt=pitch, len=text_lens)
energy_loss = self.energy_loss_fn(energy_predicted=energy_pred, energy_tgt=energy_tgt, length=text_lens)
loss = mel_loss + dur_loss + pitch_loss + energy_loss
val_outputs = {
"val_loss": loss,
"mel_loss": mel_loss,
"dur_loss": dur_loss,
"pitch_loss": pitch_loss,
"energy_loss": energy_loss if energy_tgt is not None else None,
"mel_target": mels if batch_idx == 0 else None,
"mel_pred": mels_pred if batch_idx == 0 else None,
}
self.validation_step_outputs.append(val_outputs)
return val_outputs
def on_validation_epoch_end(self):
collect = lambda key: torch.stack([x[key] for x in self.validation_step_outputs]).mean()
val_loss = collect("val_loss")
mel_loss = collect("mel_loss")
dur_loss = collect("dur_loss")
pitch_loss = collect("pitch_loss")
self.log("val_loss", val_loss, sync_dist=True)
self.log("val_mel_loss", mel_loss, sync_dist=True)
self.log("val_dur_loss", dur_loss, sync_dist=True)
self.log("val_pitch_loss", pitch_loss, sync_dist=True)
if self.validation_step_outputs[0]["energy_loss"] is not None:
energy_loss = collect("energy_loss")
self.log("val_energy_loss", energy_loss, sync_dist=True)
_, _, _, _, _, spec_target, spec_predict = self.validation_step_outputs[0].values()
if self.log_images and isinstance(self.logger, TensorBoardLogger):
self.tb_logger.add_image(
"val_mel_target",
plot_spectrogram_to_numpy(spec_target[0].data.cpu().float().numpy()),
self.global_step,
dataformats="HWC",
)
spec_predict = spec_predict[0].data.cpu().float().numpy()
self.tb_logger.add_image(
"val_mel_predicted", plot_spectrogram_to_numpy(spec_predict), self.global_step, dataformats="HWC",
)
self.log_train_images = True
        self.validation_step_outputs.clear()  # free memory
def _setup_train_dataloader(self, cfg):
phon_mode = contextlib.nullcontext()
if hasattr(self.vocab, "set_phone_prob"):
phon_mode = self.vocab.set_phone_prob(self.vocab.phoneme_probability)
with phon_mode:
dataset = instantiate(cfg.dataset, text_tokenizer=self.vocab,)
sampler = dataset.get_sampler(cfg.dataloader_params.batch_size)
return torch.utils.data.DataLoader(
dataset, collate_fn=dataset.collate_fn, sampler=sampler, **cfg.dataloader_params
)
def _setup_test_dataloader(self, cfg):
phon_mode = contextlib.nullcontext()
if hasattr(self.vocab, "set_phone_prob"):
phon_mode = self.vocab.set_phone_prob(0.0)
with phon_mode:
dataset = instantiate(cfg.dataset, text_tokenizer=self.vocab,)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = "train"):
if "dataset" not in cfg or not isinstance(cfg.dataset, DictConfig):
raise ValueError(f"No dataset for {name}")
if "dataloader_params" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):
raise ValueError(f"No dataloader_params for {name}")
if shuffle_should_be:
if 'shuffle' not in cfg.dataloader_params:
logging.warning(
f"Shuffle should be set to True for {self}'s {name} dataloader but was not found in its "
"config. Manually setting to True"
)
with open_dict(cfg.dataloader_params):
cfg.dataloader_params.shuffle = True
elif not cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to False!!!")
elif cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to True!!!")
if self.ds_class == "nemo.collections.tts.data.dataset.TTSDataset":
phon_mode = contextlib.nullcontext()
if hasattr(self.vocab, "set_phone_prob"):
phon_mode = self.vocab.set_phone_prob(prob=None if name == "val" else self.vocab.phoneme_probability)
with phon_mode:
dataset = instantiate(
cfg.dataset,
text_normalizer=self.normalizer,
text_normalizer_call_kwargs=self.text_normalizer_call_kwargs,
text_tokenizer=self.vocab,
)
else:
dataset = instantiate(cfg.dataset)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
def setup_training_data(self, cfg):
if self.ds_class == "nemo.collections.tts.data.text_to_speech_dataset.TextToSpeechDataset":
self._train_dl = self._setup_train_dataloader(cfg)
else:
self._train_dl = self.__setup_dataloader_from_config(cfg)
def setup_validation_data(self, cfg):
if self.ds_class == "nemo.collections.tts.data.text_to_speech_dataset.TextToSpeechDataset":
self._validation_dl = self._setup_test_dataloader(cfg)
else:
self._validation_dl = self.__setup_dataloader_from_config(cfg, shuffle_should_be=False, name="val")
def setup_test_data(self, cfg):
"""Omitted."""
pass
def configure_callbacks(self):
if not self.log_config:
return []
sample_ds_class = self.log_config.dataset._target_
if sample_ds_class != "nemo.collections.tts.data.text_to_speech_dataset.TextToSpeechDataset":
raise ValueError(f"Logging callback only supported for TextToSpeechDataset, got {sample_ds_class}")
data_loader = self._setup_test_dataloader(self.log_config)
generators = instantiate(self.log_config.generators)
log_dir = Path(self.log_config.log_dir) if self.log_config.log_dir else None
log_callback = LoggingCallback(
generators=generators,
data_loader=data_loader,
log_epochs=self.log_config.log_epochs,
epoch_frequency=self.log_config.epoch_frequency,
output_dir=log_dir,
loggers=self.trainer.loggers,
log_tensorboard=self.log_config.log_tensorboard,
log_wandb=self.log_config.log_wandb,
)
return [log_callback]
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
# en-US, single speaker, 22050Hz, LJSpeech (ARPABET).
model = PretrainedModelInfo(
pretrained_model_name="tts_en_fastpitch",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_fastpitch/versions/1.8.1/files/tts_en_fastpitch_align.nemo",
description="This model is trained on LJSpeech sampled at 22050Hz with and can be used to generate female English voices with an American accent. It is ARPABET-based.",
class_=cls,
)
list_of_models.append(model)
# en-US, single speaker, 22050Hz, LJSpeech (IPA).
model = PretrainedModelInfo(
pretrained_model_name="tts_en_fastpitch_ipa",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_fastpitch/versions/IPA_1.13.0/files/tts_en_fastpitch_align_ipa.nemo",
description="This model is trained on LJSpeech sampled at 22050Hz with and can be used to generate female English voices with an American accent. It is IPA-based.",
class_=cls,
)
list_of_models.append(model)
# en-US, multi-speaker, 44100Hz, HiFiTTS.
model = PretrainedModelInfo(
pretrained_model_name="tts_en_fastpitch_multispeaker",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_multispeaker_fastpitchhifigan/versions/1.10.0/files/tts_en_fastpitch_multispeaker.nemo",
description="This model is trained on HiFITTS sampled at 44100Hz with and can be used to generate male and female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
# de-DE, single male speaker, grapheme-based tokenizer, 22050 Hz, Thorsten Müller’s German Neutral-TTS Dataset, 21.02
model = PretrainedModelInfo(
pretrained_model_name="tts_de_fastpitch_singleSpeaker_thorstenNeutral_2102",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_de_fastpitchhifigan/versions/1.15.0/files/tts_de_fastpitch_thorstens2102.nemo",
description="This model is trained on a single male speaker data in Thorsten Müller’s German Neutral 21.02 Dataset sampled at 22050Hz and can be used to generate male German voices.",
class_=cls,
)
list_of_models.append(model)
# de-DE, single male speaker, grapheme-based tokenizer, 22050 Hz, Thorsten Müller’s German Neutral-TTS Dataset, 22.10
model = PretrainedModelInfo(
pretrained_model_name="tts_de_fastpitch_singleSpeaker_thorstenNeutral_2210",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_de_fastpitchhifigan/versions/1.15.0/files/tts_de_fastpitch_thorstens2210.nemo",
description="This model is trained on a single male speaker data in Thorsten Müller’s German Neutral 22.10 Dataset sampled at 22050Hz and can be used to generate male German voices.",
class_=cls,
)
list_of_models.append(model)
# de-DE, multi-speaker, 5 speakers, 44100 Hz, HUI-Audio-Corpus-German Clean.
model = PretrainedModelInfo(
pretrained_model_name="tts_de_fastpitch_multispeaker_5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_de_fastpitch_multispeaker_5/versions/1.11.0/files/tts_de_fastpitch_multispeaker_5.nemo",
description="This model is trained on 5 speakers in HUI-Audio-Corpus-German clean subset sampled at 44100Hz with and can be used to generate male and female German voices.",
class_=cls,
)
list_of_models.append(model)
# es, 174 speakers, 44100Hz, OpenSLR (IPA)
model = PretrainedModelInfo(
pretrained_model_name="tts_es_fastpitch_multispeaker",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_es_multispeaker_fastpitchhifigan/versions/1.15.0/files/tts_es_fastpitch_multispeaker.nemo",
description="This model is trained on 174 speakers in 6 crowdsourced Latin American Spanish OpenSLR datasets sampled at 44100Hz and can be used to generate male and female Spanish voices with Latin American accents.",
class_=cls,
)
list_of_models.append(model)
# zh, single female speaker, 22050Hz, SFSpeech Bilingual Chinese/English dataset, improved model using richer
# dict and jieba word segmenter for polyphone disambiguation.
model = PretrainedModelInfo(
pretrained_model_name="tts_zh_fastpitch_sfspeech",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_zh_fastpitch_hifigan_sfspeech/versions/1.15.0/files/tts_zh_fastpitch_sfspeech.nemo",
description="This model is trained on a single female speaker in SFSpeech Bilingual Chinese/English dataset"
" sampled at 22050Hz and can be used to generate female Mandarin Chinese voices. It is improved"
" using richer dict and jieba word segmenter for polyphone disambiguation.",
class_=cls,
)
list_of_models.append(model)
# en, multi speaker, LibriTTS, 16000 Hz
# stft 25ms 10ms matching ASR params
        # for use during English ASR training/adaptation
model = PretrainedModelInfo(
pretrained_model_name="tts_en_fastpitch_for_asr_finetuning",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_fastpitch_spectrogram_enhancer_for_asr_finetuning/versions/1.20.0/files/tts_en_fastpitch_for_asr_finetuning.nemo",
description="This model is trained on LibriSpeech, train-960 subset."
" STFT parameters follow those commonly used in ASR: 25 ms window, 10 ms hop."
" This model is supposed to be used with its companion SpetrogramEnhancer for "
" ASR fine-tuning. Usage for regular TTS tasks is not advised.",
class_=cls,
)
list_of_models.append(model)
return list_of_models
# Methods for model exportability
def _prepare_for_export(self, **kwargs):
super()._prepare_for_export(**kwargs)
tensor_shape = ('T') if self.export_config["enable_ragged_batches"] else ('B', 'T')
# Define input_types and output_types as required by export()
self._input_types = {
"text": NeuralType(tensor_shape, TokenIndex()),
"pitch": NeuralType(tensor_shape, RegressionValuesType()),
"pace": NeuralType(tensor_shape),
"volume": NeuralType(tensor_shape, optional=True),
"batch_lengths": NeuralType(('B'), optional=True),
"speaker": NeuralType(('B'), Index(), optional=True),
}
self._output_types = {
"spect": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"num_frames": NeuralType(('B'), TokenDurationType()),
"durs_predicted": NeuralType(('B', 'T'), TokenDurationType()),
"log_durs_predicted": NeuralType(('B', 'T'), TokenLogDurationType()),
"pitch_predicted": NeuralType(('B', 'T'), RegressionValuesType()),
}
if self.export_config["enable_volume"]:
self._output_types["volume_aligned"] = NeuralType(('B', 'T'), RegressionValuesType())
def _export_teardown(self):
self._input_types = self._output_types = None
@property
def disabled_deployment_input_names(self):
"""Implement this method to return a set of input names disabled for export"""
disabled_inputs = set()
if self.fastpitch.speaker_emb is None:
disabled_inputs.add("speaker")
if not self.export_config["enable_ragged_batches"]:
disabled_inputs.add("batch_lengths")
if not self.export_config["enable_volume"]:
disabled_inputs.add("volume")
return disabled_inputs
@property
def input_types(self):
return self._input_types
@property
def output_types(self):
return self._output_types
def input_example(self, max_batch=1, max_dim=44):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
par = next(self.fastpitch.parameters())
inputs = sample_tts_input(self.export_config, par.device, max_batch=max_batch, max_dim=max_dim)
if 'enable_ragged_batches' not in self.export_config:
inputs.pop('batch_lengths', None)
return (inputs,)
def forward_for_export(self, text, pitch, pace, volume=None, batch_lengths=None, speaker=None):
if self.export_config["enable_ragged_batches"]:
text, pitch, pace, volume_tensor, lens = batch_from_ragged(
text, pitch, pace, batch_lengths, padding_idx=self.fastpitch.encoder.padding_idx, volume=volume
)
if volume is not None:
volume = volume_tensor
return self.fastpitch.infer(text=text, pitch=pitch, pace=pace, volume=volume, speaker=speaker)
def interpolate_speaker(
self, original_speaker_1, original_speaker_2, weight_speaker_1, weight_speaker_2, new_speaker_id
):
"""
This method performs speaker interpolation between two original speakers the model is trained on.
Inputs:
original_speaker_1: Integer speaker ID of first existing speaker in the model
original_speaker_2: Integer speaker ID of second existing speaker in the model
            weight_speaker_1: Floating point weight applied to the first speaker during weight combination
            weight_speaker_2: Floating point weight applied to the second speaker during weight combination
new_speaker_id: Integer speaker ID of new interpolated speaker in the model
"""
if self.fastpitch.speaker_emb is None:
raise Exception(
"Current FastPitch model is not a multi-speaker FastPitch model. Speaker interpolation can only \
be performed with a multi-speaker model"
)
n_speakers = self.fastpitch.speaker_emb.weight.data.size()[0]
if original_speaker_1 >= n_speakers or original_speaker_2 >= n_speakers or new_speaker_id >= n_speakers:
raise Exception(
f"Parameters original_speaker_1, original_speaker_2, new_speaker_id should be less than the total \
total number of speakers FastPitch was trained on (n_speakers = {n_speakers})."
)
speaker_emb_1 = (
self.fastpitch.speaker_emb(torch.tensor(original_speaker_1, dtype=torch.int32).cuda()).clone().detach()
)
speaker_emb_2 = (
self.fastpitch.speaker_emb(torch.tensor(original_speaker_2, dtype=torch.int32).cuda()).clone().detach()
)
new_speaker_emb = weight_speaker_1 * speaker_emb_1 + weight_speaker_2 * speaker_emb_2
self.fastpitch.speaker_emb.weight.data[new_speaker_id] = new_speaker_emb
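# Illustrative usage sketch (not part of the original module). It assumes a working
# NeMo installation with access to NGC so that one of the checkpoints listed in
# list_available_models() can be downloaded; the model name below is taken from that
# list. Speaker interpolation is only mentioned in the comments because it additionally
# requires a multi-speaker checkpoint and a CUDA device.
if __name__ == "__main__":
    model = FastPitchModel.from_pretrained("tts_en_fastpitch")
    model.eval()
    # parse() normalizes the text (if a normalizer is configured) and tokenizes it.
    tokens = model.parse("Hello world, this is a FastPitch test.")
    # generate_spectrogram() returns a mel spectrogram of shape [B, n_mel_channels, T_spec].
    spectrogram = model.generate_spectrogram(tokens=tokens)
    print(spectrogram.shape)
    # With a multi-speaker checkpoint, interpolate_speaker(spk_a, spk_b, w_a, w_b, new_id)
    # overwrites the embedding of new_id with the weighted combination of spk_a and spk_b.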
| NeMo-main | nemo/collections/tts/models/fastpitch.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
import omegaconf
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger
from torch import nn
from nemo.collections.tts.losses.aligner_loss import BinLoss, ForwardSumLoss
from nemo.collections.tts.parts.utils.helpers import (
binarize_attention,
g2p_backward_compatible_support,
get_mask_from_lengths,
plot_alignment_to_numpy,
)
from nemo.core.classes import ModelPT
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging, model_utils
HAVE_WANDB = True
try:
import wandb
except ModuleNotFoundError:
HAVE_WANDB = False
class AlignerModel(ModelPT):
"""Speech-to-text alignment model (https://arxiv.org/pdf/2108.10447.pdf) that is used to learn alignments between mel spectrogram and text."""
def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
# Setup normalizer
self.normalizer = None
self.text_normalizer_call = None
self.text_normalizer_call_kwargs = {}
self._setup_normalizer(cfg)
# Setup tokenizer
self.tokenizer = None
self._setup_tokenizer(cfg)
assert self.tokenizer is not None
num_tokens = len(self.tokenizer.tokens)
self.tokenizer_pad = self.tokenizer.pad
self.tokenizer_unk = self.tokenizer.oov
super().__init__(cfg=cfg, trainer=trainer)
self.embed = nn.Embedding(num_tokens, cfg.symbols_embedding_dim)
self.preprocessor = instantiate(cfg.preprocessor)
self.alignment_encoder = instantiate(cfg.alignment_encoder)
self.forward_sum_loss = ForwardSumLoss()
self.bin_loss = BinLoss()
self.add_bin_loss = False
self.bin_loss_scale = 0.0
self.bin_loss_start_ratio = cfg.bin_loss_start_ratio
self.bin_loss_warmup_epochs = cfg.bin_loss_warmup_epochs
def _setup_normalizer(self, cfg):
if "text_normalizer" in cfg:
normalizer_kwargs = {}
if "whitelist" in cfg.text_normalizer:
normalizer_kwargs["whitelist"] = self.register_artifact(
'text_normalizer.whitelist', cfg.text_normalizer.whitelist
)
try:
import nemo_text_processing
self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs)
except Exception as e:
logging.error(e)
raise ImportError(
"`nemo_text_processing` not installed, see https://github.com/NVIDIA/NeMo-text-processing for more details"
)
self.text_normalizer_call = self.normalizer.normalize
if "text_normalizer_call_kwargs" in cfg:
self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs
def _setup_tokenizer(self, cfg):
text_tokenizer_kwargs = {}
if "g2p" in cfg.text_tokenizer:
# for backward compatibility
if (
self._is_model_being_restored()
and (cfg.text_tokenizer.g2p.get('_target_', None) is not None)
and cfg.text_tokenizer.g2p["_target_"].startswith("nemo_text_processing.g2p")
):
cfg.text_tokenizer.g2p["_target_"] = g2p_backward_compatible_support(
cfg.text_tokenizer.g2p["_target_"]
)
g2p_kwargs = {}
if "phoneme_dict" in cfg.text_tokenizer.g2p:
g2p_kwargs["phoneme_dict"] = self.register_artifact(
'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict,
)
if "heteronyms" in cfg.text_tokenizer.g2p:
g2p_kwargs["heteronyms"] = self.register_artifact(
'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms,
)
text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs)
self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs)
def forward(self, *, spec, spec_len, text, text_len, attn_prior=None):
with torch.cuda.amp.autocast(enabled=False):
attn_soft, attn_logprob = self.alignment_encoder(
queries=spec,
keys=self.embed(text).transpose(1, 2),
mask=get_mask_from_lengths(text_len).unsqueeze(-1) == 0,
attn_prior=attn_prior,
)
return attn_soft, attn_logprob
def _metrics(self, attn_soft, attn_logprob, spec_len, text_len):
loss, bin_loss, attn_hard = 0.0, None, None
forward_sum_loss = self.forward_sum_loss(attn_logprob=attn_logprob, in_lens=text_len, out_lens=spec_len)
loss += forward_sum_loss
if self.add_bin_loss:
attn_hard = binarize_attention(attn_soft, text_len, spec_len)
bin_loss = self.bin_loss(hard_attention=attn_hard, soft_attention=attn_soft)
loss += bin_loss
return loss, forward_sum_loss, bin_loss, attn_hard
def on_train_epoch_start(self):
bin_loss_start_epoch = np.ceil(self.bin_loss_start_ratio * self._trainer.max_epochs)
# Add bin loss when current_epoch >= bin_start_epoch
if not self.add_bin_loss and self.current_epoch >= bin_loss_start_epoch:
logging.info(f"Using hard attentions after epoch: {self.current_epoch}")
self.add_bin_loss = True
if self.add_bin_loss:
self.bin_loss_scale = min((self.current_epoch - bin_loss_start_epoch) / self.bin_loss_warmup_epochs, 1.0)
def training_step(self, batch, batch_idx):
audio, audio_len, text, text_len, attn_prior = batch
spec, spec_len = self.preprocessor(input_signal=audio, length=audio_len)
attn_soft, attn_logprob = self(
spec=spec, spec_len=spec_len, text=text, text_len=text_len, attn_prior=attn_prior
)
loss, forward_sum_loss, bin_loss, _ = self._metrics(attn_soft, attn_logprob, spec_len, text_len)
train_log = {
'train_forward_sum_loss': forward_sum_loss,
'train_bin_loss': torch.tensor(1.0).to(forward_sum_loss.device) if bin_loss is None else bin_loss,
}
return {'loss': loss, 'progress_bar': {k: v.detach() for k, v in train_log.items()}, 'log': train_log}
def validation_step(self, batch, batch_idx):
audio, audio_len, text, text_len, attn_prior = batch
spec, spec_len = self.preprocessor(input_signal=audio, length=audio_len)
attn_soft, attn_logprob = self(
spec=spec, spec_len=spec_len, text=text, text_len=text_len, attn_prior=attn_prior
)
loss, forward_sum_loss, bin_loss, attn_hard = self._metrics(attn_soft, attn_logprob, spec_len, text_len)
# plot once per epoch
if batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB:
if attn_hard is None:
attn_hard = binarize_attention(attn_soft, text_len, spec_len)
attn_matrices = []
for i in range(min(5, audio.shape[0])):
attn_matrices.append(
wandb.Image(
plot_alignment_to_numpy(
np.fliplr(np.rot90(attn_soft[i, 0, : spec_len[i], : text_len[i]].data.cpu().numpy()))
),
caption=f"attn soft",
),
)
attn_matrices.append(
wandb.Image(
plot_alignment_to_numpy(
np.fliplr(np.rot90(attn_hard[i, 0, : spec_len[i], : text_len[i]].data.cpu().numpy()))
),
caption=f"attn hard",
)
)
self.logger.experiment.log({"attn_matrices": attn_matrices})
val_log = {
'val_loss': loss,
'val_forward_sum_loss': forward_sum_loss,
'val_bin_loss': torch.tensor(1.0).to(forward_sum_loss.device) if bin_loss is None else bin_loss,
}
self.log_dict(val_log, prog_bar=False, on_epoch=True, logger=True, sync_dist=True)
def _loader(self, cfg):
try:
_ = cfg.dataset.manifest_filepath
except omegaconf.errors.MissingMandatoryValue:
logging.warning("manifest_filepath was skipped. No dataset for this model.")
return None
dataset = instantiate(
cfg.dataset,
text_normalizer=self.normalizer,
text_normalizer_call_kwargs=self.text_normalizer_call_kwargs,
text_tokenizer=self.tokenizer,
)
return torch.utils.data.DataLoader( # noqa
dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params,
)
def setup_training_data(self, cfg):
self._train_dl = self._loader(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self._loader(cfg)
def setup_test_data(self, cfg):
"""Omitted."""
pass
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
# en-US, ARPABET-based
model = PretrainedModelInfo(
pretrained_model_name="tts_en_radtts_aligner",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_radtts_aligner/versions/ARPABET_1.11.0/files/Aligner.nemo",
description="This model is trained on LJSpeech sampled at 22050Hz with and can be used to align text and audio.",
class_=cls,
)
list_of_models.append(model)
# en-US, IPA-based
model = PretrainedModelInfo(
pretrained_model_name="tts_en_radtts_aligner_ipa",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_radtts_aligner/versions/IPA_1.13.0/files/Aligner.nemo",
description="This model is trained on LJSpeech sampled at 22050Hz with and can be used to align text and audio.",
class_=cls,
)
list_of_models.append(model)
return list_of_models
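# Illustrative usage sketch (not part of the original module), assuming a working NeMo
# installation and access to the NGC checkpoint listed above. The waveform below is
# random noise purely to exercise tensor shapes; real speech is needed for meaningful
# alignments, and the text encoding relies on the tokenizer's encode() interface.
if __name__ == "__main__":
    model = AlignerModel.from_pretrained("tts_en_radtts_aligner")
    model.eval()
    device = model.device
    audio = torch.randn(1, 22050, device=device)  # one second of fake audio at 22050 Hz
    audio_len = torch.tensor([audio.shape[1]], device=device)
    spec, spec_len = model.preprocessor(input_signal=audio, length=audio_len)
    tokens = torch.tensor(model.tokenizer.encode("hello world"), device=device).unsqueeze(0)
    token_len = torch.tensor([tokens.shape[1]], device=device)
    attn_soft, attn_logprob = model(spec=spec, spec_len=spec_len, text=tokens, text_len=token_len)
    print(attn_soft.shape, attn_logprob.shape)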
| NeMo-main | nemo/collections/tts/models/aligner.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers import BaseTokenizer
from nemo.collections.tts.losses.radttsloss import AttentionBinarizationLoss, RADTTSLoss
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.parts.utils.helpers import (
batch_from_ragged,
g2p_backward_compatible_support,
plot_alignment_to_numpy,
regulate_len,
sample_tts_input,
)
from nemo.core.classes import Exportable
from nemo.core.classes.common import typecheck
from nemo.core.neural_types.elements import (
Index,
MelSpectrogramType,
RegressionValuesType,
TokenDurationType,
TokenIndex,
)
from nemo.core.neural_types.neural_type import NeuralType
from nemo.core.optim.radam import RAdam
from nemo.utils import logging
from nemo.utils.decorators import experimental
@experimental
class RadTTSModel(SpectrogramGenerator, Exportable):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
self.normalizer = None
self.text_normalizer_call = None
self.text_normalizer_call_kwargs = {}
self._setup_normalizer(cfg)
self.tokenizer = None
self._setup_tokenizer(cfg)
assert self.tokenizer is not None
self.tokenizer_pad = self.tokenizer.pad
self.tokenizer_unk = self.tokenizer.oov
self.text_tokenizer_pad_id = None
self.tokens = None
super().__init__(cfg=cfg, trainer=trainer)
self.feat_loss_weight = 1.0
self.model_config = cfg.modelConfig
self.train_config = cfg.trainerConfig
self.optim = cfg.optim
self.criterion = RADTTSLoss(
self.train_config.sigma,
self.model_config.n_group_size,
self.model_config.dur_model_config,
self.model_config.f0_model_config,
self.model_config.energy_model_config,
vpred_model_config=self.model_config.v_model_config,
loss_weights=self.train_config.loss_weights,
)
self.attention_kl_loss = AttentionBinarizationLoss()
self.model = instantiate(cfg.modelConfig)
self._parser = None
self._tb_logger = None
self.cfg = cfg
self.log_train_images = False
self.export_config = {
"emb_range": (0, self.model.embedding.num_embeddings),
"enable_volume": True,
"enable_ragged_batches": False,
"num_speakers": self.model_config.n_speakers,
}
# print("intial self normalizer", self.normalizer)
def batch_dict(self, batch_data):
if len(batch_data) < 14:
spk_id = torch.tensor([0] * (batch_data[3]).size(0)).cuda().to(self.device)
v_m = batch_data[9]
p_v = batch_data[10]
else:
spk_id = batch_data[13]
v_m = batch_data[9]
p_v = batch_data[10]
batch_data_dict = {
"audio": batch_data[0],
"audio_lens": batch_data[1],
"text": batch_data[2],
"text_lens": batch_data[3],
"log_mel": batch_data[4],
"log_mel_lens": batch_data[5],
"align_prior_matrix": batch_data[6],
"pitch": batch_data[7],
"pitch_lens": batch_data[8],
"voiced_mask": v_m,
"p_voiced": p_v,
"energy": batch_data[11],
"energy_lens": batch_data[12],
"speaker_id": spk_id,
}
return batch_data_dict
def training_step(self, batch, batch_idx):
batch = self.batch_dict(batch)
mel = batch['log_mel']
speaker_ids = batch['speaker_id']
text = batch['text']
in_lens = batch['text_lens']
out_lens = batch['log_mel_lens']
attn_prior = batch['align_prior_matrix']
f0 = batch['pitch']
voiced_mask = batch['voiced_mask']
energy_avg = batch['energy']
if (
self.train_config.binarization_start_iter >= 0
and self.global_step >= self.train_config.binarization_start_iter
):
# binarization training phase
binarize = True
else:
# no binarization, soft-only
binarize = False
outputs = self.model(
mel,
speaker_ids,
text,
in_lens,
out_lens,
binarize_attention=binarize,
attn_prior=attn_prior,
f0=f0,
energy_avg=energy_avg,
voiced_mask=voiced_mask,
)
loss_outputs = self.criterion(outputs, in_lens, out_lens)
loss = None
for k, (v, w) in loss_outputs.items():
if w > 0:
loss = v * w if loss is None else loss + v * w
if binarize and self.global_step >= self.train_config.kl_loss_start_iter:
binarization_loss = self.attention_kl_loss(outputs['attn'], outputs['attn_soft'])
loss += binarization_loss
else:
binarization_loss = torch.zeros_like(loss)
loss_outputs['binarization_loss'] = (binarization_loss, 1.0)
for k, (v, w) in loss_outputs.items():
self.log("train/" + k, loss_outputs[k][0], on_step=True)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
batch = self.batch_dict(batch)
speaker_ids = batch['speaker_id']
text = batch['text']
in_lens = batch['text_lens']
out_lens = batch['log_mel_lens']
attn_prior = batch['align_prior_matrix']
f0 = batch['pitch']
voiced_mask = batch['voiced_mask']
energy_avg = batch['energy']
mel = batch['log_mel']
if (
self.train_config.binarization_start_iter >= 0
and self.global_step >= self.train_config.binarization_start_iter
):
# binarization training phase
binarize = True
else:
# no binarization, soft-only
binarize = False
outputs = self.model(
mel,
speaker_ids,
text,
in_lens,
out_lens,
binarize_attention=True,
attn_prior=attn_prior,
f0=f0,
energy_avg=energy_avg,
voiced_mask=voiced_mask,
)
loss_outputs = self.criterion(outputs, in_lens, out_lens)
loss = None
for k, (v, w) in loss_outputs.items():
if w > 0:
loss = v * w if loss is None else loss + v * w
if (
binarize
and self.train_config.kl_loss_start_iter >= 0
and self.global_step >= self.train_config.kl_loss_start_iter
):
binarization_loss = self.attention_kl_loss(outputs['attn'], outputs['attn_soft'])
loss += binarization_loss
else:
binarization_loss = torch.zeros_like(loss)
loss_outputs['binarization_loss'] = binarization_loss
val_outputs = {
"loss_outputs": loss_outputs,
"attn": outputs["attn"] if batch_idx == 0 else None,
"attn_soft": outputs["attn_soft"] if batch_idx == 0 else None,
"audiopaths": "audio_1" if batch_idx == 0 else None,
}
self.validation_step_outputs.append(val_outputs)
return val_outputs
def on_validation_epoch_end(self):
loss_outputs = self.validation_step_outputs[0]["loss_outputs"]
for k, v in loss_outputs.items():
if k != "binarization_loss":
self.log("val/" + k, loss_outputs[k][0], sync_dist=True, on_epoch=True)
attn = self.validation_step_outputs[0]["attn"]
attn_soft = self.validation_step_outputs[0]["attn_soft"]
self.tb_logger.add_image(
'attention_weights_mas',
plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy().T, title="audio"),
self.global_step,
dataformats='HWC',
)
self.tb_logger.add_image(
'attention_weights',
plot_alignment_to_numpy(attn_soft[0, 0].data.cpu().numpy().T, title="audio"),
self.global_step,
dataformats='HWC',
)
self.log_train_images = True
self.validation_step_outputs.clear() # free memory
def configure_optimizers(self):
logging.info("Initializing %s optimizer" % (self.optim.name))
if len(self.train_config.finetune_layers):
            for name, param in self.model.named_parameters():
                if any([l in name for l in self.train_config.finetune_layers]):  # short list hack
                    logging.info("Fine-tuning parameter: %s", name)
param.requires_grad = True
else:
param.requires_grad = False
if self.optim.name == 'Adam':
optimizer = torch.optim.Adam(
self.model.parameters(), lr=self.optim.lr, weight_decay=self.optim.weight_decay
)
elif self.optim.name == 'RAdam': # False for inference riva
optimizer = RAdam(self.model.parameters(), lr=self.optim.lr, weight_decay=self.optim.weight_decay)
else:
logging.info("Unrecognized optimizer %s! Please choose the right optimizer" % (self.optim.name))
exit(1)
return optimizer
def _loader(self, cfg):
try:
_ = cfg.dataset.manifest_filepath
except omegaconf.errors.MissingMandatoryValue:
logging.warning("manifest_filepath was skipped. No dataset for this model.")
return None
# print("inside loader self normalizer", self.normalizer)
dataset = instantiate(
cfg.dataset,
text_normalizer=self.normalizer,
text_normalizer_call_kwargs=self.text_normalizer_call_kwargs,
text_tokenizer=self.tokenizer,
)
return torch.utils.data.DataLoader( # noqa
dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params,
)
def setup_training_data(self, cfg):
self._train_dl = self._loader(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self._loader(cfg)
def setup_test_data(self, cfg):
"""Omitted."""
pass
@typecheck(
input_types={
"tokens": NeuralType(('B', 'T_text'), TokenIndex(), optional=True),
"speaker": NeuralType(('B'), Index(), optional=True),
"sigma": NeuralType(optional=True),
},
output_types={"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),},
)
def generate_spectrogram(self, tokens: 'torch.tensor', speaker: int = 0, sigma: float = 1.0) -> torch.tensor:
self.eval()
if self.training:
logging.warning("generate_spectrogram() is meant to be called in eval mode.")
speaker = torch.tensor([speaker]).long().cuda().to(self.device)
outputs = self.model.infer(speaker, tokens, sigma=sigma)
spect = outputs['mel']
return spect
@property
def parser(self):
        return self._parser
def _setup_tokenizer(self, cfg):
text_tokenizer_kwargs = {}
if "g2p" in cfg.text_tokenizer:
# for backward compatibility
if (
self._is_model_being_restored()
and (cfg.text_tokenizer.g2p.get('_target_', None) is not None)
and cfg.text_tokenizer.g2p["_target_"].startswith("nemo_text_processing.g2p")
):
cfg.text_tokenizer.g2p["_target_"] = g2p_backward_compatible_support(
cfg.text_tokenizer.g2p["_target_"]
)
g2p_kwargs = {}
if "phoneme_dict" in cfg.text_tokenizer.g2p:
g2p_kwargs["phoneme_dict"] = self.register_artifact(
'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict,
)
if "heteronyms" in cfg.text_tokenizer.g2p:
g2p_kwargs["heteronyms"] = self.register_artifact(
'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms,
)
text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs)
self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs)
if isinstance(self.tokenizer, BaseTokenizer):
self.text_tokenizer_pad_id = self.tokenizer.pad
self.tokens = self.tokenizer.tokens
        else:
            # `text_tokenizer_pad_id` and `tokens` cannot be provided in this scope, so a
            # tokenizer that is not a BaseTokenizer cannot be configured here; raise a clear
            # error instead of hitting a NameError on undefined variables.
            raise ValueError(
                "text_tokenizer must be an instance of BaseTokenizer so that its pad id and tokens can be used."
            )
def _setup_normalizer(self, cfg):
if "text_normalizer" in cfg:
normalizer_kwargs = {}
if "whitelist" in cfg.text_normalizer:
normalizer_kwargs["whitelist"] = self.register_artifact(
'text_normalizer.whitelist', cfg.text_normalizer.whitelist
)
try:
import nemo_text_processing
self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs)
self.text_normalizer_call = self.normalizer.normalize
except Exception as e:
logging.error(e)
raise ImportError(
"`nemo_text_processing` not installed, see https://github.com/NVIDIA/NeMo-text-processing for more details"
)
self.text_normalizer_call = self.normalizer.normalize
if "text_normalizer_call_kwargs" in cfg:
self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs
def parse(self, text: str, normalize=False) -> torch.Tensor:
if self.training:
logging.warning("parse() is meant to be called in eval mode.")
if normalize and self.text_normalizer_call is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_kwargs)
eval_phon_mode = contextlib.nullcontext()
if hasattr(self.tokenizer, "set_phone_prob"):
            eval_phon_mode = self.tokenizer.set_phone_prob(prob=1)
        with eval_phon_mode:
            tokens = self.tokenizer.encode(text)
return torch.tensor(tokens).long().unsqueeze(0).cuda().to(self.device)
@property
def tb_logger(self):
if self._tb_logger is None:
            if self.logger is None or self.logger.experiment is None:
return None
tb_logger = self.logger.experiment
for logger in self.trainer.loggers:
if isinstance(logger, TensorBoardLogger):
tb_logger = logger.experiment
break
self._tb_logger = tb_logger
return self._tb_logger
def load_state_dict(self, state_dict, strict=True):
# Override load_state_dict to be backward-compatible with old checkpoints
new_state_dict = {}
for k, v in state_dict.items():
k = k.replace("projection_fn.weight", "projection_fn.conv.weight")
k = k.replace("projection_fn.bias", "projection_fn.conv.bias")
new_state_dict[k] = v
super().load_state_dict(new_state_dict, strict=strict)
# Methods for model exportability
@property
def input_types(self):
return self._input_types
@property
def output_types(self):
return self._output_types
def _prepare_for_export(self, **kwargs):
self.model.remove_norms()
super()._prepare_for_export(**kwargs)
tensor_shape = ('T') if self.export_config["enable_ragged_batches"] else ('B', 'T')
# Define input_types and output_types as required by export()
self._input_types = {
"text": NeuralType(tensor_shape, TokenIndex()),
"batch_lengths": NeuralType(('B')),
"speaker_id": NeuralType(('B'), Index()),
"speaker_id_text": NeuralType(('B'), Index()),
"speaker_id_attributes": NeuralType(('B'), Index()),
"pitch": NeuralType(tensor_shape, RegressionValuesType()),
"pace": NeuralType(tensor_shape),
}
self._output_types = {
"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
"num_frames": NeuralType(('B'), TokenDurationType()),
"durs_predicted": NeuralType(('B', 'T_text'), TokenDurationType()),
}
if self.export_config["enable_volume"]:
self._input_types["volume"] = NeuralType(tensor_shape, optional=True)
self._output_types["volume_aligned"] = NeuralType(('B', 'T_spec'), RegressionValuesType())
def input_example(self, max_batch=1, max_dim=400):
par = next(self.model.parameters())
inputs = sample_tts_input(self.export_config, par.device, max_batch=max_batch, max_dim=max_dim)
speaker = inputs.pop("speaker")
inp = inputs['text']
pad_id = self.tokenizer.pad
inp[inp == pad_id] = pad_id - 1 if pad_id > 0 else pad_id + 1
inputs.update(
{'speaker_id': speaker, 'speaker_id_text': speaker, 'speaker_id_attributes': speaker,}
)
new_inputs = {
'text': inp,
'batch_lengths': inputs['batch_lengths'],
'speaker_id': speaker,
'speaker_id_text': speaker,
'speaker_id_attributes': speaker,
'pitch': inputs['pitch'],
'pace': inputs['pace'],
'volume': inputs['volume'],
}
return (new_inputs,)
def forward_for_export(
self, text, batch_lengths, speaker_id, speaker_id_text, speaker_id_attributes, pitch, pace, volume,
):
if self.export_config["enable_ragged_batches"]:
text, pitch, pace, volume_tensor, lens = batch_from_ragged(
text, pitch, pace, batch_lengths=batch_lengths, padding_idx=self.tokenizer_pad, volume=volume,
)
if volume is not None:
volume = volume_tensor
else:
lens = batch_lengths.to(dtype=torch.int64)
(mel, n_frames, dur, _, _) = self.model.infer(
speaker_id,
text,
speaker_id_text=speaker_id_text,
speaker_id_attributes=speaker_id_attributes,
sigma=0.7,
f0_mean=0.0,
f0_std=0.0,
in_lens=lens,
pitch_shift=pitch,
pace=pace,
).values()
ret_values = (mel.float(), n_frames, dur.float())
if volume is not None:
# Need to reshape as in infer patch
durs_predicted = dur.float()
truncated_length = torch.max(lens)
volume_extended, _ = regulate_len(
durs_predicted,
volume[:, :truncated_length].unsqueeze(-1),
pace[:, :truncated_length],
group_size=self.model.n_group_size,
dur_lens=lens,
)
volume_extended = volume_extended.squeeze(2).float()
ret_values = ret_values + (volume_extended,)
return ret_values
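# --- Illustrative usage sketch (not part of the original NeMo source) ---
# A minimal, hedged example of exporting the model with the hooks defined above
# (_prepare_for_export, input_example, forward_for_export). The checkpoint path
# "radtts.nemo" and the output file name are hypothetical placeholders.
if __name__ == "__main__":
    model = RadTTSModel.restore_from("radtts.nemo")  # hypothetical checkpoint path
    model.eval()
    # export() is expected to call _prepare_for_export() and to use
    # input_example()/input_types/output_types when tracing the graph.
    model.export("radtts.onnx")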
| NeMo-main | nemo/collections/tts/models/radtts.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, List
import librosa
import torch
from hydra.utils import instantiate
from omegaconf import MISSING, DictConfig, OmegaConf
from nemo.collections.tts.models.base import MelToSpec, Vocoder
from nemo.collections.tts.parts.utils.helpers import OperationMode, griffin_lim
from nemo.core.classes.common import PretrainedModelInfo
from nemo.core.neural_types.elements import AudioSignal, MelSpectrogramType
from nemo.core.neural_types.neural_type import NeuralType
class MelPsuedoInverseModel(MelToSpec):
def __init__(self, cfg: DictConfig):
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
super().__init__(cfg=cfg)
sampling_rate = self._cfg['sampling_rate']
n_fft = self._cfg['n_fft']
mel_fmin = self._cfg['mel_fmin']
mel_fmax = self._cfg['mel_fmax']
mel_freq = self._cfg['mel_freq']
melinv = librosa.filters.mel(sr=sampling_rate, n_fft=n_fft, fmin=mel_fmin, fmax=mel_fmax, n_mels=mel_freq)
self.mel_pseudo_inverse = torch.tensor(melinv, dtype=torch.float)
def convert_mel_spectrogram_to_linear(self, mel):
lin_spec = torch.tensordot(mel, self.mel_pseudo_inverse, dims=[[1], [0]])
lin_spec = lin_spec.permute(0, 2, 1)
return lin_spec
def setup_training_data(self, cfg):
pass
def setup_validation_data(self, cfg):
pass
def cuda(self, *args, **kwargs):
self.mel_pseudo_inverse = self.mel_pseudo_inverse.cuda(*args, **kwargs)
return self
class GriffinLimModel(Vocoder):
def __init__(self, cfg: DictConfig):
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
super().__init__(cfg=cfg)
self.n_iters = self._cfg['n_iters']
self.n_fft = self._cfg['n_fft']
self.l_hop = self._cfg['l_hop']
def convert_spectrogram_to_audio(self, spec, Ts=None):
batch_size = spec.shape[0]
T_max = spec.shape[2]
if Ts is None:
Ts = [T_max] * batch_size
max_size = (max(Ts) - 1) * self.l_hop
audios = torch.zeros(batch_size, max_size)
# Lazy GL implementation. Could be improved by moving to pytorch.
for i in range(batch_size):
audio = griffin_lim(spec[i, :, 0 : Ts[i]].cpu().numpy(), n_iters=self.n_iters, n_fft=self.n_fft)
my_len = audio.shape[0]
audios[i, 0:my_len] = torch.from_numpy(audio)
return audios
def setup_training_data(self, cfg):
pass
def setup_validation_data(self, cfg):
pass
def cuda(self, *args, **kwargs):
return self
@dataclass
class TwoStagesConfig:
mel2spec: Dict[Any, Any] = MISSING
linvocoder: Dict[Any, Any] = MISSING
class TwoStagesModel(Vocoder):
"""Two Stages model used to convert mel spectrograms, to linear spectrograms, and then to audio"""
def __init__(self, cfg: DictConfig):
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
super().__init__(cfg=cfg)
schema = OmegaConf.structured(TwoStagesConfig)
# ModelPT ensures that cfg is a DictConfig, but do this second check in case ModelPT changes
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
elif not isinstance(cfg, DictConfig):
raise ValueError(f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig")
# Ensure passed cfg is compliant with schema
OmegaConf.merge(cfg, schema)
if '_target_' in self._cfg.mel2spec:
self.mel2spec = instantiate(self._cfg.mel2spec)
else:
self.mel2spec = None
if '_target_' in self._cfg.linvocoder:
self.linvocoder = instantiate(self._cfg.linvocoder)
else:
self.linvocoder = None
def set_mel_to_spec_model(self, mel2spec: MelToSpec):
self.mel2spec = mel2spec
def set_linear_vocoder(self, linvocoder: Vocoder):
self.linvocoder = linvocoder
def cuda(self, *args, **kwargs):
self.mel2spec.cuda(*args, **kwargs)
self.linvocoder.cuda(*args, **kwargs)
return super().cuda(*args, **kwargs)
@property
def input_types(self):
return {
"mel": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
}
@property
def output_types(self):
return {
"wave": NeuralType(('B', 'T'), AudioSignal()),
}
def forward(self, *, mel):
pass
def convert_spectrogram_to_audio(self, spec: torch.Tensor, **kwargs) -> torch.Tensor:
self.eval()
try:
self.mel2spec.mode = OperationMode.infer
except AttributeError:
pass
try:
self.linvocoder.mode = OperationMode.infer
except AttributeError:
pass
with torch.no_grad():
exp_spec = torch.exp(spec)
linear_spec = self.mel2spec.convert_mel_spectrogram_to_linear(exp_spec)
audio = self.linvocoder.convert_spectrogram_to_audio(linear_spec, **kwargs)
return audio
def training_step(self, batch, batch_idx):
pass
def validation_step(self, batch, batch_idx):
pass
def on_validation_epoch_end(self, outputs):
pass
def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = "train"):
pass
def setup_training_data(self, cfg):
pass
def setup_validation_data(self, cfg):
pass
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
This method returns a list of pre-trained models that can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
return list_of_models
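# --- Illustrative usage sketch (not part of the original NeMo source) ---
# A minimal, hedged example of wiring the two stages together from a plain dict config.
# The hyperparameter values below are placeholders, not recommended defaults, and the
# input is a random log-mel tensor used only to exercise the API.
if __name__ == "__main__":
    example_cfg = {
        "mel2spec": {
            "_target_": "nemo.collections.tts.models.two_stages.MelPsuedoInverseModel",
            "cfg": {"sampling_rate": 22050, "n_fft": 1024, "mel_fmin": 0, "mel_fmax": 8000, "mel_freq": 80},
        },
        "linvocoder": {
            "_target_": "nemo.collections.tts.models.two_stages.GriffinLimModel",
            "cfg": {"n_iters": 64, "n_fft": 1024, "l_hop": 256},
        },
    }
    model = TwoStagesModel(example_cfg)
    log_mel = torch.randn(1, 80, 100)  # (batch, n_mels, frames); convert_spectrogram_to_audio exponentiates it
    audio = model.convert_spectrogram_to_audio(spec=log_mel)
    print(audio.shape)  # (1, (frames - 1) * l_hop)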
| NeMo-main | nemo/collections/tts/models/two_stages.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
import torch
from hydra.utils import instantiate
from omegaconf import MISSING, DictConfig, OmegaConf, open_dict
from omegaconf.errors import ConfigAttributeError
from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger
from torch import nn
from nemo.collections.common.parts.preprocessing import parsers
from nemo.collections.tts.losses.tacotron2loss import Tacotron2Loss
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.parts.utils.helpers import (
g2p_backward_compatible_support,
get_mask_from_lengths,
tacotron2_log_to_tb_func,
tacotron2_log_to_wandb_func,
)
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import (
AudioSignal,
EmbeddedTextType,
LengthsType,
LogitsType,
MelSpectrogramType,
SequenceToSequenceAlignmentType,
)
from nemo.core.neural_types.neural_type import NeuralType
from nemo.utils import logging, model_utils
@dataclass
class Preprocessor:
_target_: str = MISSING
pad_value: float = MISSING
@dataclass
class Tacotron2Config:
preprocessor: Preprocessor = Preprocessor()
encoder: Dict[Any, Any] = MISSING
decoder: Dict[Any, Any] = MISSING
postnet: Dict[Any, Any] = MISSING
labels: List = MISSING
train_ds: Optional[Dict[Any, Any]] = None
validation_ds: Optional[Dict[Any, Any]] = None
class Tacotron2Model(SpectrogramGenerator):
"""Tacotron 2 Model that is used to generate mel spectrograms from text"""
def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
# setup normalizer
self.normalizer = None
self.text_normalizer_call = None
self.text_normalizer_call_kwargs = {}
self._setup_normalizer(cfg)
# setup tokenizer
self.tokenizer = None
if hasattr(cfg, 'text_tokenizer'):
self._setup_tokenizer(cfg)
self.num_tokens = len(self.tokenizer.tokens)
self.tokenizer_pad = self.tokenizer.pad
self.tokenizer_unk = self.tokenizer.oov
# assert self.tokenizer is not None
else:
self.num_tokens = len(cfg.labels) + 3
super().__init__(cfg=cfg, trainer=trainer)
schema = OmegaConf.structured(Tacotron2Config)
# ModelPT ensures that cfg is a DictConfig, but do this second check in case ModelPT changes
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
elif not isinstance(cfg, DictConfig):
raise ValueError(f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig")
# Ensure passed cfg is compliant with schema
try:
OmegaConf.merge(cfg, schema)
self.pad_value = cfg.preprocessor.pad_value
except ConfigAttributeError:
self.pad_value = cfg.preprocessor.params.pad_value
logging.warning(
"Your config is using an old NeMo yaml configuration. Please ensure that the yaml matches the "
"current version in the main branch for future compatibility."
)
self._parser = None
self.audio_to_melspec_precessor = instantiate(cfg.preprocessor)
self.text_embedding = nn.Embedding(self.num_tokens, 512)
self.encoder = instantiate(self._cfg.encoder)
self.decoder = instantiate(self._cfg.decoder)
self.postnet = instantiate(self._cfg.postnet)
self.loss = Tacotron2Loss()
self.calculate_loss = True
@property
def parser(self):
if self._parser is not None:
return self._parser
ds_class_name = self._cfg.train_ds.dataset._target_.split(".")[-1]
if ds_class_name == "TTSDataset":
self._parser = None
elif hasattr(self._cfg, "labels"):
self._parser = parsers.make_parser(
labels=self._cfg.labels,
name='en',
unk_id=-1,
blank_id=-1,
do_normalize=True,
abbreviation_version="fastpitch",
make_table=False,
)
else:
raise ValueError("Wanted to setup parser, but model does not have necessary paramaters")
return self._parser
def parse(self, text: str, normalize=True) -> torch.Tensor:
if self.training:
logging.warning("parse() is meant to be called in eval mode.")
if normalize and self.text_normalizer_call is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_kwargs)
eval_phon_mode = contextlib.nullcontext()
if hasattr(self.tokenizer, "set_phone_prob"):
eval_phon_mode = self.tokenizer.set_phone_prob(prob=1.0)
with eval_phon_mode:
if self.tokenizer is not None:
tokens = self.tokenizer.encode(text)
else:
tokens = self.parser(text)
# The old parser doesn't add bos and eos ids, so manually add them
tokens = [len(self._cfg.labels)] + tokens + [len(self._cfg.labels) + 1]
tokens_tensor = torch.tensor(tokens).unsqueeze_(0).to(self.device)
return tokens_tensor
@property
def input_types(self):
if self.training:
return {
"tokens": NeuralType(('B', 'T'), EmbeddedTextType()),
"token_len": NeuralType(('B'), LengthsType()),
"audio": NeuralType(('B', 'T'), AudioSignal()),
"audio_len": NeuralType(('B'), LengthsType()),
}
else:
return {
"tokens": NeuralType(('B', 'T'), EmbeddedTextType()),
"token_len": NeuralType(('B'), LengthsType()),
"audio": NeuralType(('B', 'T'), AudioSignal(), optional=True),
"audio_len": NeuralType(('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
if not self.calculate_loss and not self.training:
return {
"spec_pred_dec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"spec_pred_postnet": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"gate_pred": NeuralType(('B', 'T'), LogitsType()),
"alignments": NeuralType(('B', 'T', 'T'), SequenceToSequenceAlignmentType()),
"pred_length": NeuralType(('B'), LengthsType()),
}
return {
"spec_pred_dec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"spec_pred_postnet": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"gate_pred": NeuralType(('B', 'T'), LogitsType()),
"spec_target": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"spec_target_len": NeuralType(('B'), LengthsType()),
"alignments": NeuralType(('B', 'T', 'T'), SequenceToSequenceAlignmentType()),
}
@typecheck()
def forward(self, *, tokens, token_len, audio=None, audio_len=None):
if audio is not None and audio_len is not None:
spec_target, spec_target_len = self.audio_to_melspec_precessor(audio, audio_len)
else:
if self.training or self.calculate_loss:
raise ValueError(
f"'audio' and 'audio_len' can not be None when either 'self.training' or 'self.calculate_loss' is True."
)
token_embedding = self.text_embedding(tokens).transpose(1, 2)
encoder_embedding = self.encoder(token_embedding=token_embedding, token_len=token_len)
if self.training:
spec_pred_dec, gate_pred, alignments = self.decoder(
memory=encoder_embedding, decoder_inputs=spec_target, memory_lengths=token_len
)
else:
spec_pred_dec, gate_pred, alignments, pred_length = self.decoder(
memory=encoder_embedding, memory_lengths=token_len
)
spec_pred_postnet = self.postnet(mel_spec=spec_pred_dec)
if not self.calculate_loss and not self.training:
return spec_pred_dec, spec_pred_postnet, gate_pred, alignments, pred_length
return spec_pred_dec, spec_pred_postnet, gate_pred, spec_target, spec_target_len, alignments
@typecheck(
input_types={"tokens": NeuralType(('B', 'T'), EmbeddedTextType())},
output_types={"spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType())},
)
def generate_spectrogram(self, *, tokens):
self.eval()
self.calculate_loss = False
token_len = torch.tensor([len(i) for i in tokens]).to(self.device)
tensors = self(tokens=tokens, token_len=token_len)
spectrogram_pred = tensors[1]
if spectrogram_pred.shape[0] > 1:
# Silence all frames past the predicted end
mask = ~get_mask_from_lengths(tensors[-1])
mask = mask.expand(spectrogram_pred.shape[1], mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
spectrogram_pred.data.masked_fill_(mask, self.pad_value)
return spectrogram_pred
def training_step(self, batch, batch_idx):
audio, audio_len, tokens, token_len = batch
spec_pred_dec, spec_pred_postnet, gate_pred, spec_target, spec_target_len, _ = self.forward(
audio=audio, audio_len=audio_len, tokens=tokens, token_len=token_len
)
loss, _ = self.loss(
spec_pred_dec=spec_pred_dec,
spec_pred_postnet=spec_pred_postnet,
gate_pred=gate_pred,
spec_target=spec_target,
spec_target_len=spec_target_len,
pad_value=self.pad_value,
)
output = {
'loss': loss,
'progress_bar': {'training_loss': loss},
'log': {'loss': loss},
}
return output
def validation_step(self, batch, batch_idx):
audio, audio_len, tokens, token_len = batch
spec_pred_dec, spec_pred_postnet, gate_pred, spec_target, spec_target_len, alignments = self.forward(
audio=audio, audio_len=audio_len, tokens=tokens, token_len=token_len
)
loss, gate_target = self.loss(
spec_pred_dec=spec_pred_dec,
spec_pred_postnet=spec_pred_postnet,
gate_pred=gate_pred,
spec_target=spec_target,
spec_target_len=spec_target_len,
pad_value=self.pad_value,
)
loss = {
"val_loss": loss,
"mel_target": spec_target,
"mel_postnet": spec_pred_postnet,
"gate": gate_pred,
"gate_target": gate_target,
"alignments": alignments,
}
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
if self.logger is not None and self.logger.experiment is not None:
logger = self.logger.experiment
for logger in self.trainer.loggers:
if isinstance(logger, TensorBoardLogger):
logger = logger.experiment
break
if isinstance(logger, TensorBoardLogger):
tacotron2_log_to_tb_func(
logger,
self.validation_step_outputs[0].values(),
self.global_step,
tag="val",
log_images=True,
add_audio=False,
)
elif isinstance(logger, WandbLogger):
tacotron2_log_to_wandb_func(
logger,
self.validation_step_outputs[0].values(),
self.global_step,
tag="val",
log_images=True,
add_audio=False,
)
avg_loss = torch.stack(
[x['val_loss'] for x in self.validation_step_outputs]
).mean() # This reduces across batches, not workers!
self.log('val_loss', avg_loss)
self.validation_step_outputs.clear() # free memory
def _setup_normalizer(self, cfg):
if "text_normalizer" in cfg:
normalizer_kwargs = {}
if "whitelist" in cfg.text_normalizer:
normalizer_kwargs["whitelist"] = self.register_artifact(
'text_normalizer.whitelist', cfg.text_normalizer.whitelist
)
try:
import nemo_text_processing
self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs)
except Exception as e:
logging.error(e)
raise ImportError(
"`nemo_text_processing` not installed, see https://github.com/NVIDIA/NeMo-text-processing for more details"
)
self.text_normalizer_call = self.normalizer.normalize
if "text_normalizer_call_kwargs" in cfg:
self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs
def _setup_tokenizer(self, cfg):
text_tokenizer_kwargs = {}
if "g2p" in cfg.text_tokenizer and cfg.text_tokenizer.g2p is not None:
# for backward compatibility
if (
self._is_model_being_restored()
and (cfg.text_tokenizer.g2p.get('_target_', None) is not None)
and cfg.text_tokenizer.g2p["_target_"].startswith("nemo_text_processing.g2p")
):
cfg.text_tokenizer.g2p["_target_"] = g2p_backward_compatible_support(
cfg.text_tokenizer.g2p["_target_"]
)
g2p_kwargs = {}
if "phoneme_dict" in cfg.text_tokenizer.g2p:
g2p_kwargs["phoneme_dict"] = self.register_artifact(
'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict,
)
if "heteronyms" in cfg.text_tokenizer.g2p:
g2p_kwargs["heteronyms"] = self.register_artifact(
'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms,
)
text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs)
self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs)
def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = "train"):
if "dataset" not in cfg or not isinstance(cfg.dataset, DictConfig):
raise ValueError(f"No dataset for {name}")
if "dataloader_params" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):
raise ValueError(f"No dataloder_params for {name}")
if shuffle_should_be:
if 'shuffle' not in cfg.dataloader_params:
logging.warning(
f"Shuffle should be set to True for {self}'s {name} dataloader but was not found in its "
"config. Manually setting to True"
)
with open_dict(cfg.dataloader_params):
cfg.dataloader_params.shuffle = True
elif not cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to False!!!")
elif not shuffle_should_be and cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to True!!!")
dataset = instantiate(
cfg.dataset,
text_normalizer=self.normalizer,
text_normalizer_call_kwargs=self.text_normalizer_call_kwargs,
text_tokenizer=self.tokenizer,
)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
def setup_training_data(self, cfg):
self._train_dl = self.__setup_dataloader_from_config(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self.__setup_dataloader_from_config(cfg, shuffle_should_be=False, name="validation")
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
This method returns a list of pre-trained models that can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
model = PretrainedModelInfo(
pretrained_model_name="tts_en_tacotron2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_tacotron2/versions/1.10.0/files/tts_en_tacotron2.nemo",
description="This model is trained on LJSpeech sampled at 22050Hz, and can be used to generate female English voices with an American accent.",
class_=cls,
aliases=["Tacotron2-22050Hz"],
)
list_of_models.append(model)
return list_of_models
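# --- Illustrative usage sketch (not part of the original NeMo source) ---
# A minimal, hedged inference example using the methods defined above. Downloading the
# pretrained checkpoint via from_pretrained() requires network access to NGC.
if __name__ == "__main__":
    model = Tacotron2Model.from_pretrained("tts_en_tacotron2")
    model.eval()
    tokens = model.parse("Mel spectrogram generation with Tacotron 2.")  # (1, T_text) token ids
    spectrogram = model.generate_spectrogram(tokens=tokens)  # (1, n_mel_channels, T_spec)
    print(spectrogram.shape)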
| NeMo-main | nemo/collections/tts/models/tacotron2.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Iterable, Optional
import editdistance
import librosa
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.combined_loader import CombinedLoader
from nemo.collections.asr.losses.angularloss import AngularSoftmaxLoss
from nemo.collections.tts.data.dataset import TTSDataset
from nemo.collections.tts.modules.ssl_tts import GreedyCTCDecoder
from nemo.collections.tts.torch.tts_tokenizers import BaseTokenizer, EnglishCharsTokenizer
from nemo.core.classes import ModelPT
from nemo.core.classes.common import PretrainedModelInfo
from nemo.core.optim.lr_scheduler import WarmupPolicy
from nemo.utils import logging
from nemo.utils.decorators import experimental
@experimental
class SSLDisentangler(ModelPT):
"""
SSLDisentangler is a Conformer-based model for extracting disentangled content and speaker embeddings
from an audio waveform. It uses a pre-trained Conformer SSL model: to extract the linguistic content
and speaker representations, two randomly initialized downstream heads are added and the entire setup
is finetuned in a multi-task manner for speech recognition and speaker verification.
These representations can be used by FastPitchModel_SSL for voice conversion by swapping the speaker
embedding of a given source utterance with that of a target speaker.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
self.preprocessor_disentangler = SSLDisentangler.from_config_dict(self._cfg.preprocessor)
self.encoder = SSLDisentangler.from_config_dict(self._cfg.encoder)
self._text_tokenizer = EnglishCharsTokenizer(add_blank_at="last")
self._tb_logger = None
self.downstream_nets = torch.nn.ModuleDict()
for task in self._cfg.downstream_heads.task_names:
if task == 'speaker_verification':
# setting up downstream heads and loss functions for speaker verification task
in_dim = self._cfg.encoder.d_model
out_dim = self._cfg.downstream_heads.speaker_embed_size
num_speakers = self._cfg.downstream_heads.num_speakers
self.downstream_nets[task] = torch.nn.Linear(in_dim, out_dim)
self.sv_linear = torch.nn.Linear(out_dim, num_speakers)
self.sv_loss = AngularSoftmaxLoss(scale=30, margin=0.4)
elif task == 'content':
# setting up downstream heads and loss functions for text/content recognition task
in_dim = self._cfg.encoder.d_model
out_dim = self._cfg.downstream_heads.content_embed_size
num_chars = len(self._text_tokenizer.tokens) # list of english tokens
self.downstream_nets[task] = torch.nn.Linear(in_dim, out_dim)
self.content_linear = torch.nn.Linear(out_dim, num_chars)
self.ctc_loss = torch.nn.CTCLoss(blank=self._text_tokenizer.blank, zero_infinity=True)
self.pitch_augment = self._cfg.get('pitch_augment', False)
self.augment_ctc = self._cfg.get('augment_ctc', False)
self.aug_loss_type = self._cfg.get('aug_loss_type', 'mse')
self.stop_gradient = self._cfg.get('stop_gradient', False)
assert (
self.stop_gradient and self.augment_ctc
) == False, "stop_gradient and augment_ctc cannot be true at the same time"
self.mse_loss = torch.nn.MSELoss()
self.ctc_decoder = GreedyCTCDecoder(self._text_tokenizer.tokens, self._text_tokenizer.blank)
else:
raise ValueError(f"{task} is not a valid task. Task must be speaker_verification or content.")
self.automatic_optimization = False
stft_cfg = self._cfg.preprocessor
librosa_mel_filter = librosa.filters.mel(
sr=stft_cfg.sample_rate, n_fft=stft_cfg.n_fft, n_mels=stft_cfg.features, fmin=0, fmax=8000
)
fb = torch.tensor(librosa_mel_filter, dtype=torch.float,).unsqueeze(0)
self.register_buffer("fb", fb)
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
This method returns a list of pre-trained models that can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
results = []
model = PretrainedModelInfo(
pretrained_model_name="ssl_en_conformer_large",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:ssl_en_conformer_large",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/ssl_en_conformer_large/versions/1.10.1/files/ssl_en_conformer_large.nemo",
)
results.append(model)
model = PretrainedModelInfo(
pretrained_model_name="ssl_en_conformer_xlarge",
description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:ssl_en_conformer_xlarge",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/ssl_en_conformer_xlarge/versions/1.10.0/files/ssl_en_conformer_xlarge.nemo",
)
results.append(model)
return results
@property
def tb_logger(self):
if self._tb_logger is None:
if self.logger is None or self.logger.experiment is None:
return None
tb_logger = self.logger.experiment
if isinstance(self.logger, Iterable):
for logger in self.logger:
if isinstance(logger, TensorBoardLogger):
tb_logger = logger.experiment
break
self._tb_logger = tb_logger
return self._tb_logger
def __setup_dataloader_from_config(self, data_config):
if hasattr(self, '_text_tokenizer') and isinstance(self._text_tokenizer, BaseTokenizer):
_text_tokenizer = self._text_tokenizer
else:
if hasattr(self, '_text_tokenizer') and not isinstance(self._text_tokenizer, BaseTokenizer):
logging.warning(f"test_tokenizer is set but not a BaseTokenizer. Will be set to EnglishCharsTokenizer")
_text_tokenizer = self._text_tokenizer = EnglishCharsTokenizer(add_blank_at="last")
for task in self._cfg.downstream_heads.task_names:
if task == 'speaker_verification':
sv_dataset = TTSDataset(
manifest_filepath=data_config['manifest_speaker_verification_fp'],
sample_rate=self._cfg.sample_rate,
text_tokenizer=_text_tokenizer,
segment_max_duration=data_config['segment_max_duration'],
sup_data_types=['speaker_id'],
sup_data_path=data_config['sup_data_path'],
pad_multiple=data_config.get('pad_multiple', 1),
)
sv_loader = torch.utils.data.DataLoader(
sv_dataset,
batch_size=data_config['batch_size_sv'],
collate_fn=sv_dataset.general_collate_fn,
shuffle=data_config['shuffle'],
num_workers=data_config.get('num_workers_sv', 0),
pin_memory=data_config.get('pin_memory', False),
)
elif task == 'content':
content_dataset = TTSDataset(
manifest_filepath=data_config['manifest_content_fp'],
sample_rate=self._cfg.sample_rate,
text_tokenizer=_text_tokenizer,
min_duration=data_config['min_duration_content'],
max_duration=data_config['max_duration_content'],
pitch_augment=data_config.get('pitch_augment', False),
cache_pitch_augment=data_config.get('cache_pitch_augment', True),
sup_data_path=data_config['sup_data_path'],
pad_multiple=data_config.get('pad_multiple', 1),
)
content_loader = torch.utils.data.DataLoader(
content_dataset,
batch_size=data_config['batch_size_content'],
collate_fn=content_dataset.general_collate_fn,
shuffle=data_config['shuffle'],
num_workers=data_config.get('num_workers_content', 0),
pin_memory=data_config.get('pin_memory', False),
)
else:
raise ValueError(f"{task} is not a valid task. Task must be speaker_verification or content.")
loaders = {"sv": sv_loader, "content": content_loader}
return loaders
def setup_training_data(self, cfg):
self._train_dl = self.__setup_dataloader_from_config(self._cfg.train_ds)
def setup_validation_data(self, cfg):
self._validation_dl = CombinedLoader(self.__setup_dataloader_from_config(self._cfg.validation_ds))
def configure_optimizers(self):
optim_backbone_config = self._cfg.optim_backbone.copy()
optim_downstream_config = self._cfg.optim_downstream.copy()
OmegaConf.set_struct(optim_backbone_config, False)
sched_backbone_config = optim_backbone_config.pop("sched", None)
OmegaConf.set_struct(optim_backbone_config, True)
OmegaConf.set_struct(optim_downstream_config, False)
sched_downstream_config = optim_downstream_config.pop("sched", None)
OmegaConf.set_struct(optim_downstream_config, True)
optim_backbone = instantiate(optim_backbone_config, params=self.encoder.parameters(),)
optim_downstream = instantiate(
optim_downstream_config,
params=itertools.chain(
self.downstream_nets.parameters(),
self.sv_linear.parameters(),
self.content_linear.parameters(),
self.sv_loss.parameters(),
),
)
if sched_backbone_config is not None and sched_downstream_config is not None:
scheduler_backbone = WarmupPolicy(
optimizer=optim_backbone,
max_steps=None,
min_lr=sched_backbone_config.min_lr,
warmup_steps=sched_backbone_config.warmup_steps,
) # Use warmup to delay start
sch1_dict = {
'scheduler': scheduler_backbone,
'interval': 'step',
}
scheduler_downstream = WarmupPolicy(
optimizer=optim_downstream,
max_steps=None,
min_lr=sched_downstream_config.min_lr,
warmup_steps=sched_downstream_config.warmup_steps,
)
sch2_dict = {
'scheduler': scheduler_downstream,
'interval': 'step',
}
return [optim_backbone, optim_downstream], [sch1_dict, sch2_dict]
else:
return [optim_backbone, optim_downstream]
def forward(self, input_signal=None, input_signal_length=None, normalize_content=True):
processed_signal, processed_signal_length = self.preprocessor_disentangler(
input_signal=input_signal, length=input_signal_length,
)
encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_length) # b,c,t
for task in self._cfg.downstream_heads.task_names:
if task == "speaker_verification":
speaker_embedding = self.downstream_nets['speaker_verification'](encoded[:, :, 0])
l2_norm = torch.norm(speaker_embedding, p=2, dim=-1, keepdim=True)
speaker_embedding_normalized = speaker_embedding / l2_norm
speaker_logits = self.sv_linear(speaker_embedding_normalized)
elif task == "content":
encoded_btc = encoded.permute(0, 2, 1)
content_embedding = self.downstream_nets['content'](encoded_btc)
if normalize_content:
l2_norm_content = torch.norm(content_embedding, p=2, dim=-1, keepdim=True)
content_embedding = content_embedding / l2_norm_content
content_logits = self.content_linear(content_embedding)
content_log_probs = content_logits.log_softmax(dim=2)
content_log_probs = content_log_probs.permute(1, 0, 2) # t,b,c for ctc
else:
raise ValueError(f"{task} is not a valid task. Task must be speaker_verification or content.")
return (
speaker_logits,
speaker_embedding_normalized,
content_embedding,
content_log_probs,
encoded_len,
)
def forward_for_export(self, input_signal=None, input_signal_length=None, normalize_content=True):
# Same as forward right now. Earlier version of encoder had a different forward for export.
# This function is still kept for compatibility with older evaluation/inference scripts.
return self.forward(
input_signal=input_signal, input_signal_length=input_signal_length, normalize_content=normalize_content,
)
def training_step(self, batch, batch_idx):
loss = 0.0
optim_backbone, optim_downstream = self.optimizers()
schedulers = self.lr_schedulers()
for key in batch.keys():
if key == 'sv':
signal = batch[key]['audio']
signal_len = batch[key]['audio_lens']
speaker_id = batch[key]['speaker_id']
sv_logits, sv_emb, _, _, _ = self.forward(input_signal=signal, input_signal_length=signal_len)
pred_speaker = torch.argmax(sv_logits, dim=1)
sv_loss = self.sv_loss(logits=sv_logits, labels=speaker_id)
loss += sv_loss
if not self._cfg.combined_loss:
optim_backbone.zero_grad()
optim_downstream.zero_grad()
self.manual_backward(sv_loss)
optim_backbone.step()
optim_downstream.step()
correct = pred_speaker.eq(speaker_id.data.view_as(pred_speaker)).sum().item()
acc = (correct / len(speaker_id)) * 100
self.log("t_sv_loss", sv_loss.item())
self.log("t_sv_accuracy", acc)
elif key == "content":
content_loss = 0
signal = batch[key]['audio']
signal_len = batch[key]['audio_lens']
target = batch[key]['text'] # (B, T)
target_len = batch[key]['text_lens']
_, _, content_embedding, content_log_probs, encoded_len = self.forward(
input_signal=signal, input_signal_length=signal_len
)
ctc_loss = self.ctc_loss(content_log_probs, target, encoded_len, target_len)
# check if ctc loss is nan
if torch.isfinite(ctc_loss):
self.log("t_ctc_loss", ctc_loss.item())
content_loss += ctc_loss
else:
logging.warning(f"ctc_loss is not finite")
if self.pitch_augment:
augmented_signal = batch[key]['audio_shifted']
if self.stop_gradient:
with torch.no_grad():
_, _, content_embedding_aug, content_log_probs_aug, _ = self.forward(
input_signal=augmented_signal, input_signal_length=signal_len
)
else:
_, _, content_embedding_aug, content_log_probs_aug, _ = self.forward(
input_signal=augmented_signal, input_signal_length=signal_len
)
if self.aug_loss_type == "mse":
sim_loss = self.mse_loss(content_embedding, content_embedding_aug)
elif self.aug_loss_type == "cosine":
cosine_similarity = torch.nn.functional.cosine_similarity(
content_embedding, content_embedding_aug, dim=-1
).mean()
sim_loss = 1.0 - cosine_similarity
content_loss += self._cfg.augment_sim_alpha * sim_loss
self.log("t_sim_loss", sim_loss.item())
if self.augment_ctc:
ctc_loss_aug = self.ctc_loss(content_log_probs_aug, target, encoded_len, target_len)
if torch.isfinite(ctc_loss_aug):
content_loss += ctc_loss_aug
self.log("t_ctc_loss_aug", ctc_loss_aug.item())
else:
logging.warning(f"ctc_loss_aug is not finite. Add min duration to avoid getting here.")
loss += content_loss
if not self._cfg.combined_loss:
optim_backbone.zero_grad()
optim_downstream.zero_grad()
self.manual_backward(content_loss)
optim_backbone.step()
optim_downstream.step()
if isinstance(content_loss, torch.Tensor):
self.log("t_content_loss", content_loss.item())
if self._cfg.combined_loss:
optim_backbone.zero_grad()
optim_downstream.zero_grad()
self.manual_backward(loss)
optim_backbone.step()
optim_downstream.step()
if schedulers is not None:
sch1, sch2 = schedulers
sch1.step()
sch2.step()
if self.trainer.global_step % 10 == 0:
self.log("lr_backbone", optim_backbone.param_groups[0]['lr'])
self.log("lr_downstream", optim_downstream.param_groups[0]['lr'])
self.log("t_loss", loss)
def validation_step(self, batch, batch_idx):
loss_total = 0
for key in batch.keys():
if key == 'sv':
signal = batch[key]['audio']
signal_len = batch[key]['audio_lens']
speaker_id = batch[key]['speaker_id']
sv_logits, sv_emb, _, _, _ = self.forward(input_signal=signal, input_signal_length=signal_len)
pred_speaker = torch.argmax(sv_logits, dim=1)
sv_loss = self.sv_loss(logits=sv_logits, labels=speaker_id)
loss_total += sv_loss
correct = pred_speaker.eq(speaker_id.data.view_as(pred_speaker)).sum().item()
acc = (correct / len(speaker_id)) * 100
acc_val = torch.as_tensor(acc)
if key == 'content':
content_loss = 0
signal = batch[key]['audio']
signal_len = batch[key]['audio_lens']
target = batch[key]['text'] # (B, T)
target_len = batch[key]['text_lens']
_, _, content_embedding, content_log_probs, encoded_len = self.forward(
input_signal=signal, input_signal_length=signal_len
)
ctc_loss = self.ctc_loss(content_log_probs, target, encoded_len, target_len)
# check if ctc loss is nan
if torch.isfinite(ctc_loss):
content_loss += ctc_loss
else:
logging.warning(f"ctc_loss is not finite. Add min duration to avoid getting here.")
if self.pitch_augment:
augmented_signal = batch[key]['audio_shifted']
_, _, content_embedding_aug, content_log_probs_aug, _ = self.forward(
input_signal=augmented_signal, input_signal_length=signal_len
)
if self.aug_loss_type == "mse":
sim_loss = self.mse_loss(content_embedding, content_embedding_aug)
elif self.aug_loss_type == "cosine":
cosine_similarity = torch.nn.functional.cosine_similarity(
content_embedding, content_embedding_aug, dim=-1
).mean()
sim_loss = 1.0 - cosine_similarity
content_loss += self._cfg.augment_sim_alpha * sim_loss
loss_total += content_loss
cers = []
for _idx in range(target.shape[0]):
item_log_prob = content_log_probs[:, _idx, :][: encoded_len[_idx]].cpu()
item_target = target[_idx][: target_len[_idx]].cpu()
_, predicted_str = self.ctc_decoder(item_log_prob)
tokenizer = self._text_tokenizer
target_str = tokenizer.sep.join(tokenizer._id2token[t] for t in item_target.tolist())
ed = editdistance.eval(predicted_str, target_str)
if max(len(predicted_str), len(target_str)) > 0:
normalized_ed = (1.0 * ed) / max(len(predicted_str), len(target_str))
else:
normalized_ed = 1.0
cers.append(normalized_ed)
return {
'val_loss': loss_total.cpu(),
'sv_loss': sv_loss.cpu(),
'ctc_loss': ctc_loss.cpu(),
'content_loss': content_loss.cpu(),
'accuracy_sv': acc_val.cpu(),
'cer': torch.tensor(cers).mean().cpu(),
}
def on_validation_epoch_end(self, outputs):
collect = lambda key: torch.stack([x[key] for x in outputs if torch.isfinite(x[key])]).mean()
val_loss = collect("val_loss")
val_sv_loss = collect("sv_loss")
val_ctc_loss = collect("ctc_loss")
val_content_loss = collect("content_loss")
accuracy_sv = collect("accuracy_sv")
cer = collect("cer")
self.log("val_loss", val_loss)
self.log("sv_loss", val_sv_loss)
self.log("val_ctc_loss", val_ctc_loss)
self.log("val_content_loss", val_content_loss)
self.log("accuracy_sv", accuracy_sv)
self.log("cer", cer)
| NeMo-main | nemo/collections/tts/models/ssl_tts.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Dict
import torch
import torch.nn.functional as F
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning.loggers.wandb import WandbLogger
from nemo.collections.tts.losses.hifigan_losses import DiscriminatorLoss, GeneratorLoss
from nemo.collections.tts.losses.stftlosses import MultiResolutionSTFTLoss
from nemo.collections.tts.models.base import Vocoder
from nemo.collections.tts.modules.univnet_modules import MultiPeriodDiscriminator, MultiResolutionDiscriminator
from nemo.collections.tts.parts.utils.helpers import get_batch_size, get_num_workers, plot_spectrogram_to_numpy
from nemo.core import Exportable
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import AudioSignal, MelSpectrogramType
from nemo.core.neural_types.neural_type import NeuralType
from nemo.core.optim.lr_scheduler import compute_max_steps, prepare_lr_scheduler
from nemo.utils import logging, model_utils
HAVE_WANDB = True
try:
import wandb
except ModuleNotFoundError:
HAVE_WANDB = False
class UnivNetModel(Vocoder, Exportable):
"""UnivNet model (https://arxiv.org/abs/2106.07889) that is used to generate audio from mel spectrogram."""
def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
super().__init__(cfg=cfg, trainer=trainer)
self.audio_to_melspec_precessor = instantiate(cfg.preprocessor)
# We use a separate preprocessor for training because we need to pass grads and remove the pitch fmax limitation
self.trg_melspec_fn = instantiate(cfg.preprocessor, highfreq=None, use_grads=True)
self.generator = instantiate(
cfg.generator, n_mel_channels=cfg.preprocessor.nfilt, hop_length=cfg.preprocessor.n_window_stride
)
self.mpd = MultiPeriodDiscriminator(cfg.discriminator.mpd, debug=cfg.debug if "debug" in cfg else False)
self.mrd = MultiResolutionDiscriminator(cfg.discriminator.mrd, debug=cfg.debug if "debug" in cfg else False)
self.discriminator_loss = DiscriminatorLoss()
self.generator_loss = GeneratorLoss()
# Reshape MRD resolutions hyperparameter and apply them to MRSTFT loss
self.stft_resolutions = cfg.discriminator.mrd.resolutions
self.fft_sizes = [res[0] for res in self.stft_resolutions]
self.hop_sizes = [res[1] for res in self.stft_resolutions]
self.win_lengths = [res[2] for res in self.stft_resolutions]
self.mrstft_loss = MultiResolutionSTFTLoss(self.fft_sizes, self.hop_sizes, self.win_lengths)
self.stft_lamb = cfg.stft_lamb
self.sample_rate = self._cfg.preprocessor.sample_rate
self.stft_bias = None
self.input_as_mel = False
if self._train_dl:
self.input_as_mel = self._train_dl.dataset.load_precomputed_mel
self.automatic_optimization = False
def _get_max_steps(self):
return compute_max_steps(
max_epochs=self._cfg.max_epochs,
accumulate_grad_batches=self.trainer.accumulate_grad_batches,
limit_train_batches=self.trainer.limit_train_batches,
num_workers=get_num_workers(self.trainer),
num_samples=len(self._train_dl.dataset),
batch_size=get_batch_size(self._train_dl),
drop_last=self._train_dl.drop_last,
)
@staticmethod
def get_warmup_steps(max_steps, warmup_steps, warmup_ratio):
if warmup_steps is not None and warmup_ratio is not None:
raise ValueError('warmup_steps and warmup_ratio cannot both be specified for the scheduler')
if warmup_steps is not None:
return warmup_steps
if warmup_ratio is not None:
return warmup_ratio * max_steps
raise ValueError(f'Specify warmup_steps or warmup_ratio for scheduler')
def configure_optimizers(self):
optim_config = self._cfg.optim.copy()
OmegaConf.set_struct(optim_config, False)
sched_config = optim_config.pop("sched", None)
OmegaConf.set_struct(optim_config, True)
# Backward compatibility
if sched_config is None and 'sched' in self._cfg:
sched_config = self._cfg.sched
optim_g = instantiate(optim_config, params=self.generator.parameters(),)
optim_d = instantiate(optim_config, params=itertools.chain(self.mrd.parameters(), self.mpd.parameters()),)
if sched_config is not None:
max_steps = self._cfg.get("max_steps", None)
if max_steps is None or max_steps < 0:
max_steps = self._get_max_steps()
warmup_steps = UnivNetModel.get_warmup_steps(
max_steps=max_steps,
warmup_steps=sched_config.get("warmup_steps", None),
warmup_ratio=sched_config.get("warmup_ratio", None),
)
OmegaConf.set_struct(sched_config, False)
sched_config["max_steps"] = max_steps
sched_config["warmup_steps"] = warmup_steps
sched_config.pop("warmup_ratio", None)
OmegaConf.set_struct(sched_config, True)
scheduler_g = prepare_lr_scheduler(
optimizer=optim_g, scheduler_config=sched_config, train_dataloader=self._train_dl
)
scheduler_d = prepare_lr_scheduler(
optimizer=optim_d, scheduler_config=sched_config, train_dataloader=self._train_dl
)
return [optim_g, optim_d], [scheduler_g, scheduler_d]
else:
return [optim_g, optim_d]
@typecheck()
def forward(self, *, spec):
"""
Runs the generator; for inputs and outputs see input_types and output_types.
"""
return self.generator(x=spec)
@typecheck(
input_types={"spec": NeuralType(('B', 'C', 'T'), MelSpectrogramType())},
output_types={"audio": NeuralType(('B', 'T'), AudioSignal())},
)
def convert_spectrogram_to_audio(self, spec: 'torch.tensor') -> 'torch.tensor':
return self(spec=spec).squeeze(1)
def training_step(self, batch, batch_idx):
if self.input_as_mel:
# Pre-computed spectrograms will be used as input
audio, audio_len, audio_mel = batch
else:
audio, audio_len = batch
audio_mel, _ = self.audio_to_melspec_precessor(audio, audio_len)
audio = audio.unsqueeze(1)
audio_pred = self.generator(x=audio_mel)
audio_pred_mel, _ = self.trg_melspec_fn(audio_pred.squeeze(1), audio_len)
optim_g, optim_d = self.optimizers()
# Train discriminator
optim_d.zero_grad()
mpd_score_real, mpd_score_gen, _, _ = self.mpd(y=audio, y_hat=audio_pred.detach())
loss_disc_mpd, _, _ = self.discriminator_loss(
disc_real_outputs=mpd_score_real, disc_generated_outputs=mpd_score_gen
)
mrd_score_real, mrd_score_gen, _, _ = self.mrd(y=audio, y_hat=audio_pred.detach())
loss_disc_mrd, _, _ = self.discriminator_loss(
disc_real_outputs=mrd_score_real, disc_generated_outputs=mrd_score_gen
)
loss_d = loss_disc_mrd + loss_disc_mpd
self.manual_backward(loss_d)
optim_d.step()
# Train generator
optim_g.zero_grad()
loss_sc, loss_mag = self.mrstft_loss(x=audio_pred.squeeze(1), y=audio.squeeze(1), input_lengths=audio_len)
loss_sc = torch.stack(loss_sc).mean()
loss_mag = torch.stack(loss_mag).mean()
loss_mrstft = (loss_sc + loss_mag) * self.stft_lamb
_, mpd_score_gen, _, _ = self.mpd(y=audio, y_hat=audio_pred)
_, mrd_score_gen, _, _ = self.mrd(y=audio, y_hat=audio_pred)
loss_gen_mpd, _ = self.generator_loss(disc_outputs=mpd_score_gen)
loss_gen_mrd, _ = self.generator_loss(disc_outputs=mrd_score_gen)
loss_g = loss_gen_mrd + loss_gen_mpd + loss_mrstft
self.manual_backward(loss_g)
optim_g.step()
metrics = {
"g_loss_sc": loss_sc,
"g_loss_mag": loss_mag,
"g_loss_mrstft": loss_mrstft,
"g_loss_gen_mpd": loss_gen_mpd,
"g_loss_gen_mrd": loss_gen_mrd,
"g_loss": loss_g,
"d_loss_mpd": loss_disc_mpd,
"d_loss_mrd": loss_disc_mrd,
"d_loss": loss_d,
"global_step": self.global_step,
"lr": optim_g.param_groups[0]['lr'],
}
self.log_dict(metrics, on_step=True, sync_dist=True)
self.log("g_mrstft_loss", loss_mrstft, prog_bar=True, logger=False, sync_dist=True)
def validation_step(self, batch, batch_idx):
if self.input_as_mel:
audio, audio_len, audio_mel = batch
audio_mel_len = [audio_mel.shape[1]] * audio_mel.shape[0]
else:
audio, audio_len = batch
audio_mel, audio_mel_len = self.audio_to_melspec_precessor(audio, audio_len)
audio_pred = self(spec=audio_mel)
# Perform bias denoising
pred_denoised = self._bias_denoise(audio_pred, audio_mel).squeeze(1)
pred_denoised_mel, _ = self.audio_to_melspec_precessor(pred_denoised, audio_len)
if self.input_as_mel:
gt_mel, gt_mel_len = self.audio_to_melspec_precessor(audio, audio_len)
audio_pred_mel, _ = self.audio_to_melspec_precessor(audio_pred.squeeze(1), audio_len)
loss_mel = F.l1_loss(audio_mel, audio_pred_mel)
self.log_dict({"val_loss": loss_mel}, on_epoch=True, sync_dist=True)
# Plot audio once per epoch
if batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB:
clips = []
specs = []
for i in range(min(5, audio.shape[0])):
clips += [
wandb.Audio(
audio[i, : audio_len[i]].data.cpu().numpy(),
caption=f"real audio {i}",
sample_rate=self.sample_rate,
),
wandb.Audio(
audio_pred[i, 0, : audio_len[i]].data.cpu().numpy().astype('float32'),
caption=f"generated audio {i}",
sample_rate=self.sample_rate,
),
wandb.Audio(
pred_denoised[i, : audio_len[i]].data.cpu().numpy(),
caption=f"denoised audio {i}",
sample_rate=self.sample_rate,
),
]
specs += [
wandb.Image(
plot_spectrogram_to_numpy(audio_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"input mel {i}",
),
wandb.Image(
plot_spectrogram_to_numpy(audio_pred_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"output mel {i}",
),
wandb.Image(
plot_spectrogram_to_numpy(pred_denoised_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"denoised mel {i}",
),
]
if self.input_as_mel:
specs += [
wandb.Image(
plot_spectrogram_to_numpy(gt_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"gt mel {i}",
),
]
self.logger.experiment.log({"audio": clips, "specs": specs})
def _bias_denoise(self, audio, mel):
def stft(x):
comp = torch.stft(x.squeeze(1), n_fft=1024, hop_length=256, win_length=1024, return_complex=True)
comp = torch.view_as_real(comp)
real, imag = comp[..., 0], comp[..., 1]
mags = torch.sqrt(real ** 2 + imag ** 2)
phase = torch.atan2(imag, real)
return mags, phase
def istft(mags, phase):
comp = torch.stack([mags * torch.cos(phase), mags * torch.sin(phase)], dim=-1)
x = torch.istft(torch.view_as_complex(comp), n_fft=1024, hop_length=256, win_length=1024)
return x
# Create bias tensor
if self.stft_bias is None or self.stft_bias.shape[0] != audio.shape[0]:
audio_bias = self(spec=torch.zeros_like(mel, device=mel.device))
self.stft_bias, _ = stft(audio_bias)
self.stft_bias = self.stft_bias[:, :, 0][:, :, None]
audio_mags, audio_phase = stft(audio)
audio_mags = audio_mags - self.cfg.get("denoise_strength", 0.0025) * self.stft_bias
audio_mags = torch.clamp(audio_mags, 0.0)
audio_denoised = istft(audio_mags, audio_phase).unsqueeze(1)
return audio_denoised
def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = "train"):
if "dataset" not in cfg or not isinstance(cfg.dataset, DictConfig):
raise ValueError(f"No dataset for {name}")
if "dataloader_params" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):
raise ValueError(f"No dataloder_params for {name}")
if shuffle_should_be:
if 'shuffle' not in cfg.dataloader_params:
logging.warning(
f"Shuffle should be set to True for {self}'s {name} dataloader but was not found in its "
"config. Manually setting to True"
)
with open_dict(cfg["dataloader_params"]):
cfg.dataloader_params.shuffle = True
elif not cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to False!!!")
elif not shuffle_should_be and cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to True!!!")
dataset = instantiate(cfg.dataset)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
def setup_training_data(self, cfg):
self._train_dl = self.__setup_dataloader_from_config(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self.__setup_dataloader_from_config(cfg, shuffle_should_be=False, name="validation")
def setup_test_data(self, cfg):
pass
@classmethod
def list_available_models(cls) -> 'Optional[Dict[str, str]]':
list_of_models = []
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_univnet",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_univnet/versions/1.7.0/files/tts_en_lj_univnet.nemo",
description="This model is trained on LJSpeech sampled at 22050Hz, and has been tested on generating female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
model = PretrainedModelInfo(
pretrained_model_name="tts_en_libritts_univnet",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_libritts_univnet/versions/1.7.0/files/tts_en_libritts_multispeaker_univnet.nemo",
description="This model is trained on all LibriTTS training data (train-clean-100, train-clean-360, and train-other-500) sampled at 22050Hz, and has been tested on generating English voices.",
class_=cls,
)
list_of_models.append(model)
return list_of_models
# Methods for model exportability
def _prepare_for_export(self, **kwargs):
if self.generator is not None:
try:
self.generator.remove_weight_norm()
except ValueError:
return
@property
def input_types(self):
return {
"spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
}
@property
def output_types(self):
return {
"audio": NeuralType(('B', 'S', 'T'), AudioSignal(self.sample_rate)),
}
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
par = next(self.parameters())
mel = torch.randn((max_batch, self.cfg['preprocessor']['nfilt'], max_dim), device=par.device, dtype=par.dtype)
return ({'spec': mel},)
def forward_for_export(self, spec):
"""
Runs the generator; for inputs and outputs see input_types and output_types.
"""
return self.generator(x=spec)
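# --- Illustrative usage sketch (not part of the original NeMo source) ---
# A minimal, hedged vocoding example using the model above. from_pretrained() downloads
# the checkpoint from NGC; the random mel tensor is only meant to exercise the API and
# will not produce meaningful audio.
if __name__ == "__main__":
    model = UnivNetModel.from_pretrained("tts_en_lj_univnet")
    model.eval()
    n_mels = model.cfg.preprocessor.nfilt  # number of mel bins the generator was trained with
    mel = torch.randn(1, n_mels, 200)
    with torch.no_grad():
        audio = model.convert_spectrogram_to_audio(spec=mel)
    print(audio.shape)  # (1, T_audio)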
| NeMo-main | nemo/collections/tts/models/univnet.py |
# Copyright (c) 2020, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.tts.models.aligner import AlignerModel
from nemo.collections.tts.models.audio_codec import AudioCodecModel
from nemo.collections.tts.models.fastpitch import FastPitchModel
from nemo.collections.tts.models.fastpitch_ssl import FastPitchModel_SSL
from nemo.collections.tts.models.hifigan import HifiGanModel
from nemo.collections.tts.models.mixer_tts import MixerTTSModel
from nemo.collections.tts.models.radtts import RadTTSModel
from nemo.collections.tts.models.spectrogram_enhancer import SpectrogramEnhancerModel
from nemo.collections.tts.models.ssl_tts import SSLDisentangler
from nemo.collections.tts.models.tacotron2 import Tacotron2Model
from nemo.collections.tts.models.two_stages import GriffinLimModel, MelPsuedoInverseModel, TwoStagesModel
from nemo.collections.tts.models.univnet import UnivNetModel
from nemo.collections.tts.models.vits import VitsModel
from nemo.collections.tts.models.waveglow import WaveGlowModel
__all__ = [
"AlignerModel",
"AudioCodecModel",
"FastPitchModel",
"FastPitchModel_SSL",
"SSLDisentangler",
"GriffinLimModel",
"HifiGanModel",
"MelPsuedoInverseModel",
"MixerTTSModel",
"RadTTSModel",
"Tacotron2Model",
"TwoStagesModel",
"UnivNetModel",
"VitsModel",
"WaveGlowModel",
"SpectrogramEnhancerModel",
]
| NeMo-main | nemo/collections/tts/models/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from typing import List, Optional
import numpy as np
import omegaconf
import torch
import transformers
import wandb
from hydra.utils import instantiate
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger
from torch import nn
from torch.nn import functional as F
from transformers import AlbertTokenizer
from nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers import (
EnglishCharsTokenizer,
EnglishPhonemesTokenizer,
)
from nemo.collections.tts.losses.aligner_loss import BinLoss, ForwardSumLoss
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.modules.fastpitch import average_features, regulate_len
from nemo.collections.tts.parts.utils.helpers import (
binarize_attention_parallel,
g2p_backward_compatible_support,
get_mask_from_lengths,
plot_pitch_to_numpy,
plot_spectrogram_to_numpy,
)
from nemo.core import Exportable
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import (
LengthsType,
LogprobsType,
MelSpectrogramType,
ProbsType,
RegressionValuesType,
TokenDurationType,
TokenIndex,
TokenLogDurationType,
)
from nemo.core.neural_types.neural_type import NeuralType
from nemo.utils import logging, model_utils
class MixerTTSModel(SpectrogramGenerator, Exportable):
"""Mixer-TTS and Mixer-TTS-X models (https://arxiv.org/abs/2110.03584) that is used to generate mel spectrogram from text."""
def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
# Setup normalizer
self.normalizer = None
self.text_normalizer_call = None
self.text_normalizer_call_kwargs = {}
self._setup_normalizer(cfg)
# Setup tokenizer
self.tokenizer = None
self._setup_tokenizer(cfg)
assert self.tokenizer is not None
num_tokens = len(self.tokenizer.tokens)
self.tokenizer_pad = self.tokenizer.pad
self.tokenizer_unk = self.tokenizer.oov
super().__init__(cfg=cfg, trainer=trainer)
self.pitch_loss_scale = cfg.pitch_loss_scale
self.durs_loss_scale = cfg.durs_loss_scale
self.mel_loss_scale = cfg.mel_loss_scale
self.aligner = instantiate(cfg.alignment_module)
self.forward_sum_loss = ForwardSumLoss()
self.bin_loss = BinLoss()
self.add_bin_loss = False
self.bin_loss_scale = 0.0
self.bin_loss_start_ratio = cfg.bin_loss_start_ratio
self.bin_loss_warmup_epochs = cfg.bin_loss_warmup_epochs
self.cond_on_lm_embeddings = cfg.get("cond_on_lm_embeddings", False)
if self.cond_on_lm_embeddings:
self.lm_padding_value = (
self._train_dl.dataset.lm_padding_value
if self._train_dl is not None
else self._get_lm_padding_value(cfg.lm_model)
)
self.lm_embeddings = self._get_lm_embeddings(cfg.lm_model)
self.lm_embeddings.weight.requires_grad = False
self.self_attention_module = instantiate(
cfg.self_attention_module, n_lm_tokens_channels=self.lm_embeddings.weight.shape[1]
)
self.encoder = instantiate(cfg.encoder, num_tokens=num_tokens, padding_idx=self.tokenizer_pad)
self.symbol_emb = self.encoder.to_embed
self.duration_predictor = instantiate(cfg.duration_predictor)
self.pitch_mean, self.pitch_std = float(cfg.pitch_mean), float(cfg.pitch_std)
self.pitch_predictor = instantiate(cfg.pitch_predictor)
self.pitch_emb = instantiate(cfg.pitch_emb)
self.preprocessor = instantiate(cfg.preprocessor)
self.decoder = instantiate(cfg.decoder)
self.proj = nn.Linear(self.decoder.d_model, cfg.n_mel_channels)
def _setup_normalizer(self, cfg):
if "text_normalizer" in cfg:
normalizer_kwargs = {}
if "whitelist" in cfg.text_normalizer:
normalizer_kwargs["whitelist"] = self.register_artifact(
'text_normalizer.whitelist', cfg.text_normalizer.whitelist
)
try:
import nemo_text_processing
self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs)
except Exception as e:
logging.error(e)
raise ImportError(
"`nemo_text_processing` not installed, see https://github.com/NVIDIA/NeMo-text-processing for more details"
)
self.text_normalizer_call = self.normalizer.normalize
if "text_normalizer_call_kwargs" in cfg:
self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs
def _setup_tokenizer(self, cfg):
text_tokenizer_kwargs = {}
if "g2p" in cfg.text_tokenizer:
# for backward compatibility
if (
self._is_model_being_restored()
and (cfg.text_tokenizer.g2p.get('_target_', None) is not None)
and cfg.text_tokenizer.g2p["_target_"].startswith("nemo_text_processing.g2p")
):
cfg.text_tokenizer.g2p["_target_"] = g2p_backward_compatible_support(
cfg.text_tokenizer.g2p["_target_"]
)
g2p_kwargs = {}
if "phoneme_dict" in cfg.text_tokenizer.g2p:
g2p_kwargs["phoneme_dict"] = self.register_artifact(
'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict,
)
if "heteronyms" in cfg.text_tokenizer.g2p:
g2p_kwargs["heteronyms"] = self.register_artifact(
'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms,
)
text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs)
self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs)
def _get_lm_model_tokenizer(self, lm_model="albert"):
if getattr(self, "_lm_model_tokenizer", None) is not None:
return self._lm_model_tokenizer
if self._train_dl is not None and self._train_dl.dataset is not None:
self._lm_model_tokenizer = self._train_dl.dataset.lm_model_tokenizer
if lm_model == "albert":
self._lm_model_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
return self._lm_model_tokenizer
def _get_lm_embeddings(self, lm_model="albert"):
if lm_model == "albert":
return transformers.AlbertModel.from_pretrained('albert-base-v2').embeddings.word_embeddings
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
def _get_lm_padding_value(self, lm_model="albert"):
if lm_model == "albert":
return transformers.AlbertTokenizer.from_pretrained('albert-base-v2')._convert_token_to_id('<pad>')
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
def _metrics(
self,
true_durs,
true_text_len,
pred_durs,
true_pitch,
pred_pitch,
true_spect=None,
pred_spect=None,
true_spect_len=None,
attn_logprob=None,
attn_soft=None,
attn_hard=None,
attn_hard_dur=None,
):
text_mask = get_mask_from_lengths(true_text_len)
mel_mask = get_mask_from_lengths(true_spect_len)
loss = 0.0
# Dur loss and metrics
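        # NOTE: durations are regressed in the log(1 + d) domain: the MSE target below is
        # log(true_durs + 1), and predictions are mapped back with exp(x) - 1 before rounding
        # to integer frame counts for the accuracy metrics.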
durs_loss = F.mse_loss(pred_durs, (true_durs + 1).float().log(), reduction='none')
durs_loss = durs_loss * text_mask.float()
durs_loss = durs_loss.sum() / text_mask.sum()
durs_pred = pred_durs.exp() - 1
durs_pred = torch.clamp_min(durs_pred, min=0)
durs_pred = durs_pred.round().long()
acc = ((true_durs == durs_pred) * text_mask).sum().float() / text_mask.sum() * 100
acc_dist_1 = (((true_durs - durs_pred).abs() <= 1) * text_mask).sum().float() / text_mask.sum() * 100
acc_dist_3 = (((true_durs - durs_pred).abs() <= 3) * text_mask).sum().float() / text_mask.sum() * 100
pred_spect = pred_spect.transpose(1, 2)
# Mel loss
mel_loss = F.mse_loss(pred_spect, true_spect, reduction='none').mean(dim=-2)
mel_loss = mel_loss * mel_mask.float()
mel_loss = mel_loss.sum() / mel_mask.sum()
loss = loss + self.durs_loss_scale * durs_loss + self.mel_loss_scale * mel_loss
# Aligner loss
bin_loss, ctc_loss = None, None
ctc_loss = self.forward_sum_loss(attn_logprob=attn_logprob, in_lens=true_text_len, out_lens=true_spect_len)
loss = loss + ctc_loss
if self.add_bin_loss:
bin_loss = self.bin_loss(hard_attention=attn_hard, soft_attention=attn_soft)
loss = loss + self.bin_loss_scale * bin_loss
true_avg_pitch = average_features(true_pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
# Pitch loss
pitch_loss = F.mse_loss(pred_pitch, true_avg_pitch, reduction='none') # noqa
pitch_loss = (pitch_loss * text_mask).sum() / text_mask.sum()
loss = loss + self.pitch_loss_scale * pitch_loss
return loss, durs_loss, acc, acc_dist_1, acc_dist_3, pitch_loss, mel_loss, ctc_loss, bin_loss
@torch.jit.unused
def run_aligner(self, text, text_len, text_mask, spect, spect_len, attn_prior):
text_emb = self.symbol_emb(text)
attn_soft, attn_logprob = self.aligner(
spect, text_emb.permute(0, 2, 1), mask=text_mask == 0, attn_prior=attn_prior,
)
attn_hard = binarize_attention_parallel(attn_soft, text_len, spect_len)
attn_hard_dur = attn_hard.sum(2)[:, 0, :]
assert torch.all(torch.eq(attn_hard_dur.sum(dim=1), spect_len))
return attn_soft, attn_logprob, attn_hard, attn_hard_dur
@typecheck(
input_types={
"text": NeuralType(('B', 'T_text'), TokenIndex()),
"text_len": NeuralType(('B',), LengthsType()),
"pitch": NeuralType(('B', 'T_audio'), RegressionValuesType(), optional=True),
"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType(), optional=True),
"spect_len": NeuralType(('B',), LengthsType(), optional=True),
"attn_prior": NeuralType(('B', 'T_spec', 'T_text'), ProbsType(), optional=True),
"lm_tokens": NeuralType(('B', 'T_lm_tokens'), TokenIndex(), optional=True),
},
output_types={
"pred_spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
"durs_predicted": NeuralType(('B', 'T_text'), TokenDurationType()),
"log_durs_predicted": NeuralType(('B', 'T_text'), TokenLogDurationType()),
"pitch_predicted": NeuralType(('B', 'T_text'), RegressionValuesType()),
"attn_soft": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
"attn_logprob": NeuralType(('B', 'S', 'T_spec', 'T_text'), LogprobsType()),
"attn_hard": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
"attn_hard_dur": NeuralType(('B', 'T_text'), TokenDurationType()),
},
)
def forward(self, text, text_len, pitch=None, spect=None, spect_len=None, attn_prior=None, lm_tokens=None):
if self.training:
assert pitch is not None
text_mask = get_mask_from_lengths(text_len).unsqueeze(2)
enc_out, enc_mask = self.encoder(text, text_mask)
# Aligner
attn_soft, attn_logprob, attn_hard, attn_hard_dur = None, None, None, None
if spect is not None:
attn_soft, attn_logprob, attn_hard, attn_hard_dur = self.run_aligner(
text, text_len, text_mask, spect, spect_len, attn_prior
)
if self.cond_on_lm_embeddings:
lm_emb = self.lm_embeddings(lm_tokens)
lm_features = self.self_attention_module(
enc_out, lm_emb, lm_emb, q_mask=enc_mask.squeeze(2), kv_mask=lm_tokens != self.lm_padding_value
)
# Duration predictor
log_durs_predicted = self.duration_predictor(enc_out, enc_mask)
durs_predicted = torch.clamp(log_durs_predicted.exp() - 1, 0)
# Pitch predictor
pitch_predicted = self.pitch_predictor(enc_out, enc_mask)
# Avg pitch, add pitch_emb
if not self.training:
if pitch is not None:
pitch = average_features(pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
else:
pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))
else:
pitch = average_features(pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
enc_out = enc_out + pitch_emb.transpose(1, 2)
if self.cond_on_lm_embeddings:
enc_out = enc_out + lm_features
# Regulate length
len_regulated_enc_out, dec_lens = regulate_len(attn_hard_dur, enc_out)
dec_out, dec_lens = self.decoder(len_regulated_enc_out, get_mask_from_lengths(dec_lens).unsqueeze(2))
pred_spect = self.proj(dec_out)
return (
pred_spect,
durs_predicted,
log_durs_predicted,
pitch_predicted,
attn_soft,
attn_logprob,
attn_hard,
attn_hard_dur,
)
def infer(
self,
text,
text_len=None,
text_mask=None,
spect=None,
spect_len=None,
attn_prior=None,
use_gt_durs=False,
lm_tokens=None,
pitch=None,
):
if text_mask is None:
text_mask = get_mask_from_lengths(text_len).unsqueeze(2)
enc_out, enc_mask = self.encoder(text, text_mask)
# Aligner
attn_hard_dur = None
if use_gt_durs:
attn_soft, attn_logprob, attn_hard, attn_hard_dur = self.run_aligner(
text, text_len, text_mask, spect, spect_len, attn_prior
)
if self.cond_on_lm_embeddings:
lm_emb = self.lm_embeddings(lm_tokens)
lm_features = self.self_attention_module(
enc_out, lm_emb, lm_emb, q_mask=enc_mask.squeeze(2), kv_mask=lm_tokens != self.lm_padding_value
)
# Duration predictor
log_durs_predicted = self.duration_predictor(enc_out, enc_mask)
durs_predicted = torch.clamp(log_durs_predicted.exp() - 1, 0)
# Avg pitch, pitch predictor
if use_gt_durs and pitch is not None:
pitch = average_features(pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
else:
pitch_predicted = self.pitch_predictor(enc_out, enc_mask)
pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))
# Add pitch emb
enc_out = enc_out + pitch_emb.transpose(1, 2)
if self.cond_on_lm_embeddings:
enc_out = enc_out + lm_features
if use_gt_durs:
if attn_hard_dur is not None:
len_regulated_enc_out, dec_lens = regulate_len(attn_hard_dur, enc_out)
else:
raise NotImplementedError
else:
len_regulated_enc_out, dec_lens = regulate_len(durs_predicted, enc_out)
dec_out, _ = self.decoder(len_regulated_enc_out, get_mask_from_lengths(dec_lens).unsqueeze(2))
pred_spect = self.proj(dec_out)
return pred_spect
def on_train_epoch_start(self):
bin_loss_start_epoch = np.ceil(self.bin_loss_start_ratio * self._trainer.max_epochs)
# Add bin loss when current_epoch >= bin_start_epoch
if not self.add_bin_loss and self.current_epoch >= bin_loss_start_epoch:
logging.info(f"Using hard attentions after epoch: {self.current_epoch}")
self.add_bin_loss = True
if self.add_bin_loss:
self.bin_loss_scale = min((self.current_epoch - bin_loss_start_epoch) / self.bin_loss_warmup_epochs, 1.0)
def training_step(self, batch, batch_idx):
attn_prior, lm_tokens = None, None
if self.cond_on_lm_embeddings:
audio, audio_len, text, text_len, attn_prior, pitch, _, lm_tokens = batch
else:
audio, audio_len, text, text_len, attn_prior, pitch, _ = batch
spect, spect_len = self.preprocessor(input_signal=audio, length=audio_len)
# pitch normalization
zero_pitch_idx = pitch == 0
pitch = (pitch - self.pitch_mean) / self.pitch_std
pitch[zero_pitch_idx] = 0.0
(pred_spect, _, pred_log_durs, pred_pitch, attn_soft, attn_logprob, attn_hard, attn_hard_dur,) = self(
text=text,
text_len=text_len,
pitch=pitch,
spect=spect,
spect_len=spect_len,
attn_prior=attn_prior,
lm_tokens=lm_tokens,
)
(loss, durs_loss, acc, acc_dist_1, acc_dist_3, pitch_loss, mel_loss, ctc_loss, bin_loss,) = self._metrics(
pred_durs=pred_log_durs,
pred_pitch=pred_pitch,
true_durs=attn_hard_dur,
true_text_len=text_len,
true_pitch=pitch,
true_spect=spect,
pred_spect=pred_spect,
true_spect_len=spect_len,
attn_logprob=attn_logprob,
attn_soft=attn_soft,
attn_hard=attn_hard,
attn_hard_dur=attn_hard_dur,
)
train_log = {
'train_loss': loss,
'train_durs_loss': durs_loss,
'train_pitch_loss': torch.tensor(1.0).to(durs_loss.device) if pitch_loss is None else pitch_loss,
'train_mel_loss': mel_loss,
'train_durs_acc': acc,
'train_durs_acc_dist_3': acc_dist_3,
'train_ctc_loss': torch.tensor(1.0).to(durs_loss.device) if ctc_loss is None else ctc_loss,
'train_bin_loss': torch.tensor(1.0).to(durs_loss.device) if bin_loss is None else bin_loss,
}
return {'loss': loss, 'progress_bar': {k: v.detach() for k, v in train_log.items()}, 'log': train_log}
def validation_step(self, batch, batch_idx):
attn_prior, lm_tokens = None, None
if self.cond_on_lm_embeddings:
audio, audio_len, text, text_len, attn_prior, pitch, _, lm_tokens = batch
else:
audio, audio_len, text, text_len, attn_prior, pitch, _ = batch
spect, spect_len = self.preprocessor(input_signal=audio, length=audio_len)
# pitch normalization
zero_pitch_idx = pitch == 0
pitch = (pitch - self.pitch_mean) / self.pitch_std
pitch[zero_pitch_idx] = 0.0
(pred_spect, _, pred_log_durs, pred_pitch, attn_soft, attn_logprob, attn_hard, attn_hard_dur,) = self(
text=text,
text_len=text_len,
pitch=pitch,
spect=spect,
spect_len=spect_len,
attn_prior=attn_prior,
lm_tokens=lm_tokens,
)
(loss, durs_loss, acc, acc_dist_1, acc_dist_3, pitch_loss, mel_loss, ctc_loss, bin_loss,) = self._metrics(
pred_durs=pred_log_durs,
pred_pitch=pred_pitch,
true_durs=attn_hard_dur,
true_text_len=text_len,
true_pitch=pitch,
true_spect=spect,
pred_spect=pred_spect,
true_spect_len=spect_len,
attn_logprob=attn_logprob,
attn_soft=attn_soft,
attn_hard=attn_hard,
attn_hard_dur=attn_hard_dur,
)
# without ground truth internal features except for durations
pred_spect, _, pred_log_durs, pred_pitch, attn_soft, attn_logprob, attn_hard, attn_hard_dur = self(
text=text,
text_len=text_len,
pitch=None,
spect=spect,
spect_len=spect_len,
attn_prior=attn_prior,
lm_tokens=lm_tokens,
)
*_, with_pred_features_mel_loss, _, _ = self._metrics(
pred_durs=pred_log_durs,
pred_pitch=pred_pitch,
true_durs=attn_hard_dur,
true_text_len=text_len,
true_pitch=pitch,
true_spect=spect,
pred_spect=pred_spect,
true_spect_len=spect_len,
attn_logprob=attn_logprob,
attn_soft=attn_soft,
attn_hard=attn_hard,
attn_hard_dur=attn_hard_dur,
)
val_log = {
'val_loss': loss,
'val_durs_loss': durs_loss,
'val_pitch_loss': torch.tensor(1.0).to(durs_loss.device) if pitch_loss is None else pitch_loss,
'val_mel_loss': mel_loss,
'val_with_pred_features_mel_loss': with_pred_features_mel_loss,
'val_durs_acc': acc,
'val_durs_acc_dist_3': acc_dist_3,
'val_ctc_loss': torch.tensor(1.0).to(durs_loss.device) if ctc_loss is None else ctc_loss,
'val_bin_loss': torch.tensor(1.0).to(durs_loss.device) if bin_loss is None else bin_loss,
}
self.log_dict(val_log, prog_bar=False, on_epoch=True, logger=True, sync_dist=True)
if batch_idx == 0 and self.current_epoch % 5 == 0 and isinstance(self.logger, WandbLogger):
specs = []
pitches = []
for i in range(min(3, spect.shape[0])):
specs += [
wandb.Image(
plot_spectrogram_to_numpy(spect[i, :, : spect_len[i]].data.cpu().numpy()),
caption=f"gt mel {i}",
),
wandb.Image(
plot_spectrogram_to_numpy(pred_spect.transpose(1, 2)[i, :, : spect_len[i]].data.cpu().numpy()),
caption=f"pred mel {i}",
),
]
pitches += [
wandb.Image(
plot_pitch_to_numpy(
average_features(pitch.unsqueeze(1), attn_hard_dur)
.squeeze(1)[i, : text_len[i]]
.data.cpu()
.numpy(),
ylim_range=[-2.5, 2.5],
),
caption=f"gt pitch {i}",
),
]
pitches += [
wandb.Image(
plot_pitch_to_numpy(pred_pitch[i, : text_len[i]].data.cpu().numpy(), ylim_range=[-2.5, 2.5]),
caption=f"pred pitch {i}",
),
]
self.logger.experiment.log({"specs": specs, "pitches": pitches})
@typecheck(
input_types={
"tokens": NeuralType(('B', 'T_text'), TokenIndex(), optional=True),
"tokens_len": NeuralType(('B'), LengthsType(), optional=True),
"lm_tokens": NeuralType(('B', 'T_lm_tokens'), TokenIndex(), optional=True),
"raw_texts": [NeuralType(optional=True)],
"lm_model": NeuralType(optional=True),
},
output_types={"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),},
)
def generate_spectrogram(
self,
tokens: Optional[torch.Tensor] = None,
tokens_len: Optional[torch.Tensor] = None,
lm_tokens: Optional[torch.Tensor] = None,
raw_texts: Optional[List[str]] = None,
norm_text_for_lm_model: bool = True,
lm_model: str = "albert",
):
if tokens is not None:
if tokens_len is None:
# It is assumed that padding is consecutive and only at the end
tokens_len = (tokens != self.tokenizer.pad).sum(dim=-1)
else:
if raw_texts is None:
raise ValueError("raw_texts must be specified if tokens is None")
t_seqs = [self.tokenizer(t) for t in raw_texts]
tokens = torch.nn.utils.rnn.pad_sequence(
sequences=[torch.tensor(t, dtype=torch.long, device=self.device) for t in t_seqs],
batch_first=True,
padding_value=self.tokenizer.pad,
)
tokens_len = torch.tensor([len(t) for t in t_seqs], dtype=torch.long, device=tokens.device)
if self.cond_on_lm_embeddings and lm_tokens is None:
if raw_texts is None:
raise ValueError("raw_texts must be specified if lm_tokens is None")
lm_model_tokenizer = self._get_lm_model_tokenizer(lm_model)
lm_padding_value = lm_model_tokenizer._convert_token_to_id('<pad>')
lm_space_value = lm_model_tokenizer._convert_token_to_id('▁')
assert isinstance(self.tokenizer, EnglishCharsTokenizer) or isinstance(
self.tokenizer, EnglishPhonemesTokenizer
)
if norm_text_for_lm_model and self.text_normalizer_call is not None:
raw_texts = [self.text_normalizer_call(t, **self.text_normalizer_call_kwargs) for t in raw_texts]
preprocess_texts_as_tts_input = [self.tokenizer.text_preprocessing_func(t) for t in raw_texts]
lm_tokens_as_ids_list = [
lm_model_tokenizer.encode(t, add_special_tokens=False) for t in preprocess_texts_as_tts_input
]
if self.tokenizer.pad_with_space:
lm_tokens_as_ids_list = [[lm_space_value] + t + [lm_space_value] for t in lm_tokens_as_ids_list]
lm_tokens = torch.full(
(len(lm_tokens_as_ids_list), max([len(t) for t in lm_tokens_as_ids_list])),
fill_value=lm_padding_value,
device=tokens.device,
)
for i, lm_tokens_i in enumerate(lm_tokens_as_ids_list):
lm_tokens[i, : len(lm_tokens_i)] = torch.tensor(lm_tokens_i, device=tokens.device)
pred_spect = self.infer(tokens, tokens_len, lm_tokens=lm_tokens).transpose(1, 2)
return pred_spect
def parse(self, text: str, normalize=True) -> torch.Tensor:
if self.training:
logging.warning("parse() is meant to be called in eval mode.")
if normalize and self.text_normalizer_call is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_kwargs)
eval_phon_mode = contextlib.nullcontext()
if hasattr(self.tokenizer, "set_phone_prob"):
eval_phon_mode = self.tokenizer.set_phone_prob(prob=1.0)
with eval_phon_mode:
tokens = self.tokenizer.encode(text)
return torch.tensor(tokens).long().unsqueeze(0).to(self.device)
def _loader(self, cfg):
try:
_ = cfg.dataset.manifest_filepath
except omegaconf.errors.MissingMandatoryValue:
logging.warning("manifest_filepath was skipped. No dataset for this model.")
return None
dataset = instantiate(
cfg.dataset,
text_normalizer=self.normalizer,
text_normalizer_call_kwargs=self.text_normalizer_call_kwargs,
text_tokenizer=self.tokenizer,
)
return torch.utils.data.DataLoader( # noqa
dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params,
)
def setup_training_data(self, cfg):
self._train_dl = self._loader(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self._loader(cfg)
def setup_test_data(self, cfg):
"""Omitted."""
pass
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_mixertts",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_mixertts/versions/1.6.0/files/tts_en_lj_mixertts.nemo",
description="This model is trained on LJSpeech sampled at 22050Hz with and can be used to generate female English voices with an American accent.",
class_=cls, # noqa
)
list_of_models.append(model)
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_mixerttsx",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_mixerttsx/versions/1.6.0/files/tts_en_lj_mixerttsx.nemo",
description="This model is trained on LJSpeech sampled at 22050Hz with and can be used to generate female English voices with an American accent.",
class_=cls, # noqa
)
list_of_models.append(model)
return list_of_models
# Methods for model exportability
@property
def input_types(self):
return {
"text": NeuralType(('B', 'T_text'), TokenIndex()),
"lm_tokens": NeuralType(('B', 'T_lm_tokens'), TokenIndex(), optional=True),
}
@property
def output_types(self):
return {
"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
}
def input_example(self, max_text_len=10, max_lm_tokens_len=10):
text = torch.randint(
low=0, high=len(self.tokenizer.tokens), size=(1, max_text_len), device=self.device, dtype=torch.long,
)
inputs = {'text': text}
if self.cond_on_lm_embeddings:
inputs['lm_tokens'] = torch.randint(
low=0,
high=self.lm_embeddings.weight.shape[0],
size=(1, max_lm_tokens_len),
device=self.device,
dtype=torch.long,
)
return (inputs,)
def forward_for_export(self, text, lm_tokens=None):
text_mask = (text != self.tokenizer_pad).unsqueeze(2)
spect = self.infer(text=text, text_mask=text_mask, lm_tokens=lm_tokens).transpose(1, 2)
return spect.to(torch.float)
| NeMo-main | nemo/collections/tts/models/mixer_tts.py |
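# --- Editor's note: hedged usage sketch (not part of the NeMo source above) ---
# A minimal example of driving MixerTTSModel at inference time. It assumes that
# MixerTTSModel is exported from nemo.collections.tts.models and that the NGC
# checkpoint name listed in list_available_models() above ("tts_en_lj_mixertts")
# is reachable; only methods defined in the class above are used.
import torch

from nemo.collections.tts.models import MixerTTSModel

model = MixerTTSModel.from_pretrained("tts_en_lj_mixertts").eval()

with torch.no_grad():
    # Tokenize explicitly, then synthesize from token ids ...
    tokens = model.parse("Mixer TTS converts text into a mel spectrogram.")
    spec = model.generate_spectrogram(tokens=tokens)  # (B, n_mel_channels, T_spec)
    # ... or pass raw text and let generate_spectrogram() tokenize internally.
    spec = model.generate_spectrogram(raw_texts=["Mixer TTS converts text into a mel spectrogram."])
# -------------------------------------------------------------------------------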
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from pathlib import Path
import torch
import torch.nn.functional as F
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning.loggers.wandb import WandbLogger
from nemo.collections.tts.losses.hifigan_losses import DiscriminatorLoss, FeatureMatchingLoss, GeneratorLoss
from nemo.collections.tts.models.base import Vocoder
from nemo.collections.tts.modules.hifigan_modules import MultiPeriodDiscriminator, MultiScaleDiscriminator
from nemo.collections.tts.parts.utils.callbacks import LoggingCallback
from nemo.collections.tts.parts.utils.helpers import get_batch_size, get_num_workers, plot_spectrogram_to_numpy
from nemo.core.classes import Exportable
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import AudioSignal, MelSpectrogramType
from nemo.core.neural_types.neural_type import NeuralType
from nemo.core.optim.lr_scheduler import compute_max_steps, prepare_lr_scheduler
from nemo.utils import logging, model_utils
HAVE_WANDB = True
try:
import wandb
except ModuleNotFoundError:
HAVE_WANDB = False
class HifiGanModel(Vocoder, Exportable):
"""
HiFi-GAN model (https://arxiv.org/abs/2010.05646) that is used to generate audio from mel spectrogram.
"""
def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
self.ds_class = cfg.train_ds.dataset._target_
super().__init__(cfg=cfg, trainer=trainer)
self.audio_to_melspec_precessor = instantiate(cfg.preprocessor)
        # We use a separate preprocessor for training because we need to pass gradients and remove the pitch fmax limitation
self.trg_melspec_fn = instantiate(cfg.preprocessor, highfreq=None, use_grads=True)
self.generator = instantiate(cfg.generator)
self.mpd = MultiPeriodDiscriminator(debug=cfg.debug if "debug" in cfg else False)
self.msd = MultiScaleDiscriminator(debug=cfg.debug if "debug" in cfg else False)
self.feature_loss = FeatureMatchingLoss()
self.discriminator_loss = DiscriminatorLoss()
self.generator_loss = GeneratorLoss()
self.l1_factor = cfg.get("l1_loss_factor", 45)
self.sample_rate = self._cfg.preprocessor.sample_rate
self.stft_bias = None
self.input_as_mel = False
if self._train_dl:
self.input_as_mel = self._train_dl.dataset.load_precomputed_mel
self.log_audio = cfg.get("log_audio", False)
self.log_config = cfg.get("log_config", None)
self.lr_schedule_interval = None
# Important: this property activates manual optimization.
self.automatic_optimization = False
@property
def max_steps(self):
if "max_steps" in self._cfg:
return self._cfg.get("max_steps")
if "max_epochs" not in self._cfg:
raise ValueError("Must specify 'max_steps' or 'max_epochs'.")
if "steps_per_epoch" in self._cfg:
return self._cfg.max_epochs * self._cfg.steps_per_epoch
return compute_max_steps(
max_epochs=self._cfg.max_epochs,
accumulate_grad_batches=self.trainer.accumulate_grad_batches,
limit_train_batches=self.trainer.limit_train_batches,
num_workers=get_num_workers(self.trainer),
num_samples=len(self._train_dl.dataset),
batch_size=get_batch_size(self._train_dl),
drop_last=self._train_dl.drop_last,
)
@staticmethod
def get_warmup_steps(max_steps, warmup_steps, warmup_ratio):
if warmup_steps is not None:
return warmup_steps
if warmup_ratio is not None:
return warmup_ratio * max_steps
return None
def configure_optimizers(self):
optim_config = self._cfg.optim.copy()
OmegaConf.set_struct(optim_config, False)
sched_config = optim_config.pop("sched", None)
OmegaConf.set_struct(optim_config, True)
gen_params = self.generator.parameters()
disc_params = itertools.chain(self.msd.parameters(), self.mpd.parameters())
optim_g = instantiate(optim_config, params=gen_params)
optim_d = instantiate(optim_config, params=disc_params)
if sched_config is None:
return [optim_g, optim_d]
max_steps = self.max_steps
warmup_steps = self.get_warmup_steps(
max_steps=max_steps,
warmup_steps=sched_config.get("warmup_steps", None),
warmup_ratio=sched_config.get("warmup_ratio", None),
)
OmegaConf.set_struct(sched_config, False)
sched_config["max_steps"] = max_steps
if warmup_steps:
sched_config["warmup_steps"] = warmup_steps
sched_config.pop("warmup_ratio", None)
OmegaConf.set_struct(sched_config, True)
scheduler_g = prepare_lr_scheduler(
optimizer=optim_g, scheduler_config=sched_config, train_dataloader=self._train_dl
)
scheduler_d = prepare_lr_scheduler(
optimizer=optim_d, scheduler_config=sched_config, train_dataloader=self._train_dl
)
self.lr_schedule_interval = scheduler_g["interval"]
return [optim_g, optim_d], [scheduler_g, scheduler_d]
def update_lr(self, interval="step"):
schedulers = self.lr_schedulers()
if schedulers is not None and self.lr_schedule_interval == interval:
sch1, sch2 = schedulers
sch1.step()
sch2.step()
@typecheck()
def forward(self, *, spec):
"""
        Runs the generator; for inputs and outputs, see input_types and output_types.
"""
return self.generator(x=spec)
@typecheck(
input_types={"spec": NeuralType(('B', 'C', 'T'), MelSpectrogramType())},
output_types={"audio": NeuralType(('B', 'T'), AudioSignal())},
)
def convert_spectrogram_to_audio(self, spec: 'torch.tensor') -> 'torch.tensor':
return self(spec=spec).squeeze(1)
def training_step(self, batch, batch_idx):
audio, audio_len, audio_mel, _ = self._process_batch(batch)
# Mel as input for L1 mel loss
audio_trg_mel, _ = self.trg_melspec_fn(audio, audio_len)
audio = audio.unsqueeze(1)
audio_pred = self.generator(x=audio_mel)
audio_pred_mel, _ = self.trg_melspec_fn(audio_pred.squeeze(1), audio_len)
optim_g, optim_d = self.optimizers()
# Train discriminator
optim_d.zero_grad()
mpd_score_real, mpd_score_gen, _, _ = self.mpd(y=audio, y_hat=audio_pred.detach())
loss_disc_mpd, _, _ = self.discriminator_loss(
disc_real_outputs=mpd_score_real, disc_generated_outputs=mpd_score_gen
)
msd_score_real, msd_score_gen, _, _ = self.msd(y=audio, y_hat=audio_pred.detach())
loss_disc_msd, _, _ = self.discriminator_loss(
disc_real_outputs=msd_score_real, disc_generated_outputs=msd_score_gen
)
loss_d = loss_disc_msd + loss_disc_mpd
self.manual_backward(loss_d)
optim_d.step()
# Train generator
optim_g.zero_grad()
loss_mel = F.l1_loss(audio_pred_mel, audio_trg_mel)
_, mpd_score_gen, fmap_mpd_real, fmap_mpd_gen = self.mpd(y=audio, y_hat=audio_pred)
_, msd_score_gen, fmap_msd_real, fmap_msd_gen = self.msd(y=audio, y_hat=audio_pred)
loss_fm_mpd = self.feature_loss(fmap_r=fmap_mpd_real, fmap_g=fmap_mpd_gen)
loss_fm_msd = self.feature_loss(fmap_r=fmap_msd_real, fmap_g=fmap_msd_gen)
loss_gen_mpd, _ = self.generator_loss(disc_outputs=mpd_score_gen)
loss_gen_msd, _ = self.generator_loss(disc_outputs=msd_score_gen)
loss_g = loss_gen_msd + loss_gen_mpd + loss_fm_msd + loss_fm_mpd + loss_mel * self.l1_factor
self.manual_backward(loss_g)
optim_g.step()
self.update_lr()
metrics = {
"g_loss_fm_mpd": loss_fm_mpd,
"g_loss_fm_msd": loss_fm_msd,
"g_loss_gen_mpd": loss_gen_mpd,
"g_loss_gen_msd": loss_gen_msd,
"g_loss": loss_g,
"d_loss_mpd": loss_disc_mpd,
"d_loss_msd": loss_disc_msd,
"d_loss": loss_d,
"global_step": self.global_step,
"lr": optim_g.param_groups[0]['lr'],
}
self.log_dict(metrics, on_step=True, sync_dist=True)
self.log("g_l1_loss", loss_mel, prog_bar=True, logger=False, sync_dist=True)
def on_train_epoch_end(self) -> None:
self.update_lr("epoch")
def validation_step(self, batch, batch_idx):
audio, audio_len, audio_mel, audio_mel_len = self._process_batch(batch)
audio_pred = self(spec=audio_mel)
if self.input_as_mel:
gt_mel, gt_mel_len = self.audio_to_melspec_precessor(audio, audio_len)
audio_pred_mel, _ = self.audio_to_melspec_precessor(audio_pred.squeeze(1), audio_len)
loss_mel = F.l1_loss(audio_mel, audio_pred_mel)
self.log_dict({"val_loss": loss_mel}, on_epoch=True, sync_dist=True)
# Plot audio once per epoch
if self.log_audio and batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB:
# Perform bias denoising
pred_denoised = self._bias_denoise(audio_pred, audio_mel).squeeze(1)
pred_denoised_mel, _ = self.audio_to_melspec_precessor(pred_denoised, audio_len)
clips = []
specs = []
for i in range(min(5, audio.shape[0])):
clips += [
wandb.Audio(
audio[i, : audio_len[i]].data.cpu().numpy(),
caption=f"real audio {i}",
sample_rate=self.sample_rate,
),
wandb.Audio(
audio_pred[i, 0, : audio_len[i]].data.cpu().numpy().astype('float32'),
caption=f"generated audio {i}",
sample_rate=self.sample_rate,
),
wandb.Audio(
pred_denoised[i, : audio_len[i]].data.cpu().numpy(),
caption=f"denoised audio {i}",
sample_rate=self.sample_rate,
),
]
specs += [
wandb.Image(
plot_spectrogram_to_numpy(audio_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"input mel {i}",
),
wandb.Image(
plot_spectrogram_to_numpy(audio_pred_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"output mel {i}",
),
wandb.Image(
plot_spectrogram_to_numpy(pred_denoised_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"denoised mel {i}",
),
]
if self.input_as_mel:
specs += [
wandb.Image(
plot_spectrogram_to_numpy(gt_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"gt mel {i}",
),
]
self.logger.experiment.log({"audio": clips, "specs": specs})
def _process_batch(self, batch):
if self.input_as_mel:
audio, audio_len, audio_mel = batch
audio_mel_len = [audio_mel.shape[1]] * audio_mel.shape[0]
return audio, audio_len, audio_mel, audio_mel_len
if self.ds_class == "nemo.collections.tts.data.vocoder_dataset.VocoderDataset":
audio = batch.get("audio")
audio_len = batch.get("audio_lens")
else:
audio, audio_len = batch
audio_mel, audio_mel_len = self.audio_to_melspec_precessor(audio, audio_len)
return audio, audio_len, audio_mel, audio_mel_len
def _bias_denoise(self, audio, mel):
def stft(x):
comp = torch.stft(x.squeeze(1), n_fft=1024, hop_length=256, win_length=1024, return_complex=True)
comp = torch.view_as_real(comp)
real, imag = comp[..., 0], comp[..., 1]
mags = torch.sqrt(real ** 2 + imag ** 2)
phase = torch.atan2(imag, real)
return mags, phase
def istft(mags, phase):
comp = torch.stack([mags * torch.cos(phase), mags * torch.sin(phase)], dim=-1)
x = torch.istft(torch.view_as_complex(comp), n_fft=1024, hop_length=256, win_length=1024)
return x
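        # Bias denoising: run the generator once on an all-zeros mel to estimate the constant
        # "noise floor" it produces for silence, then subtract a small, scaled copy of that
        # magnitude spectrum from the generated audio's STFT (akin to the WaveGlow denoiser).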
# Create bias tensor
if self.stft_bias is None or self.stft_bias.shape[0] != audio.shape[0]:
audio_bias = self(spec=torch.zeros_like(mel, device=mel.device))
self.stft_bias, _ = stft(audio_bias)
self.stft_bias = self.stft_bias[:, :, 0][:, :, None]
audio_mags, audio_phase = stft(audio)
audio_mags = audio_mags - self.cfg.get("denoise_strength", 0.0025) * self.stft_bias
audio_mags = torch.clamp(audio_mags, 0.0)
audio_denoised = istft(audio_mags, audio_phase).unsqueeze(1)
return audio_denoised
def _setup_train_dataloader(self, cfg):
dataset = instantiate(cfg.dataset)
sampler = dataset.get_sampler(cfg.dataloader_params.batch_size)
data_loader = torch.utils.data.DataLoader(
dataset, collate_fn=dataset.collate_fn, sampler=sampler, **cfg.dataloader_params
)
return data_loader
def _setup_test_dataloader(self, cfg):
dataset = instantiate(cfg.dataset)
data_loader = torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
return data_loader
def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = "train"):
if "dataset" not in cfg or not isinstance(cfg.dataset, DictConfig):
raise ValueError(f"No dataset for {name}")
if "dataloader_params" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):
raise ValueError(f"No dataloader_params for {name}")
if shuffle_should_be:
if 'shuffle' not in cfg.dataloader_params:
logging.warning(
f"Shuffle should be set to True for {self}'s {name} dataloader but was not found in its "
"config. Manually setting to True"
)
with open_dict(cfg["dataloader_params"]):
cfg.dataloader_params.shuffle = True
elif not cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to False!!!")
elif not shuffle_should_be and cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to True!!!")
dataset = instantiate(cfg.dataset)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
def setup_training_data(self, cfg):
if self.ds_class == "nemo.collections.tts.data.vocoder_dataset.VocoderDataset":
self._train_dl = self._setup_train_dataloader(cfg)
else:
self._train_dl = self.__setup_dataloader_from_config(cfg)
def setup_validation_data(self, cfg):
if self.ds_class == "nemo.collections.tts.data.vocoder_dataset.VocoderDataset":
self._validation_dl = self._setup_test_dataloader(cfg)
else:
self._validation_dl = self.__setup_dataloader_from_config(cfg, shuffle_should_be=False, name="validation")
def setup_test_data(self, cfg):
pass
def configure_callbacks(self):
if not self.log_config:
return []
sample_ds_class = self.log_config.dataset._target_
if sample_ds_class != "nemo.collections.tts.data.vocoder_dataset.VocoderDataset":
raise ValueError(f"Sample logging only supported for VocoderDataset, got {sample_ds_class}")
data_loader = self._setup_test_dataloader(self.log_config)
generators = instantiate(self.log_config.generators)
log_dir = Path(self.log_config.log_dir) if self.log_config.log_dir else None
log_callback = LoggingCallback(
generators=generators,
data_loader=data_loader,
log_epochs=self.log_config.log_epochs,
epoch_frequency=self.log_config.epoch_frequency,
output_dir=log_dir,
loggers=self.trainer.loggers,
log_tensorboard=self.log_config.log_tensorboard,
log_wandb=self.log_config.log_wandb,
)
return [log_callback]
@classmethod
def list_available_models(cls) -> 'Optional[Dict[str, str]]':
list_of_models = []
model = PretrainedModelInfo(
pretrained_model_name="tts_en_hifigan",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_hifigan/versions/1.0.0rc1/files/tts_hifigan.nemo",
description="This model is trained on LJSpeech audio sampled at 22050Hz and mel spectrograms generated from"
" Tacotron2, TalkNet, and FastPitch. This model has been tested on generating female English "
"voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_hifigan_ft_mixertts",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_hifigan/versions/1.6.0/files/tts_en_lj_hifigan_ft_mixertts.nemo",
description="This model is trained on LJSpeech audio sampled at 22050Hz and mel spectrograms generated from"
" Mixer-TTS. This model has been tested on generating female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_hifigan_ft_mixerttsx",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_hifigan/versions/1.6.0/files/tts_en_lj_hifigan_ft_mixerttsx.nemo",
description="This model is trained on LJSpeech audio sampled at 22050Hz and mel spectrograms generated from"
" Mixer-TTS-X. This model has been tested on generating female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
model = PretrainedModelInfo(
pretrained_model_name="tts_en_hifitts_hifigan_ft_fastpitch",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_multispeaker_fastpitchhifigan/versions/1.10.0/files/tts_en_hifitts_hifigan_ft_fastpitch.nemo",
description="This model is trained on HiFiTTS audio sampled at 44100Hz and mel spectrograms generated from"
" FastPitch. This model has been tested on generating male and female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
# de-DE, single male speaker, 22050 Hz, Thorsten Müller’s German Neutral-TTS Dataset, 21.02
model = PretrainedModelInfo(
pretrained_model_name="tts_de_hifigan_singleSpeaker_thorstenNeutral_2102",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_de_fastpitchhifigan/versions/1.15.0/files/tts_de_hifigan_thorstens2102.nemo",
description="This model is finetuned from the HiFiGAN pretrained checkpoint `tts_en_lj_hifigan_ft_mixerttsx`"
" by the mel-spectrograms generated from the FastPitch checkpoint `tts_de_fastpitch_singleSpeaker_thorstenNeutral_2102`."
" This model has been tested on generating male German neutral voices.",
class_=cls,
)
list_of_models.append(model)
# de-DE, single male speaker, 22050 Hz, Thorsten Müller’s German Neutral-TTS Dataset, 22.10
model = PretrainedModelInfo(
pretrained_model_name="tts_de_hifigan_singleSpeaker_thorstenNeutral_2210",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_de_fastpitchhifigan/versions/1.15.0/files/tts_de_hifigan_thorstens2210.nemo",
description="This model is finetuned from the HiFiGAN pretrained checkpoint `tts_en_lj_hifigan_ft_mixerttsx`"
" by the mel-spectrograms generated from the FastPitch checkpoint `tts_de_fastpitch_singleSpeaker_thorstenNeutral_2210`."
" This model has been tested on generating male German neutral voices.",
class_=cls,
)
list_of_models.append(model)
# de-DE, multi-speaker, 5 speakers, 44100 Hz, HUI-Audio-Corpus-German Clean.
model = PretrainedModelInfo(
pretrained_model_name="tts_de_hui_hifigan_ft_fastpitch_multispeaker_5",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_de_fastpitch_multispeaker_5/versions/1.11.0/files/tts_de_hui_hifigan_ft_fastpitch_multispeaker_5.nemo",
description="This model is finetuned from the HiFiGAN pretrained checkpoint `tts_en_hifitts_hifigan_ft_fastpitch` "
"by the mel-spectrograms generated from the FastPitch checkpoint `tts_de_fastpitch_multispeaker_5`. This model "
"has been tested on generating male and female German voices.",
class_=cls,
)
list_of_models.append(model)
# Spanish, multi-speaker, 44100 Hz, Latin American Spanish OpenSLR
model = PretrainedModelInfo(
pretrained_model_name="tts_es_hifigan_ft_fastpitch_multispeaker",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_es_multispeaker_fastpitchhifigan/versions/1.15.0/files/tts_es_hifigan_ft_fastpitch_multispeaker.nemo",
description="This model is trained on the audio from 6 crowdsourced Latin American Spanish OpenSLR "
"datasets and finetuned on the mel-spectrograms generated from the FastPitch checkpoint "
"`tts_es_fastpitch_multispeaker`. This model has been tested on generating male and female "
"Spanish voices with Latin American accents.",
class_=cls,
)
list_of_models.append(model)
# zh, single female speaker, 22050Hz, SFSpeech Bilingual Chinese/English dataset, improved model using richer
# dict and jieba word segmenter for polyphone disambiguation.
model = PretrainedModelInfo(
pretrained_model_name="tts_zh_hifigan_sfspeech",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_zh_fastpitch_hifigan_sfspeech/versions/1.15.0/files/tts_zh_hifigan_sfspeech.nemo",
description="This model is finetuned from the HiFiGAN pretrained checkpoint `tts_en_lj_hifigan_ft_mixerttsx`"
" by the mel-spectrograms generated from the FastPitch checkpoint `tts_zh_fastpitch_sfspeech`."
" This model has been tested on generating female Mandarin Chinese voices.",
class_=cls,
)
list_of_models.append(model)
return list_of_models
def load_state_dict(self, state_dict, strict=True):
# Override load_state_dict to give us some flexibility to be backward-compatible with old checkpoints
new_state_dict = {}
num_resblocks = len(self.cfg['generator']['resblock_kernel_sizes'])
for k, v in state_dict.items():
new_k = k
if 'resblocks' in k:
parts = k.split(".")
                # only do this if the checkpoint type is older
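                # Old checkpoints store resblocks under a flat index, e.g. with 3 resblock kernel
                # sizes the flat key "generator.resblocks.7.*" maps to the nested
                # "generator.resblocks.2.1.*" (7 // 3 = 2, 7 % 3 = 1).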
if len(parts) == 6:
layer = int(parts[2])
new_layer = f"{layer // num_resblocks}.{layer % num_resblocks}"
new_k = f"generator.resblocks.{new_layer}.{'.'.join(parts[3:])}"
new_state_dict[new_k] = v
super().load_state_dict(new_state_dict, strict=strict)
# Methods for model exportability
def _prepare_for_export(self, **kwargs):
if self.generator is not None:
try:
self.generator.remove_weight_norm()
except ValueError:
return
@property
def input_types(self):
return {
"spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
}
@property
def output_types(self):
return {
"audio": NeuralType(('B', 'S', 'T'), AudioSignal(self.sample_rate)),
}
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
par = next(self.parameters())
mel = torch.randn((max_batch, self.cfg['preprocessor']['nfilt'], max_dim), device=self.device, dtype=par.dtype)
return ({'spec': mel},)
def forward_for_export(self, spec):
"""
        Runs the generator; for inputs and outputs, see input_types and output_types.
"""
return self.generator(x=spec)
| NeMo-main | nemo/collections/tts/models/hifigan.py |
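# --- Editor's note: hedged usage sketch (not part of the NeMo source above) ---
# A minimal example of vocoding a mel spectrogram with HifiGanModel. It assumes the
# NGC checkpoint name from list_available_models() above ("tts_en_lj_hifigan_ft_mixertts")
# is reachable and that `soundfile` is installed; the random 80-band spectrogram is a
# placeholder for the output of a spectrogram generator such as MixerTTSModel.
import soundfile as sf
import torch

from nemo.collections.tts.models import HifiGanModel

vocoder = HifiGanModel.from_pretrained("tts_en_lj_hifigan_ft_mixertts").eval()

# Placeholder input: (B, n_mel_channels, T); real use would pass a generated spectrogram.
spec = torch.randn(1, 80, 200, device=vocoder.device)

with torch.no_grad():
    audio = vocoder.convert_spectrogram_to_audio(spec=spec)  # (B, T_audio)

# This checkpoint was trained on 22050 Hz audio; vocoder.sample_rate reflects that.
sf.write("sample.wav", audio[0].cpu().numpy(), samplerate=vocoder.sample_rate)
# -------------------------------------------------------------------------------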
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Iterable
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from nemo.collections.tts.losses.fastpitchloss import DurationLoss, MelLoss, PitchLoss
from nemo.collections.tts.modules.fastpitch import FastPitchSSLModule, average_features
from nemo.collections.tts.modules.transformer import mask_from_lens
from nemo.collections.tts.parts.utils.helpers import plot_multipitch_to_numpy, plot_spectrogram_to_numpy
from nemo.core.classes import ModelPT
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging, model_utils
from nemo.utils.decorators import experimental
@experimental
class FastPitchModel_SSL(ModelPT):
"""
    FastPitch-based model that can synthesize mel spectrograms from content and speaker embeddings
    obtained from SSLDisentangler. This model can be used for voice conversion by swapping the speaker embedding
    of a given source utterance with the speaker embedding of a target speaker.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None, vocoder=None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
self.learn_alignment = False
self._parser = None
self._tb_logger = None
super().__init__(cfg=cfg, trainer=trainer)
self.bin_loss_warmup_epochs = cfg.get("bin_loss_warmup_epochs", 100)
self.log_train_images = False
# Same defaults as FastPitch
loss_scale = 0.1 if self.learn_alignment else 1.0
dur_loss_scale = loss_scale
pitch_loss_scale = loss_scale
if "dur_loss_scale" in cfg:
dur_loss_scale = cfg.dur_loss_scale
if "pitch_loss_scale" in cfg:
pitch_loss_scale = cfg.pitch_loss_scale
self.mel_loss = MelLoss()
self.pitch_loss = PitchLoss(loss_scale=pitch_loss_scale)
self.duration_loss = DurationLoss(loss_scale=dur_loss_scale)
input_fft = None
self.use_encoder = use_encoder = cfg.get("use_encoder", False)
if use_encoder:
self.encoder = instantiate(self._cfg.encoder)
output_fft = instantiate(self._cfg.output_fft)
duration_predictor = None
self.use_duration_predictor = cfg.get("use_duration_predictor", False)
if self.use_duration_predictor:
            assert self.use_encoder, "use_encoder must be True if use_duration_predictor is True"
# this means we are using unique tokens
duration_predictor = instantiate(self._cfg.duration_predictor)
self.pitch_conditioning = pitch_conditioning = cfg.get("pitch_conditioning", True)
if pitch_conditioning:
pitch_predictor = instantiate(self._cfg.pitch_predictor)
else:
pitch_predictor = None
self.content_projection_layer = torch.nn.Linear(self._cfg.content_emb_indim, self._cfg.content_emb_outdim)
self.speaker_projection_layer = torch.nn.Linear(self._cfg.speaker_emb_indim, self._cfg.speaker_emb_outdim)
self.num_datasets = cfg.get("n_datasets", 1)
if self.num_datasets > 1:
# Data ID conditioning if num_datasets > 1. During inference, can set data_id to be that of the cleaner dataset.
# Maybe useful if we have clean and noisy datasets
self.dataset_embedding_layer = torch.nn.Embedding(self.num_datasets, self._cfg.symbols_embedding_dim)
self.fastpitch = FastPitchSSLModule(
input_fft,
output_fft,
duration_predictor,
pitch_predictor,
cfg.symbols_embedding_dim,
cfg.pitch_embedding_kernel_size,
cfg.n_mel_channels,
)
self.non_trainable_models = {}
self.non_trainable_models['vocoder'] = vocoder
def vocode_spectrogram(self, spectrogram):
# spectrogram [C, T] numpy
if self.non_trainable_models['vocoder'] is None:
logging.error("Vocoder is none, should be instantiated as a HiFiGAN vocoder")
with torch.no_grad():
vocoder_device = self.non_trainable_models['vocoder'].device
_spec = torch.from_numpy(spectrogram).unsqueeze(0).to(torch.float32).to(vocoder_device)
wav_generated = self.non_trainable_models['vocoder'].generator(x=_spec)[0]
return wav_generated.cpu().numpy()
@property
def tb_logger(self):
if self._tb_logger is None:
            if self.logger is None or self.logger.experiment is None:
return None
tb_logger = self.logger.experiment
if isinstance(self.logger, Iterable):
for logger in self.logger:
if isinstance(logger, Iterable):
tb_logger = logger.experiment
break
self._tb_logger = tb_logger
return self._tb_logger
def forward(
self, *, enc_out=None, enc_mask=None, durs=None, pitch=None, pace=1.0,
):
return self.fastpitch(enc_out=enc_out, enc_mask=enc_mask, durs=durs, pitch=pitch, pace=pace,)
def compute_encoding(self, content_embedding, speaker_embedding, dataset_id=None):
# content embedding is (B, C, T)
# speaker embedding is (B, C)
# pitch_contour is (B, T)
content_embedding = content_embedding.permute(0, 2, 1) # (B, C, T) -> (B, T, C)
content_embedding_projected = self.content_projection_layer(content_embedding)
content_embedding_projected = content_embedding_projected.permute(0, 2, 1) # (B, T, C) -> (B, C, T)
speaker_embedding_projected = self.speaker_projection_layer(speaker_embedding)
speaker_embedding_repeated = speaker_embedding_projected[:, :, None].repeat(
1, 1, content_embedding_projected.shape[2]
)
encoded = torch.cat([content_embedding_projected, speaker_embedding_repeated], dim=1)
encoded = encoded.permute(0, 2, 1) # (B, C, T) -> (B, T, C)
if self.num_datasets > 1:
dataset_embedding = self.dataset_embedding_layer(dataset_id) # (B, C)
dataset_embedding_repeated = dataset_embedding[:, None, :].repeat(1, encoded.shape[1], 1)
encoded = encoded + dataset_embedding_repeated
return encoded
def training_step(self, batch, batch_idx):
content_embedding = batch["content_embedding"]
encoded_len = batch["encoded_len"]
speaker_embedding = batch["speaker_embedding"]
mels = batch["mel_spectrogram"]
pitch = batch["pitch_contour"]
dataset_id = batch["dataset_id"]
durs = batch["duration"]
enc_out = self.compute_encoding(content_embedding, speaker_embedding, dataset_id)
if self.use_encoder:
enc_out, _ = self.encoder(input=enc_out, seq_lens=encoded_len)
enc_mask = mask_from_lens(encoded_len)
enc_mask = enc_mask[:, :, None]
mels_pred, _, _, log_durs_pred, pitch_pred, pitch = self(
enc_out=enc_out, enc_mask=enc_mask, durs=durs, pitch=pitch, pace=1.0,
)
loss = 0
mel_loss = self.mel_loss(spect_predicted=mels_pred, spect_tgt=mels)
loss += mel_loss
if self.use_duration_predictor:
dur_loss = self.duration_loss(log_durs_predicted=log_durs_pred, durs_tgt=durs, len=encoded_len)
self.log("t_dur_loss", dur_loss)
loss += dur_loss
if self.pitch_conditioning:
pitch_loss = self.pitch_loss(pitch_predicted=pitch_pred, pitch_tgt=pitch, len=encoded_len)
loss += pitch_loss
self.log("t_pitch_loss", pitch_loss)
self.log("t_loss", loss)
self.log("t_mel_loss", mel_loss)
# Log images to tensorboard
if self.log_train_images and isinstance(self.logger, TensorBoardLogger):
self.log_train_images = False
self.tb_logger.add_image(
"train_mel_target",
plot_spectrogram_to_numpy(mels[0].data.cpu().float().numpy()),
self.global_step,
dataformats="HWC",
)
spec_predict = mels_pred[0].data.cpu().float().numpy()
self.tb_logger.add_image(
"train_mel_predicted", plot_spectrogram_to_numpy(spec_predict), self.global_step, dataformats="HWC",
)
return loss
def validation_step(self, batch, batch_idx):
content_embedding = batch["content_embedding"]
encoded_len = batch["encoded_len"]
speaker_embedding = batch["speaker_embedding"]
mels = batch["mel_spectrogram"]
spec_len = batch["mel_len"]
pitch = batch["pitch_contour"]
dataset_id = batch["dataset_id"]
durs = batch["duration"]
enc_out = self.compute_encoding(content_embedding, speaker_embedding, dataset_id)
if self.use_encoder:
enc_out, _ = self.encoder(input=enc_out, seq_lens=encoded_len)
enc_mask = mask_from_lens(encoded_len)
enc_mask = enc_mask[:, :, None]
mels_pred, _, _, log_durs_pred, pitch_pred, pitch = self(
enc_out=enc_out, enc_mask=enc_mask, durs=durs, pitch=pitch, pace=1.0
)
mel_loss = self.mel_loss(spect_predicted=mels_pred, spect_tgt=mels)
val_out = {
"val_loss": mel_loss,
"mel_loss": mel_loss,
"mel_target": mels if batch_idx == 0 else None,
"mel_pred": mels_pred if batch_idx == 0 else None,
"spec_len": spec_len if batch_idx == 0 else None,
"pitch_target": pitch if batch_idx == 0 else None,
"pitch_pred": pitch_pred if batch_idx == 0 else None,
}
if self.use_duration_predictor:
dur_loss = self.duration_loss(log_durs_predicted=log_durs_pred, durs_tgt=durs, len=encoded_len)
val_out["dur_loss"] = dur_loss
if self.pitch_conditioning:
pitch_loss = self.pitch_loss(pitch_predicted=pitch_pred, pitch_tgt=pitch, len=encoded_len)
val_out["pitch_loss"] = pitch_loss
val_out["val_loss"] = mel_loss + pitch_loss
return val_out
def on_validation_epoch_end(self, outputs):
collect = lambda key: torch.stack([x[key] for x in outputs]).mean()
val_loss = collect("val_loss")
mel_loss = collect("mel_loss")
self.log("v_loss", val_loss)
self.log("v_mel_loss", mel_loss)
if self.pitch_conditioning:
pitch_loss = collect("pitch_loss")
self.log("v_pitch_loss", pitch_loss)
single_output = outputs[0]
spec_target = single_output['mel_target']
spec_predict = single_output['mel_pred']
spec_len = single_output['spec_len']
pitch_target = single_output['pitch_target']
pitch_pred = single_output['pitch_pred']
if isinstance(self.logger, TensorBoardLogger):
_rand_idx = random.randint(0, spec_target.shape[0] - 1)
self.tb_logger.add_image(
"val_mel_target",
plot_spectrogram_to_numpy(spec_target[_rand_idx].data.cpu().float().numpy()),
self.global_step,
dataformats="HWC",
)
spec_predict = spec_predict[_rand_idx].data.cpu().float().numpy()
self.tb_logger.add_image(
"val_mel_predicted", plot_spectrogram_to_numpy(spec_predict), self.global_step, dataformats="HWC",
)
if self.pitch_conditioning:
_pitch_pred = pitch_pred[_rand_idx].data.cpu().numpy()
_pitch_target = pitch_target[_rand_idx].data.cpu().numpy()
self.tb_logger.add_image(
"val_pitch",
plot_multipitch_to_numpy(_pitch_target, _pitch_pred),
self.global_step,
dataformats="HWC",
)
_spec_len = spec_len[_rand_idx].data.cpu().item()
wav_vocoded = self.vocode_spectrogram(spec_target[_rand_idx].data.cpu().float().numpy()[:, :_spec_len])
self.tb_logger.add_audio("Real audio", wav_vocoded[0], self.global_step, 22050)
wav_vocoded = self.vocode_spectrogram(spec_predict[:, :_spec_len])
self.tb_logger.add_audio("Generated Audio", wav_vocoded[0], self.global_step, 22050)
self.log_train_images = True
def generate_wav(
self,
content_embedding,
speaker_embedding,
encoded_len=None,
pitch_contour=None,
compute_pitch=False,
compute_duration=False,
durs_gt=None,
dataset_id=0,
):
"""
Args:
content_embedding : Content embedding from SSL backbone (B, C, T)
speaker_embedding : Speaker embedding from SSL backbone (B, C)
pitch_contour : Normalized Pitch contour derived from the mel spectrogram
encoded_len: Length of each content embedding, optional if batch size is 1.
compute_pitch: if true, predict pitch contour from content and speaker embedding.
compute_duration: if true, predict duration from content and speaker embedding.
durs_gt: Ground truth duration of each content embedding, ignored if compute_duration is True.
dataset_id: Dataset id if training is conditioned on multiple datasets
Returns:
List of waveforms
"""
_bs, _, _n_time = content_embedding.size()
if encoded_len is None:
encoded_len = (torch.ones(_bs) * _n_time).long().to(self.device)
dataset_id = (torch.ones(_bs) * dataset_id).long().to(self.device)
enc_out = self.compute_encoding(content_embedding, speaker_embedding, dataset_id=dataset_id)
if self.use_encoder:
enc_out, _ = self.encoder(input=enc_out, seq_lens=encoded_len)
enc_mask = mask_from_lens(encoded_len)
if compute_duration:
durs = None
elif durs_gt is not None:
durs = durs_gt
else:
ssl_downsampling_factor = self._cfg.get("ssl_downsampling_factor", 4) # backward compatibility
durs = torch.ones_like(enc_mask) * ssl_downsampling_factor
enc_mask = enc_mask[:, :, None]
if pitch_contour is not None and compute_pitch == False:
if durs_gt is not None:
pitch = average_features(pitch_contour.unsqueeze(1), durs_gt).squeeze(1)
elif durs is not None:
pitch = average_features(pitch_contour.unsqueeze(1), durs).squeeze(1)
else:
raise ValueError("durs or durs_gt must be provided")
else:
pitch = None
mels_pred, *_ = self(enc_out=enc_out, enc_mask=enc_mask, durs=durs, pitch=pitch, pace=1.0)
wavs = []
for idx in range(_bs):
mel_pred = mels_pred[idx].data.cpu().float().numpy()
wav = self.vocode_spectrogram(mel_pred)
wavs.append(wav)
return wavs
def __setup_dataloader_from_config(self, cfg):
dataset = instantiate(cfg.dataset)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.pad_collate_fn, **cfg.dataloader_params)
def setup_training_data(self, cfg):
self._train_dl = self.__setup_dataloader_from_config(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self.__setup_dataloader_from_config(cfg)
def setup_test_data(self, cfg):
"""Omitted."""
pass
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
return []
| NeMo-main | nemo/collections/tts/models/fastpitch_ssl.py |
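# --- Editor's note: hedged usage sketch (not part of the NeMo source above) ---
# A sketch of calling FastPitchModel_SSL.generate_wav(). It assumes FastPitchModel_SSL
# is exported from nemo.collections.tts.models; the checkpoint path is hypothetical,
# the embedding channel sizes are placeholders (they must match the model's
# content_emb_indim / speaker_emb_indim config), and real content/speaker embeddings
# would come from an SSL disentangler rather than torch.randn.
import torch

from nemo.collections.tts.models import FastPitchModel_SSL, HifiGanModel

vocoder = HifiGanModel.from_pretrained("tts_en_lj_hifigan_ft_mixertts").eval()
model = FastPitchModel_SSL.restore_from("fastpitch_ssl.nemo").eval()  # hypothetical .nemo path
model.non_trainable_models['vocoder'] = vocoder  # used by vocode_spectrogram()

content_embedding = torch.randn(1, 256, 120, device=model.device)  # (B, C, T) placeholder
speaker_embedding = torch.randn(1, 256, device=model.device)       # (B, C) placeholder

with torch.no_grad():
    wavs = model.generate_wav(
        content_embedding, speaker_embedding, compute_pitch=True, compute_duration=False,
    )  # list of numpy waveforms, one per batch item
# -------------------------------------------------------------------------------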
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import omegaconf
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger
from torch.cuda.amp import autocast
from torch.nn import functional as F
from nemo.collections.tts.data.dataset import DistributedBucketSampler
from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureMatchingLoss, GeneratorLoss, KlLoss
from nemo.collections.tts.models.base import TextToWaveform
from nemo.collections.tts.modules.vits_modules import MultiPeriodDiscriminator
from nemo.collections.tts.parts.utils.helpers import (
clip_grad_value_,
g2p_backward_compatible_support,
plot_spectrogram_to_numpy,
slice_segments,
)
from nemo.collections.tts.torch.tts_data_types import SpeakerID
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import AudioSignal, FloatType, Index, IntType, TokenIndex
from nemo.core.neural_types.neural_type import NeuralType
from nemo.core.optim.lr_scheduler import CosineAnnealing
from nemo.utils import logging, model_utils
from nemo.utils.decorators.experimental import experimental
HAVE_WANDB = True
try:
import wandb
except ModuleNotFoundError:
HAVE_WANDB = False
@experimental
class VitsModel(TextToWaveform):
def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
# setup normalizer
self.normalizer = None
self.text_normalizer_call = None
self.text_normalizer_call_kwargs = {}
self._setup_normalizer(cfg)
# setup tokenizer
self.tokenizer = None
self._setup_tokenizer(cfg)
assert self.tokenizer is not None
num_tokens = len(self.tokenizer.tokens)
self.tokenizer_pad = self.tokenizer.pad
super().__init__(cfg=cfg, trainer=trainer)
self.audio_to_melspec_processor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq)
self.feat_matching_loss = FeatureMatchingLoss()
self.disc_loss = DiscriminatorLoss()
self.gen_loss = GeneratorLoss()
self.kl_loss = KlLoss()
self.net_g = instantiate(
cfg.synthesizer,
n_vocab=num_tokens,
spec_channels=cfg.n_fft // 2 + 1,
segment_size=cfg.segment_size // cfg.n_window_stride,
padding_idx=self.tokenizer_pad,
)
self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm)
self.automatic_optimization = False
def _setup_normalizer(self, cfg):
if "text_normalizer" in cfg:
normalizer_kwargs = {}
if "whitelist" in cfg.text_normalizer:
normalizer_kwargs["whitelist"] = self.register_artifact(
'text_normalizer.whitelist', cfg.text_normalizer.whitelist
)
try:
import nemo_text_processing
self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs)
self.text_normalizer_call = self.normalizer.normalize
except Exception as e:
logging.error(e)
raise ImportError(
"`nemo_text_processing` not installed, see https://github.com/NVIDIA/NeMo-text-processing for more details"
)
if "text_normalizer_call_kwargs" in cfg:
self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs
def _setup_tokenizer(self, cfg):
text_tokenizer_kwargs = {}
if "g2p" in cfg.text_tokenizer and cfg.text_tokenizer.g2p is not None:
# for backward compatibility
if (
self._is_model_being_restored()
and (cfg.text_tokenizer.g2p.get('_target_', None) is not None)
and cfg.text_tokenizer.g2p["_target_"].startswith("nemo_text_processing.g2p")
):
cfg.text_tokenizer.g2p["_target_"] = g2p_backward_compatible_support(
cfg.text_tokenizer.g2p["_target_"]
)
g2p_kwargs = {}
if "phoneme_dict" in cfg.text_tokenizer.g2p:
g2p_kwargs["phoneme_dict"] = self.register_artifact(
'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict,
)
if "heteronyms" in cfg.text_tokenizer.g2p:
g2p_kwargs["heteronyms"] = self.register_artifact(
'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms,
)
text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs)
self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs)
def parse(self, text: str, normalize=True) -> torch.tensor:
if self.training:
logging.warning("parse() is meant to be called in eval mode.")
if normalize and self.text_normalizer_call is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_kwargs)
eval_phon_mode = contextlib.nullcontext()
if hasattr(self.tokenizer, "set_phone_prob"):
eval_phon_mode = self.tokenizer.set_phone_prob(prob=1.0)
with eval_phon_mode:
tokens = self.tokenizer.encode(text)
return torch.tensor(tokens).long().unsqueeze(0).to(self.device)
def configure_optimizers(self):
optim_config = self._cfg.optim.copy()
OmegaConf.set_struct(optim_config, False)
sched_config = optim_config.pop("sched", None)
OmegaConf.set_struct(optim_config, True)
optim_g = instantiate(optim_config, params=self.net_g.parameters(),)
optim_d = instantiate(optim_config, params=self.net_d.parameters(),)
if sched_config is not None:
if sched_config.name == 'ExponentialLR':
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=sched_config.lr_decay)
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=sched_config.lr_decay)
elif sched_config.name == 'CosineAnnealing':
scheduler_g = CosineAnnealing(
optimizer=optim_g, max_steps=sched_config.max_steps, min_lr=sched_config.min_lr,
)
scheduler_d = CosineAnnealing(
optimizer=optim_d, max_steps=sched_config.max_steps, min_lr=sched_config.min_lr,
)
else:
raise ValueError("Unknown optimizer.")
scheduler_g_dict = {'scheduler': scheduler_g, 'interval': 'step'}
scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'}
return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict]
else:
return [optim_g, optim_d]
# for inference
@typecheck(
input_types={
"tokens": NeuralType(('B', 'T_text'), TokenIndex()),
"speakers": NeuralType(('B',), Index(), optional=True),
"noise_scale": NeuralType(('B',), FloatType(), optional=True),
"length_scale": NeuralType(('B',), FloatType(), optional=True),
"noise_scale_w": NeuralType(('B',), FloatType(), optional=True),
"max_len": NeuralType(('B',), IntType(), optional=True),
}
)
def forward(self, tokens, speakers=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=1000):
text_len = torch.tensor([tokens.size(-1)]).to(int).to(tokens.device)
audio_pred, attn, y_mask, (z, z_p, m_p, logs_p) = self.net_g.infer(
tokens,
text_len,
speakers=speakers,
noise_scale=noise_scale,
length_scale=length_scale,
noise_scale_w=noise_scale_w,
max_len=max_len,
)
return audio_pred, attn, y_mask, (z, z_p, m_p, logs_p)
def training_step(self, batch, batch_idx):
speakers = None
if SpeakerID in self._train_dl.dataset.sup_data_types_set:
(audio, audio_len, text, text_len, speakers) = batch
else:
(audio, audio_len, text, text_len) = batch
spec, spec_lengths = self.audio_to_melspec_processor(audio, audio_len, linear_spec=True)
with autocast(enabled=True):
audio_pred, l_length, attn, ids_slice, text_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(
text, text_len, spec, spec_lengths, speakers
)
audio_pred = audio_pred.float()
audio_pred_mel, _ = self.audio_to_melspec_processor(audio_pred.squeeze(1), audio_len, linear_spec=False)
audio = slice_segments(audio.unsqueeze(1), ids_slice * self.cfg.n_window_stride, self._cfg.segment_size)
audio_mel, _ = self.audio_to_melspec_processor(audio.squeeze(1), audio_len, linear_spec=False)
with autocast(enabled=True):
y_d_hat_r, y_d_hat_g, _, _ = self.net_d(audio, audio_pred.detach())
with autocast(enabled=False):
loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(
disc_real_outputs=y_d_hat_r, disc_generated_outputs=y_d_hat_g
)
loss_disc_all = loss_disc
# get optimizers
optim_g, optim_d = self.optimizers()
# train discriminator
optim_d.zero_grad()
self.manual_backward(loss_disc_all)
norm_d = clip_grad_value_(self.net_d.parameters(), None)
optim_d.step()
with autocast(enabled=True):
y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(audio, audio_pred)
# Generator
with autocast(enabled=False):
loss_dur = torch.sum(l_length.float())
loss_mel = F.l1_loss(audio_mel, audio_pred_mel) * self._cfg.c_mel
loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl
loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g)
loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g)
loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
# train generator
optim_g.zero_grad()
self.manual_backward(loss_gen_all)
norm_g = clip_grad_value_(self.net_g.parameters(), None)
optim_g.step()
schedulers = self.lr_schedulers()
if schedulers is not None:
sch1, sch2 = schedulers
            if (
                self.trainer.is_last_batch and isinstance(sch1, torch.optim.lr_scheduler.ExponentialLR)
            ) or isinstance(sch1, CosineAnnealing):
sch1.step()
sch2.step()
metrics = {
"loss_gen": loss_gen,
"loss_fm": loss_fm,
"loss_mel": loss_mel,
"loss_dur": loss_dur,
"loss_kl": loss_kl,
"loss_gen_all": loss_gen_all,
"loss_disc_all": loss_disc_all,
"grad_gen": norm_g,
"grad_disc": norm_d,
}
for i, v in enumerate(losses_gen):
metrics[f"loss_gen_i_{i}"] = v
for i, v in enumerate(losses_disc_r):
metrics[f"loss_disc_r_{i}"] = v
for i, v in enumerate(losses_disc_g):
metrics[f"loss_disc_g_{i}"] = v
self.log_dict(metrics, on_step=True, sync_dist=True)
def validation_step(self, batch, batch_idx):
speakers = None
if self.cfg.n_speakers > 1:
(audio, audio_len, text, text_len, speakers) = batch
else:
(audio, audio_len, text, text_len) = batch
audio_pred, _, mask, *_ = self.net_g.infer(text, text_len, speakers, max_len=1000)
audio_pred = audio_pred.squeeze()
audio_pred_len = mask.sum([1, 2]).long() * self._cfg.validation_ds.dataset.hop_length
mel, mel_lengths = self.audio_to_melspec_processor(audio, audio_len)
audio_pred_mel, audio_pred_mel_len = self.audio_to_melspec_processor(audio_pred, audio_pred_len)
# plot audio once per epoch
if batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB:
logger = self.logger.experiment
specs = []
audios = []
specs += [
wandb.Image(
plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].data.cpu().numpy()),
caption=f"val_mel_target",
),
wandb.Image(
plot_spectrogram_to_numpy(audio_pred_mel[0, :, : audio_pred_mel_len[0]].data.cpu().numpy()),
caption=f"val_mel_predicted",
),
]
audios += [
wandb.Audio(
audio[0, : audio_len[0]].data.cpu().to(torch.float).numpy(),
caption=f"val_wav_target",
sample_rate=self._cfg.sample_rate,
),
wandb.Audio(
audio_pred[0, : audio_pred_len[0]].data.cpu().to(torch.float).numpy(),
caption=f"val_wav_predicted",
sample_rate=self._cfg.sample_rate,
),
]
logger.log({"specs": specs, "audios": audios})
def _loader(self, cfg):
try:
_ = cfg['dataset']['manifest_filepath']
except omegaconf.errors.MissingMandatoryValue:
logging.warning("manifest_filepath was skipped. No dataset for this model.")
return None
dataset = instantiate(
cfg.dataset,
text_normalizer=self.normalizer,
text_normalizer_call_kwargs=self.text_normalizer_call_kwargs,
text_tokenizer=self.tokenizer,
)
return torch.utils.data.DataLoader( # noqa
dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params,
)
def train_dataloader(self):
# default used by the Trainer
dataset = instantiate(
self.cfg.train_ds.dataset,
text_normalizer=self.normalizer,
text_normalizer_call_kwargs=self.text_normalizer_call_kwargs,
text_tokenizer=self.tokenizer,
)
train_sampler = DistributedBucketSampler(dataset, **self.cfg.train_ds.batch_sampler)
dataloader = torch.utils.data.DataLoader(
dataset, collate_fn=dataset.collate_fn, batch_sampler=train_sampler, **self.cfg.train_ds.dataloader_params,
)
return dataloader
def setup_training_data(self, cfg):
self._train_dl = self._loader(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self._loader(cfg)
def setup_test_data(self, cfg):
"""Omitted."""
pass
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
list_of_models = []
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_vits",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_vits/versions/1.13.0/files/vits_ljspeech_fp16_full.nemo",
description="This model is trained on LJSpeech audio sampled at 22050Hz. This model has been tested on generating female English "
"voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
model = PretrainedModelInfo(
pretrained_model_name="tts_en_hifitts_vits",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_hifitts_vits/versions/r1.15.0/files/vits_en_hifitts.nemo",
description="This model is trained on HiFITTS sampled at 44100Hz with and can be used to generate male and female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
return list_of_models
@typecheck(
input_types={"tokens": NeuralType(('B', 'T_text'), TokenIndex(), optional=True),},
output_types={"audio": NeuralType(('B', 'T_audio'), AudioSignal())},
)
def convert_text_to_waveform(self, *, tokens, speakers=None):
audio = self(tokens=tokens, speakers=speakers)[0].squeeze(1)
return audio
| NeMo-main | nemo/collections/tts/models/vits.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig, open_dict
from pytorch_lightning.loggers import TensorBoardLogger
from nemo.collections.tts.losses.waveglowloss import WaveGlowLoss
from nemo.collections.tts.models.base import GlowVocoder
from nemo.collections.tts.parts.utils.helpers import OperationMode, waveglow_log_to_tb_func
from nemo.core.classes import Exportable
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import (
AudioSignal,
LengthsType,
LogDeterminantType,
MelSpectrogramType,
NormalDistributionSamplesType,
VoidType,
)
from nemo.core.neural_types.neural_type import NeuralType
from nemo.utils import logging, model_utils
class WaveGlowModel(GlowVocoder, Exportable):
"""WaveGlow model (https://arxiv.org/abs/1811.00002) that is used to generate audio from mel spectrogram."""
def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
super().__init__(cfg=cfg, trainer=trainer)
self.sigma = self._cfg.sigma
self.audio_to_melspec_precessor = instantiate(self._cfg.preprocessor)
self.waveglow = instantiate(self._cfg.waveglow)
self.loss = WaveGlowLoss()
@GlowVocoder.mode.setter
def mode(self, new_mode):
if new_mode == OperationMode.training:
self.train()
else:
self.eval()
self._mode = new_mode
self.waveglow.mode = new_mode
@typecheck()
def forward(self, *, audio, audio_len, run_inverse=True):
if self.mode != self.waveglow.mode:
raise ValueError(
f"WaveGlowModel's mode {self.mode} does not match WaveGlowModule's mode {self.waveglow.mode}"
)
spec, spec_len = self.audio_to_melspec_precessor(audio, audio_len)
tensors = self.waveglow(spec=spec, audio=audio, run_inverse=run_inverse, sigma=self.sigma)
if self.mode == OperationMode.training:
return tensors[:-1] # z, log_s_list, log_det_W_list
elif self.mode == OperationMode.validation:
z, log_s_list, log_det_W_list, audio_pred = tensors
return z, log_s_list, log_det_W_list, audio_pred, spec, spec_len
return tensors # audio_pred
@typecheck(
input_types={
"spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"sigma": NeuralType(optional=True),
"denoise": NeuralType(optional=True),
"denoiser_strength": NeuralType(optional=True),
},
output_types={"audio": NeuralType(('B', 'T'), AudioSignal())},
)
def convert_spectrogram_to_audio(
self, spec: torch.Tensor, sigma: float = 1.0, denoise: bool = True, denoiser_strength: float = 0.01
) -> torch.Tensor:
with self.nemo_infer():
self.waveglow.remove_weightnorm()
audio = self.waveglow(spec=spec.to(self.waveglow.upsample.weight.dtype), sigma=sigma)
if denoise:
audio = self.denoise(audio=audio, strength=denoiser_strength)
return audio
def training_step(self, batch, batch_idx):
self.mode = OperationMode.training
audio, audio_len = batch
z, log_s_list, log_det_W_list = self(audio=audio, audio_len=audio_len, run_inverse=False)
loss = self.loss(z=z, log_s_list=log_s_list, log_det_W_list=log_det_W_list, sigma=self.sigma)
output = {
'loss': loss,
'progress_bar': {'training_loss': loss},
'log': {'loss': loss},
}
return output
def validation_step(self, batch, batch_idx):
self.mode = OperationMode.validation
audio, audio_len = batch
z, log_s_list, log_det_W_list, audio_pred, spec, spec_len = self(
audio=audio, audio_len=audio_len, run_inverse=(batch_idx == 0)
)
loss = self.loss(z=z, log_s_list=log_s_list, log_det_W_list=log_det_W_list, sigma=self.sigma)
loss = {
"val_loss": loss,
"audio_pred": audio_pred,
"mel_target": spec,
"mel_len": spec_len,
}
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
if self.logger is not None and self.logger.experiment is not None:
tb_logger = self.logger.experiment
for logger in self.trainer.loggers:
if isinstance(logger, TensorBoardLogger):
tb_logger = logger.experiment
break
waveglow_log_to_tb_func(
tb_logger,
self.validation_step_outputs[0].values(),
self.global_step,
tag="eval",
mel_fb=self.audio_to_melspec_precessor.fb,
)
avg_loss = torch.stack([x['val_loss'] for x in self.validation_step_outputs]).mean()
self.log('val_loss', avg_loss)
self.validation_step_outputs.clear() # free memory
def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = "train"):
if "dataset" not in cfg or not isinstance(cfg.dataset, DictConfig):
raise ValueError(f"No dataset for {name}")
if "dataloader_params" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):
raise ValueError(f"No dataloder_params for {name}")
if shuffle_should_be:
if 'shuffle' not in cfg.dataloader_params:
logging.warning(
f"Shuffle should be set to True for {self}'s {name} dataloader but was not found in its "
"config. Manually setting to True"
)
with open_dict(cfg["dataloader_params"]):
cfg.dataloader_params.shuffle = True
elif not cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to False!!!")
elif not shuffle_should_be and cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to True!!!")
dataset = instantiate(cfg.dataset)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
def setup_training_data(self, cfg):
self._train_dl = self.__setup_dataloader_from_config(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self.__setup_dataloader_from_config(cfg, shuffle_should_be=False, name="validation")
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
model = PretrainedModelInfo(
pretrained_model_name="tts_en_waveglow_88m",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_waveglow_88m/versions/1.0.0/files/tts_waveglow.nemo",
description="This model is trained on LJSpeech sampled at 22050Hz, and has been tested on generating female English voices with an American accent and Mandarin voices.",
class_=cls,
aliases=["WaveGlow-22050Hz", "tts_waveglow"],
)
list_of_models.append(model)
return list_of_models
def load_state_dict(self, state_dict, strict=True):
# Remove convinv.inv_conv weights since they are not initialized until forward is called during training
# and can be computed from convinv.conv.weight
# Ideally, we should remove this during saving instead of ignoring during loading
for i in range(self._cfg.waveglow.n_flows):
if f"waveglow.convinv.{i}.inv_conv.weight" in state_dict:
del state_dict[f"waveglow.convinv.{i}.inv_conv.weight"]
super().load_state_dict(state_dict, strict=strict)
# Methods for model exportability
@property
def input_module(self):
return self.waveglow
@property
def output_module(self):
return self.waveglow
def _prepare_for_export(self, **kwargs):
self.update_bias_spect()
self.waveglow._prepare_for_export(**kwargs)
@property
def input_types(self):
return {
"audio": NeuralType(('B', 'T'), AudioSignal()),
"audio_len": NeuralType(('B'), LengthsType()),
"run_inverse": NeuralType(optional=True),
}
@property
def output_types(self):
if self.mode == OperationMode.training or self.mode == OperationMode.validation:
output_dict = {
"pred_normal_dist": NeuralType(('B', 'flowgroup', 'T'), NormalDistributionSamplesType()),
"log_s_list": [NeuralType(('B', 'flowgroup', 'T'), VoidType())], # TODO: Figure out a good typing
"log_det_W_list": [NeuralType(elements_type=LogDeterminantType())],
}
if self.mode == OperationMode.validation:
output_dict["audio_pred"] = NeuralType(('B', 'T'), AudioSignal())
output_dict["spec"] = NeuralType(('B', 'T', 'D'), MelSpectrogramType())
output_dict["spec_len"] = NeuralType(('B'), LengthsType())
return output_dict
return {
"audio_pred": NeuralType(('B', 'T'), AudioSignal()),
}
def forward_for_export(self, spec, z=None):
return self.waveglow(spec, z)
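# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of vocoding a mel spectrogram with the pretrained WaveGlow
# checkpoint listed in `list_available_models`. The 80 mel bands are an
# assumption about that checkpoint's preprocessor; real input would come from a
# spectrogram generator such as Tacotron 2 or FastPitch.
if __name__ == "__main__":
    waveglow = WaveGlowModel.from_pretrained("tts_en_waveglow_88m")
    waveglow.eval()
    spec = torch.randn(1, 80, 100, device=waveglow.device)  # (B, n_mel, T), random stand-in
    audio = waveglow.convert_spectrogram_to_audio(spec=spec, sigma=1.0, denoise=True, denoiser_strength=0.01)
    print(audio.shape)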
| NeMo-main | nemo/collections/tts/models/waveglow.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Phil Wang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following is largely based on code from https://github.com/lucidrains/stylegan2-pytorch
from random import random, randrange
from typing import List, Optional
import torch
import torch.nn.functional as F
import torchvision
from einops import rearrange
from hydra.utils import instantiate
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger
from torch.utils.tensorboard.writer import SummaryWriter
from nemo.collections.tts.losses.spectrogram_enhancer_losses import (
ConsistencyLoss,
GeneratorLoss,
GradientPenaltyLoss,
HingeLoss,
)
from nemo.collections.tts.parts.utils.helpers import mask_sequence_tensor, to_device_recursive
from nemo.core import Exportable, ModelPT, PretrainedModelInfo, typecheck
from nemo.core.neural_types import LengthsType, MelSpectrogramType, NeuralType
from nemo.core.neural_types.elements import BoolType
from nemo.utils import logging
class SpectrogramEnhancerModel(ModelPT, Exportable):
"""
GAN-based model to add details to blurry spectrograms from TTS models like Tacotron or FastPitch. Based on StyleGAN 2 [1]
[1] Karras et. al. - Analyzing and Improving the Image Quality of StyleGAN (https://arxiv.org/abs/1912.04958)
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None) -> None:
self.spectrogram_model = None
super().__init__(cfg=cfg, trainer=trainer)
self.generator = instantiate(cfg.generator)
self.discriminator = instantiate(cfg.discriminator)
self.generator_loss = GeneratorLoss()
self.discriminator_loss = HingeLoss()
self.consistency_loss = ConsistencyLoss(cfg.consistency_loss_weight)
self.gradient_penalty_loss = GradientPenaltyLoss(cfg.gradient_penalty_loss_weight)
def move_to_correct_device(self, e):
return to_device_recursive(e, next(iter(self.generator.parameters())).device)
def normalize_spectrograms(self, spectrogram: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
spectrogram = spectrogram - self._cfg.spectrogram_min_value
spectrogram = spectrogram / (self._cfg.spectrogram_max_value - self._cfg.spectrogram_min_value)
return mask_sequence_tensor(spectrogram, lengths)
def unnormalize_spectrograms(self, spectrogram: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
spectrogram = spectrogram * (self._cfg.spectrogram_max_value - self._cfg.spectrogram_min_value)
spectrogram = spectrogram + self._cfg.spectrogram_min_value
return mask_sequence_tensor(spectrogram, lengths)
def generate_zs(self, batch_size: int = 1, mixing: bool = False):
if mixing and self._cfg.mixed_prob < random():
mixing_point = randrange(1, self.generator.num_layers)
first_part = [torch.randn(batch_size, self._cfg.latent_dim)] * mixing_point
second_part = [torch.randn(batch_size, self._cfg.latent_dim)] * (self.generator.num_layers - mixing_point)
zs = [*first_part, *second_part]
else:
zs = [torch.randn(batch_size, self._cfg.latent_dim)] * self.generator.num_layers
return self.move_to_correct_device(zs)
def generate_noise(self, batch_size: int = 1) -> torch.Tensor:
noise = torch.rand(batch_size, self._cfg.n_bands, 4096, 1)
return self.move_to_correct_device(noise)
def pad_spectrograms(self, spectrograms):
multiplier = self.generator.upsample_factor
*_, max_length = spectrograms.shape
return F.pad(spectrograms, (0, multiplier - max_length % multiplier))
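    # Note on pad_spectrograms: when `max_length` is already a multiple of the
    # generator's upsample factor, `multiplier - max_length % multiplier` equals
    # `multiplier`, so one full extra block is padded; the excess is cropped back
    # to `max_length` in forward_with_custom_noise.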
@typecheck(
input_types={
"input_spectrograms": NeuralType(("B", "D", "T_spec"), MelSpectrogramType()),
"lengths": NeuralType(("B",), LengthsType()),
"mixing": NeuralType(None, BoolType(), optional=True),
"normalize": NeuralType(None, BoolType(), optional=True),
}
)
def forward(
self, *, input_spectrograms: torch.Tensor, lengths: torch.Tensor, mixing: bool = False, normalize: bool = True,
):
"""
Generator forward pass. Noise inputs will be generated.
input_spectrograms: batch of spectrograms, typically synthetic
        lengths: length for every spectrogram in the batch
mixing: style mixing, usually True during training
normalize: normalize spectrogram range to ~[0, 1], True for normal use
returns: batch of enhanced spectrograms
For explanation of style mixing refer to [1]
[1] Karras et. al. - A Style-Based Generator Architecture for Generative Adversarial Networks, 2018 (https://arxiv.org/abs/1812.04948)
"""
return self.forward_with_custom_noise(
input_spectrograms=input_spectrograms,
lengths=lengths,
mixing=mixing,
normalize=normalize,
zs=None,
ws=None,
noise=None,
)
def forward_with_custom_noise(
self,
input_spectrograms: torch.Tensor,
lengths: torch.Tensor,
zs: Optional[List[torch.Tensor]] = None,
ws: Optional[List[torch.Tensor]] = None,
noise: Optional[torch.Tensor] = None,
mixing: bool = False,
normalize: bool = True,
):
"""
Generator forward pass. Noise inputs will be generated if None.
input_spectrograms: batch of spectrograms, typically synthetic
        lengths: length for every spectrogram in the batch
zs: latent noise inputs on the unit sphere (either this or ws or neither)
ws: latent noise inputs in the style space (either this or zs or neither)
        noise: per-pixel independent Gaussian noise
mixing: style mixing, usually True during training
normalize: normalize spectrogram range to ~[0, 1], True for normal use
returns: batch of enhanced spectrograms
For explanation of style mixing refer to [1]
        For definitions of z and w, see [2]
[1] Karras et. al. - A Style-Based Generator Architecture for Generative Adversarial Networks, 2018 (https://arxiv.org/abs/1812.04948)
[2] Karras et. al. - Analyzing and Improving the Image Quality of StyleGAN, 2019 (https://arxiv.org/abs/1912.04958)
"""
batch_size, *_, max_length = input_spectrograms.shape
# generate noise
if zs is not None and ws is not None:
raise ValueError(
"Please specify either zs or ws or neither, but not both. It is not clear which one to use."
)
if zs is None:
zs = self.generate_zs(batch_size, mixing)
if ws is None:
ws = [self.generator.style_mapping(z) for z in zs]
if noise is None:
noise = self.generate_noise(batch_size)
input_spectrograms = rearrange(input_spectrograms, "b c l -> b 1 c l")
# normalize if needed, mask and pad appropriately
if normalize:
input_spectrograms = self.normalize_spectrograms(input_spectrograms, lengths)
input_spectrograms = self.pad_spectrograms(input_spectrograms)
# the main call
enhanced_spectrograms = self.generator(input_spectrograms, lengths, ws, noise)
# denormalize if needed, mask and remove padding
if normalize:
enhanced_spectrograms = self.unnormalize_spectrograms(enhanced_spectrograms, lengths)
enhanced_spectrograms = enhanced_spectrograms[:, :, :, :max_length]
enhanced_spectrograms = rearrange(enhanced_spectrograms, "b 1 c l -> b c l")
return enhanced_spectrograms
def training_step(self, batch, batch_idx, optimizer_idx):
input_spectrograms, target_spectrograms, lengths = batch
with torch.no_grad():
input_spectrograms = self.normalize_spectrograms(input_spectrograms, lengths)
target_spectrograms = self.normalize_spectrograms(target_spectrograms, lengths)
# train discriminator
if optimizer_idx == 0:
enhanced_spectrograms = self.forward(
input_spectrograms=input_spectrograms, lengths=lengths, mixing=True, normalize=False
)
enhanced_spectrograms = rearrange(enhanced_spectrograms, "b c l -> b 1 c l")
fake_logits = self.discriminator(enhanced_spectrograms, input_spectrograms, lengths)
target_spectrograms_ = rearrange(target_spectrograms, "b c l -> b 1 c l").requires_grad_()
real_logits = self.discriminator(target_spectrograms_, input_spectrograms, lengths)
d_loss = self.discriminator_loss(real_logits, fake_logits)
self.log("d_loss", d_loss, prog_bar=True)
if batch_idx % self._cfg.gradient_penalty_loss_every_n_steps == 0:
gp_loss = self.gradient_penalty_loss(target_spectrograms_, real_logits)
self.log("d_loss_gp", gp_loss, prog_bar=True)
return d_loss + gp_loss
return d_loss
# train generator
if optimizer_idx == 1:
enhanced_spectrograms = self.forward(
input_spectrograms=input_spectrograms, lengths=lengths, mixing=True, normalize=False
)
input_spectrograms = rearrange(input_spectrograms, "b c l -> b 1 c l")
enhanced_spectrograms = rearrange(enhanced_spectrograms, "b c l -> b 1 c l")
fake_logits = self.discriminator(enhanced_spectrograms, input_spectrograms, lengths)
g_loss = self.generator_loss(fake_logits)
c_loss = self.consistency_loss(input_spectrograms, enhanced_spectrograms, lengths)
self.log("g_loss", g_loss, prog_bar=True)
self.log("c_loss", c_loss, prog_bar=True)
with torch.no_grad():
target_spectrograms = rearrange(target_spectrograms, "b c l -> b 1 c l")
self.log_illustration(target_spectrograms, input_spectrograms, enhanced_spectrograms, lengths)
return g_loss + c_loss
def configure_optimizers(self):
generator_opt = instantiate(self._cfg.generator_opt, params=self.generator.parameters(),)
discriminator_opt = instantiate(self._cfg.discriminator_opt, params=self.discriminator.parameters())
return [discriminator_opt, generator_opt], []
def setup_training_data(self, train_data_config):
dataset = instantiate(train_data_config.dataset)
self._train_dl = torch.utils.data.DataLoader(
dataset, collate_fn=dataset.collate_fn, **train_data_config.dataloader_params
)
def setup_validation_data(self, val_data_config):
"""
        There is no validation step for this model.
        It is not clear whether any of the losses used is a sensible metric for choosing between two models.
        This might change in the future.
"""
pass
@classmethod
def list_available_models(cls):
list_of_models = []
# en, multi speaker, LibriTTS, 16000 Hz
# stft 25ms 10ms matching ASR params
        # for use during English ASR training/adaptation
model = PretrainedModelInfo(
pretrained_model_name="tts_en_spectrogram_enhancer_for_asr_finetuning",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_fastpitch_spectrogram_enhancer_for_asr_finetuning/versions/1.20.0/files/tts_en_spectrogram_enhancer_for_asr_finetuning.nemo",
description="This model is trained to add details to synthetic spectrograms."
" It was trained on pairs of real-synthesized spectrograms generated by FastPitch."
" STFT parameters follow ASR with 25 ms window and 10 ms hop."
" It is supposed to be used in conjunction with that model for ASR training/adaptation.",
class_=cls,
)
list_of_models.append(model)
return list_of_models
def log_illustration(self, target_spectrograms, input_spectrograms, enhanced_spectrograms, lengths):
if self.global_rank != 0:
return
if not self.loggers:
return
step = self.trainer.global_step // 2 # because of G/D training
if step % self.trainer.log_every_n_steps != 0:
return
idx = 0
length = int(lengths.flatten()[idx].item())
tensor = torch.stack(
[
enhanced_spectrograms - input_spectrograms,
input_spectrograms,
enhanced_spectrograms,
target_spectrograms,
],
dim=0,
).cpu()[:, idx, :, :, :length]
grid = torchvision.utils.make_grid(tensor, nrow=1).clamp(0.0, 1.0)
for logger in self.loggers:
if isinstance(logger, TensorBoardLogger):
writer: SummaryWriter = logger.experiment
writer.add_image("spectrograms", grid, global_step=step)
writer.flush()
elif isinstance(logger, WandbLogger):
logger.log_image("spectrograms", [grid], caption=["residual, input, output, ground truth"], step=step)
else:
logging.warning("Unsupported logger type: %s", str(type(logger)))
| NeMo-main | nemo/collections/tts/models/spectrogram_enhancer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from pathlib import Path
from typing import List, Tuple
import torch
import torch.nn.functional as F
from einops import rearrange
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.tts.losses.audio_codec_loss import (
MultiResolutionMelLoss,
RelativeFeatureMatchingLoss,
TimeDomainLoss,
)
from nemo.collections.tts.modules.common import GaussianDropout
from nemo.collections.tts.parts.utils.callbacks import LoggingCallback
from nemo.collections.tts.parts.utils.helpers import get_batch_size, get_num_workers
from nemo.core import ModelPT
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import AudioSignal, EncodedRepresentation, LengthsType, TokenIndex
from nemo.core.neural_types.neural_type import NeuralType
from nemo.core.optim.lr_scheduler import compute_max_steps, prepare_lr_scheduler
from nemo.utils import logging, model_utils
from nemo.utils.decorators import experimental
@experimental
class AudioCodecModel(ModelPT):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
super().__init__(cfg=cfg, trainer=trainer)
# Expected sample rate for the input audio
self.sample_rate = cfg.sample_rate
# Number of samples in each audio frame that is encoded
self.samples_per_frame = cfg.samples_per_frame
# Discriminator updates
self.disc_updates_per_period = cfg.get("disc_updates_per_period", 1)
self.disc_update_period = cfg.get("disc_update_period", 1)
if self.disc_updates_per_period > self.disc_update_period:
raise ValueError(
                f'Number of discriminator updates ({self.disc_updates_per_period}) per period must be less than or equal to the configured period ({self.disc_update_period})'
)
# Encoder setup
self.audio_encoder = instantiate(cfg.audio_encoder)
# Optionally, add gaussian noise to encoder output as an information bottleneck
encoder_noise_stdev = cfg.get("encoder_noise_stdev", 0.0)
if encoder_noise_stdev:
self.encoder_noise = GaussianDropout(stdev=encoder_noise_stdev)
else:
self.encoder_noise = None
if "vector_quantizer" in cfg:
self.vector_quantizer = instantiate(cfg.vector_quantizer)
else:
logging.warning('Vector quantizer will not be used.')
self.vector_quantizer = None
# Decoder setup
self.audio_decoder = instantiate(cfg.audio_decoder)
# Discriminator setup
self.discriminator = instantiate(cfg.discriminator)
# Loss setup
mel_loss_dim = cfg.get("mel_loss_dim", 64)
mel_loss_resolutions = cfg.mel_loss_resolutions
self.time_domain_loss_scale = cfg.get("time_domain_loss_scale", 1.0)
self.mel_loss_scale = cfg.get("mel_loss_scale", 1.0)
mel_loss_l1_scale = cfg.get("mel_loss_l1_scale", 1.0)
self.gen_loss_scale = cfg.get("gen_loss_scale", 1.0)
self.feature_loss_scale = cfg.get("feature_loss_scale", 1.0)
self.time_domain_loss_fn = TimeDomainLoss()
self.mel_loss_fn = MultiResolutionMelLoss(
sample_rate=self.sample_rate,
mel_dim=mel_loss_dim,
resolutions=mel_loss_resolutions,
l1_scale=mel_loss_l1_scale,
)
self.gen_loss_fn = instantiate(cfg.generator_loss)
self.disc_loss_fn = instantiate(cfg.discriminator_loss)
self.feature_loss_fn = RelativeFeatureMatchingLoss()
# Log setup
self.log_config = cfg.get("log_config", None)
# Optimizer setup
self.lr_schedule_interval = None
self.automatic_optimization = False
@typecheck(
input_types={
"audio": NeuralType(('B', 'T_audio'), AudioSignal()),
"audio_len": NeuralType(tuple('B'), LengthsType()),
},
output_types={
"encoded": NeuralType(('B', 'D', 'T_encoded'), EncodedRepresentation()),
"encoded_len": NeuralType(tuple('B'), LengthsType()),
},
)
def encode_audio(self, audio: torch.Tensor, audio_len: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Apply encoder on the input audio signal. Input will be padded with zeros so
the last frame has full `self.samples_per_frame` samples.
Args:
audio: input time-domain signal
audio_len: valid length for each example in the batch
Returns:
Encoder output `encoded` and its length in number of frames `encoded_len`
"""
audio, audio_len = self.pad_audio(audio, audio_len)
encoded, encoded_len = self.audio_encoder(audio=audio, audio_len=audio_len)
return encoded, encoded_len
@typecheck(
input_types={
"inputs": NeuralType(('B', 'D', 'T_encoded'), EncodedRepresentation()),
"input_len": NeuralType(tuple('B'), LengthsType()),
},
output_types={
"audio": NeuralType(('B', 'T_audio'), AudioSignal()),
"audio_len": NeuralType(tuple('B'), LengthsType()),
},
)
def decode_audio(self, inputs: torch.Tensor, input_len: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Apply decoder on the input encoded representation. Note that the input is a
non-quantized or dequantized representation.
Args:
inputs: encoded signal
input_len: valid length for each example in the batch
Returns:
Decoded output `audio` in the time domain and its length in number of samples `audio_len`.
Note that `audio_len` will be a multiple of `self.samples_per_frame`.
"""
audio, audio_len = self.audio_decoder(inputs=inputs, input_len=input_len)
return audio, audio_len
@typecheck(
input_types={
"encoded": NeuralType(('B', 'D', 'T_encoded'), EncodedRepresentation()),
"encoded_len": NeuralType(tuple('B'), LengthsType()),
},
output_types={"tokens": NeuralType(('B', 'C', 'T_encoded'), TokenIndex())},
)
def quantize(self, encoded: torch.Tensor, encoded_len: torch.Tensor) -> torch.Tensor:
"""Quantize the continuous encoded representation into a discrete
representation for each frame.
Args:
encoded: encoded signal representation
encoded_len: valid length of the encoded representation in frames
Returns:
A tensor of tokens for each codebook for each frame.
"""
if not self.vector_quantizer:
raise ValueError("Cannot quantize without quantizer")
# vector quantizer is returning [C, B, T], where C is the number of codebooks
tokens = self.vector_quantizer.encode(inputs=encoded, input_len=encoded_len)
# use batch first for the output
tokens = rearrange(tokens, 'C B T -> B C T')
return tokens
@typecheck(
input_types={
"tokens": NeuralType(('B', 'C', 'T_encoded'), TokenIndex()),
"tokens_len": NeuralType(tuple('B'), LengthsType()),
},
output_types={"dequantized": NeuralType(('B', 'D', 'T_encoded'), EncodedRepresentation()),},
)
def dequantize(self, tokens: torch.Tensor, tokens_len: torch.Tensor) -> torch.Tensor:
"""Convert the discrete input tokens into a continuous encoded representation.
Args:
tokens: discrete tokens for each codebook for each time frame
tokens_len: valid length of each example in the batch
Returns:
Continuous encoded representation of the discrete input representation.
"""
if not self.vector_quantizer:
raise ValueError("Cannot dequantize without quantizer")
# vector quantizer is using [C, B, T], where C is the number of codebooks
tokens = rearrange(tokens, 'B C T -> C B T')
dequantized = self.vector_quantizer.decode(indices=tokens, input_len=tokens_len)
return dequantized
@typecheck(
input_types={
"audio": NeuralType(('B', 'T_audio'), AudioSignal()),
"audio_len": NeuralType(tuple('B'), LengthsType()),
},
output_types={
"tokens": NeuralType(('B', 'C', 'T_encoded'), TokenIndex()),
"tokens_len": NeuralType(tuple('B'), LengthsType()),
},
)
def encode(self, audio: torch.Tensor, audio_len: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Convert input time-domain audio signal into a discrete representation (tokens).
Args:
audio: input time-domain signal, shape (batch, number of samples)
audio_len: valid length for each example in the batch, shape (batch size,)
Returns:
Tokens for each codebook for each frame, shape (batch, number of codebooks, number of frames),
and the corresponding valid lengths, shape (batch,)
"""
# Apply encoder to obtain a continuous vector for each frame
encoded, encoded_len = self.encode_audio(audio=audio, audio_len=audio_len)
# Apply quantizer to obtain discrete representation per frame
tokens = self.quantize(encoded=encoded, encoded_len=encoded_len)
return tokens, encoded_len
@typecheck(
input_types={
"tokens": NeuralType(('B', 'C', 'T_encoded'), TokenIndex()),
"tokens_len": NeuralType(tuple('B'), LengthsType()),
},
output_types={
"audio": NeuralType(('B', 'T_audio'), AudioSignal()),
"audio_len": NeuralType(tuple('B'), LengthsType()),
},
)
def decode(self, tokens: torch.Tensor, tokens_len: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Convert discrete input tokens into a continuous time-domain signal.
Args:
tokens: discrete tokens for each codebook for each time frame, shape (batch, number of codebooks, number of frames)
tokens_len: valid lengths, shape (batch,)
Returns:
Decoded output `audio` in the time domain and its length in number of samples `audio_len`.
Note that `audio_len` will be a multiple of `self.samples_per_frame`.
"""
# Convert a discrete representation to a dequantized vector for each frame
dequantized = self.dequantize(tokens=tokens, tokens_len=tokens_len)
# Apply decoder to obtain time-domain audio for each frame
audio, audio_len = self.decode_audio(inputs=dequantized, input_len=tokens_len)
return audio, audio_len
@typecheck(
input_types={
"audio": NeuralType(('B', 'T_audio'), AudioSignal()),
"audio_len": NeuralType(tuple('B'), LengthsType()),
},
output_types={
"output_audio": NeuralType(('B', 'T_audio'), EncodedRepresentation()),
"output_audio_len": NeuralType(tuple('B'), LengthsType()),
},
)
def forward(self, audio: torch.Tensor, audio_len: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Apply encoder, quantizer, decoder on the input time-domain signal.
Args:
audio: input time-domain signal
audio_len: valid length for each example in the batch
Returns:
Reconstructed time-domain signal `output_audio` and its length in number of samples `output_audio_len`.
"""
encoded, encoded_len = self.encode_audio(audio=audio, audio_len=audio_len)
if self.vector_quantizer:
# quantize to discrete tokens
tokens = self.quantize(encoded=encoded, encoded_len=encoded_len)
# decode tokens to audio
output_audio, output_audio_len = self.decode(tokens=tokens, tokens_len=encoded_len)
else:
# no quantization, directly decode to audio
output_audio, output_audio_len = self.decode_audio(inputs=encoded, input_len=encoded_len)
return output_audio, output_audio_len
def pad_audio(self, audio, audio_len):
"""Zero pad the end of the audio so that we do not have a partial end frame.
The output will be zero-padded to have an integer number of frames of
length `self.samples_per_frame`.
Args:
audio: input time-domain signal
audio_len: valid length for each example in the batch
Returns:
Padded time-domain signal `padded_audio` and its length `padded_len`.
"""
padded_len = self.samples_per_frame * torch.ceil(audio_len / self.samples_per_frame).int()
max_len = padded_len.max().item()
num_padding = max_len - audio.shape[1]
padded_audio = F.pad(audio, (0, num_padding))
return padded_audio, padded_len
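    # Example of the padding arithmetic above: with samples_per_frame=320 and
    # audio_len=1000, ceil(1000 / 320) = 4 frames, so padded_len=1280 and the last
    # 280 samples of that example are zeros.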
def _process_batch(self, batch):
# [B, T_audio]
audio = batch.get("audio")
# [B]
audio_len = batch.get("audio_lens")
audio, audio_len = self.pad_audio(audio, audio_len)
# [B, D, T_encoded]
encoded, encoded_len = self.audio_encoder(audio=audio, audio_len=audio_len)
if self.encoder_noise is not None:
encoded = self.encoder_noise(encoded)
if self.vector_quantizer:
encoded, _, commit_loss = self.vector_quantizer(inputs=encoded, input_len=encoded_len)
else:
commit_loss = None
# [B, T]
audio_gen, audio_gen_len = self.audio_decoder(inputs=encoded, input_len=encoded_len)
return audio, audio_len, audio_gen, commit_loss
@property
def disc_update_prob(self) -> float:
"""Probability of updating the discriminator.
"""
return self.disc_updates_per_period / self.disc_update_period
def should_update_disc(self, batch_idx) -> bool:
"""Decide whether to update the descriminator based
on the batch index and configured discriminator update period.
"""
disc_update_step = batch_idx % self.disc_update_period
return disc_update_step < self.disc_updates_per_period
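    # Example: with disc_update_period=2 and disc_updates_per_period=1, the
    # discriminator is updated on even batch indices only (0, 2, 4, ...), while
    # the generator is updated on every batch.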
def training_step(self, batch, batch_idx):
optim_gen, optim_disc = self.optimizers()
audio, audio_len, audio_gen, commit_loss = self._process_batch(batch)
if self.should_update_disc(batch_idx):
# Train discriminator
disc_scores_real, disc_scores_gen, _, _ = self.discriminator(
audio_real=audio, audio_gen=audio_gen.detach()
)
loss_disc = self.disc_loss_fn(disc_scores_real=disc_scores_real, disc_scores_gen=disc_scores_gen)
train_disc_loss = loss_disc
optim_disc.zero_grad()
self.manual_backward(train_disc_loss)
optim_disc.step()
else:
loss_disc = None
loss_time_domain = self.time_domain_loss_fn(audio_real=audio, audio_gen=audio_gen, audio_len=audio_len)
train_loss_time_domain = self.time_domain_loss_scale * loss_time_domain
loss_mel = self.mel_loss_fn(audio_real=audio, audio_gen=audio_gen, audio_len=audio_len)
train_loss_mel = self.mel_loss_scale * loss_mel
_, disc_scores_gen, fmaps_real, fmaps_gen = self.discriminator(audio_real=audio, audio_gen=audio_gen)
loss_gen = self.gen_loss_fn(disc_scores_gen=disc_scores_gen)
train_loss_gen = self.gen_loss_scale * loss_gen
loss_feature = self.feature_loss_fn(fmaps_real=fmaps_real, fmaps_gen=fmaps_gen)
train_loss_feature = self.feature_loss_scale * loss_feature
loss_gen_all = train_loss_time_domain + train_loss_mel + train_loss_gen + train_loss_feature
if commit_loss is not None:
loss_gen_all += commit_loss
optim_gen.zero_grad()
self.manual_backward(loss_gen_all)
optim_gen.step()
self.update_lr()
metrics = {
"g_loss_time_domain": loss_time_domain,
"g_loss_mel": loss_mel,
"g_loss_gen": loss_gen,
"g_loss_feature": loss_feature,
"g_loss": loss_gen_all,
"global_step": self.global_step,
"lr": optim_gen.param_groups[0]['lr'],
}
if loss_disc is not None:
metrics["d_loss"] = loss_disc
if commit_loss is not None:
metrics["g_loss_commit"] = commit_loss
self.log_dict(metrics, on_step=True, sync_dist=True)
self.log("t_loss", train_loss_mel, prog_bar=True, logger=False, sync_dist=True)
def on_train_epoch_end(self):
self.update_lr("epoch")
def validation_step(self, batch, batch_idx):
audio, audio_len, audio_gen, _ = self._process_batch(batch)
loss_time_domain = self.time_domain_loss_fn(audio_real=audio, audio_gen=audio_gen, audio_len=audio_len)
loss_mel = self.mel_loss_fn(audio_real=audio, audio_gen=audio_gen, audio_len=audio_len)
metrics = {
"val_loss": loss_time_domain + loss_mel,
"val_loss_time_domain": loss_time_domain,
"val_loss_mel": loss_mel,
}
self.log_dict(metrics, on_epoch=True, sync_dist=True)
@staticmethod
def _setup_train_dataloader(cfg):
dataset = instantiate(cfg.dataset)
sampler = dataset.get_sampler(cfg.dataloader_params.batch_size)
data_loader = torch.utils.data.DataLoader(
dataset, collate_fn=dataset.collate_fn, sampler=sampler, **cfg.dataloader_params
)
return data_loader
@staticmethod
def _setup_test_dataloader(cfg):
dataset = instantiate(cfg.dataset)
data_loader = torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
return data_loader
def setup_training_data(self, cfg):
self._train_dl = self._setup_train_dataloader(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self._setup_test_dataloader(cfg)
def setup_test_data(self, cfg):
pass
@property
def max_steps(self):
if "max_steps" in self._cfg:
return self._cfg.get("max_steps")
if "max_epochs" not in self._cfg:
raise ValueError("Must specify 'max_steps' or 'max_epochs'.")
if "steps_per_epoch" in self._cfg:
return self._cfg.max_epochs * self._cfg.steps_per_epoch
return compute_max_steps(
max_epochs=self._cfg.max_epochs,
accumulate_grad_batches=self.trainer.accumulate_grad_batches,
limit_train_batches=self.trainer.limit_train_batches,
num_workers=get_num_workers(self.trainer),
num_samples=len(self._train_dl.dataset),
batch_size=get_batch_size(self._train_dl),
drop_last=self._train_dl.drop_last,
)
def configure_optimizers(self):
optim_config = self._cfg.optim.copy()
OmegaConf.set_struct(optim_config, False)
sched_config = optim_config.pop("sched", None)
OmegaConf.set_struct(optim_config, True)
gen_params = itertools.chain(self.audio_encoder.parameters(), self.audio_decoder.parameters())
disc_params = self.discriminator.parameters()
optim_g = instantiate(optim_config, params=gen_params)
optim_d = instantiate(optim_config, params=disc_params)
if sched_config is None:
logging.debug('Scheduler is not used')
return [optim_g, optim_d]
logging.debug('Setting up schedulers')
OmegaConf.set_struct(sched_config, False)
sched_config["max_steps"] = self.max_steps
OmegaConf.set_struct(sched_config, True)
scheduler_g = prepare_lr_scheduler(
optimizer=optim_g, scheduler_config=sched_config, train_dataloader=self._train_dl
)
scheduler_d = prepare_lr_scheduler(
optimizer=optim_d, scheduler_config=sched_config, train_dataloader=self._train_dl
)
self.lr_schedule_interval = scheduler_g["interval"]
return [optim_g, optim_d], [scheduler_g, scheduler_d]
def update_lr(self, interval="step"):
schedulers = self.lr_schedulers()
if schedulers is not None and self.lr_schedule_interval == interval:
sch1, sch2 = schedulers
sch1.step()
sch2.step()
def configure_callbacks(self):
if not self.log_config:
return []
data_loader = self._setup_test_dataloader(self.log_config)
generators = instantiate(self.log_config.generators)
log_dir = Path(self.log_config.log_dir) if self.log_config.log_dir else None
log_callback = LoggingCallback(
generators=generators,
data_loader=data_loader,
log_epochs=self.log_config.log_epochs,
epoch_frequency=self.log_config.epoch_frequency,
output_dir=log_dir,
loggers=self.trainer.loggers,
log_tensorboard=self.log_config.log_tensorboard,
log_wandb=self.log_config.log_wandb,
)
return [log_callback]
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
return []
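# --- Illustrative usage sketch (not part of the original file) ---
# A minimal waveform -> tokens -> waveform round trip. `list_available_models`
# above returns no pretrained entries, so a locally trained checkpoint with a
# vector quantizer is assumed; the .nemo path below is a placeholder.
if __name__ == "__main__":
    codec = AudioCodecModel.restore_from("audio_codec.nemo")  # hypothetical path
    codec.eval()
    audio = torch.randn(1, codec.sample_rate, device=codec.device)  # one second of noise
    audio_len = torch.tensor([audio.shape[1]], device=codec.device)
    with torch.no_grad():
        tokens, tokens_len = codec.encode(audio=audio, audio_len=audio_len)
        audio_rec, audio_rec_len = codec.decode(tokens=tokens, tokens_len=tokens_len)
    # audio_rec_len is a multiple of codec.samples_per_frame (see pad_audio above).
    print(tokens.shape, audio_rec.shape)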
| NeMo-main | nemo/collections/tts/models/audio_codec.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from abc import ABC, abstractmethod
from contextlib import ExitStack, contextmanager
from typing import List, Optional
import torch
from omegaconf import DictConfig
from tqdm import tqdm
from nemo.collections.tts.parts.utils.helpers import OperationMode
from nemo.core.classes import ModelPT
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import AudioSignal
from nemo.core.neural_types.neural_type import NeuralType
from nemo.utils import logging, model_utils
class SpectrogramGenerator(ModelPT, ABC):
""" Base class for all TTS models that turn text into a spectrogram """
@abstractmethod
def parse(self, str_input: str, **kwargs) -> 'torch.tensor':
"""
A helper function that accepts raw python strings and turns them into a tensor. The tensor should have 2
dimensions. The first is the batch, which should be of size 1. The second should represent time. The tensor
should represent either tokenized or embedded text, depending on the model.
        Note that some models accept a `normalize` parameter in this function, which applies the normalizer if one is available.
"""
@abstractmethod
def generate_spectrogram(self, tokens: 'torch.tensor', **kwargs) -> 'torch.tensor':
"""
Accepts a batch of text or text_tokens and returns a batch of spectrograms
Args:
tokens: A torch tensor representing the text to be generated
Returns:
spectrograms
"""
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
for subclass in cls.__subclasses__():
subclass_models = subclass.list_available_models()
if subclass_models is not None and len(subclass_models) > 0:
list_of_models.extend(subclass_models)
return list_of_models
def set_export_config(self, args):
for k in ['enable_volume', 'enable_ragged_batches']:
if k in args:
self.export_config[k] = bool(args[k])
args.pop(k)
if 'num_speakers' in args:
self.export_config['num_speakers'] = int(args['num_speakers'])
args.pop('num_speakers')
if 'emb_range' in args:
raise Exception('embedding range is not user-settable')
super().set_export_config(args)
class Vocoder(ModelPT, ABC):
"""
    A base class for models that convert spectrograms to audio. Note that this class takes either linear
    or mel spectrograms as input.
"""
@abstractmethod
def convert_spectrogram_to_audio(self, spec: 'torch.tensor', **kwargs) -> 'torch.tensor':
"""
Accepts a batch of spectrograms and returns a batch of audio.
Args:
spec: ['B', 'n_freqs', 'T'], A torch tensor representing the spectrograms to be vocoded.
Returns:
audio
"""
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
for subclass in cls.__subclasses__():
subclass_models = subclass.list_available_models()
if subclass_models is not None and len(subclass_models) > 0:
list_of_models.extend(subclass_models)
return list_of_models
class GlowVocoder(Vocoder):
""" Base class for all Vocoders that use a Glow or reversible Flow-based setup. All child class are expected
to have a parameter called audio_to_melspec_precessor that is an instance of
nemo.collections.asr.parts.FilterbankFeatures"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._mode = OperationMode.infer
self.stft = None
self.istft = None
self.n_mel = None
self.bias_spect = None
    @property
    def mode(self):
        return self._mode
    @mode.setter
    def mode(self, value):
        # A setter is required because temp_mode() below assigns to `self.mode`.
        self._mode = value
@contextmanager
def temp_mode(self, mode):
old_mode = self.mode
self.mode = mode
try:
yield
finally:
self.mode = old_mode
@contextmanager
def nemo_infer(self): # Prepend with nemo to avoid any .infer() clashes with lightning or pytorch
with ExitStack() as stack:
stack.enter_context(self.temp_mode(OperationMode.infer))
stack.enter_context(torch.no_grad())
yield
def check_children_attributes(self):
if self.stft is None:
try:
n_fft = self.audio_to_melspec_precessor.n_fft
hop_length = self.audio_to_melspec_precessor.hop_length
win_length = self.audio_to_melspec_precessor.win_length
window = self.audio_to_melspec_precessor.window.to(self.device)
except AttributeError as e:
raise AttributeError(
f"{self} could not find a valid audio_to_melspec_precessor. GlowVocoder requires child class "
"to have audio_to_melspec_precessor defined to obtain stft parameters. "
"audio_to_melspec_precessor requires n_fft, hop_length, win_length, window, and nfilt to be "
"defined."
) from e
def yet_another_patch(audio, n_fft, hop_length, win_length, window):
spec = torch.stft(
audio,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
return_complex=True,
)
spec = torch.view_as_real(spec)
return torch.sqrt(spec.pow(2).sum(-1)), torch.atan2(spec[..., -1], spec[..., 0])
self.stft = lambda x: yet_another_patch(
x, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window,
)
self.istft = lambda x, y: torch.istft(
torch.complex(x * torch.cos(y), x * torch.sin(y)),
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
)
if self.n_mel is None:
try:
self.n_mel = self.audio_to_melspec_precessor.nfilt
except AttributeError as e:
raise AttributeError(
f"{self} could not find a valid audio_to_melspec_precessor. GlowVocoder requires child class to "
"have audio_to_melspec_precessor defined to obtain stft parameters. audio_to_melspec_precessor "
"requires nfilt to be defined."
) from e
def update_bias_spect(self):
self.check_children_attributes() # Ensure stft parameters are defined
with self.nemo_infer():
spect = torch.zeros((1, self.n_mel, 88)).to(self.device)
bias_audio = self.convert_spectrogram_to_audio(spec=spect, sigma=0.0, denoise=False)
bias_spect, _ = self.stft(bias_audio)
self.bias_spect = bias_spect[..., 0][..., None]
@typecheck(
input_types={"audio": NeuralType(('B', 'T'), AudioSignal()), "strength": NeuralType(optional=True)},
output_types={"audio": NeuralType(('B', 'T'), AudioSignal())},
)
def denoise(self, audio: 'torch.tensor', strength: float = 0.01):
self.check_children_attributes() # Ensure self.n_mel and self.stft are defined
if self.bias_spect is None:
self.update_bias_spect()
audio_spect, audio_angles = self.stft(audio)
audio_spect_denoised = audio_spect - self.bias_spect.to(audio.device) * strength
audio_spect_denoised = torch.clamp(audio_spect_denoised, 0.0)
audio_denoised = self.istft(audio_spect_denoised, audio_angles)
return audio_denoised
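# --- Illustrative sketch (added, not part of the original NeMo source) ---
# denoise() above is plain spectral subtraction: a scaled copy of the "bias" spectrum
# (the vocoder's output for an all-zero mel input) is removed from every frame of the
# audio magnitude spectrogram. The stand-alone helper below shows the same arithmetic.
def _spectral_subtraction_sketch(
    audio_spect: 'torch.tensor', bias_spect: 'torch.tensor', strength: float = 0.01
) -> 'torch.tensor':
    # Subtract the scaled bias magnitude and clamp so magnitudes stay non-negative.
    return torch.clamp(audio_spect - strength * bias_spect, min=0.0)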
class MelToSpec(ModelPT, ABC):
"""
A base class for models that convert mel spectrograms to linear (magnitude) spectrograms
"""
@abstractmethod
def convert_mel_spectrogram_to_linear(self, mel: 'torch.tensor', **kwargs) -> 'torch.tensor':
"""
Accepts a batch of spectrograms and returns a batch of linear spectrograms
Args:
mel: A torch tensor representing the mel spectrograms ['B', 'mel_freqs', 'T']
Returns:
spec: A torch tensor representing the linear spectrograms ['B', 'n_freqs', 'T']
"""
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
for subclass in cls.__subclasses__():
subclass_models = subclass.list_available_models()
if subclass_models is not None and len(subclass_models) > 0:
list_of_models.extend(subclass_models)
return list_of_models
class TextToWaveform(ModelPT, ABC):
""" Base class for all end-to-end TTS models that generate a waveform from text """
@abstractmethod
def parse(self, str_input: str, **kwargs) -> 'torch.tensor':
"""
A helper function that accepts a raw python string and turns it into a tensor. The tensor should have 2
dimensions. The first is the batch, which should be of size 1. The second should represent time. The tensor
should represent either tokenized or embedded text, depending on the model.
"""
@abstractmethod
def convert_text_to_waveform(self, *, tokens: 'torch.tensor', **kwargs) -> 'List[torch.tensor]':
"""
Accepts a batch of text and returns a list containing a batch of audio
Args:
tokens: A torch tensor representing the text to be converted to speech
Returns:
audio: A list of length batch_size containing torch tensors representing the waveform output
"""
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
for subclass in cls.__subclasses__():
subclass_models = subclass.list_available_models()
if subclass_models is not None and len(subclass_models) > 0:
list_of_models.extend(subclass_models)
return list_of_models
class G2PModel(ModelPT, ABC):
@torch.no_grad()
def convert_graphemes_to_phonemes(
self,
manifest_filepath: str,
output_manifest_filepath: str,
grapheme_field: str = "text_graphemes",
batch_size: int = 32,
num_workers: int = 0,
pred_field: Optional[str] = "pred_text",
) -> List[str]:
"""
        Main function for inference. Converts grapheme entries from the manifest "grapheme_field" to phonemes.
        Args:
            manifest_filepath: Path to the input .json manifest file
            output_manifest_filepath: Path to the output .json manifest file where predictions are saved
            grapheme_field: name of the field in manifest_filepath containing the input grapheme text
            pred_field: name of the field in the output file in which to save predictions
            batch_size: batch size to use for inference
            num_workers: number of workers to use for the DataLoader during inference
        Returns: Predictions generated by the model
"""
config = {
"manifest_filepath": manifest_filepath,
"grapheme_field": grapheme_field,
"drop_last": False,
"shuffle": False,
"batch_size": batch_size,
"num_workers": num_workers,
}
all_preds = self._infer(DictConfig(config))
with open(manifest_filepath, "r") as f_in:
with open(output_manifest_filepath, 'w', encoding="utf-8") as f_out:
for i, line in tqdm(enumerate(f_in)):
line = json.loads(line)
line[pred_field] = all_preds[i]
f_out.write(json.dumps(line, ensure_ascii=False) + "\n")
logging.info(f"Predictions saved to {output_manifest_filepath}.")
return all_preds
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
# recursively walk the subclasses to generate pretrained model info
list_of_models = model_utils.resolve_subclass_pretrained_model_info(cls)
return list_of_models
| NeMo-main | nemo/collections/tts/models/base.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/tts/parts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.tts.parts.mixins.fastpitch_adapter_mixins import FastPitchAdapterModelMixin
| NeMo-main | nemo/collections/tts/parts/mixins/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from omegaconf import DictConfig, open_dict
from nemo.core.classes.mixins.adapter_mixins import AdapterModelPTMixin, AdapterModuleMixin
from nemo.utils import logging, logging_mode
class FastPitchAdapterModelMixin(AdapterModelPTMixin):
""" FastPitch Adapter Mixin that can augment any Encoder module with Adapter module support.
    This mixin class should be used only with a top-level ModelPT subclass that includes an `encoder` submodule.
This mixin class adds several utility methods which are propagated to the `encoder`.
An Adapter module is any Pytorch nn.Module that possess a few properties :
    - Its input and output dimensions are the same, while the hidden dimension need not be the same.
- The final layer of the Adapter module is zero-initialized, so that the residual connection to the adapter
yields the original output.
This mixin adds the following instance variables to the class this inherits it:
- `adapter_layer`: A torch.nn.ModuleDict(), whose keys are the names of the adapter (globally unique),
and values are the Adapter nn.Module().
    - `adapter_cfg`: An OmegaConf DictConfig object that holds the config of the adapters that are initialized.
- `adapter_global_cfg_key`: A str representing a key in the model config that can be provided by the user.
The value resolves to `global_cfg`, and can be overridden via `model.cfg.adapters.global_cfg.*`.
**Note**: This module **is** responsible for maintaining its config. At the ModelPT level, it will access and
write Adapter config information to `self.cfg.adapters`.
"""
def setup_adapters(self):
"""
        Utility method that is called in the ModelPT-implementation constructor, so as to restore any
        adapters that were previously added.
This method should be called just once at constructor time.
"""
supports_adapters = False
        # At least one of the FastPitch sub-modules must extend AdapterModuleMixin
if hasattr(self.fastpitch, 'encoder') and isinstance(self.fastpitch.encoder, AdapterModuleMixin):
supports_adapters |= True
if hasattr(self.fastpitch, 'decoder') and isinstance(self.fastpitch.decoder, AdapterModuleMixin):
supports_adapters |= True
if hasattr(self.fastpitch, 'duration_predictor') and isinstance(
self.fastpitch.duration_predictor, AdapterModuleMixin
):
supports_adapters |= True
if hasattr(self.fastpitch, 'pitch_predictor') and isinstance(
self.fastpitch.pitch_predictor, AdapterModuleMixin
):
supports_adapters |= True
if hasattr(self.fastpitch, 'aligner') and isinstance(self.fastpitch.aligner, AdapterModuleMixin):
supports_adapters |= True
# If adapters are supported, setup the adapter config + any modules (pre-existing adapter modules)
if supports_adapters:
super().setup_adapters()
def add_adapter(self, name: str, cfg: DictConfig):
"""
Add an Adapter module to this model.
Args:
name: A globally unique name for the adapter. Will be used to access, enable and disable adapters.
            cfg: A DictConfig that contains at the bare minimum `_target_` to instantiate a new Adapter module.
"""
# setup the config for adapters
super().add_adapter(name=name, cfg=cfg)
# Resolve module name and adapter name
module_name, _ = self.resolve_adapter_module_name_(name)
# Use + as a splitter, in order to share one name across multiple modules
if '+' in module_name:
module_names = module_name.split('+')
else:
module_names = [module_name]
with open_dict(self.cfg):
for module_name in module_names:
# Check if encoder adapters should be added
if module_name == 'encoder':
# Dispatch the call to the encoder.
self.fastpitch.encoder.add_adapter(name=name, cfg=cfg)
# Check if decoder adapters should be added
if module_name in ('', 'decoder'):
# Dispatch call to the decoder. (default use decoder)
self.fastpitch.decoder.add_adapter(name=name, cfg=cfg)
# Check if duration_predictor adapters should be added
if module_name in ('', 'duration_predictor'):
# Dispatch call to the duration_predictor. (default use duration_predictor)
self.fastpitch.duration_predictor.add_adapter(name=name, cfg=cfg)
# Check if pitch_predictor adapters should be added
if module_name in ('', 'pitch_predictor'):
# Dispatch call to the pitch_predictor. (default use pitch_predictor)
self.fastpitch.pitch_predictor.add_adapter(name=name, cfg=cfg)
# Check if aligner adapters should be added
if module_name in ('', 'aligner'):
# Dispatch call to the aligner. (default use aligner)
self.fastpitch.aligner.add_adapter(name=name, cfg=cfg)
def is_adapter_available(self) -> bool:
"""
Checks if any Adapter module has been instantiated.
Returns:
            bool, True if any Adapter module has been instantiated, whether it is currently enabled or disabled;
            False only if no adapters exist.
"""
config_contains_adapter = super().is_adapter_available()
# Forward the method call to the individual modules
if hasattr(self.fastpitch, 'encoder') and isinstance(self.fastpitch.encoder, AdapterModuleMixin):
config_contains_adapter |= self.fastpitch.encoder.is_adapter_available()
if hasattr(self.fastpitch, 'decoder') and isinstance(self.fastpitch.decoder, AdapterModuleMixin):
config_contains_adapter |= self.fastpitch.decoder.is_adapter_available()
if hasattr(self.fastpitch, 'duration_predictor') and isinstance(
self.fastpitch.duration_predictor, AdapterModuleMixin
):
config_contains_adapter |= self.fastpitch.duration_predictor.is_adapter_available()
if hasattr(self.fastpitch, 'pitch_predictor') and isinstance(
self.fastpitch.pitch_predictor, AdapterModuleMixin
):
config_contains_adapter |= self.fastpitch.pitch_predictor.is_adapter_available()
if hasattr(self.fastpitch, 'aligner') and isinstance(self.fastpitch.aligner, AdapterModuleMixin):
config_contains_adapter |= self.fastpitch.aligner.is_adapter_available()
return config_contains_adapter
def set_enabled_adapters(self, name: Optional[str] = None, enabled: bool = True):
"""
        Updates the internal adapter config, determining whether an adapter (or all adapters) should be
        enabled or disabled.
A common user pattern would be to disable all adapters (either after adding them, or restoring a model
with pre-existing adapters) and then simply enable one of the adapters.
.. code::
model.set_enabled_adapters(enabled=False)
model.set_enabled_adapters(name=<some adapter name>, enabled=True)
Args:
name: Optional str. If a str name is given, the config will be updated to the value of `enabled`.
If no name is given, then all adapters will be enabled/disabled.
enabled: Bool, determines if the adapter(s) will be enabled/disabled.
"""
super().set_enabled_adapters(name=name, enabled=enabled)
# Resolve the module name and adapter name
if name is not None:
module_name, _ = self.resolve_adapter_module_name_(name)
else:
module_name = None
# Use + as a splitter, in order to share one name across multiple modules
if module_name is not None and '+' in module_name:
module_names = module_name.split('+')
else:
module_names = [module_name]
for module_name in module_names:
# Check if encoder adapters should be used
# Dispatch the call to the encoder.
if name is None or module_name == 'encoder':
if self.fastpitch.encoder.is_adapter_available():
self.fastpitch.encoder.set_enabled_adapters(name=name, enabled=enabled)
# Dispatch the call to the decoder.
if name is None or module_name in ('', 'decoder'):
if self.fastpitch.decoder.is_adapter_available():
self.fastpitch.decoder.set_enabled_adapters(name=name, enabled=enabled)
# Dispatch the call to the duration_predictor.
if name is None or module_name in ('', 'duration_predictor'):
if self.fastpitch.duration_predictor.is_adapter_available():
self.fastpitch.duration_predictor.set_enabled_adapters(name=name, enabled=enabled)
# Dispatch the call to the pitch_predictor.
if name is None or module_name in ('', 'pitch_predictor'):
if self.fastpitch.pitch_predictor.is_adapter_available():
self.fastpitch.pitch_predictor.set_enabled_adapters(name=name, enabled=enabled)
# Dispatch the call to the aligner.
if name is None or module_name in ('', 'aligner'):
if self.fastpitch.aligner.is_adapter_available():
self.fastpitch.aligner.set_enabled_adapters(name=name, enabled=enabled)
def get_enabled_adapters(self) -> List[str]:
"""
Returns a list of all enabled adapters.
Returns:
A list of str names of each enabled adapter(s).
"""
enabled_adapters = super().get_enabled_adapters()
# Check if encoder adapters should be used or are enabled
if hasattr(self.fastpitch, 'encoder') and isinstance(self.fastpitch.encoder, AdapterModuleMixin):
enabled_adapters.extend(self.fastpitch.encoder.get_enabled_adapters())
if hasattr(self.fastpitch, 'decoder') and isinstance(self.fastpitch.decoder, AdapterModuleMixin):
enabled_adapters.extend(self.fastpitch.decoder.get_enabled_adapters())
if hasattr(self.fastpitch, 'duration_predictor') and isinstance(
self.fastpitch.duration_predictor, AdapterModuleMixin
):
enabled_adapters.extend(self.fastpitch.duration_predictor.get_enabled_adapters())
if hasattr(self.fastpitch, 'pitch_predictor') and isinstance(
self.fastpitch.pitch_predictor, AdapterModuleMixin
):
enabled_adapters.extend(self.fastpitch.pitch_predictor.get_enabled_adapters())
if hasattr(self.fastpitch, 'aligner') and isinstance(self.fastpitch.aligner, AdapterModuleMixin):
enabled_adapters.extend(self.fastpitch.aligner.get_enabled_adapters())
enabled_adapters = list(sorted(list(set(enabled_adapters))))
return enabled_adapters
def check_valid_model_with_adapter_support_(self):
"""
Utility method to test if the subclass of this mixin is an appropriate subclass of ModelPT itself.
"""
# Obtain the global adapter config if possible, otherwise use sensible defaults.
global_cfg = self._get_global_cfg()
# Test whether the encoder supports adapters
use_encoder_adapter = global_cfg.get('check_encoder_adapter', False)
if use_encoder_adapter:
if not hasattr(self.fastpitch, 'encoder'):
logging.warning(
"Cannot add adapter to this object as it does not have an `fastpitch.encoder` sub-module!",
mode=logging_mode.ONCE,
)
if hasattr(self.fastpitch, 'encoder') and not isinstance(self.fastpitch.encoder, AdapterModuleMixin):
logging.warning(
f'{self.fastpitch.encoder.__class__.__name__} does not implement `AdapterModuleMixin`',
mode=logging_mode.ONCE,
)
# Test whether the decoder supports adapters
use_decoder_adapter = global_cfg.get('check_decoder_adapter', True)
if use_decoder_adapter:
if not hasattr(self.fastpitch, 'decoder'):
logging.warning(
"Cannot add adapter to this object as it does not have an `fastpitch.decoder` sub-module!",
mode=logging_mode.ONCE,
)
if hasattr(self.fastpitch, 'decoder') and not isinstance(self.fastpitch.decoder, AdapterModuleMixin):
logging.warning(
f'{self.fastpitch.decoder.__class__.__name__} does not implement `AdapterModuleMixin`',
mode=logging_mode.ONCE,
)
# Test whether the duration_predictor supports adapters
use_duration_predictor_adapter = global_cfg.get('check_duration_predictor_adapter', True)
if use_duration_predictor_adapter:
if not hasattr(self.fastpitch, 'duration_predictor'):
logging.warning(
"Cannot add adapter to this object as it does not have an `fastpitch.duration_predictor` sub-module!",
mode=logging_mode.ONCE,
)
if hasattr(self.fastpitch, 'duration_predictor') and not isinstance(
self.fastpitch.duration_predictor, AdapterModuleMixin
):
logging.warning(
f'{self.fastpitch.duration_predictor.__class__.__name__} does not implement `AdapterModuleMixin`',
mode=logging_mode.ONCE,
)
# Test whether the pitch_predictor supports adapters
use_pitch_predictor_adapter = global_cfg.get('check_pitch_predictor_adapter', True)
if use_pitch_predictor_adapter:
if not hasattr(self.fastpitch, 'pitch_predictor'):
logging.warning(
"Cannot add adapter to this object as it does not have an `fastpitch.pitch_predictor` sub-module!",
mode=logging_mode.ONCE,
)
if hasattr(self.fastpitch, 'pitch_predictor') and not isinstance(
self.fastpitch.pitch_predictor, AdapterModuleMixin
):
logging.warning(
f'{self.fastpitch.pitch_predictor.__class__.__name__} does not implement `AdapterModuleMixin`',
mode=logging_mode.ONCE,
)
# Test whether the aligner supports adapters
use_aligner_adapter = global_cfg.get('check_aligner_adapter', True)
if use_aligner_adapter:
if not hasattr(self.fastpitch, 'aligner'):
logging.warning(
"Cannot add adapter to this object as it does not have an `fastpitch.aligner` sub-module!",
mode=logging_mode.ONCE,
)
if hasattr(self.fastpitch, 'aligner') and not isinstance(self.fastpitch.aligner, AdapterModuleMixin):
logging.warning(
f'{self.fastpitch.aligner.__class__.__name__} does not implement `AdapterModuleMixin`',
mode=logging_mode.ONCE,
)
def resolve_adapter_module_name_(self, name: str) -> (str, str):
"""
Utility method to resolve a given global/module adapter name to its components.
Always returns a tuple representing (module_name, adapter_name). ":" is used as the
delimiter for denoting the module name vs the adapter name.
Will attempt to also resolve a given adapter_name alone back to (module_name, adapter_name)
if the metadata config exists for access.
Args:
name: A global adapter, or a module adapter name (with structure module_name:adapter_name).
Returns:
A tuple representing (module_name, adapter_name). If a global adapter is provided,
module_name is set to ''.
"""
module_name, adapter_name = super().resolve_adapter_module_name_(name)
# Use + as a splitter, in order to share one name across multiple modules
if '+' in module_name:
module_names = module_name.split('+')
else:
module_names = [module_name]
# resolve name and module only for valid modules
valid_module_names = self.adapter_module_names
for mod_name in module_names:
if mod_name not in valid_module_names:
raise ValueError(f"Provided module name `{mod_name}` is not in valid list : {valid_module_names}")
return (module_name, adapter_name)
def _get_global_cfg(self):
"""
Utility method, to either extract or construct the global config inside adapters config.
"""
global_config = DictConfig({})
if 'adapters' in self.cfg and self.adapter_global_cfg_key in self.cfg.adapters:
global_config = self.adapter_cfg[self.adapter_global_cfg_key]
return global_config
@property
def adapter_module_names(self) -> List[str]:
module_names = super().adapter_module_names # "Default" adapter module: ''
module_names.extend(
['encoder', 'decoder', 'duration_predictor', 'pitch_predictor', 'aligner']
        )  # Add support for the `encoder`, `decoder`, `duration_predictor`, `pitch_predictor`, and `aligner` modules
return module_names
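# --- Illustrative usage (added sketch, not part of the original NeMo source) ---
# Shows the module-scoped adapter naming described in the docstrings above:
# "encoder+decoder:my_adapter" adds one shared adapter name to both sub-modules.
# The `_target_` class and its parameter values are illustrative assumptions; any
# adapter-module config accepted by add_adapter() works, and `model` is assumed to be
# a FastPitch-style ModelPT that mixes in FastPitchAdapterModelMixin.
def _example_add_fastpitch_adapter(model: 'FastPitchAdapterModelMixin') -> None:
    adapter_cfg = DictConfig(
        {
            "_target_": "nemo.collections.common.parts.adapter_modules.LinearAdapter",  # assumed adapter module
            "in_features": 384,  # must match the sub-module hidden size
            "dim": 32,
        }
    )
    model.add_adapter("encoder+decoder:my_adapter", cfg=adapter_cfg)
    # Common pattern: disable everything, then enable just the new adapter.
    model.set_enabled_adapters(enabled=False)
    model.set_enabled_adapters("my_adapter", enabled=True)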
| NeMo-main | nemo/collections/tts/parts/mixins/fastpitch_adapter_mixins.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
import torch
from einops import rearrange
from scipy import ndimage
from torch.special import gammaln
def get_abs_rel_paths(input_path: Path, base_path: Path) -> Tuple[Path, Path]:
"""
Get the absolute and relative paths of input file path.
Args:
input_path: An absolute or relative path.
base_path: base directory the input is relative to.
Returns:
The absolute and relative paths of the file.
"""
if os.path.isabs(input_path):
abs_path = input_path
rel_path = input_path.relative_to(base_path)
else:
rel_path = input_path
abs_path = base_path / rel_path
return abs_path, rel_path
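# --- Illustrative usage (added sketch, not part of the original NeMo source) ---
# get_abs_rel_paths() resolves a path both ways against a base directory
# (POSIX-style paths assumed below).
def _example_get_abs_rel_paths() -> None:
    abs_path, rel_path = get_abs_rel_paths(Path("clips/a.wav"), Path("/data/audio"))
    assert abs_path == Path("/data/audio/clips/a.wav") and rel_path == Path("clips/a.wav")
    abs_path, rel_path = get_abs_rel_paths(Path("/data/audio/clips/a.wav"), Path("/data/audio"))
    assert abs_path == Path("/data/audio/clips/a.wav") and rel_path == Path("clips/a.wav")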
def get_audio_filepaths(manifest_entry: Dict[str, Any], audio_dir: Path) -> Tuple[Path, Path]:
"""
Get the absolute and relative paths of audio from a manifest entry.
Args:
manifest_entry: Manifest entry dictionary.
audio_dir: base directory where audio is stored.
Returns:
The absolute and relative paths of the audio.
"""
audio_filepath = Path(manifest_entry["audio_filepath"])
audio_filepath_abs, audio_filepath_rel = get_abs_rel_paths(input_path=audio_filepath, base_path=audio_dir)
return audio_filepath_abs, audio_filepath_rel
def normalize_volume(audio: np.array, volume_level: float) -> np.array:
"""Apply peak normalization to the input audio.
"""
if not (0.0 <= volume_level <= 1.0):
raise ValueError(f"Volume must be in range [0.0, 1.0], received {volume_level}")
if audio.size == 0:
return audio
    max_sample = np.max(np.abs(audio))
    if max_sample == 0:
        return audio
    return volume_level * (audio / max_sample)
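# --- Illustrative usage (added sketch, not part of the original NeMo source) ---
# Peak normalization rescales the waveform so its loudest sample sits at volume_level.
def _example_normalize_volume() -> None:
    audio = np.array([0.1, -0.2, 0.05], dtype=np.float32)
    normalized = normalize_volume(audio, volume_level=0.95)
    assert np.isclose(np.max(np.abs(normalized)), 0.95)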
class BetaBinomialInterpolator:
"""
    This module calculates alignment prior matrices (based on the beta-binomial distribution), caching matrices for frequently used sizes and rescaling them to other sizes via image interpolation.
The implementation is taken from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/FastPitch/fastpitch/data_function.py
"""
def __init__(self, round_mel_len_to=50, round_text_len_to=10, cache_size=500):
self.round_mel_len_to = round_mel_len_to
self.round_text_len_to = round_text_len_to
self.bank = functools.lru_cache(maxsize=cache_size)(beta_binomial_prior_distribution)
@staticmethod
def round(val, to):
return max(1, int(np.round((val + 1) / to))) * to
def __call__(self, w, h):
bw = BetaBinomialInterpolator.round(w, to=self.round_mel_len_to)
bh = BetaBinomialInterpolator.round(h, to=self.round_text_len_to)
ret = ndimage.zoom(self.bank(bw, bh).T, zoom=(w / bw, h / bh), order=1)
assert ret.shape[0] == w, ret.shape
assert ret.shape[1] == h, ret.shape
return ret
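# --- Illustrative usage (added sketch, not part of the original NeMo source) ---
# The interpolator returns an alignment prior of shape (spectrogram_frames, text_tokens);
# repeated calls with similar sizes reuse the cached beta-binomial matrices.
def _example_beta_binomial_interpolator() -> None:
    interpolator = BetaBinomialInterpolator()
    prior = interpolator(w=173, h=37)  # 173 mel frames aligned against 37 text tokens
    assert prior.shape == (173, 37)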
def general_padding(item, item_len, max_len, pad_value=0):
if item_len < max_len:
item = torch.nn.functional.pad(item, (0, max_len - item_len), value=pad_value)
return item
def stack_tensors(tensors: List[torch.Tensor], max_lens: List[int], pad_value: float = 0.0) -> torch.Tensor:
"""
Create batch by stacking input tensor list along the time axes.
Args:
tensors: List of tensors to pad and stack
max_lens: List of lengths to pad each axis to, starting with the last axis
pad_value: Value for padding
Returns:
Padded and stacked tensor.
"""
padded_tensors = []
for tensor in tensors:
padding = []
for i, max_len in enumerate(max_lens, 1):
padding += [0, max_len - tensor.shape[-i]]
padded_tensor = torch.nn.functional.pad(tensor, pad=padding, value=pad_value)
padded_tensors.append(padded_tensor)
stacked_tensor = torch.stack(padded_tensors)
return stacked_tensor
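# --- Illustrative usage (added sketch, not part of the original NeMo source) ---
# Two spectrograms with different time lengths are zero-padded to a shared length
# (max_lens is given last-axis first) and stacked into one batch tensor.
def _example_stack_tensors() -> None:
    specs = [torch.ones(80, 120), torch.ones(80, 95)]
    batch = stack_tensors(specs, max_lens=[128])
    assert batch.shape == (2, 80, 128)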
def logbeta(x, y):
return gammaln(x) + gammaln(y) - gammaln(x + y)
def logcombinations(n, k):
return gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1)
def logbetabinom(n, a, b, x):
return logcombinations(n, x) + logbeta(x + a, n - x + b) - logbeta(a, b)
def beta_binomial_prior_distribution(phoneme_count: int, mel_count: int, scaling_factor: float = 1.0) -> np.array:
x = rearrange(torch.arange(0, phoneme_count), "b -> 1 b")
y = rearrange(torch.arange(1, mel_count + 1), "b -> b 1")
a = scaling_factor * y
b = scaling_factor * (mel_count + 1 - y)
n = torch.FloatTensor([phoneme_count - 1])
return logbetabinom(n, a, b, x).exp().numpy()
def get_base_dir(paths):
def is_relative_to(path1, path2):
try:
path1.relative_to(path2)
return True
except ValueError:
return False
def common_path(path1, path2):
while path1 is not None:
if is_relative_to(path2, path1):
return path1
path1 = path1.parent if path1 != path1.parent else None
return None
base_dir = None
for p in paths:
audio_dir = Path(p).parent
if base_dir is None:
base_dir = audio_dir
continue
base_dir = common_path(base_dir, audio_dir)
return base_dir
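# --- Illustrative usage (added sketch, not part of the original NeMo source) ---
# get_base_dir() returns the deepest directory that contains every input file.
def _example_get_base_dir() -> None:
    paths = ["/data/speaker1/a.wav", "/data/speaker2/b.wav"]
    assert get_base_dir(paths) == Path("/data")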
def filter_dataset_by_duration(entries: List[Dict[str, Any]], min_duration: float, max_duration: float):
"""
Filter out manifest entries based on duration.
Args:
entries: List of manifest entry dictionaries.
min_duration: Minimum duration below which entries are removed.
max_duration: Maximum duration above which entries are removed.
Returns:
filtered_entries: List of manifest entries after filtering.
total_hours: Total duration of original dataset, in hours
filtered_hours: Total duration of dataset after filtering, in hours
"""
filtered_entries = []
total_duration = 0.0
filtered_duration = 0.0
for entry in entries:
duration = entry["duration"]
total_duration += duration
if (min_duration and duration < min_duration) or (max_duration and duration > max_duration):
continue
filtered_duration += duration
filtered_entries.append(entry)
total_hours = total_duration / 3600.0
filtered_hours = filtered_duration / 3600.0
return filtered_entries, total_hours, filtered_hours
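# --- Illustrative usage (added sketch, not part of the original NeMo source) ---
# Entries shorter than min_duration or longer than max_duration are dropped;
# the returned totals are expressed in hours.
def _example_filter_dataset_by_duration() -> None:
    entries = [{"duration": 0.3}, {"duration": 5.0}, {"duration": 42.0}]
    kept, total_hours, kept_hours = filter_dataset_by_duration(entries, min_duration=0.5, max_duration=20.0)
    assert len(kept) == 1 and kept[0]["duration"] == 5.0
    assert kept_hours < total_hours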
def get_weighted_sampler(
sample_weights: List[float], batch_size: int, num_steps: int
) -> torch.utils.data.WeightedRandomSampler:
"""
Create pytorch sampler for doing weighted random sampling.
Args:
sample_weights: List of sampling weights for all elements in the dataset.
batch_size: Batch size to sample.
num_steps: Number of steps to be considered an epoch.
Returns:
Pytorch sampler
"""
weights = torch.tensor(sample_weights, dtype=torch.float64)
num_samples = batch_size * num_steps
sampler = torch.utils.data.WeightedRandomSampler(weights=weights, num_samples=num_samples)
return sampler
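# --- Illustrative usage (added sketch, not part of the original NeMo source) ---
# The sampler draws batch_size * num_steps indices per "epoch", with the third
# utterance drawn roughly twice as often as the others on average.
def _example_get_weighted_sampler() -> None:
    sampler = get_weighted_sampler(sample_weights=[1.0, 1.0, 2.0], batch_size=2, num_steps=4)
    assert len(list(sampler)) == 2 * 4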
| NeMo-main | nemo/collections/tts/parts/utils/tts_dataset_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/tts/parts/utils/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
# Copyright (c) 2020, nicolas deutschmann
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import torch
import torch.nn.functional as F
class AvertedCUDARuntimeError(RuntimeError):
    """Raised by the host-side sanity checks in this module instead of letting an invalid
    bin index reach the GPU, where it would trigger a device-side assert."""
def piecewise_linear_transform(x, q_tilde, compute_jacobian=True, outlier_passthru=True):
"""Apply an element-wise piecewise-linear transformation to some variables
Args:
x : torch.Tensor
            a tensor with shape (N,k) where N is the batch dimension while k is the dimension of the variable space.
            This variable spans the k-dimensional unit hypercube.
q_tilde: torch.Tensor
is a tensor with shape (N,k,b) where b is the number of bins.
This contains the un-normalized heights of the bins of the piecewise-constant PDF for dimension k,
i.e. q_tilde lives in all of R and we don't impose a constraint on their sum yet.
Normalization is imposed in this function using softmax.
compute_jacobian : bool, optional
            determines whether the jacobian should be computed; if False, None is returned in its place
Returns:
tuple of torch.Tensor
            pair `(y, j)`.
- `y` is a tensor with shape (N,k) living in the k-dimensional unit hypercube
- `j` is the jacobian of the transformation with shape (N,) if compute_jacobian==True, else None.
"""
logj = None
third_dimension_softmax = torch.nn.Softmax(dim=2)
# Compute the bin width w
N, k, b = q_tilde.shape
Nx, kx = x.shape
assert N == Nx and k == kx, "Shape mismatch"
w = 1.0 / b
# Compute normalized bin heights with softmax function on bin dimension
q = 1.0 / w * third_dimension_softmax(q_tilde)
# x is in the mx-th bin: x \in [0,1],
# mx \in [[0,b-1]], so we clamp away the case x == 1
mx = torch.clamp(torch.floor(b * x), 0, b - 1).to(torch.long)
# Need special error handling because trying to index with mx
# if it contains nans will lock the GPU. (device-side assert triggered)
if torch.any(torch.isnan(mx)).item() or torch.any(mx < 0) or torch.any(mx >= b):
raise AvertedCUDARuntimeError("NaN detected in PWLinear bin indexing")
# We compute the output variable in-place
    out = x - mx * w  # alpha, an element of [0., w]: the position of x within its bin
# Multiply by the slope
# q has shape (N,k,b), mxu = mx.unsqueeze(-1) has shape (N,k) with entries that are a b-index
# gather defines slope[i, j, k] = q[i, j, mxu[i, j, k]] with k taking only 0 as a value
# i.e. we say slope[i, j] = q[i, j, mx [i, j]]
slopes = torch.gather(q, 2, mx.unsqueeze(-1)).squeeze(-1)
out = out * slopes
# The jacobian is the product of the slopes in all dimensions
# Compute the integral over the left-bins.
# 1. Compute all integrals: cumulative sum of bin height * bin weight.
# We want that index i contains the cumsum *strictly to the left* so we shift by 1
# leaving the first entry null, which is achieved with a roll and assignment
q_left_integrals = torch.roll(torch.cumsum(q, 2) * w, 1, 2)
q_left_integrals[:, :, 0] = 0
# 2. Access the correct index to get the left integral of each point and add it to our transformation
out = out + torch.gather(q_left_integrals, 2, mx.unsqueeze(-1)).squeeze(-1)
# Regularization: points must be strictly within the unit hypercube
# Use the dtype information from pytorch
eps = torch.finfo(out.dtype).eps
out = out.clamp(min=eps, max=1.0 - eps)
oob_mask = torch.logical_or(x < 0.0, x > 1.0).detach().float()
if outlier_passthru:
out = out * (1 - oob_mask) + x * oob_mask
slopes = slopes * (1 - oob_mask) + oob_mask
if compute_jacobian:
# logj = torch.log(torch.prod(slopes.float(), 1))
logj = torch.sum(torch.log(slopes), 1)
del slopes
return out, logj
def piecewise_linear_inverse_transform(y, q_tilde, compute_jacobian=True, outlier_passthru=True):
"""
Apply inverse of an element-wise piecewise-linear transformation to some
variables
Args:
y : torch.Tensor
            a tensor with shape (N,k) where N is the batch dimension while k is the dimension of the variable space. This variable spans the k-dimensional unit hypercube
q_tilde: torch.Tensor
is a tensor with shape (N,k,b) where b is the number of bins. This contains the un-normalized heights of the bins of the piecewise-constant PDF for dimension k, i.e. q_tilde lives in all of R and we don't impose a constraint on their sum yet. Normalization is imposed in this function using softmax.
compute_jacobian : bool, optional
            determines whether the jacobian should be computed; if False, None is returned in its place
Returns:
tuple of torch.Tensor
            pair `(x, j)`.
- `x` is a tensor with shape (N,k) living in the k-dimensional unit hypercube
- `j` is the jacobian of the transformation with shape (N,) if compute_jacobian==True, else None.
"""
third_dimension_softmax = torch.nn.Softmax(dim=2)
# Compute the bin width w
N, k, b = q_tilde.shape
Ny, ky = y.shape
assert N == Ny and k == ky, "Shape mismatch"
w = 1.0 / b
# Compute normalized bin heights with softmax function on the bin dimension
q = 1.0 / w * third_dimension_softmax(q_tilde)
# Compute the integral over the left-bins in the forward transform.
# 1. Compute all integrals: cumulative sum of bin height * bin weight.
# We want that index i contains the cumsum *strictly to the left*,
# so we shift by 1 leaving the first entry null,
# which is achieved with a roll and assignment
q_left_integrals = torch.roll(torch.cumsum(q.float(), 2) * w, 1, 2)
q_left_integrals[:, :, 0] = 0
# Find which bin each y belongs to by finding the smallest bin such that
# y - q_left_integral is positive
edges = (y.unsqueeze(-1) - q_left_integrals).detach()
# y and q_left_integrals are between 0 and 1,
# so that their difference is at most 1.
# By setting the negative values to 2., we know that the
# smallest value left is the smallest positive
edges[edges < 0] = 2.0
edges = torch.clamp(torch.argmin(edges, dim=2), 0, b - 1).to(torch.long)
# Need special error handling because trying to index with mx
# if it contains nans will lock the GPU. (device-side assert triggered)
if torch.any(torch.isnan(edges)).item() or torch.any(edges < 0) or torch.any(edges >= b):
raise AvertedCUDARuntimeError("NaN detected in PWLinear bin indexing")
# Gather the left integrals at each edge. See comment about gathering in q_left_integrals
# for the unsqueeze
q_left_integrals = q_left_integrals.gather(2, edges.unsqueeze(-1)).squeeze(-1)
# Gather the slope at each edge.
q = q.gather(2, edges.unsqueeze(-1)).squeeze(-1)
# Build the output
x = (y - q_left_integrals) / q + edges * w
# Regularization: points must be strictly within the unit hypercube
# Use the dtype information from pytorch
eps = torch.finfo(x.dtype).eps
x = x.clamp(min=eps, max=1.0 - eps)
oob_mask = torch.logical_or(y < 0.0, y > 1.0).detach().float()
if outlier_passthru:
x = x * (1 - oob_mask) + y * oob_mask
q = q * (1 - oob_mask) + oob_mask
# Prepare the jacobian
logj = None
if compute_jacobian:
# logj = - torch.log(torch.prod(q, 1))
logj = -torch.sum(torch.log(q.float()), 1)
return x.detach(), logj
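# --- Illustrative round trip (added sketch, not part of the original NeMo source) ---
# The forward and inverse piecewise-linear transforms should invert each other and
# produce opposite log-Jacobians, up to floating-point error and edge clamping.
def _example_piecewise_linear_roundtrip() -> None:
    torch.manual_seed(0)
    x = torch.rand(4, 3)  # batch of 4 points in the 3-dimensional unit hypercube
    q_tilde = torch.randn(4, 3, 8)  # un-normalized heights for 8 bins per dimension
    y, logj = piecewise_linear_transform(x, q_tilde)
    x_rec, inv_logj = piecewise_linear_inverse_transform(y, q_tilde)
    assert torch.allclose(x, x_rec, atol=1e-4)
    assert torch.allclose(logj, -inv_logj, atol=1e-4)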
def unbounded_piecewise_quadratic_transform(x, w_tilde, v_tilde, upper=1, lower=0, inverse=False):
assert upper > lower
_range = upper - lower
inside_interval_mask = (x >= lower) & (x < upper)
outside_interval_mask = ~inside_interval_mask
outputs = torch.zeros_like(x)
log_j = torch.zeros_like(x)
outputs[outside_interval_mask] = x[outside_interval_mask]
log_j[outside_interval_mask] = 0
output, _log_j = piecewise_quadratic_transform(
(x[inside_interval_mask] - lower) / _range,
w_tilde[inside_interval_mask, :],
v_tilde[inside_interval_mask, :],
inverse=inverse,
)
outputs[inside_interval_mask] = output * _range + lower
if not inverse:
# the before and after transformation cancel out, so the log_j would be just as it is.
log_j[inside_interval_mask] = _log_j
else:
log_j = None
return outputs, log_j
def weighted_softmax(v, w):
# to avoid NaN...
v = v - torch.max(v, dim=-1, keepdim=True)[0]
v = torch.exp(v) + 1e-8 # to avoid NaN...
v_sum = torch.sum((v[..., :-1] + v[..., 1:]) / 2 * w, dim=-1, keepdim=True)
return v / v_sum
def piecewise_quadratic_transform(x, w_tilde, v_tilde, inverse=False):
"""Element-wise piecewise-quadratic transformation
Args:
x : torch.Tensor
*, The variable spans the D-dim unit hypercube ([0,1))
w_tilde : torch.Tensor
* x K defined in the paper
v_tilde : torch.Tensor
* x (K+1) defined in the paper
inverse : bool
forward or inverse
Returns:
c : torch.Tensor
*, transformed value
log_j : torch.Tensor
*, log determinant of the Jacobian matrix
"""
w = torch.softmax(w_tilde, dim=-1)
v = weighted_softmax(v_tilde, w)
w_cumsum = torch.cumsum(w, dim=-1)
# force sum = 1
w_cumsum[..., -1] = 1.0
w_cumsum_shift = F.pad(w_cumsum, (1, 0), 'constant', 0)
cdf = torch.cumsum((v[..., 1:] + v[..., :-1]) / 2 * w, dim=-1)
# force sum = 1
cdf[..., -1] = 1.0
cdf_shift = F.pad(cdf, (1, 0), 'constant', 0)
if not inverse:
# * x D x 1, (w_cumsum[idx-1] < x <= w_cumsum[idx])
bin_index = torch.searchsorted(w_cumsum, x.unsqueeze(-1))
else:
# * x D x 1, (cdf[idx-1] < x <= cdf[idx])
bin_index = torch.searchsorted(cdf, x.unsqueeze(-1))
w_b = torch.gather(w, -1, bin_index).squeeze(-1)
w_bn1 = torch.gather(w_cumsum_shift, -1, bin_index).squeeze(-1)
v_b = torch.gather(v, -1, bin_index).squeeze(-1)
v_bp1 = torch.gather(v, -1, bin_index + 1).squeeze(-1)
cdf_bn1 = torch.gather(cdf_shift, -1, bin_index).squeeze(-1)
if not inverse:
alpha = (x - w_bn1) / w_b.clamp(min=torch.finfo(w_b.dtype).eps)
c = (alpha ** 2) / 2 * (v_bp1 - v_b) * w_b + alpha * v_b * w_b + cdf_bn1
# just sum of log pdfs
log_j = torch.lerp(v_b, v_bp1, alpha).clamp(min=torch.finfo(c.dtype).eps).log()
# make sure it falls into [0,1)
c = c.clamp(min=torch.finfo(c.dtype).eps, max=1.0 - torch.finfo(c.dtype).eps)
return c, log_j
else:
# quadratic equation for alpha
# alpha should fall into (0, 1]. Since a, b > 0, the symmetry axis -b/2a < 0 and we should pick the larger root
# skip calculating the log_j in inverse since we don't need it
a = (v_bp1 - v_b) * w_b / 2
b = v_b * w_b
c = cdf_bn1 - x
alpha = (-b + torch.sqrt((b ** 2) - 4 * a * c)) / (2 * a)
inv = alpha * w_b + w_bn1
# make sure it falls into [0,1)
inv = inv.clamp(min=torch.finfo(c.dtype).eps, max=1.0 - torch.finfo(inv.dtype).eps)
return inv, None
def piecewise_rational_quadratic_transform(
inputs,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=False,
tails=None,
tail_bound=1.0,
min_bin_width=1e-3,
min_bin_height=1e-3,
min_derivative=1e-3,
):
if tails is None:
spline_fn = rational_quadratic_spline
spline_kwargs = {}
else:
spline_fn = unconstrained_rational_quadratic_spline
spline_kwargs = {'tails': tails, 'tail_bound': tail_bound}
outputs, logabsdet = spline_fn(
inputs=inputs,
unnormalized_widths=unnormalized_widths,
unnormalized_heights=unnormalized_heights,
unnormalized_derivatives=unnormalized_derivatives,
inverse=inverse,
min_bin_width=min_bin_width,
min_bin_height=min_bin_height,
min_derivative=min_derivative,
**spline_kwargs
)
return outputs, logabsdet
def searchsorted(bin_locations, inputs, eps=1e-6):
bin_locations[..., -1] += eps
return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
def unconstrained_rational_quadratic_spline(
inputs,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=False,
tails='linear',
tail_bound=1.0,
min_bin_width=1e-3,
min_bin_height=1e-3,
min_derivative=1e-3,
):
inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
outside_interval_mask = ~inside_interval_mask
outputs = torch.zeros_like(inputs)
logabsdet = torch.zeros_like(inputs)
if tails == 'linear':
unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
constant = np.log(np.exp(1 - min_derivative) - 1)
unnormalized_derivatives[..., 0] = constant
unnormalized_derivatives[..., -1] = constant
outputs[outside_interval_mask] = inputs[outside_interval_mask]
logabsdet[outside_interval_mask] = 0
else:
raise RuntimeError('{} tails are not implemented.'.format(tails))
outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
inputs=inputs[inside_interval_mask],
unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
inverse=inverse,
left=-tail_bound,
right=tail_bound,
bottom=-tail_bound,
top=tail_bound,
min_bin_width=min_bin_width,
min_bin_height=min_bin_height,
min_derivative=min_derivative,
)
return outputs, logabsdet
def rational_quadratic_spline(
inputs,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=False,
left=0.0,
right=1.0,
bottom=0.0,
top=1.0,
min_bin_width=1e-3,
min_bin_height=1e-3,
min_derivative=1e-3,
):
if torch.min(inputs) < left or torch.max(inputs) > right:
raise ValueError('Input to a transform is not within its domain')
num_bins = unnormalized_widths.shape[-1]
if min_bin_width * num_bins > 1.0:
raise ValueError('Minimal bin width too large for the number of bins')
if min_bin_height * num_bins > 1.0:
raise ValueError('Minimal bin height too large for the number of bins')
widths = F.softmax(unnormalized_widths, dim=-1)
widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
cumwidths = torch.cumsum(widths, dim=-1)
cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
cumwidths = (right - left) * cumwidths + left
cumwidths[..., 0] = left
cumwidths[..., -1] = right
widths = cumwidths[..., 1:] - cumwidths[..., :-1]
derivatives = min_derivative + F.softplus(unnormalized_derivatives)
heights = F.softmax(unnormalized_heights, dim=-1)
heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
cumheights = torch.cumsum(heights, dim=-1)
cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
cumheights = (top - bottom) * cumheights + bottom
cumheights[..., 0] = bottom
cumheights[..., -1] = top
heights = cumheights[..., 1:] - cumheights[..., :-1]
if inverse:
bin_idx = searchsorted(cumheights, inputs)[..., None]
else:
bin_idx = searchsorted(cumwidths, inputs)[..., None]
input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
delta = heights / widths
input_delta = delta.gather(-1, bin_idx)[..., 0]
input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
input_heights = heights.gather(-1, bin_idx)[..., 0]
if inverse:
a = (inputs - input_cumheights) * (
input_derivatives + input_derivatives_plus_one - 2 * input_delta
) + input_heights * (input_delta - input_derivatives)
b = input_heights * input_derivatives - (inputs - input_cumheights) * (
input_derivatives + input_derivatives_plus_one - 2 * input_delta
)
c = -input_delta * (inputs - input_cumheights)
discriminant = b.pow(2) - 4 * a * c
assert (discriminant >= 0).all()
root = (2 * c) / (-b - torch.sqrt(discriminant))
outputs = root * input_bin_widths + input_cumwidths
theta_one_minus_theta = root * (1 - root)
denominator = input_delta + (
(input_derivatives + input_derivatives_plus_one - 2 * input_delta) * theta_one_minus_theta
)
derivative_numerator = input_delta.pow(2) * (
input_derivatives_plus_one * root.pow(2)
+ 2 * input_delta * theta_one_minus_theta
+ input_derivatives * (1 - root).pow(2)
)
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
return outputs, -logabsdet
else:
theta = (inputs - input_cumwidths) / input_bin_widths
theta_one_minus_theta = theta * (1 - theta)
numerator = input_heights * (input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta)
denominator = input_delta + (
(input_derivatives + input_derivatives_plus_one - 2 * input_delta) * theta_one_minus_theta
)
outputs = input_cumheights + numerator / denominator
derivative_numerator = input_delta.pow(2) * (
input_derivatives_plus_one * theta.pow(2)
+ 2 * input_delta * theta_one_minus_theta
+ input_derivatives * (1 - theta).pow(2)
)
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
return outputs, logabsdet
| NeMo-main | nemo/collections/tts/parts/utils/splines.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Iterable
import torch
def _is_distributed():
return torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1
def _is_complex_or_float(tensor):
return torch.is_floating_point(tensor) or torch.is_complex(tensor)
def broadcast_tensors(tensors: Iterable[torch.Tensor], src: int = 0):
"""
Broadcast the tensors from the given parameters to all workers.
This can be used to ensure that all workers have the same model to start with.
"""
if not _is_distributed():
return
tensors = [tensor for tensor in tensors if _is_complex_or_float(tensor)]
handles = []
for tensor in tensors:
handle = torch.distributed.broadcast(tensor.data, src=src, async_op=True)
handles.append(handle)
for handle in handles:
handle.wait()
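# --- Illustrative usage (added sketch, not part of the original NeMo source) ---
# Typically called right after model construction so every rank starts from the same
# weights as rank 0; it is a no-op outside of distributed training.
def _example_broadcast_model(model: torch.nn.Module) -> None:
    broadcast_tensors(model.parameters(), src=0)
    broadcast_tensors(model.buffers(), src=0)  # non-float buffers are skipped automatically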
| NeMo-main | nemo/collections/tts/parts/utils/distributed.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Type
import librosa
import numpy as np
import soundfile as sf
import torch
from einops import rearrange
from pytorch_lightning import Callback, LightningModule, Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.loggers.logger import Logger
from pytorch_lightning.loggers.wandb import WandbLogger
from nemo.collections.tts.parts.utils.helpers import create_plot
from nemo.utils import logging
from nemo.utils.decorators import experimental
HAVE_WANDB = True
try:
import wandb
except ModuleNotFoundError:
HAVE_WANDB = False
def _get_logger(loggers: List[Logger], logger_type: Type[Logger]):
for logger in loggers:
if isinstance(logger, logger_type):
if hasattr(logger, "experiment"):
return logger.experiment
else:
return logger
raise ValueError(f"Could not find {logger_type} logger in {loggers}.")
def _load_vocoder(model_name: Optional[str], checkpoint_path: Optional[str], type: str):
assert (model_name is None) != (
checkpoint_path is None
), f"Must provide exactly one of vocoder model_name or checkpoint: ({model_name}, {checkpoint_path})"
checkpoint_path = str(checkpoint_path)
if type == "hifigan":
from nemo.collections.tts.models import HifiGanModel
model_type = HifiGanModel
elif type == "univnet":
from nemo.collections.tts.models import UnivNetModel
model_type = UnivNetModel
else:
raise ValueError(f"Unknown vocoder type '{type}'")
if model_name is not None:
vocoder = model_type.from_pretrained(model_name)
elif checkpoint_path.endswith(".nemo"):
vocoder = model_type.restore_from(checkpoint_path)
else:
vocoder = model_type.load_from_checkpoint(checkpoint_path)
return vocoder.eval()
@dataclass
class AudioArtifact:
id: str
data: np.ndarray
sample_rate: int
filename: str
@dataclass
class ImageArtifact:
id: str
data: np.ndarray
filename: str
x_axis: str
y_axis: str
@dataclass
class LogAudioParams:
vocoder_type: str
vocoder_name: str
vocoder_checkpoint_path: str
log_audio_gta: bool = False
def create_id(filepath: Path) -> str:
path_prefix = str(filepath.with_suffix(""))
file_id = path_prefix.replace(os.sep, "_")
return file_id
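# --- Illustrative usage (added sketch, not part of the original NeMo source) ---
# create_id() drops the file suffix and flattens directory separators
# (POSIX-style path assumed below).
def _example_create_id() -> None:
    assert create_id(Path("speaker1/utt_0001.wav")) == "speaker1_utt_0001"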
class ArtifactGenerator(ABC):
@abstractmethod
def generate_artifacts(
self, model: LightningModule, batch_dict: Dict, initial_log: bool = False
) -> Tuple[List[AudioArtifact], List[ImageArtifact]]:
"""
Create artifacts for the input model and test batch.
Args:
model: Model instance being trained to use for inference.
batch_dict: Test batch to generate artifacts for.
initial_log: Flag to denote if this is the initial log, can
be used to save ground-truth data only once.
Returns:
List of audio and image artifacts to log.
"""
@experimental
class LoggingCallback(Callback):
"""
    Callback which can log artifacts (e.g. model predictions, graphs) to local disk, Tensorboard, and/or WandB.
Args:
generators: List of generators to create and log artifacts from.
data_loader: Data to log artifacts for.
log_epochs: Optional list of specific training epoch numbers to log artifacts for.
        epoch_frequency: Frequency, in epochs, with which to log artifacts.
output_dir: Optional local directory. If provided, artifacts will be saved in output_dir.
loggers: Optional list of loggers to use if logging to tensorboard or wandb.
log_tensorboard: Whether to log artifacts to tensorboard.
log_wandb: Whether to log artifacts to WandB.
"""
def __init__(
self,
generators: List[ArtifactGenerator],
data_loader: torch.utils.data.DataLoader,
log_epochs: Optional[List[int]] = None,
epoch_frequency: int = 1,
output_dir: Optional[Path] = None,
loggers: Optional[List[Logger]] = None,
log_tensorboard: bool = False,
log_wandb: bool = False,
):
self.generators = generators
self.data_loader = data_loader
self.log_epochs = log_epochs if log_epochs else []
self.epoch_frequency = epoch_frequency
self.output_dir = Path(output_dir) if output_dir else None
self.loggers = loggers if loggers else []
self.log_tensorboard = log_tensorboard
self.log_wandb = log_wandb
if log_tensorboard:
logging.info('Creating tensorboard logger')
self.tensorboard_logger = _get_logger(self.loggers, TensorBoardLogger)
else:
            logging.debug('Not using tensorboard logger')
self.tensorboard_logger = None
if log_wandb:
if not HAVE_WANDB:
raise ValueError("Wandb not installed.")
logging.info('Creating wandb logger')
self.wandb_logger = _get_logger(self.loggers, WandbLogger)
else:
logging.debug('Not using wandb logger')
self.wandb_logger = None
logging.debug('Initialized %s with', self.__class__.__name__)
logging.debug('\tlog_epochs: %s', self.log_epochs)
logging.debug('\tepoch_frequency: %s', self.epoch_frequency)
logging.debug('\toutput_dir: %s', self.output_dir)
logging.debug('\tlog_tensorboard: %s', self.log_tensorboard)
logging.debug('\tlog_wandb: %s', self.log_wandb)
def _log_audio(self, audio: AudioArtifact, log_dir: Path, step: int):
if log_dir:
filepath = log_dir / audio.filename
sf.write(file=filepath, data=audio.data, samplerate=audio.sample_rate)
if self.tensorboard_logger:
self.tensorboard_logger.add_audio(
tag=audio.id, snd_tensor=audio.data, global_step=step, sample_rate=audio.sample_rate,
)
if self.wandb_logger:
wandb_audio = (wandb.Audio(audio.data, sample_rate=audio.sample_rate, caption=audio.id),)
self.wandb_logger.log({audio.id: wandb_audio})
def _log_image(self, image: ImageArtifact, log_dir: Path, step: int):
if log_dir:
filepath = log_dir / image.filename
else:
filepath = None
image_plot = create_plot(output_filepath=filepath, data=image.data, x_axis=image.x_axis, y_axis=image.y_axis)
if self.tensorboard_logger:
self.tensorboard_logger.add_image(
tag=image.id, img_tensor=image_plot, global_step=step, dataformats="HWC",
)
if self.wandb_logger:
wandb_image = (wandb.Image(image_plot, caption=image.id),)
self.wandb_logger.log({image.id: wandb_image})
def _log_artifacts(self, audio_list: list, image_list: list, log_dir: Optional[Path] = None, global_step: int = 0):
"""Log audio and image artifacts.
"""
if log_dir is not None:
log_dir.mkdir(parents=True, exist_ok=True)
for audio in audio_list:
self._log_audio(audio=audio, log_dir=log_dir, step=global_step)
for image in image_list:
self._log_image(image=image, log_dir=log_dir, step=global_step)
def on_fit_start(self, trainer: Trainer, model: LightningModule):
"""Log initial data artifacts.
"""
audio_list = []
image_list = []
for batch_dict in self.data_loader:
for key, value in batch_dict.items():
if isinstance(value, torch.Tensor):
batch_dict[key] = value.to(model.device)
for generator in self.generators:
audio, images = generator.generate_artifacts(model=model, batch_dict=batch_dict, initial_log=True)
audio_list += audio
image_list += images
if len(audio_list) == len(image_list) == 0:
            logging.debug('Lists are empty, no initial artifacts to log.')
return
        log_dir = self.output_dir / "initial" if self.output_dir else None
self._log_artifacts(audio_list=audio_list, image_list=image_list, log_dir=log_dir)
def on_train_epoch_end(self, trainer: Trainer, model: LightningModule):
"""Log artifacts at the end of an epoch.
"""
epoch = 1 + model.current_epoch
if (epoch not in self.log_epochs) and (epoch % self.epoch_frequency != 0):
return
audio_list = []
image_list = []
for batch_dict in self.data_loader:
for key, value in batch_dict.items():
if isinstance(value, torch.Tensor):
batch_dict[key] = value.to(model.device)
for generator in self.generators:
audio, images = generator.generate_artifacts(model=model, batch_dict=batch_dict)
audio_list += audio
image_list += images
if len(audio_list) == len(image_list) == 0:
            logging.debug('Lists are empty, no artifacts to log at epoch %d.', epoch)
return
log_dir = self.output_dir / f"epoch_{epoch}" if self.output_dir else None
self._log_artifacts(audio_list=audio_list, image_list=image_list, log_dir=log_dir)
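# Illustrative usage sketch (not part of the original NeMo module): one way to build this
# callback by hand and pass it to a PyTorch Lightning Trainer. The dataloader, output
# directory, and epoch frequency below are assumptions chosen for the example; in practice
# the callback is usually constructed from the experiment config.
def _example_logging_callback(log_data_loader: torch.utils.data.DataLoader) -> "LoggingCallback":
    callback = LoggingCallback(
        generators=[VocoderArtifactGenerator()],
        data_loader=log_data_loader,
        epoch_frequency=10,
        output_dir=Path("logged_artifacts"),
        log_tensorboard=False,
        log_wandb=False,
    )
    # The returned callback can be passed as Trainer(callbacks=[callback], ...).
    return callback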
class VocoderArtifactGenerator(ArtifactGenerator):
"""
Generator for logging Vocoder model outputs.
"""
def generate_artifacts(
self, model: LightningModule, batch_dict: Dict, initial_log: bool = False
) -> Tuple[List[AudioArtifact], List[ImageArtifact]]:
if initial_log:
# Currently, nothing to log before training starts
return [], []
audio_artifacts = []
audio_filepaths = batch_dict.get("audio_filepaths")
audio_ids = [create_id(p) for p in audio_filepaths]
audio = batch_dict.get("audio")
audio_len = batch_dict.get("audio_lens")
spec, spec_len = model.audio_to_melspec_precessor(audio, audio_len)
with torch.no_grad():
audio_pred = model.forward(spec=spec)
audio_pred = rearrange(audio_pred, "B 1 T -> B T")
for i, audio_id in enumerate(audio_ids):
audio_pred_i = audio_pred[i][: audio_len[i]].cpu().numpy()
audio_artifact = AudioArtifact(
id=f"audio_{audio_id}", data=audio_pred_i, filename=f"{audio_id}.wav", sample_rate=model.sample_rate,
)
audio_artifacts.append(audio_artifact)
return audio_artifacts, []
class AudioCodecArtifactGenerator(ArtifactGenerator):
"""
Generator for logging Audio Codec model outputs.
"""
def __init__(self, log_audio: bool = True, log_encoding: bool = False, log_dequantized: bool = False):
# Log reconstructed audio (decoder output)
self.log_audio = log_audio
# Log encoded representation of the input audio (encoder output)
self.log_encoding = log_encoding
# Log dequantized encoded representation of the input audio (decoder input)
self.log_dequantized = log_dequantized
# Input audio will be logged only once
self.input_audio_logged = False
logging.debug('Initialized %s with', self.__class__.__name__)
logging.debug('\tlog_audio: %s', self.log_audio)
logging.debug('\tlog_encoding: %s', self.log_encoding)
logging.debug('\tlog_dequantized: %s', self.log_dequantized)
def _generate_audio(self, model, audio_ids, audio, audio_len, save_input: bool = False):
"""Generate audio artifacts.
Args:
model: callable model, outputs (audio_pred, audio_pred_len)
audio_ids: list of IDs for the examples in audio batch
audio: tensor of input audio signals, shape (B, T)
audio_len: tensor of lengths for each example in the batch, shape (B,)
save_input: if True, save input audio signals
"""
if not self.log_audio:
return []
with torch.no_grad():
# [B, T]
audio_pred, audio_pred_len = model(audio=audio, audio_len=audio_len)
audio_artifacts = []
# Log output audio
for i, audio_id in enumerate(audio_ids):
audio_pred_i = audio_pred[i, : audio_pred_len[i]].cpu().numpy()
audio_artifact = AudioArtifact(
id=f"audio_out_{audio_id}",
data=audio_pred_i,
filename=f"{audio_id}_audio_out.wav",
sample_rate=model.sample_rate,
)
audio_artifacts.append(audio_artifact)
if save_input:
# save input audio
for i, audio_id in enumerate(audio_ids):
audio_in_i = audio[i, : audio_len[i]].cpu().numpy()
audio_artifact = AudioArtifact(
id=f"audio_in_{audio_id}",
data=audio_in_i,
filename=f"{audio_id}_audio_in.wav",
sample_rate=model.sample_rate,
)
audio_artifacts.append(audio_artifact)
return audio_artifacts
def _generate_images(self, model, audio_ids, audio, audio_len):
"""Generate image artifacts.
Args:
model: model, needs to support `model.encode_audio`, `model.quantize` and `model.dequantize`
audio_ids: list of IDs for the examples in audio batch
audio: tensor of input audio signals, shape (B, T)
audio_len: tensor of lengths for each example in the batch, shape (B,)
"""
image_artifacts = []
if not self.log_encoding and not self.log_dequantized:
return image_artifacts
with torch.no_grad():
# [B, D, T]
encoded, encoded_len = model.encode_audio(audio=audio, audio_len=audio_len)
if self.log_encoding:
for i, audio_id in enumerate(audio_ids):
encoded_i = encoded[i, :, : encoded_len[i]].cpu().numpy()
encoded_artifact = ImageArtifact(
id=f"encoded_{audio_id}",
data=encoded_i,
filename=f"{audio_id}_encoded.png",
x_axis="Audio Frames",
y_axis="Channels",
)
image_artifacts.append(encoded_artifact)
if not self.log_dequantized:
return image_artifacts
with torch.no_grad():
# [B, D, T]
tokens = model.quantize(encoded=encoded, encoded_len=encoded_len)
dequantized = model.dequantize(tokens=tokens, tokens_len=encoded_len)
for i, audio_id in enumerate(audio_ids):
dequantized_i = dequantized[i, :, : encoded_len[i]].cpu().numpy()
dequantized_artifact = ImageArtifact(
id=f"dequantized_{audio_id}",
data=dequantized_i,
filename=f"{audio_id}_dequantized.png",
x_axis="Audio Frames",
y_axis="Channels",
)
image_artifacts.append(dequantized_artifact)
return image_artifacts
def generate_artifacts(
self, model: LightningModule, batch_dict: Dict, initial_log: bool = False
) -> Tuple[List[AudioArtifact], List[ImageArtifact]]:
"""
Args:
model: model used to process input to generate artifacts
batch_dict: dictionary obtained form the dataloader
initial_log: save input audio for the initial log
"""
audio_filepaths = batch_dict.get("audio_filepaths")
audio_ids = [create_id(p) for p in audio_filepaths]
audio = batch_dict.get("audio")
audio_len = batch_dict.get("audio_lens")
audio_artifacts = self._generate_audio(
model=model, audio_ids=audio_ids, audio=audio, audio_len=audio_len, save_input=initial_log
)
image_artifacts = self._generate_images(model=model, audio_ids=audio_ids, audio=audio, audio_len=audio_len)
return audio_artifacts, image_artifacts
class FastPitchArtifactGenerator(ArtifactGenerator):
"""
Generator for logging FastPitch model outputs.
Args:
log_spectrogram: Whether to log predicted spectrograms.
log_alignment: Whether to log alignment graphs.
audio_params: Optional parameters for saving predicted audio.
Requires a vocoder model checkpoint for generating audio from predicted spectrograms.
"""
def __init__(
self,
log_spectrogram: bool = False,
log_alignment: bool = False,
audio_params: Optional[LogAudioParams] = None,
):
self.log_spectrogram = log_spectrogram
self.log_alignment = log_alignment
if not audio_params:
self.log_audio = False
self.log_audio_gta = False
self.vocoder = None
else:
self.log_audio = True
self.log_audio_gta = audio_params.log_audio_gta
self.vocoder = _load_vocoder(
model_name=audio_params.vocoder_name,
checkpoint_path=audio_params.vocoder_checkpoint_path,
type=audio_params.vocoder_type,
)
def _generate_audio(self, mels, mels_len, hop_length):
voc_input = mels.to(self.vocoder.device)
with torch.no_grad():
audio_pred = self.vocoder.convert_spectrogram_to_audio(spec=voc_input)
mels_len_array = mels_len.cpu().numpy()
audio_pred_lens = librosa.core.frames_to_samples(mels_len_array, hop_length=hop_length)
return audio_pred, audio_pred_lens
def _generate_predictions(self, model: LightningModule, audio_ids: List[str], batch_dict: Dict):
audio_artifacts = []
image_artifacts = []
text = batch_dict.get("text")
text_lens = batch_dict.get("text_lens")
speaker = batch_dict.get("speaker_id", None)
with torch.no_grad():
# [B, C, T_spec]
mels_pred, mels_pred_len, *_ = model.forward(text=text, input_lens=text_lens, speaker=speaker,)
if self.log_spectrogram:
for i, audio_id in enumerate(audio_ids):
spec_i = mels_pred[i][:, : mels_pred_len[i]].cpu().numpy()
spec_artifact = ImageArtifact(
id=f"spec_{audio_id}",
data=spec_i,
filename=f"{audio_id}_spec.png",
x_axis="Audio Frames",
y_axis="Channels",
)
image_artifacts.append(spec_artifact)
if self.log_audio:
# [B, T_audio]
audio_pred, audio_pred_lens = self._generate_audio(
mels=mels_pred, mels_len=mels_pred_len, hop_length=model.preprocessor.hop_length
)
for i, audio_id in enumerate(audio_ids):
audio_pred_i = audio_pred[i][: audio_pred_lens[i]].cpu().numpy()
audio_artifact = AudioArtifact(
id=f"audio_{audio_id}",
data=audio_pred_i,
filename=f"{audio_id}.wav",
sample_rate=self.vocoder.sample_rate,
)
audio_artifacts.append(audio_artifact)
return audio_artifacts, image_artifacts
def _generate_gta_predictions(self, model: LightningModule, audio_ids: List[str], batch_dict: Dict):
audio_artifacts = []
image_artifacts = []
audio = batch_dict.get("audio")
audio_lens = batch_dict.get("audio_lens")
text = batch_dict.get("text")
text_lens = batch_dict.get("text_lens")
attn_prior = batch_dict.get("align_prior_matrix", None)
pitch = batch_dict.get("pitch", None)
energy = batch_dict.get("energy", None)
speaker = batch_dict.get("speaker_id", None)
mels, spec_len = model.preprocessor(input_signal=audio, length=audio_lens)
with torch.no_grad():
mels_pred, mels_pred_len, _, _, _, attn, _, _, _, _, _, _ = model.forward(
text=text,
input_lens=text_lens,
pitch=pitch,
energy=energy,
speaker=speaker,
spec=mels,
mel_lens=spec_len,
attn_prior=attn_prior,
)
if self.log_alignment:
attn = rearrange(attn, "B 1 T_spec T_text -> B T_text T_spec")
for i, audio_id in enumerate(audio_ids):
attn_i = attn[i][: text_lens[i], : mels_pred_len[i]].cpu().numpy()
alignment_artifact = ImageArtifact(
id=f"align_{audio_id}",
data=attn_i,
filename=f"{audio_id}_align.png",
x_axis="Audio Frames",
y_axis="Text Tokens",
)
image_artifacts.append(alignment_artifact)
if self.log_audio_gta:
# [B, T_audio]
audio_pred, audio_pred_lens = self._generate_audio(
mels=mels_pred, mels_len=mels_pred_len, hop_length=model.preprocessor.hop_length
)
for i, audio_id in enumerate(audio_ids):
audio_pred_i = audio_pred[i][: audio_pred_lens[i]].cpu().numpy()
audio_artifact = AudioArtifact(
id=f"audio_gta_{audio_id}",
data=audio_pred_i,
filename=f"{audio_id}_gta.wav",
sample_rate=self.vocoder.sample_rate,
)
audio_artifacts.append(audio_artifact)
return audio_artifacts, image_artifacts
def generate_artifacts(
self, model: LightningModule, batch_dict: Dict, initial_log: bool = False
) -> Tuple[List[AudioArtifact], List[ImageArtifact]]:
if initial_log:
# Currently, nothing to log before training starts
return [], []
audio_artifacts = []
image_artifacts = []
audio_filepaths = batch_dict.get("audio_filepaths")
audio_ids = [create_id(p) for p in audio_filepaths]
if self.log_audio or self.log_spectrogram:
audio_pred, spec_pred = self._generate_predictions(model=model, batch_dict=batch_dict, audio_ids=audio_ids)
audio_artifacts += audio_pred
image_artifacts += spec_pred
if self.log_audio_gta or self.log_alignment:
audio_gta_pred, alignments = self._generate_gta_predictions(
model=model, batch_dict=batch_dict, audio_ids=audio_ids
)
audio_artifacts += audio_gta_pred
image_artifacts += alignments
return audio_artifacts, image_artifacts
| NeMo-main | nemo/collections/tts/parts/utils/callbacks.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# BSD 3-Clause License
#
# Copyright (c) 2021, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from enum import Enum
from typing import Optional, Tuple
import librosa
import matplotlib.pylab as plt
import numpy as np
import torch
from einops import rearrange
from numba import jit, prange
from nemo.collections.tts.torch.tts_data_types import DATA_STR2DATA_CLASS, MAIN_DATA_TYPES, WithLens
from nemo.utils import logging
from nemo.utils.decorators import deprecated
HAVE_WANDB = True
try:
import wandb
except ModuleNotFoundError:
HAVE_WANDB = False
try:
from pytorch_lightning.utilities import rank_zero_only
except ModuleNotFoundError:
from functools import wraps
def rank_zero_only(fn):
@wraps(fn)
def wrapped_fn(*args, **kwargs):
            logging.error(
                f"Function {fn} requires lightning to be installed, but it was not found. Please install lightning first"
            )
            exit(1)
        return wrapped_fn
class OperationMode(Enum):
"""Training or Inference (Evaluation) mode"""
training = 0
validation = 1
infer = 2
def get_batch_size(train_dataloader):
if train_dataloader.batch_size is not None:
return train_dataloader.batch_size
elif train_dataloader.batch_sampler is not None:
if train_dataloader.batch_sampler.micro_batch_size is not None:
return train_dataloader.batch_sampler.micro_batch_size
else:
raise ValueError(f'Could not find batch_size from batch_sampler: {train_dataloader.batch_sampler}')
else:
raise ValueError(f'Could not find batch_size from train_dataloader: {train_dataloader}')
def get_num_workers(trainer):
return trainer.num_devices * trainer.num_nodes
def binarize_attention(attn, in_len, out_len):
"""Convert soft attention matrix to hard attention matrix.
Args:
attn (torch.Tensor): B x 1 x max_mel_len x max_text_len. Soft attention matrix.
in_len (torch.Tensor): B. Lengths of texts.
out_len (torch.Tensor): B. Lengths of spectrograms.
Output:
attn_out (torch.Tensor): B x 1 x max_mel_len x max_text_len. Hard attention matrix, final dim max_text_len should sum to 1.
"""
b_size = attn.shape[0]
with torch.no_grad():
attn_cpu = attn.data.cpu().numpy()
attn_out = torch.zeros_like(attn)
for ind in range(b_size):
hard_attn = mas(attn_cpu[ind, 0, : out_len[ind], : in_len[ind]])
attn_out[ind, 0, : out_len[ind], : in_len[ind]] = torch.tensor(hard_attn, device=attn.device)
return attn_out
def binarize_attention_parallel(attn, in_lens, out_lens):
"""For training purposes only. Binarizes attention with MAS.
These will no longer receive a gradient.
Args:
attn: B x 1 x max_mel_len x max_text_len
"""
with torch.no_grad():
log_attn_cpu = torch.log(attn.data).cpu().numpy()
attn_out = b_mas(log_attn_cpu, in_lens.cpu().numpy(), out_lens.cpu().numpy(), width=1)
return torch.from_numpy(attn_out).to(attn.device)
def get_mask_from_lengths(lengths: Optional[torch.Tensor] = None, x: Optional[torch.Tensor] = None,) -> torch.Tensor:
"""Constructs binary mask from a 1D torch tensor of input lengths
Args:
        lengths: Optional 1D torch.tensor with sequence lengths
        x: Optional torch.tensor; its last dimension determines the mask length
    Returns:
        mask (torch.tensor): num_sequences x max_length binary tensor
"""
if lengths is None:
assert x is not None
return torch.ones(x.shape[-1], dtype=torch.bool, device=x.device)
else:
if x is None:
max_len = torch.max(lengths)
else:
max_len = x.shape[-1]
ids = torch.arange(0, max_len, device=lengths.device, dtype=lengths.dtype)
mask = ids < lengths.unsqueeze(1)
return mask
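# Illustrative sketch (not part of the original NeMo module): builds a padding mask for a
# small batch. The lengths below are arbitrary example values.
def _example_get_mask_from_lengths() -> torch.Tensor:
    lengths = torch.tensor([3, 5])
    mask = get_mask_from_lengths(lengths=lengths)
    # mask -> tensor([[ True,  True,  True, False, False],
    #                 [ True,  True,  True,  True,  True]])
    return mask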
def sort_tensor(
context: torch.Tensor, lens: torch.Tensor, dim: Optional[int] = 0, descending: Optional[bool] = True
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Sorts elements in context by the dim lengths specified in lens
Args:
context: source tensor, sorted by lens
lens: lengths of elements of context along the dimension dim
dim: Optional[int] : dimension to sort by
Returns:
context: tensor sorted by lens along dimension dim
lens_sorted: lens tensor, sorted
ids_sorted: reorder ids to be used to restore original order
"""
lens_sorted, ids_sorted = torch.sort(lens, descending=descending)
context = torch.index_select(context, dim, ids_sorted)
return context, lens_sorted, ids_sorted
def unsort_tensor(ordered: torch.Tensor, indices: torch.Tensor, dim: Optional[int] = 0) -> torch.Tensor:
"""Reverses the result of sort_tensor function:
o, _, ids = sort_tensor(x,l)
assert unsort_tensor(o,ids) == x
Args:
ordered: context tensor, sorted by lengths
indices: torch.tensor: 1D tensor with 're-order' indices returned by sort_tensor
Returns:
ordered tensor in original order (before calling sort_tensor)
"""
return torch.index_select(ordered, dim, indices.argsort(0))
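# Illustrative sketch (not part of the original NeMo module): round trip through
# sort_tensor/unsort_tensor on a toy batch, sorted by descending length along dim 0.
def _example_sort_unsort() -> torch.Tensor:
    context = torch.randn(3, 7, 5)
    lens = torch.tensor([4, 7, 2])
    sorted_context, lens_sorted, ids_sorted = sort_tensor(context, lens)
    restored = unsort_tensor(sorted_context, ids_sorted)
    assert torch.equal(restored, context)
    return lens_sorted  # tensor([7, 4, 2])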
@jit(nopython=True)
def mas(attn_map, width=1):
# assumes mel x text
opt = np.zeros_like(attn_map)
attn_map = np.log(attn_map)
attn_map[0, 1:] = -np.inf
log_p = np.zeros_like(attn_map)
log_p[0, :] = attn_map[0, :]
prev_ind = np.zeros_like(attn_map, dtype=np.int64)
for i in range(1, attn_map.shape[0]):
for j in range(attn_map.shape[1]): # for each text dim
prev_j = np.arange(max(0, j - width), j + 1)
prev_log = np.array([log_p[i - 1, prev_idx] for prev_idx in prev_j])
ind = np.argmax(prev_log)
log_p[i, j] = attn_map[i, j] + prev_log[ind]
prev_ind[i, j] = prev_j[ind]
# now backtrack
curr_text_idx = attn_map.shape[1] - 1
for i in range(attn_map.shape[0] - 1, -1, -1):
opt[i, curr_text_idx] = 1
curr_text_idx = prev_ind[i, curr_text_idx]
opt[0, curr_text_idx] = 1
assert opt.sum(0).all()
assert opt.sum(1).all()
return opt
@jit(nopython=True)
def mas_width1(log_attn_map):
"""mas with hardcoded width=1"""
# assumes mel x text
neg_inf = log_attn_map.dtype.type(-np.inf)
log_p = log_attn_map.copy()
log_p[0, 1:] = neg_inf
for i in range(1, log_p.shape[0]):
prev_log1 = neg_inf
for j in range(log_p.shape[1]):
prev_log2 = log_p[i - 1, j]
log_p[i, j] += max(prev_log1, prev_log2)
prev_log1 = prev_log2
# now backtrack
opt = np.zeros_like(log_p)
one = opt.dtype.type(1)
j = log_p.shape[1] - 1
for i in range(log_p.shape[0] - 1, 0, -1):
opt[i, j] = one
if log_p[i - 1, j - 1] >= log_p[i - 1, j]:
j -= 1
if j == 0:
opt[1:i, j] = one
break
opt[0, j] = one
return opt
@jit(nopython=True, parallel=True)
def b_mas(b_log_attn_map, in_lens, out_lens, width=1):
assert width == 1
attn_out = np.zeros_like(b_log_attn_map)
for b in prange(b_log_attn_map.shape[0]):
out = mas_width1(b_log_attn_map[b, 0, : out_lens[b], : in_lens[b]])
attn_out[b, 0, : out_lens[b], : in_lens[b]] = out
return attn_out
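# Illustrative sketch (not part of the original NeMo module): binarizes a toy soft
# alignment of 4 spectrogram frames over 3 text tokens with monotonic alignment search.
def _example_binarize_attention_parallel() -> torch.Tensor:
    soft_attn = torch.softmax(torch.randn(1, 1, 4, 3), dim=-1)  # [B, 1, max_mel_len, max_text_len]
    in_lens = torch.tensor([3])   # text lengths
    out_lens = torch.tensor([4])  # spectrogram lengths
    hard_attn = binarize_attention_parallel(soft_attn, in_lens, out_lens)
    # hard_attn has the same shape, with exactly one non-zero entry per valid spectrogram frame.
    return hard_attn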
def griffin_lim(magnitudes, n_iters=50, n_fft=1024):
"""
Griffin-Lim algorithm to convert magnitude spectrograms to audio signals
"""
phase = np.exp(2j * np.pi * np.random.rand(*magnitudes.shape))
complex_spec = magnitudes * phase
signal = librosa.istft(complex_spec)
if not np.isfinite(signal).all():
logging.warning("audio was not finite, skipping audio saving")
return np.array([0])
for _ in range(n_iters):
_, phase = librosa.magphase(librosa.stft(signal, n_fft=n_fft))
complex_spec = magnitudes * phase
signal = librosa.istft(complex_spec)
return signal
@rank_zero_only
def log_audio_to_tb(
swriter,
spect,
name,
step,
griffin_lim_mag_scale=1024,
griffin_lim_power=1.2,
sr=22050,
n_fft=1024,
n_mels=80,
fmax=8000,
):
filterbank = librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels, fmax=fmax)
log_mel = spect.data.cpu().numpy().T
mel = np.exp(log_mel)
magnitude = np.dot(mel, filterbank) * griffin_lim_mag_scale
audio = griffin_lim(magnitude.T ** griffin_lim_power)
swriter.add_audio(name, audio / max(np.abs(audio)), step, sample_rate=sr)
@rank_zero_only
def tacotron2_log_to_tb_func(
swriter,
tensors,
step,
tag="train",
log_images=False,
log_images_freq=1,
add_audio=True,
griffin_lim_mag_scale=1024,
griffin_lim_power=1.2,
sr=22050,
n_fft=1024,
n_mels=80,
fmax=8000,
):
_, spec_target, mel_postnet, gate, gate_target, alignments = tensors
if log_images and step % log_images_freq == 0:
swriter.add_image(
f"{tag}_alignment", plot_alignment_to_numpy(alignments[0].data.cpu().numpy().T), step, dataformats="HWC",
)
swriter.add_image(
f"{tag}_mel_target", plot_spectrogram_to_numpy(spec_target[0].data.cpu().numpy()), step, dataformats="HWC",
)
swriter.add_image(
f"{tag}_mel_predicted",
plot_spectrogram_to_numpy(mel_postnet[0].data.cpu().numpy()),
step,
dataformats="HWC",
)
swriter.add_image(
f"{tag}_gate",
plot_gate_outputs_to_numpy(gate_target[0].data.cpu().numpy(), torch.sigmoid(gate[0]).data.cpu().numpy(),),
step,
dataformats="HWC",
)
if add_audio:
filterbank = librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels, fmax=fmax)
log_mel = mel_postnet[0].data.cpu().numpy().T
mel = np.exp(log_mel)
magnitude = np.dot(mel, filterbank) * griffin_lim_mag_scale
audio = griffin_lim(magnitude.T ** griffin_lim_power)
swriter.add_audio(f"audio/{tag}_predicted", audio / max(np.abs(audio)), step, sample_rate=sr)
log_mel = spec_target[0].data.cpu().numpy().T
mel = np.exp(log_mel)
magnitude = np.dot(mel, filterbank) * griffin_lim_mag_scale
audio = griffin_lim(magnitude.T ** griffin_lim_power)
swriter.add_audio(f"audio/{tag}_target", audio / max(np.abs(audio)), step, sample_rate=sr)
def tacotron2_log_to_wandb_func(
swriter,
tensors,
step,
tag="train",
log_images=False,
log_images_freq=1,
add_audio=True,
griffin_lim_mag_scale=1024,
griffin_lim_power=1.2,
sr=22050,
n_fft=1024,
n_mels=80,
fmax=8000,
):
_, spec_target, mel_postnet, gate, gate_target, alignments = tensors
if not HAVE_WANDB:
return
    if log_images and step % log_images_freq == 0:
        alignment_images = [
            wandb.Image(plot_alignment_to_numpy(alignments[0].data.cpu().numpy().T), caption=f"{tag}_alignment",)
        ]
        specs = [
            wandb.Image(plot_spectrogram_to_numpy(spec_target[0].data.cpu().numpy()), caption=f"{tag}_mel_target",),
            wandb.Image(plot_spectrogram_to_numpy(mel_postnet[0].data.cpu().numpy()), caption=f"{tag}_mel_predicted",),
        ]
        gates = [
            wandb.Image(
                plot_gate_outputs_to_numpy(
                    gate_target[0].data.cpu().numpy(), torch.sigmoid(gate[0]).data.cpu().numpy(),
                ),
                caption=f"{tag}_gate",
            )
        ]
        swriter.log({"specs": specs, "alignments": alignment_images, "gates": gates})
if add_audio:
audios = []
filterbank = librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels, fmax=fmax)
log_mel = mel_postnet[0].data.cpu().numpy().T
mel = np.exp(log_mel)
magnitude = np.dot(mel, filterbank) * griffin_lim_mag_scale
audio_pred = griffin_lim(magnitude.T ** griffin_lim_power)
log_mel = spec_target[0].data.cpu().numpy().T
mel = np.exp(log_mel)
magnitude = np.dot(mel, filterbank) * griffin_lim_mag_scale
audio_true = griffin_lim(magnitude.T ** griffin_lim_power)
audios += [
wandb.Audio(audio_true / max(np.abs(audio_true)), caption=f"{tag}_wav_target", sample_rate=sr,),
wandb.Audio(audio_pred / max(np.abs(audio_pred)), caption=f"{tag}_wav_predicted", sample_rate=sr,),
]
swriter.log({"audios": audios})
def plot_alignment_to_numpy(alignment, title='', info=None, phoneme_seq=None, vmin=None, vmax=None):
if phoneme_seq:
fig, ax = plt.subplots(figsize=(15, 10))
else:
fig, ax = plt.subplots(figsize=(6, 4))
im = ax.imshow(alignment, aspect='auto', origin='lower', interpolation='none', vmin=vmin, vmax=vmax)
ax.set_title(title)
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
xlabel += '\n\n' + info
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
    if phoneme_seq is not None:
        # for debugging of phonemes and durs in maps. Not used by default in training code
ax.set_yticks(np.arange(len(phoneme_seq)))
ax.set_yticklabels(phoneme_seq)
ax.hlines(np.arange(len(phoneme_seq)), xmin=0.0, xmax=max(ax.get_xticks()))
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_pitch_to_numpy(pitch, ylim_range=None):
fig, ax = plt.subplots(figsize=(12, 3))
plt.plot(pitch)
if ylim_range is not None:
plt.ylim(ylim_range)
plt.xlabel("Frames")
plt.ylabel("Pitch")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_multipitch_to_numpy(pitch_gt, pitch_pred, ylim_range=None):
fig, ax = plt.subplots(figsize=(12, 3))
plt.plot(pitch_gt, label="Ground truth")
plt.plot(pitch_pred, label="Predicted")
if ylim_range is not None:
plt.ylim(ylim_range)
plt.xlabel("Frames")
plt.ylabel("Pitch")
plt.legend()
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_spectrogram_to_numpy(spectrogram):
spectrogram = spectrogram.astype(np.float32)
fig, ax = plt.subplots(figsize=(12, 3))
im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation='none')
plt.colorbar(im, ax=ax)
plt.xlabel("Frames")
plt.ylabel("Channels")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def create_plot(data, x_axis, y_axis, output_filepath=None):
fig, ax = plt.subplots(figsize=(12, 3))
im = ax.imshow(data, aspect="auto", origin="lower", interpolation="none")
plt.colorbar(im, ax=ax)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
plt.tight_layout()
if output_filepath:
plt.savefig(output_filepath, format="png")
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_gate_outputs_to_numpy(gate_targets, gate_outputs):
fig, ax = plt.subplots(figsize=(12, 3))
ax.scatter(
range(len(gate_targets)), gate_targets, alpha=0.5, color='green', marker='+', s=1, label='target',
)
ax.scatter(
range(len(gate_outputs)), gate_outputs, alpha=0.5, color='red', marker='.', s=1, label='predicted',
)
plt.xlabel("Frames (Green target, Red predicted)")
plt.ylabel("Gate State")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def save_figure_to_numpy(fig):
# save it to a numpy array.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
@rank_zero_only
def waveglow_log_to_tb_func(
swriter, tensors, step, tag="train", n_fft=1024, hop_length=256, window="hann", mel_fb=None,
):
_, audio_pred, spec_target, mel_length = tensors
mel_length = mel_length[0]
spec_target = spec_target[0].data.cpu().numpy()[:, :mel_length]
swriter.add_image(
f"{tag}_mel_target", plot_spectrogram_to_numpy(spec_target), step, dataformats="HWC",
)
if mel_fb is not None:
mag, _ = librosa.core.magphase(
librosa.core.stft(
np.nan_to_num(audio_pred[0].cpu().detach().numpy()), n_fft=n_fft, hop_length=hop_length, window=window,
)
)
mel_pred = np.matmul(mel_fb.cpu().numpy(), mag).squeeze()
log_mel_pred = np.log(np.clip(mel_pred, a_min=1e-5, a_max=None))
swriter.add_image(
f"{tag}_mel_predicted", plot_spectrogram_to_numpy(log_mel_pred[:, :mel_length]), step, dataformats="HWC",
)
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
def regulate_len(
durations, enc_out, pace=1.0, mel_max_len=None, group_size=1, dur_lens: torch.tensor = None,
):
"""A function that takes predicted durations per encoded token, and repeats enc_out according to the duration.
NOTE: durations.shape[1] == enc_out.shape[1]
Args:
durations (torch.tensor): A tensor of shape (batch x enc_length) that represents how many times to repeat each
token in enc_out.
enc_out (torch.tensor): A tensor of shape (batch x enc_length x enc_hidden) that represents the encoded tokens.
        pace (float): The pace of speaker. Higher values result in faster speaking pace. Defaults to 1.0.
        mel_max_len (int): The maximum length above which the output will be removed. If sum(durations, dim=1) >
            mel_max_len, the values after mel_max_len will be removed. Defaults to None, which has no max length.
        group_size (int): replicate the last element specified by durations[i, dur_lens[i] - 1] until the
            full length of the sequence is the next nearest multiple of group_size.
        dur_lens (torch.tensor): input sequence lengths specifying valid values in the durations tensor
            (only needed if group_size > 1).
"""
dtype = enc_out.dtype
reps = durations.float() / pace
reps = (reps + 0.5).floor().long()
dec_lens = reps.sum(dim=1)
if group_size > 1:
to_pad = group_size * (torch.div(dec_lens + 1, group_size, rounding_mode='floor')) - dec_lens
reps.index_put_(
indices=[torch.arange(dur_lens.shape[0], dtype=torch.long), dur_lens - 1], values=to_pad, accumulate=True
)
dec_lens = reps.sum(dim=1)
max_len = dec_lens.max()
reps_cumsum = torch.cumsum(torch.nn.functional.pad(reps, (1, 0, 0, 0), value=0.0), dim=1)[:, None, :]
reps_cumsum = reps_cumsum.to(dtype=dtype, device=enc_out.device)
range_ = torch.arange(max_len).to(enc_out.device)[None, :, None]
mult = (reps_cumsum[:, :, :-1] <= range_) & (reps_cumsum[:, :, 1:] > range_)
mult = mult.to(dtype)
enc_rep = torch.matmul(mult, enc_out)
if mel_max_len is not None:
enc_rep = enc_rep[:, :mel_max_len]
dec_lens = torch.clamp_max(dec_lens, mel_max_len)
return enc_rep, dec_lens
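# Illustrative sketch (not part of the original NeMo module): expands encoder outputs
# according to integer token durations. The tensor shapes below are arbitrary example values.
def _example_regulate_len() -> Tuple[torch.Tensor, torch.Tensor]:
    durations = torch.tensor([[2, 1, 3], [1, 1, 0]])  # (batch, enc_length)
    enc_out = torch.randn(2, 3, 8)                    # (batch, enc_length, enc_hidden)
    enc_rep, dec_lens = regulate_len(durations, enc_out, pace=1.0)
    # enc_rep has shape (2, 6, 8) since the longest expanded sequence is 2 + 1 + 3 = 6 frames;
    # dec_lens -> tensor([6, 2])
    return enc_rep, dec_lens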
def split_view(tensor, split_size: int, dim: int = 0):
if dim < 0: # Support negative indexing
dim = len(tensor.shape) + dim
# If not divisible by split_size, we need to pad with 0
if tensor.shape[dim] % split_size != 0:
to_pad = split_size - (tensor.shape[dim] % split_size)
padding = [0] * len(tensor.shape) * 2
padding[dim * 2 + 1] = to_pad
padding.reverse()
tensor = torch.nn.functional.pad(tensor, padding)
cur_shape = tensor.shape
new_shape = cur_shape[:dim] + (tensor.shape[dim] // split_size, split_size) + cur_shape[dim + 1 :]
return tensor.reshape(*new_shape)
def slice_segments(x, ids_str, segment_size=4):
"""
    Time-wise slicing (patching) of batches for audio/spectrogram
[B x C x T] -> [B x C x segment_size]
"""
ret = torch.zeros_like(x[:, :, :segment_size])
for i in range(x.size(0)):
idx_str = ids_str[i]
idx_end = idx_str + segment_size
x_i = x[i]
if idx_end >= x.size(2):
# pad the sample if it is shorter than the segment size
x_i = torch.nn.functional.pad(x_i, (0, (idx_end + 1) - x.size(2)))
ret[i] = x_i[:, idx_str:idx_end]
return ret
def rand_slice_segments(x, x_lengths=None, segment_size=4):
"""
Chooses random indices and slices segments from batch
[B x C x T] -> [B x C x segment_size]
"""
b, d, t = x.size()
if x_lengths is None:
x_lengths = t
ids_str_max = x_lengths - segment_size + 1
ids_str_max = ids_str_max.to(device=x.device)
ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
ret = slice_segments(x, ids_str, segment_size)
return ret, ids_str
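# Illustrative sketch (not part of the original NeMo module): slices a fixed-size segment
# from each element of a toy batch starting at the given time indices.
def _example_slice_segments() -> torch.Tensor:
    x = torch.arange(24, dtype=torch.float32).reshape(1, 2, 12)  # [B=1, C=2, T=12]
    ids_str = torch.tensor([3])
    segments = slice_segments(x, ids_str, segment_size=4)
    # segments[0, 0] -> tensor([3., 4., 5., 6.])
    return segments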
def clip_grad_value_(parameters, clip_value, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
norm_type = float(norm_type)
if clip_value is not None:
clip_value = float(clip_value)
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
if clip_value is not None:
p.grad.data.clamp_(min=-clip_value, max=clip_value)
total_norm = total_norm ** (1.0 / norm_type)
return total_norm
def convert_pad_shape(pad_shape):
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
def generate_path(duration, mask):
"""
duration: [b, 1, t_x]
mask: [b, 1, t_y, t_x]
"""
b, _, t_y, t_x = mask.shape
cum_duration = torch.cumsum(duration, -1)
cum_duration_flat = cum_duration.view(b * t_x)
path = get_mask_from_lengths(cum_duration_flat, torch.Tensor(t_y).reshape(1, 1, -1)).to(mask.dtype)
path = path.view(b, t_x, t_y)
path = path - torch.nn.functional.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
path = path.unsqueeze(1).transpose(2, 3) * mask
return path
def process_batch(batch_data, sup_data_types_set):
batch_dict = {}
batch_index = 0
for name, datatype in DATA_STR2DATA_CLASS.items():
if datatype in MAIN_DATA_TYPES or datatype in sup_data_types_set:
batch_dict[name] = batch_data[batch_index]
batch_index = batch_index + 1
if issubclass(datatype, WithLens):
batch_dict[name + "_lens"] = batch_data[batch_index]
batch_index = batch_index + 1
return batch_dict
def to_device_recursive(e, device: torch.device):
"""
    Use .to(device) on all tensors within nested lists, tuples, and values of dicts.
Returns a new structure with tensors moved to target device, leaving other data intact.
The intended use is to move collections of tensors to a device while:
- avoiding calling specific movers like .cpu() or .cuda()
- avoiding stuff like .to(torch.device("cuda:{some_variable}"))
"""
if isinstance(e, (list, tuple)):
return [to_device_recursive(elem, device) for elem in e]
elif isinstance(e, dict):
return {key: to_device_recursive(value, device) for key, value in e.items()}
elif isinstance(e, torch.Tensor):
return e.to(device)
else:
return e
def mask_sequence_tensor(tensor: torch.Tensor, lengths: torch.Tensor):
"""
For tensors containing sequences, zero out out-of-bound elements given lengths of every element in the batch.
    tensor: tensor of shape (B, L), (B, D, L), or (B, D1, D2, L),
lengths: LongTensor of shape (B,)
"""
batch_size, *_, max_lengths = tensor.shape
if len(tensor.shape) == 2:
mask = torch.ones(batch_size, max_lengths).cumsum(dim=-1).type_as(lengths)
mask = mask <= rearrange(lengths, "b -> b 1")
elif len(tensor.shape) == 3:
mask = torch.ones(batch_size, 1, max_lengths).cumsum(dim=-1).type_as(lengths)
mask = mask <= rearrange(lengths, "b -> b 1 1")
elif len(tensor.shape) == 4:
mask = torch.ones(batch_size, 1, 1, max_lengths).cumsum(dim=-1).type_as(lengths)
mask = mask <= rearrange(lengths, "b -> b 1 1 1")
else:
        raise ValueError("Can only mask tensors of shape B x L, B x D x L, or B x D1 x D2 x L")
return tensor * mask
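# Illustrative sketch (not part of the original NeMo module): zeroes out padded frames of
# a (B, D, L) tensor using per-example lengths.
def _example_mask_sequence_tensor() -> torch.Tensor:
    tensor = torch.ones(2, 3, 5)        # (B, D, L)
    lengths = torch.tensor([2, 4])
    masked = mask_sequence_tensor(tensor, lengths)
    # masked[0] is zero from frame index 2 onward, masked[1] from frame index 4 onward.
    return masked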
@torch.jit.script
def batch_from_ragged(
text: torch.Tensor,
pitch: torch.Tensor,
pace: torch.Tensor,
batch_lengths: torch.Tensor,
padding_idx: int = -1,
volume: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
batch_lengths = batch_lengths.to(dtype=torch.int64)
max_len = torch.max(batch_lengths[1:] - batch_lengths[:-1])
index = 1
num_batches = batch_lengths.shape[0] - 1
texts = torch.zeros(num_batches, max_len, dtype=torch.int64, device=text.device) + padding_idx
pitches = torch.ones(num_batches, max_len, dtype=torch.float32, device=text.device)
paces = torch.zeros(num_batches, max_len, dtype=torch.float32, device=text.device) + 1.0
volumes = torch.zeros(num_batches, max_len, dtype=torch.float32, device=text.device) + 1.0
lens = torch.zeros(num_batches, dtype=torch.int64, device=text.device)
last_index = index - 1
while index < batch_lengths.shape[0]:
seq_start = batch_lengths[last_index]
seq_end = batch_lengths[index]
cur_seq_len = seq_end - seq_start
lens[last_index] = cur_seq_len
texts[last_index, :cur_seq_len] = text[seq_start:seq_end]
pitches[last_index, :cur_seq_len] = pitch[seq_start:seq_end]
paces[last_index, :cur_seq_len] = pace[seq_start:seq_end]
if volume is not None:
volumes[last_index, :cur_seq_len] = volume[seq_start:seq_end]
last_index = index
index += 1
return texts, pitches, paces, volumes, lens
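# Illustrative sketch (not part of the original NeMo module): packs two ragged sequences
# (lengths 3 and 2) into padded batch tensors using cumulative boundaries [0, 3, 5].
def _example_batch_from_ragged() -> Tuple[torch.Tensor, torch.Tensor]:
    text = torch.tensor([7, 8, 9, 4, 5])
    pitch = torch.zeros(5)
    pace = torch.ones(5)
    batch_lengths = torch.tensor([0, 3, 5])
    texts, pitches, paces, volumes, lens = batch_from_ragged(text, pitch, pace, batch_lengths)
    # texts -> tensor([[ 7,  8,  9], [ 4,  5, -1]]); lens -> tensor([3, 2])
    return texts, lens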
def sample_tts_input(
export_config, device, max_batch=1, max_dim=127,
):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
sz = (max_batch * max_dim,) if export_config["enable_ragged_batches"] else (max_batch, max_dim)
inp = torch.randint(*export_config["emb_range"], sz, device=device, dtype=torch.int64)
pitch = torch.randn(sz, device=device, dtype=torch.float32) * 0.5
pace = torch.clamp(torch.randn(sz, device=device, dtype=torch.float32) * 0.1 + 1.0, min=0.2)
inputs = {'text': inp, 'pitch': pitch, 'pace': pace}
if export_config["enable_ragged_batches"]:
batch_lengths = torch.zeros((max_batch + 1), device=device, dtype=torch.int32)
left_over_size = sz[0]
batch_lengths[0] = 0
for i in range(1, max_batch):
equal_len = (left_over_size - (max_batch - i)) // (max_batch - i)
length = torch.randint(equal_len // 2, equal_len, (1,), device=device, dtype=torch.int32)
batch_lengths[i] = length + batch_lengths[i - 1]
left_over_size -= length.detach().cpu().numpy()[0]
batch_lengths[-1] = left_over_size + batch_lengths[-2]
sum = 0
index = 1
while index < len(batch_lengths):
sum += batch_lengths[index] - batch_lengths[index - 1]
index += 1
assert sum == sz[0], f"sum: {sum}, sz: {sz[0]}, lengths:{batch_lengths}"
else:
batch_lengths = torch.randint(max_dim // 2, max_dim, (max_batch,), device=device, dtype=torch.int32)
batch_lengths[0] = max_dim
inputs['batch_lengths'] = batch_lengths
if export_config["enable_volume"]:
volume = torch.clamp(torch.randn(sz, device=device, dtype=torch.float32) * 0.1 + 1, min=0.01)
inputs['volume'] = volume
if "num_speakers" in export_config:
inputs['speaker'] = torch.randint(
0, export_config["num_speakers"], (max_batch,), device=device, dtype=torch.int64
)
return inputs
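# Illustrative sketch (not part of the original NeMo module): generates dummy export inputs
# for a non-ragged batch. The export_config keys below mirror the ones read by the function;
# the embedding range and sizes are arbitrary example values.
def _example_sample_tts_input() -> dict:
    export_config = {
        "enable_ragged_batches": False,
        "enable_volume": True,
        "emb_range": (0, 128),
    }
    inputs = sample_tts_input(export_config, device=torch.device("cpu"), max_batch=2, max_dim=16)
    # inputs contains 'text', 'pitch', 'pace', 'volume' tensors of shape (2, 16)
    # and 'batch_lengths' of shape (2,).
    return inputs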
@deprecated(
explanation="But it will not be removed until a further notice. G2P object root directory "
"`nemo_text_processing.g2p` has been replaced with `nemo.collections.tts.g2p`. "
"Please use the latter instead as of NeMo 1.18.0."
)
def g2p_backward_compatible_support(g2p_target: str) -> str:
# for backward compatibility
g2p_target_new = g2p_target.replace("nemo_text_processing.g2p", "nemo.collections.tts.g2p")
return g2p_target_new
| NeMo-main | nemo/collections/tts/parts/utils/helpers.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Tuple
import librosa
import numpy as np
import torch
from nemo.collections.asr.models import EncDecClassificationModel
from nemo.collections.tts.parts.utils.tts_dataset_utils import normalize_volume
from nemo.utils import logging
class AudioTrimmer(ABC):
"""Interface for silence trimming implementations
"""
@abstractmethod
def trim_audio(self, audio: np.array, sample_rate: int, audio_id: str) -> Tuple[np.array, int, int]:
"""Trim starting and trailing silence from the input audio.
Args:
audio: Numpy array containing audio samples. Float [-1.0, 1.0] format.
sample_rate: Sample rate of input audio.
audio_id: String identifier (eg. file name) used for logging.
Returns numpy array with trimmed audio, and integer sample indices representing the start and end
of speech within the original audio array.
"""
raise NotImplementedError
class EnergyAudioTrimmer(AudioTrimmer):
def __init__(
self,
db_threshold: int = 50,
ref_amplitude: float = 1.0,
speech_frame_threshold: int = 1,
trim_win_length: int = 2048,
trim_hop_length: int = 512,
pad_seconds: float = 0.1,
volume_norm: bool = True,
) -> None:
"""Energy/power based silence trimming using Librosa backend.
Args:
db_threshold: Audio frames at least db_threshold decibels below ref_amplitude will be
considered silence.
ref_amplitude: Amplitude threshold for classifying speech versus silence.
speech_frame_threshold: Start and end of speech will be detected where there are at least
speech_frame_threshold consecutive audio frames classified as speech. Setting this value higher
is more robust to false-positives (silence detected as speech), but setting it too high may result
in very short speech segments being cut out from the audio.
            trim_win_length: Length of audio frames to use when doing speech detection. This does not need to match
                the win_length used in any other part of the code or model.
            trim_hop_length: Stride of audio frames to use when doing speech detection. This does not need to match
                the hop_length used in any other part of the code or model.
pad_seconds: Audio duration in seconds to keep before and after each speech segment.
Set this to at least 0.1 to avoid cutting off any speech audio, with larger values
being safer but increasing the average silence duration left afterwards.
volume_norm: Whether to normalize the volume of audio before doing speech detection.
"""
assert db_threshold >= 0
assert ref_amplitude >= 0
assert speech_frame_threshold > 0
assert trim_win_length > 0
assert trim_hop_length > 0
self.db_threshold = db_threshold
self.ref_amplitude = ref_amplitude
self.speech_frame_threshold = speech_frame_threshold
self.trim_win_length = trim_win_length
self.trim_hop_length = trim_hop_length
self.pad_seconds = pad_seconds
self.volume_norm = volume_norm
def trim_audio(self, audio: np.array, sample_rate: int, audio_id: str = "") -> Tuple[np.array, int, int]:
if self.volume_norm:
# Normalize volume so we have a fixed scale relative to the reference amplitude
audio = normalize_volume(audio=audio, volume_level=1.0)
speech_frames = librosa.effects._signal_to_frame_nonsilent(
audio,
ref=self.ref_amplitude,
frame_length=self.trim_win_length,
hop_length=self.trim_hop_length,
top_db=self.db_threshold,
)
start_frame, end_frame = get_start_and_end_of_speech_frames(
is_speech=speech_frames, speech_frame_threshold=self.speech_frame_threshold, audio_id=audio_id,
)
if not start_frame and not end_frame:
return np.array([]), 0, 0
start_sample = librosa.core.frames_to_samples(start_frame, hop_length=self.trim_hop_length)
end_sample = librosa.core.frames_to_samples(end_frame, hop_length=self.trim_hop_length)
start_sample, end_sample = pad_sample_indices(
start_sample=start_sample,
end_sample=end_sample,
max_sample=audio.shape[0],
sample_rate=sample_rate,
pad_seconds=self.pad_seconds,
)
trimmed_audio = audio[start_sample:end_sample]
return trimmed_audio, start_sample, end_sample
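# Illustrative usage sketch (not part of the original NeMo module): trims silence from a
# mono waveform loaded with librosa. The file path and thresholds are assumptions chosen
# for the example, not values used elsewhere in NeMo.
def _example_energy_trim(audio_path: str = "example.wav") -> Tuple[np.array, int, int]:
    audio, sample_rate = librosa.load(audio_path, sr=None, mono=True)
    trimmer = EnergyAudioTrimmer(db_threshold=50, pad_seconds=0.1)
    return trimmer.trim_audio(audio=audio, sample_rate=sample_rate, audio_id=audio_path)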
class VadAudioTrimmer(AudioTrimmer):
def __init__(
self,
model_name: str = "vad_multilingual_marblenet",
vad_sample_rate: int = 16000,
vad_threshold: float = 0.5,
device: str = "cpu",
speech_frame_threshold: int = 1,
trim_win_length: int = 4096,
trim_hop_length: int = 1024,
pad_seconds: float = 0.1,
volume_norm: bool = True,
) -> None:
"""Voice activity detection (VAD) based silence trimming.
Args:
model_name: NeMo VAD model to load. Valid configurations can be found with
EncDecClassificationModel.list_available_models()
vad_sample_rate: Sample rate used for pretrained VAD model.
vad_threshold: Softmax probability [0, 1] of VAD output, above which audio frames will be classified
as speech.
device: Device "cpu" or "cuda" to use for running the VAD model.
            trim_win_length: Length of audio frames to use when doing speech detection. This does not need to match
                the win_length used in any other part of the code or model.
            trim_hop_length: Stride of audio frames to use when doing speech detection. This does not need to match
                the hop_length used in any other part of the code or model.
pad_seconds: Audio duration in seconds to keep before and after each speech segment.
Set this to at least 0.1 to avoid cutting off any speech audio, with larger values
being safer but increasing the average silence duration left afterwards.
volume_norm: Whether to normalize the volume of audio before doing speech detection.
"""
assert vad_sample_rate > 0
assert vad_threshold >= 0
assert speech_frame_threshold > 0
assert trim_win_length > 0
assert trim_hop_length > 0
self.device = device
self.vad_model = EncDecClassificationModel.from_pretrained(model_name=model_name).eval().to(self.device)
self.vad_sample_rate = vad_sample_rate
self.vad_threshold = vad_threshold
self.speech_frame_threshold = speech_frame_threshold
self.trim_win_length = trim_win_length
self.trim_hop_length = trim_hop_length
        # Window shift needed in order to center frames
self.trim_shift = self.trim_win_length // 2
self.pad_seconds = pad_seconds
self.volume_norm = volume_norm
def _detect_speech(self, audio: np.array) -> np.array:
if audio.shape[0] < self.trim_win_length:
return np.array([])
# [num_frames, win_length]
audio_frames = librosa.util.frame(
audio, frame_length=self.trim_win_length, hop_length=self.trim_hop_length
).transpose()
audio_frame_lengths = audio_frames.shape[0] * [self.trim_win_length]
# [num_frames, win_length]
audio_signal = torch.tensor(audio_frames, dtype=torch.float32, device=self.device)
# [1]
audio_signal_len = torch.tensor(audio_frame_lengths, dtype=torch.int32, device=self.device)
# VAD outputs 2 values for each audio frame with logits indicating the likelihood that
# each frame is non-speech or speech, respectively.
# [num_frames, 2]
log_probs = self.vad_model(input_signal=audio_signal, input_signal_length=audio_signal_len)
probs = torch.softmax(log_probs, dim=-1)
probs = probs.detach().cpu().numpy()
# [num_frames]
speech_probs = probs[:, 1]
speech_frames = speech_probs >= self.vad_threshold
return speech_frames
def _scale_sample_indices(self, start_sample: int, end_sample: int, sample_rate: int) -> Tuple[int, int]:
sample_rate_ratio = sample_rate / self.vad_sample_rate
start_sample = int(sample_rate_ratio * start_sample)
end_sample = int(sample_rate_ratio * end_sample)
return start_sample, end_sample
def trim_audio(self, audio: np.array, sample_rate: int, audio_id: str = "") -> Tuple[np.array, int, int]:
if sample_rate == self.vad_sample_rate:
vad_audio = audio
else:
# Resample audio to match sample rate of VAD model
vad_audio = librosa.resample(audio, orig_sr=sample_rate, target_sr=self.vad_sample_rate)
if self.volume_norm:
# Normalize volume so we have a fixed scale relative to the reference amplitude
vad_audio = normalize_volume(audio=vad_audio, volume_level=1.0)
speech_frames = self._detect_speech(audio=vad_audio)
start_frame, end_frame = get_start_and_end_of_speech_frames(
is_speech=speech_frames, speech_frame_threshold=self.speech_frame_threshold, audio_id=audio_id,
)
if not start_frame and not end_frame:
return np.array([]), 0, 0
if start_frame == 0:
start_sample = 0
else:
start_sample = librosa.core.frames_to_samples(start_frame, hop_length=self.trim_hop_length)
start_sample += self.trim_shift
# Avoid trimming off the end because VAD model is not trained to classify partial end frames.
if end_frame == speech_frames.shape[0]:
end_sample = vad_audio.shape[0]
else:
end_sample = librosa.core.frames_to_samples(end_frame, hop_length=self.trim_hop_length)
end_sample += self.trim_shift
if sample_rate != self.vad_sample_rate:
# Convert sample indices back to input sample rate
start_sample, end_sample = self._scale_sample_indices(
start_sample=start_sample, end_sample=end_sample, sample_rate=sample_rate
)
start_sample, end_sample = pad_sample_indices(
start_sample=start_sample,
end_sample=end_sample,
max_sample=audio.shape[0],
sample_rate=sample_rate,
pad_seconds=self.pad_seconds,
)
trimmed_audio = audio[start_sample:end_sample]
return trimmed_audio, start_sample, end_sample
def get_start_and_end_of_speech_frames(
is_speech: np.array, speech_frame_threshold: int, audio_id: str = ""
) -> Tuple[int, int]:
"""Finds the speech frames corresponding to the start and end of speech for an utterance.
Args:
is_speech: [num_frames] boolean array with true entries labeling speech frames.
speech_frame_threshold: The number of consecutive speech frames required to classify the speech boundaries.
audio_id: String identifier (eg. file name) used for logging.
Returns integers representing the frame indices of the start (inclusive) and end (exclusive) of speech.
"""
num_frames = is_speech.shape[0]
# Iterate forwards over the utterance until we find the first speech_frame_threshold consecutive speech frames.
start_frame = None
for i in range(0, num_frames - speech_frame_threshold + 1):
high_i = i + speech_frame_threshold
if all(is_speech[i:high_i]):
start_frame = i
break
# Iterate backwards over the utterance until we find the last speech_frame_threshold consecutive speech frames.
end_frame = None
for i in range(num_frames, speech_frame_threshold - 1, -1):
low_i = i - speech_frame_threshold
if all(is_speech[low_i:i]):
end_frame = i
break
if start_frame is None or end_frame is None:
# Algorithm is symmetric, so if the start is not found then the end should also not be found.
logging.warning(f"Could not find start or end of speech for '{audio_id}'")
return 0, 0
return start_frame, end_frame
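# Illustrative sketch (not part of the original NeMo module): locates speech boundaries in
# a toy frame-level speech/non-speech array with a threshold of 2 consecutive frames.
def _example_speech_boundaries() -> Tuple[int, int]:
    is_speech = np.array([False, False, True, True, True, False, True, True, False])
    start_frame, end_frame = get_start_and_end_of_speech_frames(
        is_speech=is_speech, speech_frame_threshold=2, audio_id="example_utterance"
    )
    # start_frame == 2 (first run of 2 consecutive speech frames begins here)
    # end_frame == 8 (exclusive index just past the last run of 2 consecutive speech frames)
    return start_frame, end_frame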
def pad_sample_indices(
start_sample: int, end_sample: int, max_sample: int, sample_rate: int, pad_seconds: float
) -> Tuple[int, int]:
"""Shift the input sample indices by pad_seconds in front and back within [0, max_sample]
Args:
start_sample: Start sample index
end_sample: End sample index
max_sample: Maximum sample index
sample_rate: Sample rate of audio
pad_seconds: Amount to pad/shift the indices by.
Returns the sample indices after padding by the input amount.
"""
pad_samples = int(pad_seconds * sample_rate)
start_sample = start_sample - pad_samples
end_sample = end_sample + pad_samples
start_sample = max(0, start_sample)
end_sample = min(max_sample, end_sample)
return start_sample, end_sample
| NeMo-main | nemo/collections/tts/parts/preprocessing/audio_trimming.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Optional
import torch
from nemo.utils.decorators import experimental
@experimental
class FeatureProcessor(ABC):
@abstractmethod
def process(self, training_example: dict) -> None:
"""
Process the input training example dictionary, modifying necessary fields in place.
Args:
training_example: training example dictionary.
"""
class FeatureScaler(FeatureProcessor):
def __init__(self, field: str, add_value: float = 0.0, div_value: float = 1.0):
"""
Scales a field by constant factors. For example, for mean-variance normalization.
Specifically: input[field] = (input[field] + add_value) / div_value
Args:
field: Field to scale
add_value: Constant float value to add to feature.
div_value: Constant float value to divide feature by.
"""
self.field = field
self.add_value = add_value
self.div_value = div_value
def process(self, training_example: dict) -> None:
feature = training_example[self.field]
feature = (feature + self.add_value) / self.div_value
training_example[self.field] = feature
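# Illustrative sketch (not part of the original NeMo module): shifts and scales a pitch
# feature, here with an assumed mean of 100.0 and standard deviation of 50.0.
def _example_feature_scaler() -> dict:
    processor = FeatureScaler(field="pitch", add_value=-100.0, div_value=50.0)
    training_example = {"pitch": torch.tensor([50.0, 100.0, 200.0])}
    processor.process(training_example)
    # training_example["pitch"] -> tensor([-1., 0., 2.])
    return training_example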
class LogCompression(FeatureProcessor):
def __init__(self, field: str, log_zero_guard_type: str = "add", log_zero_guard_value: float = 1.0):
"""
Apply log compression to a field.
By default: input[field] = log(1.0 + input[field])
For clamp mode: input[field] = log(max(log_zero_guard_value, input[field]))
Args:
field: Field to apply log compression to.
log_zero_guard_type: Method to avoid logarithm approaching -inf, either "add" or "clamp".
log_zero_guard_value: Value to add or clamp input with.
"""
self.field = field
if log_zero_guard_type == "add":
self.guard_fn = self._add_guard
elif log_zero_guard_type == "clamp":
self.guard_fn = self._clamp_guard
else:
raise ValueError(f"Unsupported log zero guard type: '{log_zero_guard_type}'")
self.guard_type = log_zero_guard_type
self.guard_value = log_zero_guard_value
def _add_guard(self, feature: torch.Tensor):
return feature + self.guard_value
def _clamp_guard(self, feature: torch.Tensor):
return torch.clamp(feature, min=self.guard_value)
def process(self, training_example: dict) -> None:
feature = training_example[self.field]
feature = self.guard_fn(feature)
feature = torch.log(feature)
training_example[self.field] = feature
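# Illustrative sketch (not part of the original NeMo module): log-compresses an energy
# feature using the clamp guard so zero values map to log(1e-5) instead of -inf.
def _example_log_compression() -> dict:
    processor = LogCompression(field="energy", log_zero_guard_type="clamp", log_zero_guard_value=1e-5)
    training_example = {"energy": torch.tensor([0.0, 1.0, 10.0])}
    processor.process(training_example)
    # training_example["energy"] -> approximately tensor([-11.5129, 0.0000, 2.3026])
    return training_example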
class MeanVarianceNormalization(FeatureProcessor):
def __init__(self, field: str, stats_path: Path, mask_field: Optional[str] = "voiced_mask"):
"""
        Apply mean and variance normalization to the input field. Statistics are provided in JSON format, and can be
        computed using scripts.dataset_processing.tts.compute_feature_stats.py
        Specifically: input[field] = (input[field] - mean) / standard_deviation
Stats file format example for field 'pitch':
{
"default": {
"pitch_mean": 100.0,
"pitch_std": 50.0,
}
}
Args:
field: Field to apply normalization to.
stats_path: JSON file with feature mean and variance.
mask_field: Optional, field in example dictionary with boolean array indicating which values to
mask to 0. Defaults to 'voiced_mask', expected to be computed by pyin pitch estimator.
"""
self.field = field
self.mask_field = mask_field
with open(stats_path, 'r', encoding="utf-8") as stats_f:
stats_dict = json.load(stats_f)
self.mean = stats_dict["default"][f"{self.field}_mean"]
self.std = stats_dict["default"][f"{self.field}_std"]
def process(self, training_example: dict) -> None:
feature = training_example[self.field]
feature = (feature - self.mean) / self.std
if self.mask_field:
voiced_mask = training_example[self.mask_field]
feature[~voiced_mask] = 0.0
training_example[self.field] = feature
class MeanVarianceSpeakerNormalization(FeatureProcessor):
def __init__(
self,
field: str,
stats_path: Path,
speaker_field: str = "speaker",
mask_field: Optional[str] = "voiced_mask",
fallback_to_default: bool = False,
):
"""
        Apply speaker-level mean and variance normalization to the input field. Statistics are provided in JSON format,
        and can be computed using scripts.dataset_processing.tts.compute_feature_stats.py
        Specifically: input[field] = (input[field] - speaker_mean) / speaker_standard_deviation
Stats file format example for field 'pitch':
{
"default": {
"pitch_mean": 100.0,
"pitch_std": 50.0,
},
"speaker1": {
"pitch_mean": 110.0,
"pitch_std": 45.0,
},
"speaker2": {
"pitch_mean": 105.0,
"pitch_std": 30.0,
},
...
}
Args:
field: Field to apply normalization to.
stats_path: JSON file with feature mean and variance.
speaker_field: field containing speaker ID string.
mask_field: Optional, field in example dictionary with boolean array indicating which values to
mask to 0. Defaults to 'voiced_mask', expected to be computed by pyin pitch estimator.
fallback_to_default: Whether to use 'default' feature statistics when speaker is not found in
the statistics dictionary.
"""
self.field = field
self.key_mean = f"{self.field}_mean"
self.key_std = f"{self.field}_std"
self.speaker_field = speaker_field
self.mask_field = mask_field
self.fallback_to_default = fallback_to_default
with open(stats_path, 'r', encoding="utf-8") as stats_f:
self.stats_dict = json.load(stats_f)
def process(self, training_example: dict) -> None:
feature = training_example[self.field]
speaker = training_example[self.speaker_field]
if speaker in self.stats_dict:
stats = self.stats_dict[speaker]
elif self.fallback_to_default:
stats = self.stats_dict["default"]
else:
raise ValueError(f"Statistics not found for speaker: {speaker}")
feature_mean = stats[self.key_mean]
feature_std = stats[self.key_std]
feature = (feature - feature_mean) / feature_std
if self.mask_field:
mask = training_example[self.mask_field]
feature[~mask] = 0.0
training_example[self.field] = feature
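# A minimal usage sketch for the processors above: the field name "pitch", the statistics
# values, and the temporary stats file are illustrative assumptions only.
def _example_mean_variance_normalization():
    import json
    import tempfile
    import torch
    example = {
        "pitch": torch.tensor([0.0, 110.0, 220.0, 0.0]),
        "voiced_mask": torch.tensor([False, True, True, False]),
    }
    # Toy stats file following the documented format.
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as stats_f:
        json.dump({"default": {"pitch_mean": 165.0, "pitch_std": 55.0}}, stats_f)
        stats_path = stats_f.name
    # Processors mutate the example dict in place.
    MeanVarianceNormalization(field="pitch", stats_path=stats_path).process(example)
    # Voiced frames are standardized; unvoiced frames are zeroed via "voiced_mask".
    return example["pitch"]  # tensor([ 0., -1.,  1.,  0.])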
| NeMo-main | nemo/collections/tts/parts/preprocessing/feature_processors.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/tts/parts/preprocessing/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import librosa
import numpy as np
import torch
from torch import Tensor
from nemo.collections.asr.modules import AudioToMelSpectrogramPreprocessor
from nemo.collections.tts.parts.utils.tts_dataset_utils import get_audio_filepaths, stack_tensors
from nemo.utils.decorators import experimental
@experimental
class Featurizer(ABC):
def __init__(self, feature_names: List[str]) -> None:
self.feature_names = feature_names
@abstractmethod
def save(self, manifest_entry: Dict[str, Any], audio_dir: Path, feature_dir: Path) -> None:
"""
Save feature value to disk for given manifest entry.
Args:
manifest_entry: Manifest entry dictionary.
audio_dir: base directory where audio is stored.
feature_dir: base directory where features will be stored.
"""
@abstractmethod
def load(self, manifest_entry: Dict[str, Any], audio_dir: Path, feature_dir: Path) -> Dict[str, Tensor]:
"""
Read saved feature value for given manifest entry.
Args:
manifest_entry: Manifest entry dictionary.
audio_dir: base directory where audio is stored.
feature_dir: base directory where features were stored by save().
Returns:
Dictionary of feature names to Tensors
"""
@abstractmethod
def collate_fn(self, train_batch: List[Dict[str, Tensor]]) -> Dict[str, Tensor]:
"""
Combine list/batch of features into a feature dictionary.
"""
raise NotImplementedError
def _get_feature_filepath(
manifest_entry: Dict[str, Any], audio_dir: Path, feature_dir: Path, feature_name: str
) -> Path:
"""
Get the absolute path for the feature file corresponding to the input manifest entry
Example: audio_filepath "<audio_dir>/speaker1/audio1.wav" becomes
feature_filepath "<feature_dir>/<feature_name>/speaker1/audio1.pt"
"""
_, audio_filepath_rel = get_audio_filepaths(manifest_entry=manifest_entry, audio_dir=audio_dir)
feature_filepath = feature_dir / feature_name / audio_filepath_rel.with_suffix(".pt")
return feature_filepath
def _save_pt_feature(
feature_name: Optional[str],
feature_tensor: Tensor,
manifest_entry: Dict[str, Any],
audio_dir: Path,
feature_dir: Path,
) -> None:
"""
If feature_name is provided, save feature as .pt file.
"""
if feature_name is None:
return
feature_filepath = _get_feature_filepath(
manifest_entry=manifest_entry, audio_dir=audio_dir, feature_dir=feature_dir, feature_name=feature_name
)
feature_filepath.parent.mkdir(exist_ok=True, parents=True)
torch.save(feature_tensor, feature_filepath)
def _load_pt_feature(
feature_dict: Dict[str, Tensor],
feature_name: Optional[str],
manifest_entry: Dict[str, Any],
audio_dir: Path,
feature_dir: Path,
) -> None:
"""
If feature_name is provided, load feature into feature_dict from .pt file.
"""
if feature_name is None:
return
feature_filepath = _get_feature_filepath(
manifest_entry=manifest_entry, audio_dir=audio_dir, feature_dir=feature_dir, feature_name=feature_name
)
feature_tensor = torch.load(feature_filepath)
feature_dict[feature_name] = feature_tensor
def _collate_feature(
feature_dict: Dict[str, Tensor], feature_name: Optional[str], train_batch: List[Dict[str, Tensor]]
) -> None:
if feature_name is None:
return
feature_tensors = []
for example in train_batch:
feature_tensor = example[feature_name]
feature_tensors.append(feature_tensor)
max_len = max([f.shape[0] for f in feature_tensors])
stacked_features = stack_tensors(feature_tensors, max_lens=[max_len])
feature_dict[feature_name] = stacked_features
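# A sketch of the on-disk layout implied by _get_feature_filepath: the audio path relative to
# audio_dir is mirrored under <feature_dir>/<feature_name> with a ".pt" suffix. The
# "audio_filepath" manifest key and the paths below are assumptions for illustration only.
def _example_feature_filepath():
    entry = {"audio_filepath": "/data/audio/speaker1/utt1.wav"}
    return _get_feature_filepath(
        manifest_entry=entry,
        audio_dir=Path("/data/audio"),
        feature_dir=Path("/data/features"),
        feature_name="pitch",
    )  # expected: Path("/data/features/pitch/speaker1/utt1.pt")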
class MelSpectrogramFeaturizer:
def __init__(
self,
feature_name: str = "mel_spec",
sample_rate: int = 22050,
mel_dim: int = 80,
win_length: int = 1024,
hop_length: int = 256,
lowfreq: int = 0,
highfreq: int = 8000,
log: bool = True,
log_zero_guard_type: str = "add",
log_zero_guard_value: float = 1.0,
mel_norm: Optional[Union[str, int]] = None,
) -> None:
self.feature_name = feature_name
self.sample_rate = sample_rate
self.win_length = win_length
self.hop_length = hop_length
self.preprocessor = AudioToMelSpectrogramPreprocessor(
sample_rate=sample_rate,
features=mel_dim,
pad_to=1,
n_window_size=win_length,
n_window_stride=hop_length,
window_size=False,
window_stride=False,
n_fft=win_length,
lowfreq=lowfreq,
highfreq=highfreq,
mag_power=1.0,
log=log,
log_zero_guard_type=log_zero_guard_type,
log_zero_guard_value=log_zero_guard_value,
mel_norm=mel_norm,
normalize=None,
preemph=None,
dither=0.0,
)
def compute_mel_spec(self, manifest_entry: Dict[str, Any], audio_dir: Path) -> Tensor:
"""
Computes mel spectrogram for the input manifest entry.
Args:
manifest_entry: Manifest entry dictionary.
            audio_dir: base directory where audio is stored.
Returns:
[spec_dim, T_spec] float tensor containing spectrogram features.
"""
audio_filepath, _ = get_audio_filepaths(manifest_entry=manifest_entry, audio_dir=audio_dir)
audio, _ = librosa.load(path=audio_filepath, sr=self.sample_rate)
# [1, T_audio]
audio_tensor = torch.tensor(audio[np.newaxis, :], dtype=torch.float32)
# [1]
audio_len_tensor = torch.tensor([audio.shape[0]], dtype=torch.int32)
# [1, spec_dim, T_spec]
spec_tensor, _ = self.preprocessor(input_signal=audio_tensor, length=audio_len_tensor)
# [spec_dim, T_spec]
spec_tensor = spec_tensor.detach()[0]
return spec_tensor
def save(self, manifest_entry: Dict[str, Any], audio_dir: Path, feature_dir: Path) -> None:
spec_tensor = self.compute_mel_spec(manifest_entry=manifest_entry, audio_dir=audio_dir)
_save_pt_feature(
feature_name=self.feature_name,
feature_tensor=spec_tensor,
manifest_entry=manifest_entry,
audio_dir=audio_dir,
feature_dir=feature_dir,
)
def load(self, manifest_entry: Dict[str, Any], audio_dir: Path, feature_dir: Path) -> Dict[str, Tensor]:
feature_dict = {}
_load_pt_feature(
feature_dict=feature_dict,
feature_name=self.feature_name,
manifest_entry=manifest_entry,
audio_dir=audio_dir,
feature_dir=feature_dir,
)
return feature_dict
def collate_fn(self, train_batch: List[Dict[str, Tensor]]) -> Dict[str, Tensor]:
feature_dict = {}
_collate_feature(feature_dict=feature_dict, feature_name=self.feature_name, train_batch=train_batch)
return feature_dict
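# A hypothetical save/load round trip with MelSpectrogramFeaturizer; the manifest entry and
# directory arguments are assumed to follow the usual NeMo manifest conventions.
def _example_mel_featurizer(manifest_entry, audio_dir, feature_dir):
    featurizer = MelSpectrogramFeaturizer(sample_rate=22050, mel_dim=80, win_length=1024, hop_length=256)
    # Compute the spectrogram once and cache it on disk as a .pt file.
    featurizer.save(manifest_entry=manifest_entry, audio_dir=audio_dir, feature_dir=feature_dir)
    # Later (e.g. inside a dataset), read it back from the mirrored relative path.
    features = featurizer.load(manifest_entry=manifest_entry, audio_dir=audio_dir, feature_dir=feature_dir)
    return features["mel_spec"]  # [spec_dim, T_spec]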
class EnergyFeaturizer:
def __init__(self, spec_featurizer: MelSpectrogramFeaturizer, feature_name: str = "energy") -> None:
self.feature_name = feature_name
self.spec_featurizer = spec_featurizer
def compute_energy(self, manifest_entry: Dict[str, Any], audio_dir: Path) -> Tensor:
"""
Computes energy for the input manifest entry.
Args:
manifest_entry: Manifest entry dictionary.
            audio_dir: base directory where audio is stored.
Returns:
[T_spec] float tensor containing energy features.
"""
        # [spec_dim, T_spec]
spec = self.spec_featurizer.compute_mel_spec(manifest_entry=manifest_entry, audio_dir=audio_dir)
        # [T_spec]
energy = torch.linalg.norm(spec, axis=0)
return energy
def save(self, manifest_entry: Dict[str, Any], audio_dir: Path, feature_dir: Path) -> None:
energy_tensor = self.compute_energy(manifest_entry=manifest_entry, audio_dir=audio_dir)
_save_pt_feature(
feature_name=self.feature_name,
feature_tensor=energy_tensor,
manifest_entry=manifest_entry,
audio_dir=audio_dir,
feature_dir=feature_dir,
)
def load(self, manifest_entry: Dict[str, Any], audio_dir: Path, feature_dir: Path) -> Dict[str, Tensor]:
feature_dict = {}
_load_pt_feature(
feature_dict=feature_dict,
feature_name=self.feature_name,
manifest_entry=manifest_entry,
audio_dir=audio_dir,
feature_dir=feature_dir,
)
return feature_dict
def collate_fn(self, train_batch: List[Dict[str, Tensor]]) -> Dict[str, Tensor]:
feature_dict = {}
_collate_feature(feature_dict=feature_dict, feature_name=self.feature_name, train_batch=train_batch)
return feature_dict
class PitchFeaturizer:
def __init__(
self,
pitch_name: Optional[str] = "pitch",
voiced_mask_name: Optional[str] = "voiced_mask",
voiced_prob_name: Optional[str] = None,
sample_rate: int = 22050,
win_length: int = 1024,
hop_length: int = 256,
pitch_fmin: int = librosa.note_to_hz('C2'),
pitch_fmax: int = librosa.note_to_hz('C7'),
) -> None:
self.pitch_name = pitch_name
self.voiced_mask_name = voiced_mask_name
self.voiced_prob_name = voiced_prob_name
self.sample_rate = sample_rate
self.win_length = win_length
self.hop_length = hop_length
self.pitch_fmin = pitch_fmin
self.pitch_fmax = pitch_fmax
def compute_pitch(self, manifest_entry: Dict[str, Any], audio_dir: Path) -> Tuple[Tensor, Tensor, Tensor]:
"""
Computes pitch and optional voiced mask for the input manifest entry.
Args:
manifest_entry: Manifest entry dictionary.
            audio_dir: base directory where audio is stored.
Returns:
pitch: [T_spec] float tensor containing pitch for each audio frame.
voiced_mask: [T_spec] bool tensor indicating whether each audio frame is voiced.
voiced_prob: [T_spec] float array with [0, 1] probability that each audio frame is voiced.
"""
audio_filepath, _ = get_audio_filepaths(manifest_entry=manifest_entry, audio_dir=audio_dir)
audio, _ = librosa.load(path=audio_filepath, sr=self.sample_rate)
pitch, voiced_mask, voiced_prob = librosa.pyin(
audio,
fmin=self.pitch_fmin,
fmax=self.pitch_fmax,
frame_length=self.win_length,
hop_length=self.hop_length,
sr=self.sample_rate,
fill_na=0.0,
)
pitch_tensor = torch.tensor(pitch, dtype=torch.float32)
voiced_mask_tensor = torch.tensor(voiced_mask, dtype=torch.bool)
voiced_prob_tensor = torch.tensor(voiced_prob, dtype=torch.float32)
return pitch_tensor, voiced_mask_tensor, voiced_prob_tensor
def save(self, manifest_entry: Dict[str, Any], audio_dir: Path, feature_dir: Path) -> None:
pitch_tensor, voiced_mask_tensor, voiced_prob_tensor = self.compute_pitch(
manifest_entry=manifest_entry, audio_dir=audio_dir
)
_save_pt_feature(
feature_name=self.pitch_name,
feature_tensor=pitch_tensor,
manifest_entry=manifest_entry,
audio_dir=audio_dir,
feature_dir=feature_dir,
)
_save_pt_feature(
feature_name=self.voiced_mask_name,
feature_tensor=voiced_mask_tensor,
manifest_entry=manifest_entry,
audio_dir=audio_dir,
feature_dir=feature_dir,
)
_save_pt_feature(
feature_name=self.voiced_prob_name,
feature_tensor=voiced_prob_tensor,
manifest_entry=manifest_entry,
audio_dir=audio_dir,
feature_dir=feature_dir,
)
def load(self, manifest_entry: Dict[str, Any], audio_dir: Path, feature_dir: Path) -> Dict[str, Tensor]:
feature_dict = {}
_load_pt_feature(
feature_dict=feature_dict,
feature_name=self.pitch_name,
manifest_entry=manifest_entry,
audio_dir=audio_dir,
feature_dir=feature_dir,
)
_load_pt_feature(
feature_dict=feature_dict,
feature_name=self.voiced_mask_name,
manifest_entry=manifest_entry,
audio_dir=audio_dir,
feature_dir=feature_dir,
)
_load_pt_feature(
feature_dict=feature_dict,
feature_name=self.voiced_prob_name,
manifest_entry=manifest_entry,
audio_dir=audio_dir,
feature_dir=feature_dir,
)
return feature_dict
def collate_fn(self, train_batch: List[Dict[str, Tensor]]) -> Dict[str, Tensor]:
feature_dict = {}
_collate_feature(feature_dict=feature_dict, feature_name=self.pitch_name, train_batch=train_batch)
_collate_feature(feature_dict=feature_dict, feature_name=self.voiced_mask_name, train_batch=train_batch)
_collate_feature(feature_dict=feature_dict, feature_name=self.voiced_prob_name, train_batch=train_batch)
return feature_dict
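# A small sketch of collate_fn behavior: per-example feature dicts are padded to the longest
# example and stacked. The lengths and tensor values below are arbitrary illustrative stand-ins
# for features returned by load().
def _example_pitch_collate():
    featurizer = PitchFeaturizer()
    batch = [
        {"pitch": torch.ones(5), "voiced_mask": torch.ones(5, dtype=torch.bool)},
        {"pitch": torch.ones(3), "voiced_mask": torch.ones(3, dtype=torch.bool)},
    ]
    collated = featurizer.collate_fn(batch)
    return collated["pitch"].shape  # torch.Size([2, 5]), padded to the longest example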
| NeMo-main | nemo/collections/tts/parts/preprocessing/features.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# BSD 3-Clause License
#
# Copyright (c) 2021, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from nemo.collections.tts.modules.submodules import ConditionalInput, ConditionalLayerNorm
from nemo.collections.tts.parts.utils.helpers import binarize_attention_parallel, regulate_len
from nemo.core.classes import NeuralModule, adapter_mixins, typecheck
from nemo.core.neural_types.elements import (
EncodedRepresentation,
Index,
LengthsType,
LogprobsType,
MelSpectrogramType,
ProbsType,
RegressionValuesType,
TokenDurationType,
TokenIndex,
TokenLogDurationType,
)
from nemo.core.neural_types.neural_type import NeuralType
def average_features(pitch, durs):
durs_cums_ends = torch.cumsum(durs, dim=1).long()
durs_cums_starts = torch.nn.functional.pad(durs_cums_ends[:, :-1], (1, 0))
pitch_nonzero_cums = torch.nn.functional.pad(torch.cumsum(pitch != 0.0, dim=2), (1, 0))
pitch_cums = torch.nn.functional.pad(torch.cumsum(pitch, dim=2), (1, 0))
bs, l = durs_cums_ends.size()
n_formants = pitch.size(1)
dcs = durs_cums_starts[:, None, :].expand(bs, n_formants, l)
dce = durs_cums_ends[:, None, :].expand(bs, n_formants, l)
pitch_sums = (torch.gather(pitch_cums, 2, dce) - torch.gather(pitch_cums, 2, dcs)).float()
pitch_nelems = (torch.gather(pitch_nonzero_cums, 2, dce) - torch.gather(pitch_nonzero_cums, 2, dcs)).float()
pitch_avg = torch.where(pitch_nelems == 0.0, pitch_nelems, pitch_sums / pitch_nelems)
return pitch_avg
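# A toy sketch of average_features: frame-level values (e.g. pitch per spectrogram frame) are
# averaged over the non-zero frames covered by each token's duration. Values are illustrative.
def _example_average_features():
    pitch = torch.tensor([[[100.0, 0.0, 200.0, 300.0]]])  # [B=1, 1, T_frames=4]
    durs = torch.tensor([[2, 2]])  # [B=1, T_tokens=2], durations sum to T_frames
    # First token covers frames (100, 0) -> mean over non-zero frames is 100.
    # Second token covers frames (200, 300) -> mean is 250.
    return average_features(pitch, durs)  # tensor([[[100., 250.]]])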
def log_to_duration(log_dur, min_dur, max_dur, mask):
dur = torch.clamp(torch.exp(log_dur) - 1.0, min_dur, max_dur)
dur *= mask.squeeze(2)
return dur
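# A toy sketch of log_to_duration: predicted log-durations are mapped back to clamped frame
# counts, and padded positions are zeroed by the encoder mask. Values are illustrative.
def _example_log_to_duration():
    log_dur = torch.log(torch.tensor([[4.0, 1.0]]))  # [B=1, T=2], i.e. log(dur + 1)
    mask = torch.tensor([[[1.0], [0.0]]])  # [B=1, T=2, 1], second position is padding
    return log_to_duration(log_dur, min_dur=0, max_dur=75, mask=mask)  # approx. tensor([[3., 0.]])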
class ConvReLUNorm(torch.nn.Module, adapter_mixins.AdapterModuleMixin):
def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0, condition_dim=384, condition_types=[]):
super(ConvReLUNorm, self).__init__()
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size // 2))
self.norm = ConditionalLayerNorm(out_channels, condition_dim=condition_dim, condition_types=condition_types)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, signal, conditioning=None):
out = torch.nn.functional.relu(self.conv(signal))
out = self.norm(out.transpose(1, 2), conditioning).transpose(1, 2)
out = self.dropout(out)
if self.is_adapter_available():
out = self.forward_enabled_adapters(out.transpose(1, 2)).transpose(1, 2)
return out
class TemporalPredictor(NeuralModule):
"""Predicts a single float per each temporal location"""
def __init__(self, input_size, filter_size, kernel_size, dropout, n_layers=2, condition_types=[]):
super(TemporalPredictor, self).__init__()
self.cond_input = ConditionalInput(input_size, input_size, condition_types)
self.layers = torch.nn.ModuleList()
for i in range(n_layers):
self.layers.append(
ConvReLUNorm(
input_size if i == 0 else filter_size,
filter_size,
kernel_size=kernel_size,
dropout=dropout,
condition_dim=input_size,
condition_types=condition_types,
)
)
self.fc = torch.nn.Linear(filter_size, 1, bias=True)
# Use for adapter input dimension
self.filter_size = filter_size
@property
def input_types(self):
return {
"enc": NeuralType(('B', 'T', 'D'), EncodedRepresentation()),
"enc_mask": NeuralType(('B', 'T', 1), TokenDurationType()),
"conditioning": NeuralType(('B', 'T', 'D'), EncodedRepresentation(), optional=True),
}
@property
def output_types(self):
return {
"out": NeuralType(('B', 'T'), EncodedRepresentation()),
}
def forward(self, enc, enc_mask, conditioning=None):
enc = self.cond_input(enc, conditioning)
out = enc * enc_mask
out = out.transpose(1, 2)
for layer in self.layers:
out = layer(out, conditioning=conditioning)
out = out.transpose(1, 2)
out = self.fc(out) * enc_mask
return out.squeeze(-1)
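# A shape-only sketch of TemporalPredictor: for encoded text [B, T, D] and a [B, T, 1] mask it
# predicts one float per position (e.g. a log-duration or pitch value). Sizes are illustrative.
def _example_temporal_predictor():
    predictor = TemporalPredictor(input_size=384, filter_size=256, kernel_size=3, dropout=0.1)
    enc = torch.randn(2, 7, 384)
    enc_mask = torch.ones(2, 7, 1)
    out = predictor(enc=enc, enc_mask=enc_mask)
    return out.shape  # torch.Size([2, 7])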
class FastPitchModule(NeuralModule, adapter_mixins.AdapterModuleMixin):
def __init__(
self,
encoder_module: NeuralModule,
decoder_module: NeuralModule,
duration_predictor: NeuralModule,
pitch_predictor: NeuralModule,
energy_predictor: NeuralModule,
aligner: NeuralModule,
speaker_encoder: NeuralModule,
n_speakers: int,
symbols_embedding_dim: int,
pitch_embedding_kernel_size: int,
energy_embedding_kernel_size: int,
n_mel_channels: int = 80,
min_token_duration: int = 0,
max_token_duration: int = 75,
use_log_energy: bool = True,
):
super().__init__()
self.encoder = encoder_module
self.decoder = decoder_module
self.duration_predictor = duration_predictor
self.pitch_predictor = pitch_predictor
self.energy_predictor = energy_predictor
self.aligner = aligner
self.speaker_encoder = speaker_encoder
self.learn_alignment = aligner is not None
self.use_duration_predictor = True
self.binarize = False
self.use_log_energy = use_log_energy
# TODO: combine self.speaker_emb with self.speaker_encoder
# cfg: remove `n_speakers`, create `speaker_encoder.lookup_module`
# state_dict: move `speaker_emb.weight` to `speaker_encoder.lookup_module.table.weight`
if n_speakers > 1 and speaker_encoder is None:
self.speaker_emb = torch.nn.Embedding(n_speakers, symbols_embedding_dim)
else:
self.speaker_emb = None
self.min_token_duration = min_token_duration
self.max_token_duration = max_token_duration
self.pitch_emb = torch.nn.Conv1d(
1,
symbols_embedding_dim,
kernel_size=pitch_embedding_kernel_size,
padding=int((pitch_embedding_kernel_size - 1) / 2),
)
if self.energy_predictor is not None:
self.energy_emb = torch.nn.Conv1d(
1,
symbols_embedding_dim,
kernel_size=energy_embedding_kernel_size,
padding=int((energy_embedding_kernel_size - 1) / 2),
)
# Store values precomputed from training data for convenience
self.register_buffer('pitch_mean', torch.zeros(1))
self.register_buffer('pitch_std', torch.zeros(1))
self.proj = torch.nn.Linear(self.decoder.d_model, n_mel_channels, bias=True)
@property
def input_types(self):
return {
"text": NeuralType(('B', 'T_text'), TokenIndex()),
"durs": NeuralType(('B', 'T_text'), TokenDurationType()),
"pitch": NeuralType(('B', 'T_audio'), RegressionValuesType()),
"energy": NeuralType(('B', 'T_audio'), RegressionValuesType(), optional=True),
"speaker": NeuralType(('B'), Index(), optional=True),
"pace": NeuralType(optional=True),
"spec": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType(), optional=True),
"attn_prior": NeuralType(('B', 'T_spec', 'T_text'), ProbsType(), optional=True),
"mel_lens": NeuralType(('B'), LengthsType(), optional=True),
"input_lens": NeuralType(('B'), LengthsType(), optional=True),
"reference_spec": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType(), optional=True),
"reference_spec_lens": NeuralType(('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
return {
"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
"num_frames": NeuralType(('B'), TokenDurationType()),
"durs_predicted": NeuralType(('B', 'T_text'), TokenDurationType()),
"log_durs_predicted": NeuralType(('B', 'T_text'), TokenLogDurationType()),
"pitch_predicted": NeuralType(('B', 'T_text'), RegressionValuesType()),
"attn_soft": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
"attn_logprob": NeuralType(('B', 'S', 'T_spec', 'T_text'), LogprobsType()),
"attn_hard": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
"attn_hard_dur": NeuralType(('B', 'T_text'), TokenDurationType()),
"pitch": NeuralType(('B', 'T_audio'), RegressionValuesType()),
"energy_pred": NeuralType(('B', 'T_text'), RegressionValuesType()),
"energy_tgt": NeuralType(('B', 'T_audio'), RegressionValuesType()),
}
def get_speaker_embedding(self, batch_size, speaker, reference_spec, reference_spec_lens):
"""spk_emb: Bx1xD"""
if self.speaker_encoder is not None:
spk_emb = self.speaker_encoder(batch_size, speaker, reference_spec, reference_spec_lens).unsqueeze(1)
elif self.speaker_emb is not None:
if speaker is None:
raise ValueError('Please give speaker id to get lookup speaker embedding.')
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
else:
spk_emb = None
return spk_emb
@typecheck()
def forward(
self,
*,
text,
durs=None,
pitch=None,
energy=None,
speaker=None,
pace=1.0,
spec=None,
attn_prior=None,
mel_lens=None,
input_lens=None,
reference_spec=None,
reference_spec_lens=None,
):
if not self.learn_alignment and self.training:
assert durs is not None
assert pitch is not None
# Calculate speaker embedding
spk_emb = self.get_speaker_embedding(
batch_size=text.shape[0],
speaker=speaker,
reference_spec=reference_spec,
reference_spec_lens=reference_spec_lens,
)
# Input FFT
enc_out, enc_mask = self.encoder(input=text, conditioning=spk_emb)
# Predict duration
log_durs_predicted = self.duration_predictor(enc_out, enc_mask, conditioning=spk_emb)
durs_predicted = log_to_duration(
log_dur=log_durs_predicted, min_dur=self.min_token_duration, max_dur=self.max_token_duration, mask=enc_mask
)
attn_soft, attn_hard, attn_hard_dur, attn_logprob = None, None, None, None
if self.learn_alignment and spec is not None:
text_emb = self.encoder.word_emb(text)
attn_soft, attn_logprob = self.aligner(
spec, text_emb.permute(0, 2, 1), enc_mask == 0, attn_prior, conditioning=spk_emb
)
attn_hard = binarize_attention_parallel(attn_soft, input_lens, mel_lens)
attn_hard_dur = attn_hard.sum(2)[:, 0, :]
# Predict pitch
pitch_predicted = self.pitch_predictor(enc_out, enc_mask, conditioning=spk_emb)
if pitch is not None:
if self.learn_alignment and pitch.shape[-1] != pitch_predicted.shape[-1]:
# Pitch during training is per spectrogram frame, but during inference, it should be per character
pitch = average_features(pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
elif not self.learn_alignment:
                # Alignment is not learned, so attn_hard_dur is None; average over predicted durations instead
pitch = average_features(pitch.unsqueeze(1), durs_predicted).squeeze(1)
pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
else:
pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))
enc_out = enc_out + pitch_emb.transpose(1, 2)
# Predict energy
if self.energy_predictor is not None:
energy_pred = self.energy_predictor(enc_out, enc_mask, conditioning=spk_emb).squeeze(-1)
if energy is not None:
# Average energy over characters
if self.learn_alignment:
energy_tgt = average_features(energy.unsqueeze(1), attn_hard_dur)
else:
energy_tgt = average_features(energy.unsqueeze(1), durs_predicted)
if self.use_log_energy:
energy_tgt = torch.log(1.0 + energy_tgt)
energy_emb = self.energy_emb(energy_tgt)
energy_tgt = energy_tgt.squeeze(1)
else:
energy_emb = self.energy_emb(energy_pred.unsqueeze(1))
energy_tgt = None
enc_out = enc_out + energy_emb.transpose(1, 2)
else:
energy_pred = None
energy_tgt = None
if self.learn_alignment and spec is not None:
len_regulated, dec_lens = regulate_len(attn_hard_dur, enc_out, pace)
elif spec is None and durs is not None:
len_regulated, dec_lens = regulate_len(durs, enc_out, pace)
# Use predictions during inference
elif spec is None:
len_regulated, dec_lens = regulate_len(durs_predicted, enc_out, pace)
else:
raise ValueError(
f"Something unexpected happened when 'spec' is not None and 'self.learn_alignment' is False."
)
# Output FFT
dec_out, _ = self.decoder(input=len_regulated, seq_lens=dec_lens, conditioning=spk_emb)
spect = self.proj(dec_out).transpose(1, 2)
return (
spect,
dec_lens,
durs_predicted,
log_durs_predicted,
pitch_predicted,
attn_soft,
attn_logprob,
attn_hard,
attn_hard_dur,
pitch,
energy_pred,
energy_tgt,
)
def infer(
self,
*,
text,
pitch=None,
speaker=None,
energy=None,
pace=1.0,
volume=None,
reference_spec=None,
reference_spec_lens=None,
):
# Calculate speaker embedding
spk_emb = self.get_speaker_embedding(
batch_size=text.shape[0],
speaker=speaker,
reference_spec=reference_spec,
reference_spec_lens=reference_spec_lens,
)
# Input FFT
enc_out, enc_mask = self.encoder(input=text, conditioning=spk_emb)
# Predict duration and pitch
log_durs_predicted = self.duration_predictor(enc_out, enc_mask, conditioning=spk_emb)
durs_predicted = log_to_duration(
log_dur=log_durs_predicted, min_dur=self.min_token_duration, max_dur=self.max_token_duration, mask=enc_mask
)
pitch_predicted = self.pitch_predictor(enc_out, enc_mask, conditioning=spk_emb) + pitch
pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))
enc_out = enc_out + pitch_emb.transpose(1, 2)
if self.energy_predictor is not None:
if energy is not None:
assert energy.shape[-1] == text.shape[-1], f"energy.shape[-1]: {energy.shape[-1]} != len(text)"
energy_emb = self.energy_emb(energy)
else:
energy_pred = self.energy_predictor(enc_out, enc_mask, conditioning=spk_emb).squeeze(-1)
energy_emb = self.energy_emb(energy_pred.unsqueeze(1))
enc_out = enc_out + energy_emb.transpose(1, 2)
# Expand to decoder time dimension
len_regulated, dec_lens = regulate_len(durs_predicted, enc_out, pace)
volume_extended = None
if volume is not None:
volume_extended, _ = regulate_len(durs_predicted, volume.unsqueeze(-1), pace)
volume_extended = volume_extended.squeeze(-1).float()
# Output FFT
dec_out, _ = self.decoder(input=len_regulated, seq_lens=dec_lens, conditioning=spk_emb)
spect = self.proj(dec_out).transpose(1, 2)
return (
spect.to(torch.float),
dec_lens,
durs_predicted,
log_durs_predicted,
pitch_predicted,
volume_extended,
)
class FastPitchSSLModule(NeuralModule):
def __init__(
self,
encoder_module: NeuralModule,
decoder_module: NeuralModule,
duration_predictor: NeuralModule,
pitch_predictor: NeuralModule,
symbols_embedding_dim: int,
pitch_embedding_kernel_size: int,
n_mel_channels: int = 80,
min_token_duration: int = 0,
max_token_duration: int = 75,
):
super().__init__()
self.encoder = encoder_module
self.decoder = decoder_module
self.duration_predictor = duration_predictor
self.pitch_predictor = pitch_predictor
self.min_token_duration = min_token_duration
self.max_token_duration = max_token_duration
if self.pitch_predictor is not None:
self.pitch_emb = torch.nn.Conv1d(
1,
symbols_embedding_dim,
kernel_size=pitch_embedding_kernel_size,
padding=int((pitch_embedding_kernel_size - 1) / 2),
)
# Store values precomputed from training data for convenience
self.register_buffer('pitch_mean', torch.zeros(1))
self.register_buffer('pitch_std', torch.zeros(1))
self.proj = torch.nn.Linear(self.decoder.d_model, n_mel_channels, bias=True)
@property
def input_types(self):
return {
"enc_out": NeuralType(('B', 'T', 'D'), EncodedRepresentation()),
"enc_mask": NeuralType(('B', 'T', 'D'), EncodedRepresentation()),
"durs": NeuralType(('B', 'T_text'), TokenDurationType(), optional=True),
"pitch": NeuralType(('B', 'T_audio'), RegressionValuesType(), optional=True),
"pace": NeuralType(optional=True),
}
@property
def output_types(self):
return {
"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
"num_frames": NeuralType(('B'), TokenDurationType()),
"durs_predicted": NeuralType(('B', 'T_text'), TokenDurationType()),
"log_durs_predicted": NeuralType(('B', 'T_text'), TokenLogDurationType()),
"pitch_predicted": NeuralType(('B', 'T_text'), RegressionValuesType()),
"pitch": NeuralType(('B', 'T_audio'), RegressionValuesType()),
}
@typecheck()
def forward(self, *, enc_out=None, enc_mask=None, durs=None, pitch=None, pace=1.0):
log_durs_predicted, durs_predicted = None, None
if self.duration_predictor is not None:
log_durs_predicted = self.duration_predictor(enc_out, enc_mask)
durs_predicted = log_to_duration(
log_dur=log_durs_predicted,
min_dur=self.min_token_duration,
max_dur=self.max_token_duration,
mask=enc_mask,
)
# Predict pitch
pitch_predicted = None
if self.pitch_predictor is not None:
pitch_predicted = self.pitch_predictor(enc_out, enc_mask)
if pitch is not None:
if pitch.shape[-1] != enc_out.shape[1]:
                    # During inference the provided pitch is already averaged per token, so averaging
                    # is only needed here for frame-level pitch (training)
# TODO: have a flag to indicate whether the pitch is already averaged or not
pitch = average_features(pitch.unsqueeze(1), durs).squeeze(1)
pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
else:
pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))
enc_out = enc_out + pitch_emb.transpose(1, 2)
if durs is not None:
len_regulated, dec_lens = regulate_len(durs, enc_out, pace)
else:
# Use predictions during inference
assert self.duration_predictor is not None, "Duration predictor cannot be none if durs is not provided"
len_regulated, dec_lens = regulate_len(durs_predicted, enc_out, pace)
# Output FFT
dec_out, _ = self.decoder(input=len_regulated, seq_lens=dec_lens)
spect = self.proj(dec_out).transpose(1, 2)
return (
spect,
dec_lens,
durs_predicted,
log_durs_predicted,
pitch_predicted,
pitch,
)
| NeMo-main | nemo/collections/tts/modules/fastpitch.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from einops import rearrange
from torch import nn
from nemo.collections.tts.modules.submodules import ConditionalInput, ConvNorm
from nemo.collections.tts.parts.utils.helpers import binarize_attention_parallel
class AlignmentEncoder(torch.nn.Module):
"""
    Module for aligning text and mel spectrogram.
Args:
n_mel_channels: Dimension of mel spectrogram.
n_text_channels: Dimension of text embeddings.
        n_att_channels: Dimension of the attention feature space that queries and keys are projected into.
temperature: Temperature to scale distance by.
Suggested to be 0.0005 when using dist_type "l2" and 15.0 when using "cosine".
condition_types: List of types for nemo.collections.tts.modules.submodules.ConditionalInput.
dist_type: Distance type to use for similarity measurement. Supports "l2" and "cosine" distance.
"""
def __init__(
self,
n_mel_channels=80,
n_text_channels=512,
n_att_channels=80,
temperature=0.0005,
condition_types=[],
dist_type="l2",
):
super().__init__()
self.temperature = temperature
self.cond_input = ConditionalInput(n_text_channels, n_text_channels, condition_types)
self.softmax = torch.nn.Softmax(dim=3)
self.log_softmax = torch.nn.LogSoftmax(dim=3)
self.key_proj = nn.Sequential(
ConvNorm(n_text_channels, n_text_channels * 2, kernel_size=3, bias=True, w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_text_channels * 2, n_att_channels, kernel_size=1, bias=True),
)
self.query_proj = nn.Sequential(
ConvNorm(n_mel_channels, n_mel_channels * 2, kernel_size=3, bias=True, w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_mel_channels * 2, n_mel_channels, kernel_size=1, bias=True),
torch.nn.ReLU(),
ConvNorm(n_mel_channels, n_att_channels, kernel_size=1, bias=True),
)
if dist_type == "l2":
self.dist_fn = self.get_euclidean_dist
elif dist_type == "cosine":
self.dist_fn = self.get_cosine_dist
else:
raise ValueError(f"Unknown distance type '{dist_type}'")
@staticmethod
def _apply_mask(inputs, mask, mask_value):
if mask is None:
return
mask = rearrange(mask, "B T2 1 -> B 1 1 T2")
inputs.data.masked_fill_(mask, mask_value)
def get_dist(self, keys, queries, mask=None):
"""Calculation of distance matrix.
Args:
            queries (torch.tensor): B x C1 x T1 tensor (typically mel spectrogram frames).
keys (torch.tensor): B x C2 x T2 tensor (text data).
mask (torch.tensor): B x T2 x 1 tensor, binary mask for variable length entries and also can be used
for ignoring unnecessary elements from keys in the resulting distance matrix (True = mask element, False = leave unchanged).
Output:
            dist (torch.tensor): B x 1 x T1 x T2 tensor.
"""
# B x C x T1
queries_enc = self.query_proj(queries)
# B x C x T2
keys_enc = self.key_proj(keys)
# B x 1 x T1 x T2
dist = self.dist_fn(queries_enc=queries_enc, keys_enc=keys_enc)
self._apply_mask(dist, mask, float("inf"))
return dist
@staticmethod
def get_euclidean_dist(queries_enc, keys_enc):
queries_enc = rearrange(queries_enc, "B C T1 -> B C T1 1")
keys_enc = rearrange(keys_enc, "B C T2 -> B C 1 T2")
# B x C x T1 x T2
distance = (queries_enc - keys_enc) ** 2
# B x 1 x T1 x T2
l2_dist = distance.sum(axis=1, keepdim=True)
return l2_dist
@staticmethod
def get_cosine_dist(queries_enc, keys_enc):
queries_enc = rearrange(queries_enc, "B C T1 -> B C T1 1")
keys_enc = rearrange(keys_enc, "B C T2 -> B C 1 T2")
cosine_dist = -torch.nn.functional.cosine_similarity(queries_enc, keys_enc, dim=1)
cosine_dist = rearrange(cosine_dist, "B T1 T2 -> B 1 T1 T2")
return cosine_dist
@staticmethod
def get_durations(attn_soft, text_len, spect_len):
"""Calculation of durations.
Args:
attn_soft (torch.tensor): B x 1 x T1 x T2 tensor.
text_len (torch.tensor): B tensor, lengths of text.
spect_len (torch.tensor): B tensor, lengths of mel spectrogram.
"""
attn_hard = binarize_attention_parallel(attn_soft, text_len, spect_len)
durations = attn_hard.sum(2)[:, 0, :]
assert torch.all(torch.eq(durations.sum(dim=1), spect_len))
return durations
@staticmethod
def get_mean_dist_by_durations(dist, durations, mask=None):
"""Select elements from the distance matrix for the given durations and mask and return mean distance.
Args:
dist (torch.tensor): B x T1 x T2 tensor.
durations (torch.tensor): B x T2 tensor. Dim T2 should sum to T1.
mask (torch.tensor): B x T2 x 1 binary mask for variable length entries and also can be used
for ignoring unnecessary elements in dist by T2 dim (True = mask element, False = leave unchanged).
Output:
mean_dist (torch.tensor): B x 1 tensor.
"""
batch_size, t1_size, t2_size = dist.size()
assert torch.all(torch.eq(durations.sum(dim=1), t1_size))
AlignmentEncoder._apply_mask(dist, mask, 0)
# TODO(oktai15): make it more efficient
mean_dist_by_durations = []
for dist_idx in range(batch_size):
mean_dist_by_durations.append(
torch.mean(
dist[
dist_idx,
torch.arange(t1_size),
torch.repeat_interleave(torch.arange(t2_size), repeats=durations[dist_idx]),
]
)
)
return torch.tensor(mean_dist_by_durations, dtype=dist.dtype, device=dist.device)
@staticmethod
def get_mean_distance_for_word(l2_dists, durs, start_token, num_tokens):
"""Calculates the mean distance between text and audio embeddings given a range of text tokens.
Args:
l2_dists (torch.tensor): L2 distance matrix from Aligner inference. T1 x T2 tensor.
durs (torch.tensor): List of durations corresponding to each text token. T2 tensor. Should sum to T1.
start_token (int): Index of the starting token for the word of interest.
num_tokens (int): Length (in tokens) of the word of interest.
Output:
mean_dist_for_word (float): Mean embedding distance between the word indicated and its predicted audio frames.
"""
# Need to calculate which audio frame we start on by summing all durations up to the start token's duration
start_frame = torch.sum(durs[:start_token]).data
total_frames = 0
dist_sum = 0
# Loop through each text token
for token_ind in range(start_token, start_token + num_tokens):
# Loop through each frame for the given text token
for frame_ind in range(start_frame, start_frame + durs[token_ind]):
# Recall that the L2 distance matrix is shape [spec_len, text_len]
dist_sum += l2_dists[frame_ind, token_ind]
# Update total frames so far & the starting frame for the next token
total_frames += durs[token_ind]
start_frame += durs[token_ind]
return dist_sum / total_frames
def forward(self, queries, keys, mask=None, attn_prior=None, conditioning=None):
"""Forward pass of the aligner encoder.
Args:
            queries (torch.tensor): B x C1 x T1 tensor (typically mel spectrogram frames).
keys (torch.tensor): B x C2 x T2 tensor (text data).
mask (torch.tensor): B x T2 x 1 tensor, binary mask for variable length entries (True = mask element, False = leave unchanged).
attn_prior (torch.tensor): prior for attention matrix.
conditioning (torch.tensor): B x 1 x C2 conditioning embedding
Output:
attn (torch.tensor): B x 1 x T1 x T2 attention mask. Final dim T2 should sum to 1.
attn_logprob (torch.tensor): B x 1 x T1 x T2 log-prob attention mask.
"""
keys = self.cond_input(keys.transpose(1, 2), conditioning).transpose(1, 2)
# B x C x T1
queries_enc = self.query_proj(queries)
# B x C x T2
keys_enc = self.key_proj(keys)
# B x 1 x T1 x T2
distance = self.dist_fn(queries_enc=queries_enc, keys_enc=keys_enc)
attn = -self.temperature * distance
if attn_prior is not None:
attn = self.log_softmax(attn) + torch.log(attn_prior[:, None] + 1e-8)
attn_logprob = attn.clone()
self._apply_mask(attn, mask, -float("inf"))
attn = self.softmax(attn) # softmax along T2
return attn, attn_logprob
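# A shape-only usage sketch of AlignmentEncoder: align 100 mel frames against 20 text
# embeddings. The batch size and lengths below are arbitrary illustrative values.
def _example_alignment_encoder():
    aligner = AlignmentEncoder(n_mel_channels=80, n_text_channels=512, n_att_channels=80)
    mels = torch.randn(2, 80, 100)  # B x n_mel_channels x T1
    text = torch.randn(2, 512, 20)  # B x n_text_channels x T2
    attn, attn_logprob = aligner(mels, text)
    # attn: [2, 1, 100, 20], softmax-normalized over the text axis; attn_logprob has the same shape.
    return attn.shape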
| NeMo-main | nemo/collections/tts/modules/aligner.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from nemo.core.classes import NeuralModule, adapter_mixins
from nemo.core.neural_types.elements import EncodedRepresentation, Index, LengthsType, MelSpectrogramType
from nemo.core.neural_types.neural_type import NeuralType
from nemo.utils import logging
SUPPORTED_CONDITION_TYPES = ["add", "concat", "layernorm"]
def check_support_condition_types(condition_types):
for tp in condition_types:
if tp not in SUPPORTED_CONDITION_TYPES:
raise ValueError(f"Unknown conditioning type {tp}")
def masked_instance_norm(
input: Tensor, mask: Tensor, weight: Tensor, bias: Tensor, momentum: float, eps: float = 1e-5,
) -> Tensor:
r"""Applies Masked Instance Normalization for each channel in each data sample in a batch.
See :class:`~MaskedInstanceNorm1d` for details.
"""
lengths = mask.sum((-1,))
mean = (input * mask).sum((-1,)) / lengths # (N, C)
var = (((input - mean[(..., None)]) * mask) ** 2).sum((-1,)) / lengths # (N, C)
out = (input - mean[(..., None)]) / torch.sqrt(var[(..., None)] + eps) # (N, C, ...)
out = out * weight[None, :][(..., None)] + bias[None, :][(..., None)]
return out
class MaskedInstanceNorm1d(torch.nn.InstanceNorm1d):
r"""Applies Instance Normalization over a masked 3D input
    (a mini-batch of 1D inputs with additional channel dimension).
See documentation of :class:`~torch.nn.InstanceNorm1d` for details.
Shape:
- Input: :math:`(N, C, L)`
- Mask: :math:`(N, 1, L)`
- Output: :math:`(N, C, L)` (same shape as input)
"""
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: float = 0.1,
affine: bool = False,
track_running_stats: bool = False,
) -> None:
super(MaskedInstanceNorm1d, self).__init__(num_features, eps, momentum, affine, track_running_stats)
def forward(self, input: Tensor, mask: Tensor) -> Tensor:
return masked_instance_norm(input, mask, self.weight, self.bias, self.momentum, self.eps,)
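# A sketch of MaskedInstanceNorm1d: statistics are computed only over unmasked time steps, so
# padded frames do not skew the per-channel mean and variance. Sizes are illustrative.
def _example_masked_instance_norm():
    norm = MaskedInstanceNorm1d(num_features=4, affine=True)
    x = torch.randn(2, 4, 10)  # (N, C, L)
    mask = torch.ones(2, 1, 10, dtype=torch.bool)
    mask[1, :, 7:] = False  # pretend the last 3 frames of the second sample are padding
    return norm(x, mask).shape  # torch.Size([2, 4, 10])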
class PartialConv1d(torch.nn.Conv1d):
"""
Zero padding creates a unique identifier for where the edge of the data is, such that the model can almost always identify
exactly where it is relative to either edge given a sufficient receptive field. Partial padding goes to some lengths to remove
    this effect.
"""
__constants__ = ['slide_winsize']
slide_winsize: float
def __init__(self, *args, **kwargs):
super(PartialConv1d, self).__init__(*args, **kwargs)
weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0])
self.register_buffer("weight_maskUpdater", weight_maskUpdater, persistent=False)
self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2]
def forward(self, input, mask_in):
if mask_in is None:
mask = torch.ones(1, 1, input.shape[2], dtype=input.dtype, device=input.device)
else:
mask = mask_in
input = torch.mul(input, mask)
with torch.no_grad():
update_mask = F.conv1d(
mask,
self.weight_maskUpdater,
bias=None,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=1,
)
update_mask_filled = torch.masked_fill(update_mask, update_mask == 0, self.slide_winsize)
mask_ratio = self.slide_winsize / update_mask_filled
update_mask = torch.clamp(update_mask, 0, 1)
mask_ratio = torch.mul(mask_ratio, update_mask)
raw_out = self._conv_forward(input, self.weight, self.bias)
if self.bias is not None:
bias_view = self.bias.view(1, self.out_channels, 1)
output = torch.mul(raw_out - bias_view, mask_ratio) + bias_view
output = torch.mul(output, update_mask)
else:
output = torch.mul(raw_out, mask_ratio)
return output
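# A sketch of PartialConv1d: outputs are rescaled by the fraction of valid (unmasked) samples
# inside each receptive field, so padded samples contribute nothing. Sizes are illustrative.
def _example_partial_conv():
    conv = PartialConv1d(in_channels=1, out_channels=1, kernel_size=3, padding=1, bias=False)
    x = torch.ones(1, 1, 8)
    mask = torch.ones(1, 1, 8)
    mask[..., 5:] = 0.0  # pretend the last 3 samples are padding
    return conv(x, mask).shape  # torch.Size([1, 1, 8])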
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super().__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(self.linear_layer.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module, adapter_mixins.AdapterModuleMixin):
__constants__ = ['use_partial_padding']
use_partial_padding: bool
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
w_init_gain='linear',
use_partial_padding=False,
use_weight_norm=False,
norm_fn=None,
):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.use_partial_padding = use_partial_padding
conv_fn = torch.nn.Conv1d
if use_partial_padding:
conv_fn = PartialConv1d
self.conv = conv_fn(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
if use_weight_norm:
self.conv = torch.nn.utils.weight_norm(self.conv)
if norm_fn is not None:
self.norm = norm_fn(out_channels, affine=True)
else:
self.norm = None
def forward(self, signal, mask=None):
if self.use_partial_padding:
ret = self.conv(signal, mask)
if self.norm is not None:
ret = self.norm(ret, mask)
else:
if mask is not None:
signal = signal.mul(mask)
ret = self.conv(signal)
if self.norm is not None:
ret = self.norm(ret)
if self.is_adapter_available():
ret = self.forward_enabled_adapters(ret.transpose(1, 2)).transpose(1, 2)
return ret
class LocationLayer(torch.nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size, attention_dim):
super().__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = ConvNorm(
2,
attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding,
bias=False,
stride=1,
dilation=1,
)
self.location_dense = LinearNorm(attention_n_filters, attention_dim, bias=False, w_init_gain='tanh')
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(torch.nn.Module):
def __init__(
self,
attention_rnn_dim,
embedding_dim,
attention_dim,
attention_location_n_filters,
attention_location_kernel_size,
):
super().__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim, bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False, w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(
attention_location_n_filters, attention_location_kernel_size, attention_dim,
)
self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory, attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(torch.tanh(processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(-1)
return energies
def forward(
self, attention_hidden_state, memory, processed_memory, attention_weights_cat, mask,
):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
        attention_weights_cat: previous and cumulative attention weights
mask: binary mask for padded data
"""
alignment = self.get_alignment_energies(attention_hidden_state, processed_memory, attention_weights_cat)
if mask is not None:
alignment.data.masked_fill_(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
class Prenet(torch.nn.Module):
def __init__(self, in_dim, sizes, p_dropout=0.5):
super().__init__()
in_sizes = [in_dim] + sizes[:-1]
self.p_dropout = p_dropout
self.layers = torch.nn.ModuleList(
[LinearNorm(in_size, out_size, bias=False) for (in_size, out_size) in zip(in_sizes, sizes)]
)
def forward(self, x, inference=False):
if inference:
for linear in self.layers:
x = F.relu(linear(x))
x0 = x[0].unsqueeze(0)
mask = torch.autograd.Variable(torch.bernoulli(x0.data.new(x0.data.size()).fill_(1 - self.p_dropout)))
mask = mask.expand(x.size(0), x.size(1))
x = x * mask * 1 / (1 - self.p_dropout)
else:
for linear in self.layers:
x = F.dropout(F.relu(linear(x)), p=self.p_dropout, training=True)
return x
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels_int):
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
class Invertible1x1Conv(torch.nn.Module):
"""
The layer outputs both the convolution, and the log determinant
of its weight matrix. If reverse=True it does convolution with
inverse
"""
def __init__(self, c):
super().__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0, bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.linalg.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
self.conv.weight.data = W
self.inv_conv = None
def forward(self, z, reverse: bool = False):
if reverse:
if self.inv_conv is None:
# Inverse convolution - initialized here only for backwards
# compatibility with weights from existing checkpoints.
# Should be moved to init() with next incompatible change.
self.inv_conv = torch.nn.Conv1d(
self.conv.in_channels, self.conv.out_channels, kernel_size=1, stride=1, padding=0, bias=False
)
W_inverse = self.conv.weight.squeeze().data.float().inverse()
W_inverse = Variable(W_inverse[..., None])
self.inv_conv.weight.data = W_inverse
self.inv_conv.to(device=self.conv.weight.device, dtype=self.conv.weight.dtype)
return self.inv_conv(z)
else:
# Forward computation
# shape
W = self.conv.weight.squeeze()
batch_size, group_size, n_of_groups = z.size()
log_det_W = batch_size * n_of_groups * torch.logdet(W.float())
z = self.conv(z)
return (
z,
log_det_W,
)
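# A sketch of the invertibility property: forward returns the transformed tensor and log|det W|,
# and calling the layer again with reverse=True recovers the input up to numerical error.
def _example_invertible_conv():
    layer = Invertible1x1Conv(c=4)
    z = torch.randn(3, 4, 16)  # (batch, channels/groups, time)
    z_fwd, log_det_w = layer(z)
    z_rec = layer(z_fwd, reverse=True)
    return torch.allclose(z, z_rec, atol=1e-4)  # True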
class WaveNet(torch.nn.Module):
"""
This is the WaveNet like layer for the affine coupling. The primary
difference from WaveNet is the convolutions need not be causal. There is
also no dilation size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels, kernel_size):
super().__init__()
assert kernel_size % 2 == 1
assert n_channels % 2 == 0
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels * n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(n_channels, 2 * n_channels, kernel_size, dilation=dilation, padding=padding,)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
            # The last layer feeds only the skip connection, so it does not need the extra residual channels
if i < n_layers - 1:
res_skip_channels = 2 * n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input: Tuple[torch.Tensor, torch.Tensor]):
audio, spect = forward_input[0], forward_input[1]
audio = self.start(audio)
output = torch.zeros_like(audio)
spect = self.cond_layer(spect)
for i in range(self.n_layers):
spect_offset = i * 2 * self.n_channels
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
spect[:, spect_offset : spect_offset + 2 * self.n_channels, :],
self.n_channels,
)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = audio + res_skip_acts[:, : self.n_channels, :]
output = output + res_skip_acts[:, self.n_channels :, :]
else:
output = output + res_skip_acts
return self.end(output)
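# A shape-only sketch of the WaveNet coupling network: for n_in_channels audio channels it emits
# 2 * n_in_channels outputs (the log-scale and bias of the affine coupling). Sizes are illustrative.
def _example_wavenet_coupling():
    wn = WaveNet(n_in_channels=4, n_mel_channels=80, n_layers=4, n_channels=64, kernel_size=3)
    audio = torch.randn(2, 4, 200)
    spect = torch.randn(2, 80, 200)
    return wn((audio, spect)).shape  # torch.Size([2, 8, 200])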
class ConditionalLayerNorm(torch.nn.LayerNorm):
"""
This module is used to condition torch.nn.LayerNorm.
If we don't have any conditions, this will be a normal LayerNorm.
"""
def __init__(self, hidden_dim, condition_dim=None, condition_types=[]):
check_support_condition_types(condition_types)
self.condition = "layernorm" in condition_types
super().__init__(hidden_dim, elementwise_affine=not self.condition)
if self.condition:
self.cond_weight = torch.nn.Linear(condition_dim, hidden_dim)
self.cond_bias = torch.nn.Linear(condition_dim, hidden_dim)
self.init_parameters()
def init_parameters(self):
torch.nn.init.constant_(self.cond_weight.weight, 0.0)
torch.nn.init.constant_(self.cond_weight.bias, 1.0)
torch.nn.init.constant_(self.cond_bias.weight, 0.0)
torch.nn.init.constant_(self.cond_bias.bias, 0.0)
def forward(self, inputs, conditioning=None):
inputs = super().forward(inputs)
# Normalize along channel
if self.condition:
if conditioning is None:
raise ValueError(
"""You should add additional data types as conditions (e.g. speaker id or reference audio)
and define speaker_encoder in your config."""
)
inputs = inputs * self.cond_weight(conditioning)
inputs = inputs + self.cond_bias(conditioning)
return inputs
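# A sketch contrasting the two modes of ConditionalLayerNorm: without "layernorm" in
# condition_types it behaves as a plain LayerNorm; with it, scale and shift are predicted from
# the conditioning vector (e.g. a speaker embedding). Dimensions are illustrative.
def _example_conditional_layer_norm():
    plain = ConditionalLayerNorm(hidden_dim=8)
    conditioned = ConditionalLayerNorm(hidden_dim=8, condition_dim=4, condition_types=["layernorm"])
    x = torch.randn(2, 5, 8)
    spk = torch.randn(2, 1, 4)
    return plain(x).shape, conditioned(x, conditioning=spk).shape  # both torch.Size([2, 5, 8])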
class ConditionalInput(torch.nn.Module):
"""
This module is used to condition any model inputs.
If we don't have any conditions, this will be a normal pass.
"""
def __init__(self, hidden_dim, condition_dim, condition_types=[]):
check_support_condition_types(condition_types)
super().__init__()
self.support_types = ["add", "concat"]
self.condition_types = [tp for tp in condition_types if tp in self.support_types]
self.hidden_dim = hidden_dim
self.condition_dim = condition_dim
if "add" in self.condition_types and condition_dim != hidden_dim:
self.add_proj = torch.nn.Linear(condition_dim, hidden_dim)
if "concat" in self.condition_types:
self.concat_proj = torch.nn.Linear(hidden_dim + condition_dim, hidden_dim)
def forward(self, inputs, conditioning=None):
"""
Args:
inputs (torch.tensor): B x T x C tensor.
conditioning (torch.tensor): B x 1 x C conditioning embedding.
"""
if len(self.condition_types) > 0:
if conditioning is None:
raise ValueError(
"""You should add additional data types as conditions (e.g. speaker id or reference audio)
and define speaker_encoder in your config."""
)
if "add" in self.condition_types:
if self.condition_dim != self.hidden_dim:
conditioning = self.add_proj(conditioning)
inputs = inputs + conditioning
if "concat" in self.condition_types:
conditioning = conditioning.repeat(1, inputs.shape[1], 1)
                # Concatenate along the feature dimension so concat_proj sees hidden_dim + condition_dim features
                inputs = torch.cat([inputs, conditioning], dim=2)
inputs = self.concat_proj(inputs)
return inputs
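# A sketch of "add" conditioning in ConditionalInput: a single conditioning vector per utterance
# is projected to the hidden size (if needed) and added to every time step. Dimensions are
# illustrative.
def _example_conditional_input():
    cond_input = ConditionalInput(hidden_dim=384, condition_dim=192, condition_types=["add"])
    inputs = torch.randn(2, 10, 384)  # B x T x hidden_dim
    conditioning = torch.randn(2, 1, 192)  # B x 1 x condition_dim
    return cond_input(inputs, conditioning).shape  # torch.Size([2, 10, 384])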
class StyleAttention(NeuralModule):
def __init__(self, gst_size=128, n_style_token=10, n_style_attn_head=4):
super(StyleAttention, self).__init__()
token_size = gst_size // n_style_attn_head
self.tokens = torch.nn.Parameter(torch.FloatTensor(n_style_token, token_size))
self.mha = torch.nn.MultiheadAttention(
embed_dim=gst_size,
num_heads=n_style_attn_head,
dropout=0.0,
bias=True,
kdim=token_size,
vdim=token_size,
batch_first=True,
)
torch.nn.init.normal_(self.tokens)
@property
def input_types(self):
return {
"inputs": NeuralType(('B', 'D'), EncodedRepresentation()),
"token_id": NeuralType(('B'), Index(), optional=True),
}
@property
def output_types(self):
return {
"style_emb": NeuralType(('B', 'D'), EncodedRepresentation()),
}
def forward(self, inputs):
batch_size = inputs.size(0)
query = inputs.unsqueeze(1)
tokens = F.tanh(self.tokens).unsqueeze(0).expand(batch_size, -1, -1)
style_emb, _ = self.mha(query=query, key=tokens, value=tokens)
style_emb = style_emb.squeeze(1)
return style_emb
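# Editor's note: hedged sketch of the GST-style attention above; batch size and gst_size are
# illustrative. The query is a single utterance-level reference embedding per example.
def _style_attention_usage_sketch():
    attn = StyleAttention(gst_size=128, n_style_token=10, n_style_attn_head=4)
    ref_emb = torch.randn(8, 128)  # B x D utterance-level reference embedding
    # Each example attends over the learned style-token bank to form its style embedding.
    return attn(ref_emb)  # B x gst_size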
class Conv2DReLUNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=True, dropout=0.0):
super(Conv2DReLUNorm, self).__init__()
self.conv = torch.nn.Conv2d(
in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias
)
self.norm = torch.nn.LayerNorm(out_channels)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, x, x_mask=None):
if x_mask is not None:
x = x * x_mask
# bhwc -> bchw
x = x.contiguous().permute(0, 3, 1, 2)
x = F.relu(self.conv(x))
# bchw -> bhwc
x = x.contiguous().permute(0, 2, 3, 1)
x = self.norm(x)
x = self.dropout(x)
return x
class ReferenceEncoder(NeuralModule):
"""
Encode mel-spectrograms to an utterance level feature
"""
def __init__(self, n_mels, cnn_filters, dropout, gru_hidden, kernel_size, stride, padding, bias):
super(ReferenceEncoder, self).__init__()
self.filter_size = [1] + list(cnn_filters)
self.layers = torch.nn.ModuleList(
[
Conv2DReLUNorm(
in_channels=int(self.filter_size[i]),
out_channels=int(self.filter_size[i + 1]),
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=bias,
dropout=dropout,
)
for i in range(len(cnn_filters))
]
)
post_conv_height = self.calculate_post_conv_lengths(n_mels, n_convs=len(cnn_filters))
self.gru = torch.nn.GRU(
input_size=cnn_filters[-1] * post_conv_height, hidden_size=gru_hidden, batch_first=True,
)
@property
def input_types(self):
return {
"inputs": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
"inputs_lengths": NeuralType(('B'), LengthsType()),
}
@property
def output_types(self):
return {
"out": NeuralType(('B', 'D'), EncodedRepresentation()),
}
def forward(self, inputs, inputs_lengths):
# BMW -> BWMC (M: mels)
x = inputs.transpose(1, 2).unsqueeze(3)
x_lens = inputs_lengths
x_masks = self.lengths_to_masks(x_lens).unsqueeze(2).unsqueeze(3)
for layer in self.layers:
x = layer(x, x_masks)
x_lens = self.calculate_post_conv_lengths(x_lens)
x_masks = self.lengths_to_masks(x_lens).unsqueeze(2).unsqueeze(3)
# BWMC -> BWC
x = x.contiguous().view(x.shape[0], x.shape[1], -1)
self.gru.flatten_parameters()
packed_x = pack_padded_sequence(x, x_lens.cpu(), batch_first=True, enforce_sorted=False)
packed_x, _ = self.gru(packed_x)
x, x_lens = pad_packed_sequence(packed_x, batch_first=True)
x = x[torch.arange(len(x_lens)), (x_lens - 1), :]
return x
@staticmethod
def calculate_post_conv_lengths(lengths, n_convs=1, kernel_size=3, stride=2, pad=1):
"""Batch lengths after n convolution with fixed kernel/stride/pad."""
for _ in range(n_convs):
lengths = (lengths - kernel_size + 2 * pad) // stride + 1
return lengths
@staticmethod
def lengths_to_masks(lengths):
"""Batch of lengths to batch of masks"""
# B -> BxT
masks = torch.arange(lengths.max()).to(lengths.device).expand(
lengths.shape[0], lengths.max()
) < lengths.unsqueeze(1)
return masks
class GlobalStyleToken(NeuralModule):
"""
Global Style Token based Speaker Embedding
"""
def __init__(
self, reference_encoder, gst_size=128, n_style_token=10, n_style_attn_head=4,
):
super(GlobalStyleToken, self).__init__()
self.reference_encoder = reference_encoder
self.style_attention = StyleAttention(
gst_size=gst_size, n_style_token=n_style_token, n_style_attn_head=n_style_attn_head
)
@property
def input_types(self):
return {
"inp": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
"inp_lengths": NeuralType(('B'), LengthsType()),
}
@property
def output_types(self):
return {
"gst": NeuralType(('B', 'D'), EncodedRepresentation()),
}
def forward(self, inp, inp_lengths):
style_embedding = self.reference_encoder(inp, inp_lengths)
gst = self.style_attention(style_embedding)
return gst
class SpeakerLookupTable(torch.nn.Module):
"""
LookupTable based Speaker Embedding
"""
def __init__(self, n_speakers, embedding_dim):
super(SpeakerLookupTable, self).__init__()
self.table = torch.nn.Embedding(n_speakers, embedding_dim)
def forward(self, speaker):
return self.table(speaker)
class SpeakerEncoder(NeuralModule):
"""
    SpeakerEncoder produces a speaker representation.
    This module can combine GST (global style token) based speaker embeddings with lookup-table speaker embeddings.
"""
def __init__(self, lookup_module=None, gst_module=None, precomputed_embedding_dim=None):
"""
lookup_module: Torch module to get lookup based speaker embedding
gst_module: Neural module to get GST based speaker embedding
        precomputed_embedding_dim: Dimension of a precomputed speaker embedding; set this to use precomputed speaker embeddings instead of computing them
"""
super(SpeakerEncoder, self).__init__()
# Multi-speaker embedding
self.lookup_module = lookup_module
# Reference speaker embedding
self.gst_module = gst_module
if precomputed_embedding_dim is not None:
self.precomputed_emb = torch.nn.Parameter(torch.empty(precomputed_embedding_dim))
else:
self.precomputed_emb = None
@property
def input_types(self):
return {
"batch_size": NeuralType(optional=True),
"speaker": NeuralType(('B'), Index(), optional=True),
"reference_spec": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType(), optional=True),
"reference_spec_lens": NeuralType(('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
return {
"embs": NeuralType(('B', 'D'), EncodedRepresentation()),
}
def overwrite_precomputed_emb(self, emb):
self.precomputed_emb = torch.nn.Parameter(emb)
def forward(self, batch_size=None, speaker=None, reference_spec=None, reference_spec_lens=None):
embs = None
# Get Precomputed speaker embedding
if self.precomputed_emb is not None:
return self.precomputed_emb.unsqueeze(0).repeat(batch_size, 1)
# Get Lookup table speaker embedding
if self.lookup_module is not None and speaker is not None:
embs = self.lookup_module(speaker)
# Get GST based speaker embedding
if reference_spec is not None and reference_spec_lens is not None:
if self.gst_module is not None:
out = self.gst_module(reference_spec, reference_spec_lens)
embs = out if embs is None else embs + out
else:
logging.warning("You may add `gst_module` in speaker_encoder to use reference_audio.")
return embs
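# Editor's note: hedged end-to-end sketch composing SpeakerLookupTable, ReferenceEncoder,
# GlobalStyleToken and SpeakerEncoder. Every hyperparameter below is an illustrative
# assumption, not a value taken from a NeMo config.
def _speaker_encoder_usage_sketch():
    lookup = SpeakerLookupTable(n_speakers=4, embedding_dim=128)
    ref_enc = ReferenceEncoder(
        n_mels=80, cnn_filters=[32, 32], dropout=0.1, gru_hidden=128,
        kernel_size=3, stride=2, padding=1, bias=True,
    )
    gst = GlobalStyleToken(reference_encoder=ref_enc, gst_size=128)
    spk_enc = SpeakerEncoder(lookup_module=lookup, gst_module=gst)
    speaker = torch.tensor([0, 3])  # B speaker ids
    ref_mels = torch.randn(2, 80, 120)  # B x n_mels x T_spec reference audio
    ref_lens = torch.tensor([120, 95])
    # Lookup-table and GST embeddings are summed when both sources are provided.
    return spk_enc(speaker=speaker, reference_spec=ref_mels, reference_spec_lens=ref_lens)  # B x 128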
| NeMo-main | nemo/collections/tts/modules/submodules.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from torch import nn
from nemo.collections.tts.modules.attribute_prediction_model import get_attribute_prediction_model
from nemo.collections.tts.modules.common import (
AffineTransformationLayer,
BiLSTM,
ConvAttention,
ExponentialClass,
Invertible1x1Conv,
Invertible1x1ConvLUS,
LinearNorm,
get_radtts_encoder,
)
from nemo.collections.tts.parts.utils.helpers import get_mask_from_lengths, mas_width1, regulate_len
from nemo.core.classes import Exportable, NeuralModule
from nemo.core.neural_types.elements import Index, LengthsType, MelSpectrogramType, TokenDurationType, TokenIndex
from nemo.core.neural_types.neural_type import NeuralType
@torch.jit.script
def pad_dur(dur, txt_enc):
if dur.shape[-1] < txt_enc.shape[-1]:
to_pad = txt_enc.shape[-1] - dur.shape[-1]
dur = F.pad(dur, [0, to_pad])
return dur
@torch.jit.script
def pad_energy_avg_and_f0(energy_avg, f0, max_out_len):
to_pad = int(max_out_len - energy_avg.shape[1])
if to_pad > 0:
f0 = F.pad(f0[None], [0, to_pad])[0]
energy_avg = F.pad(energy_avg[None], [0, to_pad])[0]
to_pad = int(max_out_len - f0.shape[1])
if to_pad > 0:
f0 = F.pad(f0[None], [0, to_pad])[0]
return energy_avg, f0
def adjust_f0(f0, f0_mean, f0_std, vmask_bool, musical_scaling=True):
if f0_mean > 0.0:
if musical_scaling:
f0_mu, f0_sigma = f0[vmask_bool].mean(), f0[vmask_bool].std()
f0_factor = f0_mean / f0_mu
f0[vmask_bool] *= f0_factor
else:
f0_sigma, f0_mu = torch.std_mean(f0[vmask_bool])
f0 = ((f0 - f0_mu) / f0_sigma).to(dtype=f0.dtype)
f0_std = f0_std if f0_std > 0 else f0_sigma
f0 = (f0 * f0_std + f0_mean).to(dtype=f0.dtype)
f0 = f0.masked_fill(~vmask_bool, 0.0)
return f0
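# Editor's note: hedged sketch of the F0 re-scaling helper above; the pitch values are illustrative.
def _adjust_f0_usage_sketch():
    f0 = torch.tensor([[0.0, 180.0, 220.0, 0.0, 200.0]])  # predicted F0 track in Hz, zeros = unvoiced
    voiced = f0 > 0.0
    # Re-center the voiced region to a 140 Hz mean with a 20 Hz std; unvoiced frames stay zero.
    return adjust_f0(f0.clone(), f0_mean=140.0, f0_std=20.0, vmask_bool=voiced, musical_scaling=False)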
class FlowStep(nn.Module):
def __init__(
self,
n_mel_channels,
n_context_dim,
n_layers,
affine_model='simple_conv',
scaling_fn='exp',
matrix_decomposition='',
affine_activation='softplus',
use_partial_padding=False,
):
super(FlowStep, self).__init__()
if matrix_decomposition == 'LUS':
self.invtbl_conv = Invertible1x1ConvLUS(n_mel_channels)
else:
self.invtbl_conv = Invertible1x1Conv(n_mel_channels)
self.affine_tfn = AffineTransformationLayer(
n_mel_channels,
n_context_dim,
n_layers,
affine_model=affine_model,
scaling_fn=scaling_fn,
affine_activation=affine_activation,
use_partial_padding=use_partial_padding,
)
def forward(self, z, context, inverse=False, seq_lens=None):
if inverse: # for inference z-> mel
z = self.affine_tfn(z, context, inverse, seq_lens=seq_lens)
z = self.invtbl_conv(z, inverse)
return z
else: # training mel->z
z, log_det_W = self.invtbl_conv(z)
z, log_s = self.affine_tfn(z, context, seq_lens=seq_lens)
return z, log_det_W, log_s
class RadTTSModule(NeuralModule, Exportable):
"""
    Takes model parameters (modelConfig) from the config file to initialize the RadTTS module.
    Specify the type of training in the include_modules parameter: "decatnvpred" for decoder training and "decatnunvbiasdpmvpredapm" for feature training.
n_speakers (int): Number of speakers
n_speaker_dim (int): number of speakers dimension
n_text (int): Symbols embedding size
n_text_dim (int):
n_flows (int):
n_conv_layers_per_step (int): number of convolution layers per step
dummy_speaker_embedding (bool):
    include_modules (string): A string that describes what to train: "decatnvpred" for decoder training and "decatnunvbiasdpmvpredapm" for feature training.
scaling_fn (string): scaling function
decoder_use_partial_padding (Bool): Set this to True to add partial padding
learn_alignments (Bool): set this to true to learn alignments
attn_use_CTC (Bool): set True to use CTC
n_f0_dims (int): number of Pitch dimension
n_early_size (int):
n_early_every (int):
n_group_size (int):
decoder_use_unvoiced_bias (bool):
context_lstm_w_f0_and_energy (bool):
use_first_order_features (bool):
ap_pred_log_f0 (bool):
dur_model_config: model configuration for duration
f0_model_config: model configuration for Pitch
energy_model_config: model configuration for energy
"""
def __init__(
self,
n_speakers,
n_speaker_dim,
n_text,
n_text_dim,
n_flows,
n_conv_layers_per_step,
n_mel_channels,
dummy_speaker_embedding,
n_early_size,
n_early_every,
n_group_size,
affine_model,
dur_model_config,
f0_model_config,
energy_model_config,
v_model_config=None,
include_modules='dec',
scaling_fn='exp',
matrix_decomposition='',
learn_alignments=False,
affine_activation='softplus',
attn_use_CTC=True,
use_context_lstm=False,
context_lstm_norm=None,
n_f0_dims=0,
n_energy_avg_dims=0,
context_lstm_w_f0_and_energy=True,
use_first_order_features=False,
unvoiced_bias_activation='',
ap_pred_log_f0=False,
**kwargs,
):
super(RadTTSModule, self).__init__()
assert n_early_size % 2 == 0
self.n_mel_channels = n_mel_channels
        self.n_f0_dims = n_f0_dims  # >= 1 to train with f0
        self.n_energy_avg_dims = n_energy_avg_dims  # >= 1 to train with energy
self.decoder_use_partial_padding = kwargs['decoder_use_partial_padding']
self.n_speaker_dim = n_speaker_dim
assert self.n_speaker_dim % 2 == 0
self.speaker_embedding = torch.nn.Embedding(n_speakers, self.n_speaker_dim)
self.embedding = torch.nn.Embedding(n_text, n_text_dim)
self.flows = torch.nn.ModuleList()
self.encoder = get_radtts_encoder(encoder_embedding_dim=n_text_dim)
self.dummy_speaker_embedding = dummy_speaker_embedding
self.learn_alignments = learn_alignments
self.affine_activation = affine_activation
self.include_modules = include_modules
self.attn_use_CTC = bool(attn_use_CTC)
self.use_context_lstm = bool(use_context_lstm)
self.context_lstm_norm = context_lstm_norm
self.context_lstm_w_f0_and_energy = context_lstm_w_f0_and_energy
self.use_first_order_features = bool(use_first_order_features)
self.decoder_use_unvoiced_bias = kwargs['decoder_use_unvoiced_bias']
self.ap_pred_log_f0 = ap_pred_log_f0
self.ap_use_unvoiced_bias = kwargs['ap_use_unvoiced_bias']
if 'atn' in include_modules or 'dec' in include_modules:
if self.learn_alignments:
self.attention = ConvAttention(n_mel_channels, self.n_speaker_dim, n_text_dim)
self.n_flows = n_flows
self.n_group_size = n_group_size
n_flowstep_cond_dims = self.n_speaker_dim + (n_text_dim + n_f0_dims + n_energy_avg_dims) * n_group_size
if self.use_context_lstm:
n_in_context_lstm = self.n_speaker_dim + n_text_dim * n_group_size
n_context_lstm_hidden = int((self.n_speaker_dim + n_text_dim * n_group_size) / 2)
if self.context_lstm_w_f0_and_energy:
n_in_context_lstm = n_f0_dims + n_energy_avg_dims + n_text_dim
n_in_context_lstm *= n_group_size
n_in_context_lstm += self.n_speaker_dim
n_flowstep_cond_dims = self.n_speaker_dim + n_text_dim * n_group_size
self.context_lstm = BiLSTM(
input_size=n_in_context_lstm, hidden_size=n_context_lstm_hidden, num_layers=1,
)
if self.n_group_size > 1:
self.unfold_params = {
'kernel_size': (n_group_size, 1),
'stride': n_group_size,
'padding': 0,
'dilation': 1,
}
self.unfold_mod = nn.Unfold(**self.unfold_params)
self.exit_steps = []
self.n_early_size = n_early_size
n_mel_channels = n_mel_channels * n_group_size
for i in range(self.n_flows):
if i > 0 and i % n_early_every == 0: # early exiting
n_mel_channels -= self.n_early_size
self.exit_steps.append(i)
self.flows.append(
FlowStep(
n_mel_channels,
n_flowstep_cond_dims,
n_conv_layers_per_step,
affine_model,
scaling_fn,
matrix_decomposition,
affine_activation=affine_activation,
use_partial_padding=self.decoder_use_partial_padding,
)
)
if 'dpm' in include_modules:
dur_model_config['hparams']['n_speaker_dim'] = n_speaker_dim
self.dur_pred_layer = get_attribute_prediction_model(dur_model_config)
self.use_unvoiced_bias = False
self.use_vpred_module = False
self.ap_use_voiced_embeddings = kwargs['ap_use_voiced_embeddings']
if self.decoder_use_unvoiced_bias or self.ap_use_unvoiced_bias:
assert unvoiced_bias_activation in {'relu', 'exp'}
self.use_unvoiced_bias = True
if unvoiced_bias_activation == 'relu':
unvbias_nonlin = nn.ReLU()
elif unvoiced_bias_activation == 'exp':
unvbias_nonlin = ExponentialClass()
else:
exit(1) # we won't reach here anyway due to the assertion
self.unvoiced_bias_module = nn.Sequential(LinearNorm(n_text_dim, 1), unvbias_nonlin)
# all situations in which the vpred module is necessary
if self.ap_use_voiced_embeddings or self.use_unvoiced_bias or 'vpred' in include_modules:
self.use_vpred_module = True
if self.use_vpred_module:
v_model_config['hparams']['n_speaker_dim'] = n_speaker_dim
self.v_pred_module = get_attribute_prediction_model(v_model_config)
# 4 embeddings, first two are scales, second two are biases
if self.ap_use_voiced_embeddings:
self.v_embeddings = torch.nn.Embedding(4, n_text_dim)
self.v_pred_threshold = 0.5
if 'apm' in include_modules:
f0_model_config['hparams']['n_speaker_dim'] = n_speaker_dim
energy_model_config['hparams']['n_speaker_dim'] = n_speaker_dim
if self.use_first_order_features:
f0_model_config['hparams']['n_in_dim'] = 2
energy_model_config['hparams']['n_in_dim'] = 2
if (
'spline_flow_params' in f0_model_config['hparams']
and f0_model_config['hparams']['spline_flow_params'] is not None
):
f0_model_config['hparams']['spline_flow_params']['n_in_channels'] = 2
if (
'spline_flow_params' in energy_model_config['hparams']
and energy_model_config['hparams']['spline_flow_params'] is not None
):
energy_model_config['hparams']['spline_flow_params']['n_in_channels'] = 2
else:
if (
'spline_flow_params' in f0_model_config['hparams']
and f0_model_config['hparams']['spline_flow_params'] is not None
):
f0_model_config['hparams']['spline_flow_params']['n_in_channels'] = f0_model_config['hparams'][
'n_in_dim'
]
if (
'spline_flow_params' in energy_model_config['hparams']
and energy_model_config['hparams']['spline_flow_params'] is not None
):
energy_model_config['hparams']['spline_flow_params']['n_in_channels'] = energy_model_config[
'hparams'
]['n_in_dim']
self.f0_pred_module = get_attribute_prediction_model(f0_model_config)
self.energy_pred_module = get_attribute_prediction_model(energy_model_config)
def encode_speaker(self, spk_ids):
spk_ids = spk_ids * 0 if self.dummy_speaker_embedding else spk_ids
spk_vecs = self.speaker_embedding(spk_ids)
return spk_vecs
def encode_text(self, text, in_lens):
# text_embeddings: b x len_text x n_text_dim
text_embeddings = self.embedding(text).transpose(1, 2)
# text_enc: b x n_text_dim x encoder_dim (512)
text_enc = self.encoder(text_embeddings, in_lens).transpose(1, 2)
return text_enc, text_embeddings
def preprocess_context(self, context, speaker_vecs, out_lens, f0, energy_avg, assume_padded=False):
if self.n_group_size > 1:
context = self.unfold(context, assume_padded=assume_padded)
if f0 is not None:
f0 = self.unfold(f0[:, None, :], assume_padded=assume_padded)
if energy_avg is not None:
energy_avg = self.unfold(energy_avg[:, None, :], assume_padded=assume_padded)
speaker_vecs = speaker_vecs[..., None].expand(-1, -1, context.shape[2])
context_w_spkvec = torch.cat((context, speaker_vecs), 1)
if self.use_context_lstm:
if self.context_lstm_w_f0_and_energy:
if f0 is not None:
context_w_spkvec = torch.cat((context_w_spkvec, f0), 1)
if energy_avg is not None:
context_w_spkvec = torch.cat((context_w_spkvec, energy_avg), 1)
unfolded_out_lens = out_lens // self.n_group_size
context_lstm_padded_output = self.context_lstm(context_w_spkvec.transpose(1, 2), unfolded_out_lens)
context_w_spkvec = context_lstm_padded_output.transpose(1, 2)
if not self.context_lstm_w_f0_and_energy:
if f0 is not None:
context_w_spkvec = torch.cat((context_w_spkvec, f0), 1)
if energy_avg is not None:
context_w_spkvec = torch.cat((context_w_spkvec, energy_avg), 1)
return context_w_spkvec
def fold(self, mel):
"""Inverse of the self.unfold() operation used for the
grouping or "squeeze" operation on input
Args:
mel: B x C x T tensor of temporal data
"""
b, d, t = mel.shape
mel = mel.reshape(b, -1, self.n_group_size, t).transpose(2, 3)
return mel.reshape(b, -1, t * self.n_group_size)
def unfold(self, mel, assume_padded=False):
"""operation used for the
grouping or "squeeze" operation on input
Args:
mel: B x C x T tensor of temporal data
"""
# for inference, mel is being padded beforehand
if assume_padded:
b, d, t = mel.shape
mel = mel.reshape(b, d, -1, self.n_group_size).transpose(2, 3)
return mel.reshape(b, d * self.n_group_size, -1)
else:
return self.unfold_mod(mel.unsqueeze(-1))
def binarize_attention(self, attn, in_lens, out_lens):
"""For training purposes only. Binarizes attention with MAS. These will
no longer receive a gradient
Args:
attn: B x 1 x max_mel_len x max_text_len
"""
b_size = attn.shape[0]
with torch.no_grad():
attn_cpu = attn.data.cpu().numpy()
attn_out = torch.zeros_like(attn)
for ind in range(b_size):
hard_attn = mas_width1(attn_cpu[ind, 0, : out_lens[ind], : in_lens[ind]])
attn_out[ind, 0, : out_lens[ind], : in_lens[ind]] = torch.tensor(hard_attn, device=attn.get_device())
return attn_out
def get_first_order_features(self, feats, dilation=1):
"""
feats: b x max_length
out_lens: b-dim
"""
# add an extra column
feats_extended_R = torch.cat((feats, torch.zeros_like(feats[:, 0:dilation])), dim=1)
feats_extended_L = torch.cat((torch.zeros_like(feats[:, 0:dilation]), feats), dim=1)
dfeats_R = feats_extended_R[:, dilation:] - feats
dfeats_L = feats - feats_extended_L[:, 0:-dilation]
return (dfeats_R + dfeats_L) * 0.5
def apply_voice_mask_to_text(self, text_enc, voiced_mask):
"""
text_enc: b x C x N
voiced_mask: b x N
"""
voiced_mask = voiced_mask.unsqueeze(1)
voiced_embedding_s = self.v_embeddings.weight[0:1, :, None]
unvoiced_embedding_s = self.v_embeddings.weight[1:2, :, None]
voiced_embedding_b = self.v_embeddings.weight[2:3, :, None]
unvoiced_embedding_b = self.v_embeddings.weight[3:4, :, None]
scale = torch.sigmoid(voiced_embedding_s * voiced_mask + unvoiced_embedding_s * (1 - voiced_mask))
bias = 0.1 * torch.tanh(voiced_embedding_b * voiced_mask + unvoiced_embedding_b * (1 - voiced_mask))
return text_enc * scale + bias
def forward(
self,
mel,
speaker_ids,
text,
in_lens,
out_lens,
binarize_attention=False,
attn_prior=None,
f0=None,
energy_avg=None,
voiced_mask=None,
):
speaker_vecs = self.encode_speaker(speaker_ids)
text_enc, text_embeddings = self.encode_text(text, in_lens)
log_s_list, log_det_W_list, z_mel = [], [], []
attn_hard = None
if 'atn' in self.include_modules or 'dec' in self.include_modules:
# make sure to do the alignments before folding
attn_mask = ~get_mask_from_lengths(in_lens)[..., None]
            # attn_mask should be 1 for unused t-steps in text_enc_w_spkvec tensor
attn_soft, attn_logprob = self.attention(
mel, text_embeddings, out_lens, attn_mask, key_lens=in_lens, attn_prior=attn_prior
)
if binarize_attention:
attn = self.binarize_attention(attn_soft, in_lens, out_lens)
attn_hard = attn
else:
attn = attn_soft
context = torch.bmm(text_enc, attn.squeeze(1).transpose(1, 2))
else:
            raise ValueError(
                "Something unexpected happened. Neither 'atn' nor 'dec' is included in 'self.include_modules'. Please double-check."
            )
f0_bias = 0
# unvoiced bias forward pass
voiced_mask_bool = voiced_mask.bool()
if self.use_unvoiced_bias:
f0_bias = self.unvoiced_bias_module(context.permute(0, 2, 1))
f0_bias = -f0_bias[..., 0]
f0_bias.masked_fill_(voiced_mask_bool, 0.0)
# mel decoder forward pass
if 'dec' in self.include_modules:
if self.n_group_size > 1:
# might truncate some frames at the end, but that's ok
# sometimes referred to as the "squeeze" operation
# invert this by calling self.fold(mel_or_z)
mel = self.unfold(mel)
# where context is folded
# mask f0 in case values are interpolated
context_w_spkvec = self.preprocess_context(
context, speaker_vecs, out_lens, f0 * voiced_mask + f0_bias, energy_avg
)
log_s_list, log_det_W_list, z_out = [], [], []
unfolded_seq_lens = out_lens // self.n_group_size
for i, flow_step in enumerate(self.flows):
if i in self.exit_steps:
z = mel[:, : self.n_early_size]
z_out.append(z)
mel = mel[:, self.n_early_size :]
mel, log_det_W, log_s = flow_step(mel, context_w_spkvec, seq_lens=unfolded_seq_lens)
log_s_list.append(log_s)
log_det_W_list.append(log_det_W)
z_out.append(mel)
z_mel = torch.cat(z_out, 1)
# duration predictor forward pass
duration_model_outputs = None
if 'dpm' in self.include_modules:
if attn_hard is None:
attn_hard = self.binarize_attention(attn_soft, in_lens, out_lens)
# convert hard attention to durations
attn_hard_reduced = attn_hard.sum(2)[:, 0, :]
duration_model_outputs = self.dur_pred_layer(
torch.detach(text_enc), torch.detach(speaker_vecs), torch.detach(attn_hard_reduced.float()), in_lens
)
# f0, energy, vpred predictors forward pass
f0_model_outputs = None
energy_model_outputs = None
vpred_model_outputs = None
if 'apm' in self.include_modules:
if attn_hard is None:
attn_hard = self.binarize_attention(attn_soft, in_lens, out_lens)
# convert hard attention to durations
if binarize_attention:
text_enc_time_expanded = context.clone()
else:
text_enc_time_expanded = torch.bmm(text_enc, attn_hard.squeeze(1).transpose(1, 2))
if self.use_vpred_module:
# unvoiced bias requires voiced mask prediction
vpred_model_outputs = self.v_pred_module(
torch.detach(text_enc_time_expanded),
torch.detach(speaker_vecs),
torch.detach(voiced_mask),
out_lens,
)
# affine transform context using voiced mask
if self.ap_use_voiced_embeddings:
text_enc_time_expanded = self.apply_voice_mask_to_text(text_enc_time_expanded, voiced_mask)
if self.ap_use_unvoiced_bias: # whether to use the unvoiced bias in the attribute predictor
f0_target = torch.detach(f0 * voiced_mask + f0_bias)
else:
f0_target = torch.detach(f0)
# fit to log f0 in f0 predictor
f0_target[voiced_mask_bool] = torch.log(f0_target[voiced_mask_bool])
f0_target = f0_target / 6 # scale to ~ [0, 1] in log space
energy_avg = energy_avg * 2 - 1 # scale to ~ [-1, 1]
if self.use_first_order_features:
df0 = self.get_first_order_features(f0_target)
denergy_avg = self.get_first_order_features(energy_avg)
f0_voiced = torch.cat((f0_target[:, None], df0[:, None]), dim=1)
energy_avg = torch.cat((energy_avg[:, None], denergy_avg[:, None]), dim=1)
f0_voiced = f0_voiced * 3 # scale to ~ 1 std
energy_avg = energy_avg * 3 # scale to ~ 1 std
else:
f0_voiced = f0_target * 2 # scale to ~ 1 std
energy_avg = energy_avg * 1.4 # scale to ~ 1 std
f0_model_outputs = self.f0_pred_module(
text_enc_time_expanded, torch.detach(speaker_vecs), f0_voiced, out_lens
)
energy_model_outputs = self.energy_pred_module(
text_enc_time_expanded, torch.detach(speaker_vecs), energy_avg, out_lens
)
outputs = {
'z_mel': z_mel,
'log_det_W_list': log_det_W_list,
'log_s_list': log_s_list,
'duration_model_outputs': duration_model_outputs,
'f0_model_outputs': f0_model_outputs,
'energy_model_outputs': energy_model_outputs,
'vpred_model_outputs': vpred_model_outputs,
'attn_soft': attn_soft,
'attn': attn,
'text_embeddings': text_embeddings,
'attn_logprob': attn_logprob,
}
return outputs
def infer(
self,
speaker_id,
text,
sigma=0.7,
speaker_id_text=None,
speaker_id_attributes=None,
pace=None,
token_duration_max=100,
in_lens=None,
dur=None,
f0=None,
f0_mean=0.0,
f0_std=0.0,
energy_avg=None,
voiced_mask=None,
pitch_shift=None,
):
batch_size = text.shape[0]
if in_lens is None:
in_lens = text.new_ones((batch_size,), dtype=torch.int64) * text.shape[1]
txt_len_pad_removed = text.shape[1]
else:
txt_len_pad_removed = torch.max(in_lens)
# borisf : this should not be needed as long as we have properly formed input batch
text = text[:, :txt_len_pad_removed]
spk_vec = self.encode_speaker(speaker_id)
if speaker_id_text is None:
speaker_id_text = speaker_id
if speaker_id_attributes is None:
speaker_id_attributes = speaker_id
spk_vec_text = self.encode_speaker(speaker_id_text)
spk_vec_attributes = self.encode_speaker(speaker_id_attributes)
txt_enc, _ = self.encode_text(text, in_lens)
if dur is None:
# get token durations
dur = self.dur_pred_layer.infer(txt_enc, spk_vec_text, lens=in_lens)
dur = pad_dur(dur, txt_enc)
dur = dur[:, 0]
dur = dur.clamp(0, token_duration_max)
if pace is None:
pace = txt_enc.new_ones((batch_size, txt_len_pad_removed))
else:
pace = pace[:, :txt_len_pad_removed]
txt_enc_time_expanded, out_lens = regulate_len(
dur, txt_enc.transpose(1, 2), pace, group_size=self.n_group_size, dur_lens=in_lens,
)
n_groups = torch.div(out_lens, self.n_group_size, rounding_mode='floor')
max_out_len = torch.max(out_lens)
txt_enc_time_expanded.transpose_(1, 2)
if voiced_mask is None:
if self.use_vpred_module:
# get logits
voiced_mask = self.v_pred_module.infer(txt_enc_time_expanded, spk_vec_attributes, lens=out_lens)
voiced_mask_bool = torch.sigmoid(voiced_mask[:, 0]) > self.v_pred_threshold
voiced_mask = voiced_mask_bool.to(dur.dtype)
else:
voiced_mask_bool = None
else:
voiced_mask_bool = voiced_mask.bool()
ap_txt_enc_time_expanded = txt_enc_time_expanded
# voice mask augmentation only used for attribute prediction
if self.ap_use_voiced_embeddings:
ap_txt_enc_time_expanded = self.apply_voice_mask_to_text(txt_enc_time_expanded, voiced_mask)
f0_bias = 0
# unvoiced bias forward pass
if self.use_unvoiced_bias:
f0_bias = self.unvoiced_bias_module(txt_enc_time_expanded.permute(0, 2, 1))
f0_bias = -f0_bias[..., 0]
if f0 is None:
f0 = self.infer_f0(ap_txt_enc_time_expanded, spk_vec_attributes, voiced_mask_bool, out_lens)[:, 0]
f0 = adjust_f0(f0, f0_mean, f0_std, voiced_mask_bool, musical_scaling=False)
if energy_avg is None:
energy_avg = self.infer_energy(ap_txt_enc_time_expanded, spk_vec, out_lens)[:, 0]
# replication pad, because ungrouping with different group sizes
# may lead to mismatched lengths
# FIXME: use replication pad
(energy_avg, f0) = pad_energy_avg_and_f0(energy_avg, f0, max_out_len)
if pitch_shift is not None:
pitch_shift_spec_len, _ = regulate_len(
dur,
pitch_shift[:, :txt_len_pad_removed].unsqueeze(-1),
pace,
group_size=self.n_group_size,
dur_lens=in_lens,
)
f0_bias = pitch_shift_spec_len.squeeze(-1) + f0_bias
context_w_spkvec = self.preprocess_context(
txt_enc_time_expanded, spk_vec, out_lens, (f0 + f0_bias) * voiced_mask, energy_avg, assume_padded=True,
)
residual = txt_enc.new_zeros(batch_size, 80 * self.n_group_size, torch.max(n_groups))
if sigma > 0.0:
residual = torch.normal(residual) * sigma
# map from z sample to data
num_steps_to_exit = len(self.exit_steps)
split = num_steps_to_exit * self.n_early_size
mel = residual[:, split:]
residual = residual[:, :split]
for i, flow_step in enumerate(reversed(self.flows)):
curr_step = self.n_flows - i - 1
mel = flow_step(mel, context_w_spkvec, inverse=True, seq_lens=n_groups)
if num_steps_to_exit > 0 and curr_step == self.exit_steps[num_steps_to_exit - 1]:
# concatenate the next chunk of z
num_steps_to_exit = num_steps_to_exit - 1
split = num_steps_to_exit * self.n_early_size
residual_to_add = residual[:, split:]
residual = residual[:, :split]
mel = torch.cat((residual_to_add, mel), 1)
if self.n_group_size > 1:
mel = self.fold(mel)
return {'mel': mel, 'out_lens': out_lens, 'dur': dur, 'f0': f0, 'energy_avg': energy_avg}
def infer_f0(self, txt_enc_time_expanded, spk_vec, voiced_mask=None, lens=None):
f0 = self.f0_pred_module.infer(txt_enc_time_expanded, spk_vec, lens)
# constants
if self.ap_pred_log_f0:
if self.use_first_order_features:
f0 = f0[:, 0:1, :] / 3
else:
f0 = f0 / 2
f0 = f0 * 6
else:
f0 = f0 / 6
f0 = f0 / 640
if voiced_mask is None:
voiced_mask = f0 > 0.0
else:
if len(voiced_mask.shape) == 2:
voiced_mask = voiced_mask[:, None]
# due to grouping, f0 might be 1 frame short
voiced_mask = voiced_mask[:, :, : f0.shape[-1]]
if self.ap_pred_log_f0:
# if variable is set, decoder sees linear f0
f0 = torch.exp(f0).to(dtype=f0.dtype)
f0.masked_fill_(~voiced_mask, 0.0)
return f0
def infer_energy(self, txt_enc_time_expanded, spk_vec, lens):
energy = self.energy_pred_module.infer(txt_enc_time_expanded, spk_vec, lens)
# magic constants
if self.use_first_order_features:
energy = energy / 3
else:
energy = energy / 1.4
energy = (energy + 1) / 2
return energy
def remove_norms(self):
"""Removes spectral and weightnorms from model. Call before inference
"""
dev = next(self.parameters()).device
for name, module in self.named_modules():
try:
nn.utils.remove_spectral_norm(module, name='weight_hh_l0')
print("Removed spectral norm from {}".format(name))
except:
pass
try:
nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')
print("Removed spectral norm from {}".format(name))
except:
pass
try:
nn.utils.remove_weight_norm(module)
print("Removed wnorm from {}".format(name))
except:
pass
self.to(device=dev)
@property
def input_types(self):
return {
"text": NeuralType(('B', 'T_text'), TokenIndex()),
"lens": NeuralType(('B'), LengthsType(), optional=True),
"speaker_id": NeuralType(('B'), Index()),
"speaker_id_text": NeuralType(('B'), Index()),
"speaker_id_attributes": NeuralType(('B'), Index()),
}
@property
def output_types(self):
return {
"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
"num_frames": NeuralType(('B'), TokenDurationType()),
"durs_predicted": NeuralType(('B', 'T_text'), TokenDurationType()),
}
| NeMo-main | nemo/collections/tts/modules/radtts.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.autograd import Variable
from torch.nn import functional as F
from nemo.collections.tts.modules.submodules import Attention, ConvNorm, LinearNorm, Prenet
from nemo.collections.tts.parts.utils.helpers import get_mask_from_lengths
from nemo.core.classes import NeuralModule, typecheck
from nemo.core.neural_types.elements import (
EmbeddedTextType,
LengthsType,
LogitsType,
MelSpectrogramType,
SequenceToSequenceAlignmentType,
)
from nemo.core.neural_types.neural_type import NeuralType
from nemo.utils import logging
class Encoder(NeuralModule):
def __init__(
self, encoder_n_convolutions: int, encoder_embedding_dim: int, encoder_kernel_size: int,
):
"""
        Tacotron 2 Encoder. A number of convolution layers that feed into an LSTM.
Args:
encoder_n_convolutions (int): Number of convolution layers.
encoder_embedding_dim (int): Final output embedding size. Also used to create the convolution and LSTM layers.
encoder_kernel_size (int): Kernel of the convolution front-end.
"""
super().__init__()
convolutions = []
for _ in range(encoder_n_convolutions):
conv_layer = torch.nn.Sequential(
ConvNorm(
encoder_embedding_dim,
encoder_embedding_dim,
kernel_size=encoder_kernel_size,
stride=1,
padding=int((encoder_kernel_size - 1) / 2),
dilation=1,
w_init_gain='relu',
),
torch.nn.BatchNorm1d(encoder_embedding_dim),
)
convolutions.append(conv_layer)
self.convolutions = torch.nn.ModuleList(convolutions)
self.lstm = torch.nn.LSTM(
encoder_embedding_dim, int(encoder_embedding_dim / 2), 1, batch_first=True, bidirectional=True,
)
@property
def input_types(self):
return {
"token_embedding": NeuralType(('B', 'D', 'T'), EmbeddedTextType()),
"token_len": NeuralType(('B'), LengthsType()),
}
@property
def output_types(self):
return {
"encoder_embedding": NeuralType(('B', 'T', 'D'), EmbeddedTextType()),
}
@typecheck()
def forward(self, *, token_embedding, token_len):
for conv in self.convolutions:
token_embedding = F.dropout(F.relu(conv(token_embedding)), 0.5, self.training)
token_embedding = token_embedding.transpose(1, 2)
        # PyTorch tensors are not reversible, hence the conversion
input_lengths = token_len.cpu().numpy()
token_embedding = torch.nn.utils.rnn.pack_padded_sequence(
token_embedding, input_lengths, batch_first=True, enforce_sorted=False
)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(token_embedding)
outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
return outputs
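# Editor's note: hedged sketch (not from the original source); sizes are illustrative stand-ins
# for the values a Tacotron 2 config would provide. Inputs are passed as keyword arguments
# because forward() is wrapped by @typecheck().
def _tacotron2_encoder_usage_sketch():
    encoder = Encoder(encoder_n_convolutions=3, encoder_embedding_dim=512, encoder_kernel_size=5)
    token_embedding = torch.randn(2, 512, 40)  # B x D x T embedded tokens
    token_len = torch.tensor([40, 32])
    return encoder(token_embedding=token_embedding, token_len=token_len)  # B x T x D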
class Decoder(NeuralModule):
def __init__(
self,
n_mel_channels: int,
n_frames_per_step: int,
encoder_embedding_dim: int,
attention_dim: int,
attention_location_n_filters: int,
attention_location_kernel_size: int,
attention_rnn_dim: int,
decoder_rnn_dim: int,
prenet_dim: int,
max_decoder_steps: int,
gate_threshold: float,
p_attention_dropout: float,
p_decoder_dropout: float,
early_stopping: bool,
prenet_p_dropout: float = 0.5,
):
"""
Tacotron 2 Decoder. Consists of a 2 layer LSTM, one of which interfaces with the attention mechanism while the
other is used as a regular LSTM. Includes the prenet and attention modules as well.
Args:
n_mel_channels (int): Number of mel channels to output
n_frames_per_step (int): Number of spectrogram frames to predict per decoder step.
encoder_embedding_dim (int): The size of the output from the encoder.
attention_dim (int): The output dimension of the attention layer.
            attention_location_n_filters (int): Channel size for the convolution used in the attention mechanism.
            attention_location_kernel_size (int): Kernel size for the convolution used in the attention mechanism.
attention_rnn_dim (int): The output dimension of the attention LSTM layer.
decoder_rnn_dim (int): The output dimension of the second LSTM layer.
prenet_dim (int): The output dimension of the prenet.
max_decoder_steps (int): For evaluation, the max number of steps to predict.
gate_threshold (float): At each step, tacotron 2 predicts a probability of stopping. Rather than sampling,
this module checks if predicted probability is above the gate_threshold. Only in evaluation.
p_attention_dropout (float): Dropout probability on the attention LSTM.
p_decoder_dropout (float): Dropout probability on the second LSTM.
early_stopping (bool): In evaluation mode, whether to stop when all batches hit the gate_threshold or to
continue until max_decoder_steps.
prenet_p_dropout (float): Dropout probability for prenet. Note, dropout is on even in eval() mode.
Defaults to 0.5.
"""
super().__init__()
self.n_mel_channels = n_mel_channels
self.n_frames_per_step = n_frames_per_step
self.encoder_embedding_dim = encoder_embedding_dim
self.attention_rnn_dim = attention_rnn_dim
self.decoder_rnn_dim = decoder_rnn_dim
self.prenet_dim = prenet_dim
self.max_decoder_steps = max_decoder_steps
self.gate_threshold = gate_threshold
self.p_attention_dropout = p_attention_dropout
self.p_decoder_dropout = p_decoder_dropout
self.early_stopping = early_stopping
self.prenet = Prenet(n_mel_channels * n_frames_per_step, [prenet_dim, prenet_dim], prenet_p_dropout)
self.attention_rnn = torch.nn.LSTMCell(prenet_dim + encoder_embedding_dim, attention_rnn_dim)
self.attention_layer = Attention(
attention_rnn_dim,
encoder_embedding_dim,
attention_dim,
attention_location_n_filters,
attention_location_kernel_size,
)
self.decoder_rnn = torch.nn.LSTMCell(attention_rnn_dim + encoder_embedding_dim, decoder_rnn_dim, 1)
self.linear_projection = LinearNorm(
decoder_rnn_dim + encoder_embedding_dim, n_mel_channels * n_frames_per_step
)
self.gate_layer = LinearNorm(decoder_rnn_dim + encoder_embedding_dim, 1, bias=True, w_init_gain='sigmoid')
@property
def input_types(self):
input_dict = {
"memory": NeuralType(('B', 'T', 'D'), EmbeddedTextType()),
"memory_lengths": NeuralType(('B'), LengthsType()),
}
if self.training:
input_dict["decoder_inputs"] = NeuralType(('B', 'D', 'T'), MelSpectrogramType())
return input_dict
@property
def output_types(self):
output_dict = {
"mel_outputs": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"gate_outputs": NeuralType(('B', 'T'), LogitsType()),
"alignments": NeuralType(('B', 'T', 'T'), SequenceToSequenceAlignmentType()),
}
if not self.training:
output_dict["mel_lengths"] = NeuralType(('B'), LengthsType())
return output_dict
@typecheck()
def forward(self, *args, **kwargs):
if self.training:
return self.train_forward(**kwargs)
return self.infer(**kwargs)
def get_go_frame(self, memory):
B = memory.size(0)
decoder_input = Variable(memory.data.new(B, self.n_mel_channels * self.n_frames_per_step).zero_())
return decoder_input
def initialize_decoder_states(self, memory, mask):
B = memory.size(0)
MAX_TIME = memory.size(1)
self.attention_hidden = Variable(memory.data.new(B, self.attention_rnn_dim).zero_())
self.attention_cell = Variable(memory.data.new(B, self.attention_rnn_dim).zero_())
self.decoder_hidden = Variable(memory.data.new(B, self.decoder_rnn_dim).zero_())
self.decoder_cell = Variable(memory.data.new(B, self.decoder_rnn_dim).zero_())
self.attention_weights = Variable(memory.data.new(B, MAX_TIME).zero_())
self.attention_weights_cum = Variable(memory.data.new(B, MAX_TIME).zero_())
self.attention_context = Variable(memory.data.new(B, self.encoder_embedding_dim).zero_())
self.memory = memory
self.processed_memory = self.attention_layer.memory_layer(memory)
self.mask = mask
def parse_decoder_inputs(self, decoder_inputs):
# (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(1, 2)
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0), int(decoder_inputs.size(1) / self.n_frames_per_step), -1,
)
# (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
# (T_out, B) -> (B, T_out)
alignments = torch.stack(alignments).transpose(0, 1)
# (T_out, B) -> (B, T_out)
# Add a -1 to prevent squeezing the batch dimension in case
# batch is 1
gate_outputs = torch.stack(gate_outputs).squeeze(-1).transpose(0, 1)
gate_outputs = gate_outputs.contiguous()
# (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
# decouple frames per step
mel_outputs = mel_outputs.view(mel_outputs.size(0), -1, self.n_mel_channels)
# (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
mel_outputs = mel_outputs.transpose(1, 2)
return mel_outputs, gate_outputs, alignments
def decode(self, decoder_input):
cell_input = torch.cat((decoder_input, self.attention_context), -1)
self.attention_hidden, self.attention_cell = self.attention_rnn(
cell_input, (self.attention_hidden, self.attention_cell)
)
self.attention_hidden = F.dropout(self.attention_hidden, self.p_attention_dropout, self.training)
attention_weights_cat = torch.cat(
(self.attention_weights.unsqueeze(1), self.attention_weights_cum.unsqueeze(1)), dim=1,
)
self.attention_context, self.attention_weights = self.attention_layer(
self.attention_hidden, self.memory, self.processed_memory, attention_weights_cat, self.mask,
)
self.attention_weights_cum += self.attention_weights
decoder_input = torch.cat((self.attention_hidden, self.attention_context), -1)
self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
decoder_input, (self.decoder_hidden, self.decoder_cell)
)
self.decoder_hidden = F.dropout(self.decoder_hidden, self.p_decoder_dropout, self.training)
decoder_hidden_attention_context = torch.cat((self.decoder_hidden, self.attention_context), dim=1)
decoder_output = self.linear_projection(decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return decoder_output, gate_prediction, self.attention_weights
def train_forward(self, *, memory, decoder_inputs, memory_lengths):
decoder_input = self.get_go_frame(memory).unsqueeze(0)
decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
decoder_inputs = self.prenet(decoder_inputs)
self.initialize_decoder_states(memory, mask=~get_mask_from_lengths(memory_lengths))
mel_outputs, gate_outputs, alignments = [], [], []
while len(mel_outputs) < decoder_inputs.size(0) - 1:
decoder_input = decoder_inputs[len(mel_outputs)]
mel_output, gate_output, attention_weights = self.decode(decoder_input)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output]
alignments += [attention_weights]
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
def infer(self, *, memory, memory_lengths):
decoder_input = self.get_go_frame(memory)
if memory.size(0) > 1:
mask = ~get_mask_from_lengths(memory_lengths)
else:
mask = None
self.initialize_decoder_states(memory, mask=mask)
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32)
if torch.cuda.is_available():
mel_lengths = mel_lengths.cuda()
not_finished = not_finished.cuda()
mel_outputs, gate_outputs, alignments = [], [], []
stepped = False
while True:
decoder_input = self.prenet(decoder_input, inference=True)
mel_output, gate_output, alignment = self.decode(decoder_input)
dec = torch.le(torch.sigmoid(gate_output.data), self.gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished * dec
mel_lengths += not_finished
if self.early_stopping and torch.sum(not_finished) == 0 and stepped:
break
stepped = True
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output]
alignments += [alignment]
if len(mel_outputs) == self.max_decoder_steps:
logging.warning("Reached max decoder steps %d.", self.max_decoder_steps)
break
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments, mel_lengths
class Postnet(NeuralModule):
def __init__(
self,
n_mel_channels: int,
postnet_embedding_dim: int,
postnet_kernel_size: int,
postnet_n_convolutions: int,
p_dropout: float = 0.5,
):
"""
        Tacotron 2 Postnet. A convolutional network with postnet_n_convolutions number of layers. Each layer has a
        kernel of postnet_kernel_size. Each layer apart from the last outputs postnet_embedding_dim channels; the last
        outputs n_mel_channels channels. After each layer is a dropout layer with dropout probability p_dropout. The
        last layer has no activation; all intermediate layers have tanh activation.
Args:
            n_mel_channels (int): Number of mel channels to output from the Postnet.
postnet_embedding_dim (int): Number of channels to output from the intermediate layers.
postnet_kernel_size (int): The kernel size for the convolution layers.
postnet_n_convolutions (int): The number of convolutions layers.
p_dropout (float): Dropout probability. Defaults to 0.5.
"""
super().__init__()
self.convolutions = torch.nn.ModuleList()
self.convolutions.append(
torch.nn.Sequential(
ConvNorm(
n_mel_channels,
postnet_embedding_dim,
kernel_size=postnet_kernel_size,
stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1,
w_init_gain='tanh',
),
torch.nn.BatchNorm1d(postnet_embedding_dim),
)
)
for _ in range(1, postnet_n_convolutions - 1):
self.convolutions.append(
torch.nn.Sequential(
ConvNorm(
postnet_embedding_dim,
postnet_embedding_dim,
kernel_size=postnet_kernel_size,
stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1,
w_init_gain='tanh',
),
torch.nn.BatchNorm1d(postnet_embedding_dim),
)
)
self.convolutions.append(
torch.nn.Sequential(
ConvNorm(
postnet_embedding_dim,
n_mel_channels,
kernel_size=postnet_kernel_size,
stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1,
w_init_gain='linear',
),
torch.nn.BatchNorm1d(n_mel_channels),
)
)
self.p_dropout = p_dropout
@property
def input_types(self):
return {
"mel_spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
}
@property
def output_types(self):
return {
"mel_spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
}
@typecheck()
def forward(self, *, mel_spec):
mel_spec_out = mel_spec
for i in range(len(self.convolutions) - 1):
mel_spec_out = F.dropout(torch.tanh(self.convolutions[i](mel_spec_out)), self.p_dropout, self.training)
mel_spec_out = F.dropout(self.convolutions[-1](mel_spec_out), self.p_dropout, self.training)
return mel_spec + mel_spec_out
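# Editor's note: hedged sketch; the postnet predicts a residual that is added to the coarse
# decoder spectrogram (sizes below are illustrative).
def _tacotron2_postnet_usage_sketch():
    postnet = Postnet(
        n_mel_channels=80, postnet_embedding_dim=512, postnet_kernel_size=5, postnet_n_convolutions=5
    )
    coarse_mel = torch.randn(2, 80, 200)  # B x n_mel_channels x T_spec decoder output
    return postnet(mel_spec=coarse_mel)  # refined spectrogram, same shape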
| NeMo-main | nemo/collections/tts/modules/tacotron2.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank=0):
super().__init__()
self.labels = labels
self.blank = blank
def forward(self, emission):
"""Given a sequence emission over labels, get the best path
Args:
emission (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
            Tuple[List[int], str]: The collapsed token indices (blanks and repeats removed) and the resulting transcript string
"""
indices = torch.argmax(emission, dim=-1) # [num_seq,]
indices = torch.unique_consecutive(indices, dim=-1)
indices = [i for i in indices if i != self.blank]
joined = "".join([self.labels[i] for i in indices])
return indices, joined
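# Editor's note: hedged sketch of greedy CTC decoding; the label set and logits are illustrative,
# and index 0 is assumed to be the CTC blank (matching the default above).
def _greedy_ctc_decoder_usage_sketch():
    labels = ["_", "h", "e", "l", "o"]  # "_" stands in for the blank symbol
    decoder = GreedyCTCDecoder(labels=labels, blank=0)
    emission = torch.randn(20, len(labels))  # [num_seq, num_label] logits
    indices, transcript = decoder(emission)
    return transcript  # repeated frames and blanks collapsed, e.g. "helo"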
| NeMo-main | nemo/collections/tts/modules/ssl_tts.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nemo.collections.tts.modules.adapters
from nemo.collections.tts.modules.tacotron2 import Decoder as Taco2Decoder
from nemo.collections.tts.modules.tacotron2 import Encoder as Taco2Encoder
from nemo.collections.tts.modules.tacotron2 import Postnet as Taco2Postnet
from nemo.collections.tts.modules.waveglow import WaveGlowModule
| NeMo-main | nemo/collections/tts/modules/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from nemo.collections.tts.modules.submodules import ConvNorm
from nemo.collections.tts.modules.transformer import PositionalEmbedding
def get_same_padding(kernel_size, stride, dilation) -> int:
if stride > 1 and dilation > 1:
raise ValueError("Only stride OR dilation may be greater than 1")
return (dilation * (kernel_size - 1)) // 2
class SameLensMaskedConv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, dilation, padding, groups):
super().__init__()
self.conv = nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding,
groups=groups,
)
def forward(self, x, mask):
x = self.conv(x.transpose(1, 2)).transpose(1, 2) * mask
return x, mask
class SameLensMaskedLinear(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.linear = nn.Linear(in_channels, out_channels)
def forward(self, x, mask):
x = self.linear(x) * mask
return x, mask
def create_channel_mix_layer(in_feat, out_feat):
return SameLensMaskedLinear(in_feat, out_feat)
def create_time_mix_layer(in_feat, out_feat, kernel_size=3, stride=1, conv_type="depth-wise", dilation=1):
padding = get_same_padding(kernel_size, stride=stride, dilation=dilation)
if conv_type == "original":
groups = 1
elif conv_type == "depth-wise":
groups = in_feat
else:
raise NotImplementedError
conv = SameLensMaskedConv1d(
in_feat, out_feat, kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding, groups=groups
)
return conv
class Mix(nn.Module):
def __init__(self, first_mix_layer, second_mix_layer, dropout):
super().__init__()
self.first_mix_layer = first_mix_layer
self.act = nn.GELU()
self.drop_1 = nn.Dropout(dropout)
self.second_mix_layer = second_mix_layer
self.drop_2 = nn.Dropout(dropout)
def forward(self, x, mask=None):
x, mask = self.first_mix_layer(x, mask)
x = self.act(x)
x = self.drop_1(x)
x, mask = self.second_mix_layer(x, mask)
x = self.drop_2(x)
return x, mask
class PreNormResidual(nn.Module):
def __init__(self, fn, feature_dim):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(feature_dim)
def forward(self, x, mask):
new_x, mask = self.fn(self.norm(x), mask)
x = x + new_x
return x, mask
class MixerTTSBlock(nn.Module):
def __init__(self, in_feat, expansion_factor, kernel_size, conv_type, dropout):
super().__init__()
self.time_mix = PreNormResidual(
fn=Mix(
first_mix_layer=create_time_mix_layer(
in_feat=in_feat, out_feat=in_feat, kernel_size=kernel_size, conv_type=conv_type,
),
second_mix_layer=create_time_mix_layer(
in_feat=in_feat, out_feat=in_feat, kernel_size=kernel_size, conv_type=conv_type,
),
dropout=dropout,
),
feature_dim=in_feat,
)
self.channel_mix = PreNormResidual(
fn=Mix(
first_mix_layer=create_channel_mix_layer(in_feat=in_feat, out_feat=expansion_factor * in_feat,),
second_mix_layer=create_channel_mix_layer(in_feat=expansion_factor * in_feat, out_feat=in_feat,),
dropout=dropout,
),
feature_dim=in_feat,
)
def forward(self, x, mask):
x, mask = self.time_mix(x, mask)
x, mask = self.channel_mix(x, mask)
return x, mask
class MixerTTSModule(nn.Module):
def __init__(
self,
num_tokens,
feature_dim,
num_layers,
kernel_sizes,
padding_idx=0,
conv_type="depth-wise",
expansion_factor=4,
dropout=0.0,
):
super().__init__()
if len(kernel_sizes) != num_layers:
            raise ValueError("The number of kernel sizes must match the number of mixer layers.")
self.d_model = feature_dim
self.to_embed = (
nn.Embedding(num_tokens, feature_dim, padding_idx=padding_idx) if num_tokens != -1 else nn.Identity()
)
self.mixer_blocks = nn.Sequential(
*[
MixerTTSBlock(feature_dim, expansion_factor, kernel_size, conv_type, dropout)
for kernel_size in kernel_sizes
],
)
self.norm = nn.LayerNorm(feature_dim)
def forward(self, x, mask, conditioning=0):
x = self.to_embed(x)
x = x + conditioning
x = x * mask
for block in self.mixer_blocks:
x, lens = block(x, mask)
x = self.norm(x)
return x, mask
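# Editor's note: hedged sketch with illustrative sizes; the B x T x 1 mask marks valid time steps
# so padded positions stay zeroed through every mixer block.
def _mixer_tts_module_usage_sketch():
    mixer = MixerTTSModule(num_tokens=100, feature_dim=192, num_layers=4, kernel_sizes=[11, 13, 15, 17])
    tokens = torch.randint(1, 100, (2, 40))  # B x T token ids, 0 reserved for padding
    tokens[1, 30:] = 0  # pad the second sequence
    mask = (tokens != 0).unsqueeze(-1).float()
    out, _ = mixer(tokens, mask)
    return out  # B x T x feature_dim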
class SelfAttentionModule(nn.Module):
"""Self-attention for lm tokens and text. """
def __init__(self, n_text_channels=384, n_lm_tokens_channels=128):
super().__init__()
self.text_pos_emb = PositionalEmbedding(n_text_channels)
self.lm_pos_emb = PositionalEmbedding(n_lm_tokens_channels)
self.query_proj = nn.Sequential(
ConvNorm(n_text_channels, n_text_channels, kernel_size=3, bias=True, w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_text_channels, n_text_channels, kernel_size=1, bias=True),
)
self.key_proj = nn.Sequential(
ConvNorm(n_lm_tokens_channels, n_text_channels, kernel_size=3, bias=True, w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_text_channels, n_text_channels, kernel_size=1, bias=True),
)
self.value_proj = nn.Sequential(
ConvNorm(n_lm_tokens_channels, n_text_channels, kernel_size=3, bias=True, w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_text_channels, n_text_channels, kernel_size=1, bias=True),
)
self.scale = math.sqrt(n_text_channels)
def forward(self, queries, keys, values, q_mask=None, kv_mask=None):
"""Forward pass of self-attention.
Args:
queries (torch.tensor): B x T1 x C1 tensor
keys (torch.tensor): B x T2 x C2 tensor
values (torch.tensor): B x T2 x C2 tensor
q_mask (torch.tensor): B x T1 tensor, bool mask for variable length entries
kv_mask (torch.tensor): B x T2 tensor, bool mask for variable length entries
Output:
attn_out (torch.tensor): B x T1 x C1 tensor
"""
pos_q_seq = torch.arange(queries.size(-2), device=queries.device).to(queries.dtype)
pos_kv_seq = torch.arange(keys.size(-2), device=queries.device).to(queries.dtype)
pos_q_emb = self.text_pos_emb(pos_q_seq)
pos_kv_emb = self.lm_pos_emb(pos_kv_seq)
if q_mask is not None:
pos_q_emb = pos_q_emb * q_mask.unsqueeze(2)
if kv_mask is not None:
pos_kv_emb = pos_kv_emb * kv_mask.unsqueeze(2)
queries = (queries + pos_q_emb).transpose(1, 2)
keys = (keys + pos_kv_emb).transpose(1, 2)
values = (values + pos_kv_emb).transpose(1, 2)
queries_enc = self.query_proj(queries).transpose(-2, -1) # B x T1 x C1
keys_enc = self.key_proj(keys) # B x C1 x T2
values_enc = self.value_proj(values).transpose(-2, -1) # B x T2 x C1
scores = torch.matmul(queries_enc, keys_enc) / self.scale # B x T1 x T2
if kv_mask is not None:
scores.masked_fill_(~kv_mask.unsqueeze(-2), -float("inf"))
return torch.matmul(torch.softmax(scores, dim=-1), values_enc) # B x T1 x C1
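# Editor's note: hedged sketch; text features attend over external LM-token features using the
# channel sizes the module defaults to above. Masks mark the valid (unpadded) positions.
def _self_attention_module_usage_sketch():
    attn = SelfAttentionModule(n_text_channels=384, n_lm_tokens_channels=128)
    text_feat = torch.randn(2, 40, 384)  # B x T1 x C1 text features (queries)
    lm_feat = torch.randn(2, 55, 128)  # B x T2 x C2 LM-token features (keys/values)
    q_mask = torch.ones(2, 40, dtype=torch.bool)
    kv_mask = torch.ones(2, 55, dtype=torch.bool)
    kv_mask[1, 50:] = False  # last five LM tokens of the second example are padding
    return attn(text_feat, lm_feat, lm_feat, q_mask=q_mask, kv_mask=kv_mask)  # B x T1 x C1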
| NeMo-main | nemo/collections/tts/modules/mixer_tts.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from typing import Optional, Tuple
import numpy as np
import torch
from torch import Tensor, nn
from torch.cuda import amp
from torch.cuda.amp import autocast as autocast
from torch.nn import functional as F
from nemo.collections.tts.modules.submodules import ConvNorm, LinearNorm, MaskedInstanceNorm1d
from nemo.collections.tts.parts.utils.helpers import get_mask_from_lengths, sort_tensor, unsort_tensor
from nemo.collections.tts.parts.utils.splines import (
piecewise_linear_inverse_transform,
piecewise_linear_transform,
unbounded_piecewise_quadratic_transform,
)
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b):
t_act = torch.tanh(input_a)
s_act = torch.sigmoid(input_b)
acts = t_act * s_act
return acts
class ExponentialClass(torch.nn.Module):
def __init__(self):
super(ExponentialClass, self).__init__()
def forward(self, x):
return torch.exp(x)
class DenseLayer(nn.Module):
def __init__(self, in_dim=1024, sizes=[1024, 1024]):
super(DenseLayer, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=True) for (in_size, out_size) in zip(in_sizes, sizes)]
)
def forward(self, x):
for linear in self.layers:
x = torch.tanh(linear(x))
return x
class BiLSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers=1, lstm_norm_fn="spectral", max_batch_size=64):
super().__init__()
self.bilstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)
        if lstm_norm_fn is not None:
            if 'spectral' in lstm_norm_fn:
                print("Applying spectral norm to LSTM")
                lstm_norm_fn_pntr = torch.nn.utils.spectral_norm
            elif 'weight' in lstm_norm_fn:
                print("Applying weight norm to LSTM")
                lstm_norm_fn_pntr = torch.nn.utils.weight_norm
            else:
                raise ValueError(
                    f"Unsupported lstm_norm_fn '{lstm_norm_fn}'; expected a name containing 'spectral' or 'weight'."
                )
            lstm_norm_fn_pntr(self.bilstm, 'weight_hh_l0')
            lstm_norm_fn_pntr(self.bilstm, 'weight_hh_l0_reverse')
self.real_hidden_size: int = self.bilstm.proj_size if self.bilstm.proj_size > 0 else self.bilstm.hidden_size
self.bilstm.flatten_parameters()
def lstm_sorted(self, context: Tensor, lens: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tensor:
seq = nn.utils.rnn.pack_padded_sequence(context, lens.long().cpu(), batch_first=True, enforce_sorted=True)
ret, _ = self.bilstm(seq, hx)
return nn.utils.rnn.pad_packed_sequence(ret, batch_first=True)[0]
def lstm(self, context: Tensor, lens: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tensor:
        # To be ONNX-exportable, we need to sort here rather than while packing
context, lens, unsort_ids = sort_tensor(context, lens)
ret = self.lstm_sorted(context, lens, hx=hx)
return unsort_tensor(ret, unsort_ids)
def lstm_nocast(self, context: Tensor, lens: Tensor) -> Tensor:
dtype = context.dtype
# autocast guard is only needed for Torchscript to run in Triton
# (https://github.com/pytorch/pytorch/issues/89241)
with torch.cuda.amp.autocast(enabled=False):
# Calculate sizes and prepare views to our zero buffer to pass as hx
max_batch_size = context.shape[0]
context = context.to(dtype=torch.float32)
common_shape = (self.bilstm.num_layers * 2, max_batch_size)
hx = (
context.new_zeros(*common_shape, self.real_hidden_size),
context.new_zeros(*common_shape, self.bilstm.hidden_size),
)
return self.lstm(context, lens, hx=hx).to(dtype=dtype)
def forward(self, context: Tensor, lens: Tensor) -> Tensor:
self.bilstm.flatten_parameters()
if torch.jit.is_tracing():
return self.lstm_nocast(context, lens)
return self.lstm(context, lens)
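# --- Editorial usage sketch (added by the editor; not part of the original NeMo file). ---
# A minimal, hypothetical example of the BiLSTM wrapper above. Lengths may arrive unsorted;
# sorting and unsorting are handled internally, so the output stays aligned with the input
# batch order. The helper name and sizes below are illustrative only.
def _example_bilstm_usage():
    bilstm = BiLSTM(input_size=16, hidden_size=8)
    context = torch.randn(3, 12, 16)    # B x T x input_size
    lens = torch.tensor([12, 5, 9])     # unsorted valid lengths
    out = bilstm(context, lens)
    assert out.shape == (3, 12, 16)     # B x T x (2 * hidden_size)
    return out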
class ConvLSTMLinear(nn.Module):
def __init__(
self,
in_dim=None,
out_dim=None,
n_layers=2,
n_channels=256,
kernel_size=3,
p_dropout=0.1,
use_partial_padding=False,
norm_fn=None,
):
super(ConvLSTMLinear, self).__init__()
self.bilstm = BiLSTM(n_channels, int(n_channels // 2), 1)
self.convolutions = nn.ModuleList()
if n_layers > 0:
self.dropout = nn.Dropout(p=p_dropout)
use_weight_norm = norm_fn is None
for i in range(n_layers):
conv_layer = ConvNorm(
in_dim if i == 0 else n_channels,
n_channels,
kernel_size=kernel_size,
stride=1,
padding=int((kernel_size - 1) / 2),
dilation=1,
w_init_gain='relu',
use_weight_norm=use_weight_norm,
use_partial_padding=use_partial_padding,
norm_fn=norm_fn,
)
if norm_fn is not None:
print("Applying {} norm to {}".format(norm_fn, conv_layer))
else:
print("Applying weight norm to {}".format(conv_layer))
self.convolutions.append(conv_layer)
self.dense = None
if out_dim is not None:
self.dense = nn.Linear(n_channels, out_dim)
def forward(self, context: Tensor, lens: Tensor) -> Tensor:
mask = get_mask_from_lengths(lens, context)
mask = mask.to(dtype=context.dtype).unsqueeze(1)
for conv in self.convolutions:
context = self.dropout(F.relu(conv(context, mask)))
# Apply Bidirectional LSTM
context = self.bilstm(context.transpose(1, 2), lens=lens)
if self.dense is not None:
context = self.dense(context).permute(0, 2, 1)
return context
def get_radtts_encoder(
encoder_n_convolutions=3, encoder_embedding_dim=512, encoder_kernel_size=5, norm_fn=MaskedInstanceNorm1d,
):
return ConvLSTMLinear(
in_dim=encoder_embedding_dim,
n_layers=encoder_n_convolutions,
n_channels=encoder_embedding_dim,
kernel_size=encoder_kernel_size,
p_dropout=0.5,
use_partial_padding=True,
norm_fn=norm_fn,
)
class Invertible1x1ConvLUS(torch.nn.Module):
def __init__(self, c):
super(Invertible1x1ConvLUS, self).__init__()
# Sample a random orthonormal matrix to initialize weights
W, _ = torch.linalg.qr(torch.FloatTensor(c, c).normal_())
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
p, lower, upper = torch.lu_unpack(*torch.lu(W))
self.register_buffer('p', p)
# diagonals of lower will always be 1s anyway
lower = torch.tril(lower, -1)
lower_diag = torch.diag(torch.eye(c, c))
self.register_buffer('lower_diag', lower_diag)
self.lower = nn.Parameter(lower)
self.upper_diag = nn.Parameter(torch.diag(upper))
self.upper = nn.Parameter(torch.triu(upper, 1))
@amp.autocast(False)
def forward(self, z, inverse=False):
U = torch.triu(self.upper, 1) + torch.diag(self.upper_diag)
L = torch.tril(self.lower, -1) + torch.diag(self.lower_diag)
W = torch.mm(self.p, torch.mm(L, U))
if inverse:
if not hasattr(self, 'W_inverse'):
# inverse computation
W_inverse = W.float().inverse().to(dtype=z.dtype)
self.W_inverse = W_inverse[..., None]
z = F.conv1d(z, self.W_inverse.to(dtype=z.dtype), bias=None, stride=1, padding=0)
return z
else:
W = W[..., None]
z = F.conv1d(z, W, bias=None, stride=1, padding=0)
log_det_W = torch.sum(torch.log(torch.abs(self.upper_diag)))
return z, log_det_W
class Invertible1x1Conv(torch.nn.Module):
"""
    The layer outputs both the convolution and the log determinant
    of its weight matrix. If inverse=True, it applies the convolution
    with the inverse of the weight matrix instead.
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0, bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
self.conv.weight.data = W
def forward(self, z, inverse=False):
# DO NOT apply n_of_groups, as it doesn't account for padded sequences
W = self.conv.weight.squeeze()
if inverse:
if not hasattr(self, 'W_inverse'):
# Inverse computation
W_inverse = W.float().inverse().to(dtype=z.dtype)
self.W_inverse = W_inverse[..., None]
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
# Forward computation
log_det_W = torch.logdet(W).clone()
z = self.conv(z)
return z, log_det_W
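# --- Editorial usage sketch (added by the editor; not part of the original NeMo file). ---
# A minimal, hypothetical round-trip check for the invertible 1x1 convolution above: the
# inverse pass should recover the forward input up to numerical error. The helper name
# and tensor sizes are illustrative only.
def _example_invertible_1x1_conv_roundtrip():
    conv = Invertible1x1Conv(c=8)
    z = torch.randn(2, 8, 40)            # B x C x T
    z_fwd, log_det_w = conv(z)
    z_rec = conv(z_fwd, inverse=True)
    assert torch.allclose(z_rec, z, atol=1e-4)
    return log_det_w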
class SimpleConvNet(torch.nn.Module):
def __init__(
self,
n_mel_channels,
n_context_dim,
final_out_channels,
n_layers=2,
kernel_size=5,
with_dilation=True,
max_channels=1024,
zero_init=True,
use_partial_padding=True,
):
super(SimpleConvNet, self).__init__()
self.layers = torch.nn.ModuleList()
self.n_layers = n_layers
in_channels = n_mel_channels + n_context_dim
out_channels = -1
self.use_partial_padding = use_partial_padding
for i in range(n_layers):
dilation = 2 ** i if with_dilation else 1
padding = int((kernel_size * dilation - dilation) / 2)
out_channels = min(max_channels, in_channels * 2)
self.layers.append(
ConvNorm(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
dilation=dilation,
bias=True,
w_init_gain='relu',
use_partial_padding=use_partial_padding,
)
)
in_channels = out_channels
self.last_layer = torch.nn.Conv1d(out_channels, final_out_channels, kernel_size=1)
if zero_init:
self.last_layer.weight.data *= 0
self.last_layer.bias.data *= 0
def forward(self, z_w_context, seq_lens: Optional[Tensor] = None):
        # seq_lens: tensor of sequence lengths
        # output should be B x n_mel_channels x z_w_context.shape[2]
mask = get_mask_from_lengths(seq_lens, z_w_context).unsqueeze(1).to(dtype=z_w_context.dtype)
for i in range(self.n_layers):
z_w_context = self.layers[i](z_w_context, mask)
z_w_context = torch.relu(z_w_context)
z_w_context = self.last_layer(z_w_context)
return z_w_context
class WN(torch.nn.Module):
"""
    Adapted from WN() module in WaveGlow with modifications to variable names
"""
def __init__(
self,
n_in_channels,
n_context_dim,
n_layers,
n_channels,
kernel_size=5,
affine_activation='softplus',
use_partial_padding=True,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
assert n_channels % 2 == 0
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels + n_context_dim, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
self.softplus = torch.nn.Softplus()
self.affine_activation = affine_activation
self.use_partial_padding = use_partial_padding
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = ConvNorm(
n_channels,
n_channels,
kernel_size=kernel_size,
dilation=dilation,
padding=padding,
use_partial_padding=use_partial_padding,
use_weight_norm=True,
)
self.in_layers.append(in_layer)
res_skip_layer = nn.Conv1d(n_channels, n_channels, 1)
res_skip_layer = nn.utils.weight_norm(res_skip_layer)
self.res_skip_layers.append(res_skip_layer)
    def forward(self, forward_input: Tuple[Tensor, Tensor], seq_lens: Optional[Tensor] = None):
z, context = forward_input
z = torch.cat((z, context), 1) # append context to z as well
z = self.start(z)
output = torch.zeros_like(z)
mask = None
if self.use_partial_padding:
mask = get_mask_from_lengths(seq_lens).unsqueeze(1).float()
non_linearity = torch.relu
if self.affine_activation == 'softplus':
non_linearity = self.softplus
for i in range(self.n_layers):
z = non_linearity(self.in_layers[i](z, mask))
res_skip_acts = non_linearity(self.res_skip_layers[i](z))
output = output + res_skip_acts
output = self.end(output) # [B, dim, seq_len]
return output
# Affine Coupling Layers
class SplineTransformationLayerAR(torch.nn.Module):
def __init__(
self,
n_in_channels,
n_context_dim,
n_layers,
affine_model='simple_conv',
kernel_size=1,
scaling_fn='exp',
affine_activation='softplus',
n_channels=1024,
n_bins=8,
left=-6,
right=6,
bottom=-6,
top=6,
use_quadratic=False,
):
super(SplineTransformationLayerAR, self).__init__()
self.n_in_channels = n_in_channels # input dimensions
self.left = left
self.right = right
self.bottom = bottom
self.top = top
self.n_bins = n_bins
self.spline_fn = piecewise_linear_transform
self.inv_spline_fn = piecewise_linear_inverse_transform
self.use_quadratic = use_quadratic
if self.use_quadratic:
self.spline_fn = unbounded_piecewise_quadratic_transform
self.inv_spline_fn = unbounded_piecewise_quadratic_transform
self.n_bins = 2 * self.n_bins + 1
final_out_channels = self.n_in_channels * self.n_bins
# autoregressive flow, kernel size 1 and no dilation
self.param_predictor = SimpleConvNet(
n_context_dim,
0,
final_out_channels,
n_layers,
with_dilation=False,
kernel_size=1,
zero_init=True,
use_partial_padding=False,
)
# output is unnormalized bin weights
def normalize(self, z, inverse):
# normalize to [0, 1]
if inverse:
z = (z - self.bottom) / (self.top - self.bottom)
else:
z = (z - self.left) / (self.right - self.left)
return z
def denormalize(self, z, inverse):
if inverse:
z = z * (self.right - self.left) + self.left
else:
z = z * (self.top - self.bottom) + self.bottom
return z
def forward(self, z, context, inverse=False):
b_s, c_s, t_s = z.size(0), z.size(1), z.size(2)
z = self.normalize(z, inverse)
if z.min() < 0.0 or z.max() > 1.0:
print('spline z scaled beyond [0, 1]', z.min(), z.max())
z_reshaped = z.permute(0, 2, 1).reshape(b_s * t_s, -1)
affine_params = self.param_predictor(context)
q_tilde = affine_params.permute(0, 2, 1).reshape(b_s * t_s, c_s, -1)
with amp.autocast(enabled=False):
if self.use_quadratic:
w = q_tilde[:, :, : self.n_bins // 2]
v = q_tilde[:, :, self.n_bins // 2 :]
z_tformed, log_s = self.spline_fn(z_reshaped.float(), w.float(), v.float(), inverse=inverse)
else:
z_tformed, log_s = self.spline_fn(z_reshaped.float(), q_tilde.float())
z = z_tformed.reshape(b_s, t_s, -1).permute(0, 2, 1)
z = self.denormalize(z, inverse)
if inverse:
return z
log_s = log_s.reshape(b_s, t_s, -1)
log_s = log_s.permute(0, 2, 1)
log_s = log_s + c_s * (np.log(self.top - self.bottom) - np.log(self.right - self.left))
return z, log_s
class SplineTransformationLayer(torch.nn.Module):
def __init__(
self,
n_mel_channels,
n_context_dim,
n_layers,
with_dilation=True,
kernel_size=5,
scaling_fn='exp',
affine_activation='softplus',
n_channels=1024,
n_bins=8,
left=-4,
right=4,
bottom=-4,
top=4,
use_quadratic=False,
):
super(SplineTransformationLayer, self).__init__()
self.n_mel_channels = n_mel_channels # input dimensions
self.half_mel_channels = int(n_mel_channels / 2) # half, because we split
self.left = left
self.right = right
self.bottom = bottom
self.top = top
self.n_bins = n_bins
self.spline_fn = piecewise_linear_transform
self.inv_spline_fn = piecewise_linear_inverse_transform
self.use_quadratic = use_quadratic
if self.use_quadratic:
self.spline_fn = unbounded_piecewise_quadratic_transform
self.inv_spline_fn = unbounded_piecewise_quadratic_transform
self.n_bins = 2 * self.n_bins + 1
final_out_channels = self.half_mel_channels * self.n_bins
self.param_predictor = SimpleConvNet(
self.half_mel_channels,
n_context_dim,
final_out_channels,
n_layers,
with_dilation=with_dilation,
kernel_size=kernel_size,
zero_init=False,
)
# output is unnormalized bin weights
def forward(self, z, context, inverse=False, seq_lens=None):
b_s, c_s, t_s = z.size(0), z.size(1), z.size(2)
# condition on z_0, transform z_1
n_half = self.half_mel_channels
z_0, z_1 = z[:, :n_half], z[:, n_half:]
# normalize to [0,1]
if inverse:
z_1 = (z_1 - self.bottom) / (self.top - self.bottom)
else:
z_1 = (z_1 - self.left) / (self.right - self.left)
z_w_context = torch.cat((z_0, context), 1)
affine_params = self.param_predictor(z_w_context, seq_lens)
z_1_reshaped = z_1.permute(0, 2, 1).reshape(b_s * t_s, -1)
q_tilde = affine_params.permute(0, 2, 1).reshape(b_s * t_s, n_half, self.n_bins)
with autocast(enabled=False):
if self.use_quadratic:
w = q_tilde[:, :, : self.n_bins // 2]
v = q_tilde[:, :, self.n_bins // 2 :]
z_1_tformed, log_s = self.spline_fn(z_1_reshaped.float(), w.float(), v.float(), inverse=inverse)
if not inverse:
log_s = torch.sum(log_s, 1)
else:
if inverse:
z_1_tformed, _dc = self.inv_spline_fn(z_1_reshaped.float(), q_tilde.float(), False)
else:
z_1_tformed, log_s = self.spline_fn(z_1_reshaped.float(), q_tilde.float())
z_1 = z_1_tformed.reshape(b_s, t_s, -1).permute(0, 2, 1)
# undo [0, 1] normalization
if inverse:
z_1 = z_1 * (self.right - self.left) + self.left
z = torch.cat((z_0, z_1), dim=1)
return z
else: # training
z_1 = z_1 * (self.top - self.bottom) + self.bottom
z = torch.cat((z_0, z_1), dim=1)
log_s = log_s.reshape(b_s, t_s).unsqueeze(1) + n_half * (
np.log(self.top - self.bottom) - np.log(self.right - self.left)
)
return z, log_s
class AffineTransformationLayer(torch.nn.Module):
def __init__(
self,
n_mel_channels,
n_context_dim,
n_layers,
affine_model='simple_conv',
with_dilation=True,
kernel_size=5,
scaling_fn='exp',
affine_activation='softplus',
n_channels=1024,
use_partial_padding=False,
):
super(AffineTransformationLayer, self).__init__()
if affine_model not in ("wavenet", "simple_conv"):
raise Exception("{} affine model not supported".format(affine_model))
if isinstance(scaling_fn, list):
if not all([x in ("translate", "exp", "tanh", "sigmoid") for x in scaling_fn]):
raise Exception("{} scaling fn not supported".format(scaling_fn))
else:
if scaling_fn not in ("translate", "exp", "tanh", "sigmoid"):
raise Exception("{} scaling fn not supported".format(scaling_fn))
self.affine_model = affine_model
self.scaling_fn = scaling_fn
if affine_model == 'wavenet':
self.affine_param_predictor = WN(
int(n_mel_channels / 2),
n_context_dim,
n_layers=n_layers,
n_channels=n_channels,
affine_activation=affine_activation,
use_partial_padding=use_partial_padding,
)
elif affine_model == 'simple_conv':
self.affine_param_predictor = SimpleConvNet(
int(n_mel_channels / 2),
n_context_dim,
n_mel_channels,
n_layers,
with_dilation=with_dilation,
kernel_size=kernel_size,
use_partial_padding=use_partial_padding,
)
else:
raise ValueError(
f"Affine model is not supported: {affine_model}. Please choose either 'wavenet' or"
f"'simple_conv' instead."
)
self.n_mel_channels = n_mel_channels
def get_scaling_and_logs(self, scale_unconstrained):
# (rvalle) re-write this
if self.scaling_fn == 'translate':
s = torch.exp(scale_unconstrained * 0)
log_s = scale_unconstrained * 0
elif self.scaling_fn == 'exp':
s = torch.exp(scale_unconstrained)
            log_s = scale_unconstrained  # log(exp(x)) = x
elif self.scaling_fn == 'tanh':
s = torch.tanh(scale_unconstrained) + 1 + 1e-6
log_s = torch.log(s)
elif self.scaling_fn == 'sigmoid':
s = torch.sigmoid(scale_unconstrained + 10) + 1e-6
log_s = torch.log(s)
elif isinstance(self.scaling_fn, list):
s_list, log_s_list = [], []
for i in range(scale_unconstrained.shape[1]):
scaling_i = self.scaling_fn[i]
if scaling_i == 'translate':
                    s_i = torch.exp(scale_unconstrained[:, i] * 0)
log_s_i = scale_unconstrained[:, i] * 0
elif scaling_i == 'exp':
s_i = torch.exp(scale_unconstrained[:, i])
log_s_i = scale_unconstrained[:, i]
elif scaling_i == 'tanh':
s_i = torch.tanh(scale_unconstrained[:, i]) + 1 + 1e-6
log_s_i = torch.log(s_i)
elif scaling_i == 'sigmoid':
s_i = torch.sigmoid(scale_unconstrained[:, i]) + 1e-6
log_s_i = torch.log(s_i)
s_list.append(s_i[:, None])
log_s_list.append(log_s_i[:, None])
s = torch.cat(s_list, dim=1)
log_s = torch.cat(log_s_list, dim=1)
else:
raise ValueError(
f"Scaling function is not supported: {self.scaling_fn}. Please choose either 'translate', "
f"'exp', 'tanh', or 'sigmoid' instead."
)
return s, log_s
def forward(self, z, context, inverse=False, seq_lens=None):
n_half = int(self.n_mel_channels / 2)
z_0, z_1 = z[:, :n_half], z[:, n_half:]
if self.affine_model == 'wavenet':
affine_params = self.affine_param_predictor((z_0, context), seq_lens=seq_lens)
elif self.affine_model == 'simple_conv':
z_w_context = torch.cat((z_0, context), 1)
affine_params = self.affine_param_predictor(z_w_context, seq_lens=seq_lens)
else:
raise ValueError(
f"Affine model is not supported: {self.affine_model}. Please choose either 'wavenet' or "
f"'simple_conv' instead."
)
scale_unconstrained = affine_params[:, :n_half, :]
b = affine_params[:, n_half:, :]
s, log_s = self.get_scaling_and_logs(scale_unconstrained)
if inverse:
z_1 = (z_1 - b) / s
z = torch.cat((z_0, z_1), dim=1)
return z
else:
z_1 = s * z_1 + b
z = torch.cat((z_0, z_1), dim=1)
return z, log_s
class ConvAttention(torch.nn.Module):
def __init__(self, n_mel_channels=80, n_speaker_dim=128, n_text_channels=512, n_att_channels=80, temperature=1.0):
super(ConvAttention, self).__init__()
self.temperature = temperature
self.softmax = torch.nn.Softmax(dim=3)
self.log_softmax = torch.nn.LogSoftmax(dim=3)
self.query_proj = Invertible1x1ConvLUS(n_mel_channels)
self.key_proj = nn.Sequential(
ConvNorm(n_text_channels, n_text_channels * 2, kernel_size=3, bias=True, w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_text_channels * 2, n_att_channels, kernel_size=1, bias=True),
)
        # Note: this Sequential re-assigns query_proj and overrides the Invertible1x1ConvLUS defined above.
        self.query_proj = nn.Sequential(
ConvNorm(n_mel_channels, n_mel_channels * 2, kernel_size=3, bias=True, w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_mel_channels * 2, n_mel_channels, kernel_size=1, bias=True),
torch.nn.ReLU(),
ConvNorm(n_mel_channels, n_att_channels, kernel_size=1, bias=True),
)
def forward(self, queries, keys, query_lens, mask=None, key_lens=None, attn_prior=None):
"""Attention mechanism for radtts. Unlike in Flowtron, we have no
restrictions such as causality etc, since we only need this during
training.
Args:
queries (torch.tensor): B x C x T1 tensor (likely mel data)
keys (torch.tensor): B x C2 x T2 tensor (text data)
query_lens: lengths for sorting the queries in descending order
mask (torch.tensor): uint8 binary mask for variable length entries
(should be in the T2 domain)
Output:
attn (torch.tensor): B x 1 x T1 x T2 attention mask.
Final dim T2 should sum to 1
"""
temp = 0.0005
keys_enc = self.key_proj(keys) # B x n_attn_dims x T2
# Beware can only do this since query_dim = attn_dim = n_mel_channels
queries_enc = self.query_proj(queries)
        # Gaussian Isotropic Attention
# B x n_attn_dims x T1 x T2
attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None]) ** 2
# compute log-likelihood from gaussian
eps = 1e-8
attn = -temp * attn.sum(1, keepdim=True)
if attn_prior is not None:
attn = self.log_softmax(attn) + torch.log(attn_prior[:, None] + eps)
attn_logprob = attn.clone()
if mask is not None:
attn.data.masked_fill_(mask.permute(0, 2, 1).unsqueeze(2), -float("inf"))
attn = self.softmax(attn) # softmax along T2
return attn, attn_logprob
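# --- Editorial usage sketch (added by the editor; not part of the original NeMo file). ---
# A minimal, hypothetical example of the alignment attention above. With default settings,
# mel queries are B x n_mel_channels x T1 and text keys are B x n_text_channels x T2; the
# returned attention is B x 1 x T1 x T2 and each row sums to one over T2. The helper name
# and sizes are illustrative only.
def _example_conv_attention_usage():
    attention = ConvAttention(n_mel_channels=80, n_text_channels=512, n_att_channels=80)
    mel = torch.randn(2, 80, 120)        # B x n_mel_channels x T1
    text = torch.randn(2, 512, 40)       # B x n_text_channels x T2
    mel_lens = torch.tensor([120, 100])
    attn, attn_logprob = attention(mel, text, mel_lens)
    assert attn.shape == (2, 1, 120, 40)
    assert torch.allclose(attn.sum(dim=3), torch.ones_like(attn.sum(dim=3)))
    return attn, attn_logprob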
class GaussianDropout(torch.nn.Module):
"""
Gaussian dropout using multiplicative gaussian noise.
https://keras.io/api/layers/regularization_layers/gaussian_dropout/
Can be an effective alternative bottleneck to VAE or VQ:
https://www.deepmind.com/publications/gaussian-dropout-as-an-information-bottleneck-layer
Unlike some other implementations, this takes the standard deviation of the noise as input
instead of the 'rate' typically defined as: stdev = sqrt(rate / (1 - rate))
"""
def __init__(self, stdev=1.0):
super(GaussianDropout, self).__init__()
self.stdev = stdev
def forward(self, inputs):
if not self.training:
return inputs
noise = torch.normal(mean=1.0, std=self.stdev, size=inputs.shape, device=inputs.device)
out = noise * inputs
return out
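# --- Editorial usage sketch (added by the editor; not part of the original NeMo file). ---
# A minimal, hypothetical example showing the rate-to-stdev conversion mentioned in the
# docstring above: a Keras-style rate of 0.1 corresponds to stdev = sqrt(0.1 / 0.9) ~= 0.333.
# The layer injects multiplicative noise only in training mode and is the identity in eval
# mode. The helper name is illustrative only.
def _example_gaussian_dropout_usage(rate: float = 0.1):
    stdev = (rate / (1.0 - rate)) ** 0.5
    dropout = GaussianDropout(stdev=stdev)
    x = torch.randn(4, 8)
    noisy = dropout(x)       # training mode (default): multiplicative Gaussian noise
    dropout.eval()
    clean = dropout(x)       # eval mode: identity
    assert torch.equal(clean, x)
    return noisy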
| NeMo-main | nemo/collections/tts/modules/common.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Iterable, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from torch import Tensor
from nemo.collections.tts.losses.audio_codec_loss import MaskedMSELoss
from nemo.collections.tts.modules.audio_codec_modules import (
Conv1dNorm,
Conv2dNorm,
ConvTranspose1dNorm,
get_down_sample_padding,
)
from nemo.collections.tts.parts.utils.distributed import broadcast_tensors
from nemo.collections.tts.parts.utils.helpers import mask_sequence_tensor
from nemo.core.classes.common import typecheck
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types.elements import AudioSignal, EncodedRepresentation, Index, LengthsType, LossType, VoidType
from nemo.core.neural_types.neural_type import NeuralType
from nemo.utils.decorators import experimental
class SEANetResnetBlock(NeuralModule):
def __init__(self, channels: int):
super().__init__()
self.activation = nn.ELU()
hidden_channels = channels // 2
self.pre_conv = Conv1dNorm(in_channels=channels, out_channels=channels, kernel_size=1)
self.res_conv1 = Conv1dNorm(in_channels=channels, out_channels=hidden_channels, kernel_size=3)
self.res_conv2 = Conv1dNorm(in_channels=hidden_channels, out_channels=channels, kernel_size=1)
@property
def input_types(self):
return {
"inputs": NeuralType(('B', 'C', 'T_input'), VoidType()),
"lengths": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {
"out": [NeuralType(('B', 'C', 'T_out'), VoidType())],
}
def remove_weight_norm(self):
self.pre_conv.remove_weight_norm()
self.res_conv1.remove_weight_norm()
self.res_conv2.remove_weight_norm()
def forward(self, inputs, lengths):
res = self.activation(inputs)
res = self.res_conv1(res, lengths)
res = self.activation(res)
res = self.res_conv2(res, lengths)
out = self.pre_conv(inputs, lengths) + res
out = mask_sequence_tensor(out, lengths)
return out
class SEANetRNN(NeuralModule):
def __init__(self, dim: int, num_layers: int, rnn_type: str = "lstm", use_skip: bool = False):
super().__init__()
self.use_skip = use_skip
if rnn_type == "lstm":
self.rnn = torch.nn.LSTM(input_size=dim, hidden_size=dim, num_layers=num_layers)
elif rnn_type == "gru":
self.rnn = torch.nn.GRU(input_size=dim, hidden_size=dim, num_layers=num_layers)
else:
raise ValueError(f"Unknown RNN type {rnn_type}")
@property
def input_types(self):
return {
"inputs": NeuralType(('B', 'C', 'T'), VoidType()),
"lengths": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {
"out": [NeuralType(('B', 'C', 'T'), VoidType())],
}
def forward(self, inputs, lengths):
inputs = rearrange(inputs, "B C T -> B T C")
packed_inputs = nn.utils.rnn.pack_padded_sequence(
inputs, lengths=lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed_out, _ = self.rnn(packed_inputs)
out, _ = nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=True)
if self.use_skip:
out = out + inputs
out = rearrange(out, "B T C -> B C T")
return out
class SEANetEncoder(NeuralModule):
def __init__(
self,
down_sample_rates: Iterable[int] = (2, 4, 5, 8),
base_channels: int = 32,
in_kernel_size: int = 7,
out_kernel_size: int = 7,
encoded_dim: int = 128,
rnn_layers: int = 2,
rnn_type: str = "lstm",
rnn_skip: bool = True,
):
assert in_kernel_size > 0
assert out_kernel_size > 0
super().__init__()
self.down_sample_rates = down_sample_rates
self.activation = nn.ELU()
self.pre_conv = Conv1dNorm(in_channels=1, out_channels=base_channels, kernel_size=in_kernel_size)
in_channels = base_channels
self.res_blocks = nn.ModuleList([])
self.down_sample_conv_layers = nn.ModuleList([])
for i, down_sample_rate in enumerate(self.down_sample_rates):
res_block = SEANetResnetBlock(channels=in_channels)
self.res_blocks.append(res_block)
out_channels = 2 * in_channels
kernel_size = 2 * down_sample_rate
down_sample_conv = Conv1dNorm(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=down_sample_rate,
padding=get_down_sample_padding(kernel_size, down_sample_rate),
)
in_channels = out_channels
self.down_sample_conv_layers.append(down_sample_conv)
self.rnn = SEANetRNN(dim=in_channels, num_layers=rnn_layers, rnn_type=rnn_type, use_skip=rnn_skip)
self.post_conv = Conv1dNorm(in_channels=in_channels, out_channels=encoded_dim, kernel_size=out_kernel_size)
@property
def input_types(self):
return {
"audio": NeuralType(('B', 'C', 'T_audio'), AudioSignal()),
"audio_len": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {
"encoded": [NeuralType(('B', 'D', 'T_encoded'), EncodedRepresentation())],
"encoded_len": [NeuralType(tuple('B'), LengthsType())],
}
def remove_weight_norm(self):
self.pre_conv.remove_weight_norm()
for res_block in self.res_blocks:
res_block.remove_weight_norm()
for down_sample_conv in self.down_sample_conv_layers:
down_sample_conv.remove_weight_norm()
def forward(self, audio, audio_len):
encoded_len = audio_len
audio = rearrange(audio, "B T -> B 1 T")
# [B, C, T_audio]
out = self.pre_conv(audio, encoded_len)
for res_block, down_sample_conv, down_sample_rate in zip(
self.res_blocks, self.down_sample_conv_layers, self.down_sample_rates
):
# [B, C, T]
out = res_block(out, encoded_len)
out = self.activation(out)
encoded_len = encoded_len // down_sample_rate
# [B, 2 * C, T / down_sample_rate]
out = down_sample_conv(out, encoded_len)
out = self.rnn(out, encoded_len)
out = self.activation(out)
# [B, encoded_dim, T_encoded]
encoded = self.post_conv(out, encoded_len)
return encoded, encoded_len
class SEANetDecoder(NeuralModule):
def __init__(
self,
up_sample_rates: Iterable[int] = (8, 5, 4, 2),
base_channels: int = 512,
in_kernel_size: int = 7,
out_kernel_size: int = 3,
encoded_dim: int = 128,
rnn_layers: int = 2,
rnn_type: str = "lstm",
rnn_skip: bool = True,
):
assert in_kernel_size > 0
assert out_kernel_size > 0
super().__init__()
self.up_sample_rates = up_sample_rates
self.activation = nn.ELU()
self.pre_conv = Conv1dNorm(in_channels=encoded_dim, out_channels=base_channels, kernel_size=in_kernel_size)
self.rnn = SEANetRNN(dim=base_channels, num_layers=rnn_layers, rnn_type=rnn_type, use_skip=rnn_skip)
in_channels = base_channels
self.res_blocks = nn.ModuleList([])
self.up_sample_conv_layers = nn.ModuleList([])
for i, up_sample_rate in enumerate(self.up_sample_rates):
out_channels = in_channels // 2
kernel_size = 2 * up_sample_rate
up_sample_conv = ConvTranspose1dNorm(
in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=up_sample_rate
)
in_channels = out_channels
self.up_sample_conv_layers.append(up_sample_conv)
res_block = SEANetResnetBlock(channels=in_channels)
self.res_blocks.append(res_block)
self.post_conv = Conv1dNorm(in_channels=in_channels, out_channels=1, kernel_size=out_kernel_size)
self.out_activation = nn.Tanh()
@property
def input_types(self):
return {
"inputs": [NeuralType(('B', 'D', 'T_encoded'), EncodedRepresentation())],
"input_len": [NeuralType(tuple('B'), LengthsType())],
}
@property
def output_types(self):
return {
"audio": NeuralType(('B', 'C', 'T_audio'), AudioSignal()),
"audio_len": NeuralType(tuple('B'), LengthsType()),
}
def remove_weight_norm(self):
self.pre_conv.remove_weight_norm()
for up_sample_conv in self.up_sample_conv_layers:
up_sample_conv.remove_weight_norm()
for res_block in self.res_blocks:
res_block.remove_weight_norm()
def forward(self, inputs, input_len):
audio_len = input_len
# [B, C, T_encoded]
out = self.pre_conv(inputs, audio_len)
out = self.rnn(out, audio_len)
for res_block, up_sample_conv, up_sample_rate in zip(
self.res_blocks, self.up_sample_conv_layers, self.up_sample_rates
):
audio_len = audio_len * up_sample_rate
out = self.activation(out)
# [B, C / 2, T * up_sample_rate]
out = up_sample_conv(out, audio_len)
out = res_block(out, audio_len)
out = self.activation(out)
# [B, 1, T_audio]
out = self.post_conv(out, audio_len)
audio = self.out_activation(out)
audio = rearrange(audio, "B 1 T -> B T")
return audio, audio_len
class DiscriminatorSTFT(NeuralModule):
def __init__(self, resolution, lrelu_slope=0.1):
super().__init__()
self.n_fft, self.hop_length, self.win_length = resolution
self.register_buffer("window", torch.hann_window(self.win_length, periodic=False))
self.activation = nn.LeakyReLU(lrelu_slope)
self.conv_layers = nn.ModuleList(
[
Conv2dNorm(2, 32, kernel_size=(3, 9)),
Conv2dNorm(32, 32, kernel_size=(3, 9), dilation=(1, 1), stride=(1, 2)),
Conv2dNorm(32, 32, kernel_size=(3, 9), dilation=(2, 1), stride=(1, 2)),
Conv2dNorm(32, 32, kernel_size=(3, 9), dilation=(4, 1), stride=(1, 2)),
Conv2dNorm(32, 32, kernel_size=(3, 3)),
]
)
self.conv_post = Conv2dNorm(32, 1, kernel_size=(3, 3))
def stft(self, audio):
# [B, fft, T_spec]
out = torch.stft(
audio,
n_fft=self.n_fft,
hop_length=self.hop_length,
win_length=self.win_length,
window=self.window,
normalized=True,
center=True,
return_complex=True,
)
out = rearrange(out, "B fft T -> B 1 T fft")
# [batch, 2, T_spec, fft]
out = torch.cat([out.real, out.imag], dim=1)
return out
@property
def input_types(self):
return {
"audio": NeuralType(('B', 'T_audio'), AudioSignal()),
}
@property
def output_types(self):
return {
"scores": NeuralType(('B', 'C', 'T_spec'), VoidType()),
"fmap": [NeuralType(('B', 'D', 'T_spec', 'C'), VoidType())],
}
def forward(self, audio):
fmap = []
# [batch, 2, T_spec, fft]
out = self.stft(audio)
for conv in self.conv_layers:
# [batch, filters, T_spec, fft // 2**i]
out = conv(out)
out = self.activation(out)
fmap.append(out)
# [batch, 1, T_spec, fft // 8]
scores = self.conv_post(out)
fmap.append(scores)
scores = rearrange(scores, "B 1 T C -> B C T")
return scores, fmap
class MultiResolutionDiscriminatorSTFT(NeuralModule):
def __init__(self, resolutions):
super().__init__()
self.discriminators = nn.ModuleList([DiscriminatorSTFT(res) for res in resolutions])
@property
def input_types(self):
return {
"audio": NeuralType(('B', 'T_audio'), AudioSignal()),
"audio_gen": NeuralType(('B', 'T_audio'), AudioSignal()),
}
@property
def output_types(self):
return {
"scores_real": [NeuralType(('B', 'C', 'T_spec'), VoidType())],
"scores_gen": [NeuralType(('B', 'C', 'T_spec'), VoidType())],
"fmaps_real": [[NeuralType(('B', 'D', 'T_spec', 'C'), VoidType())]],
"fmaps_gen": [[NeuralType(('B', 'D', 'T_spec', 'C'), VoidType())]],
}
def forward(self, audio_real, audio_gen):
scores_real = []
scores_gen = []
fmaps_real = []
fmaps_gen = []
for disc in self.discriminators:
score_real, fmap_real = disc(audio=audio_real)
scores_real.append(score_real)
fmaps_real.append(fmap_real)
score_gen, fmap_gen = disc(audio=audio_gen)
scores_gen.append(score_gen)
fmaps_gen.append(fmap_gen)
return scores_real, scores_gen, fmaps_real, fmaps_gen
def _ema_inplace(moving_avg: Tensor, new: Tensor, decay: float) -> None:
moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))
def _laplace_smoothing(inputs: Tensor, n_categories: int, epsilon: float = 1e-5) -> Tensor:
input_sum = inputs.sum()
smoothed = (inputs + epsilon) / (input_sum + n_categories * epsilon)
return input_sum * smoothed
def _compute_distances(input1: Tensor, input2: Tensor) -> Tensor:
"""
    Compute pairwise squared L2 distances between two input tensors.
Args:
input1: [B, D] first tensor.
input2: [N, D] second tensor.
Returns:
        [B, N] tensor of pairwise squared distances.
"""
input2 = rearrange(input2, "N D -> D N")
distances = input1.pow(2).sum(1, keepdim=True) - (2 * input1 @ input2) + input2.pow(2).sum(0, keepdim=True)
return distances
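# --- Editorial sketch (added by the editor; not part of the original NeMo file). ---
# A minimal, hypothetical numerical check for the helper above: the expanded form
# ||a||^2 - 2ab + ||b||^2 should match the squared pairwise L2 distance. The helper
# name and sizes are illustrative only.
def _example_compute_distances_check():
    a = torch.randn(6, 4)                     # [B, D]
    b = torch.randn(9, 4)                     # [N, D]
    dists = _compute_distances(a, b)          # [B, N]
    reference = torch.cdist(a, b) ** 2
    assert dists.shape == (6, 9)
    assert torch.allclose(dists, reference, atol=1e-5)
    return dists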
def _sample_vectors(samples: Tensor, num_sample: int) -> Tensor:
"""
Randomly sample from the input batch.
Args:
samples: [B, D] tensor with features to sample.
num_sample: Number of samples to draw.
If the value is less than or equal to B, then the samples will be unique.
If the value is greater than B, then samples will be drawn with replacement.
Returns:
Tensor with num_sample values randomly sampled from the input batch.
"""
device = samples.device
total_samples = samples.shape[0]
if total_samples >= num_sample:
indices = torch.randperm(total_samples, device=device)[:num_sample]
else:
indices = torch.randint(low=0, high=total_samples, size=(num_sample,), device=device)
return samples[indices]
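# --- Editorial sketch (added by the editor; not part of the original NeMo file). ---
# A minimal, hypothetical example of the sampling helper above: requesting at most B vectors
# draws without replacement, while requesting more than B draws with replacement. The helper
# name and sizes are illustrative only.
def _example_sample_vectors_usage():
    samples = torch.randn(10, 4)              # [B, D]
    unique_draw = _sample_vectors(samples, num_sample=3)
    oversampled_draw = _sample_vectors(samples, num_sample=25)
    assert unique_draw.shape == (3, 4)
    assert oversampled_draw.shape == (25, 4)
    return unique_draw, oversampled_draw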
def _k_means(samples: Tensor, num_clusters: int, num_iters: int = 10) -> Tuple[Tensor, Tensor]:
"""
K-means clustering algorithm.
Args:
samples: [B, D] tensor with features to cluster
num_clusters: K, the number of clusters.
num_iters: Number of iterations of K-means to run.
Returns:
[K, D] cluster means and [K] bins counting how many input samples belong to each cluster
"""
assert num_iters > 0
input_dim = samples.shape[1]
# [K, D]
means = _sample_vectors(samples=samples, num_sample=num_clusters)
for _ in range(num_iters):
# [B, K]
dists = _compute_distances(samples, means)
        # [B]
buckets = dists.min(dim=1).indices
buckets_repeated = repeat(buckets, "B -> B D", D=input_dim)
# [K]
bin_counts = torch.bincount(buckets, minlength=num_clusters)
bin_counts_expanded = rearrange(bin_counts, "K -> K ()")
# [K, D]
new_means = buckets.new_zeros(num_clusters, input_dim, dtype=samples.dtype)
new_means.scatter_add_(dim=0, index=buckets_repeated, src=samples)
new_means = new_means / torch.clamp(bin_counts_expanded, min=1)
means = torch.where(bin_counts_expanded == 0, means, new_means)
return means, bin_counts
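# --- Editorial sketch (added by the editor; not part of the original NeMo file). ---
# A minimal, hypothetical example of the k-means helper above: cluster 256 random vectors
# into 4 centroids; the per-cluster counts sum to the number of samples. The helper name
# and sizes are illustrative only.
def _example_k_means_usage():
    samples = torch.randn(256, 8)             # [B, D]
    means, bin_counts = _k_means(samples, num_clusters=4, num_iters=5)
    assert means.shape == (4, 8)              # [K, D]
    assert int(bin_counts.sum()) == 256       # [K], counts over all samples
    return means, bin_counts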
def _mask_3d(tensor: Tensor, lengths: Tensor):
"""
Mask 3d tensor with time on 1st axis.
Args:
tensor: tensor of shape (B, T, D)
lengths: LongTensor of shape (B,)
Returns:
Masked Tensor (B, T, D)
"""
batch_size, max_lengths, _ = tensor.shape
mask = torch.ones(batch_size, max_lengths, 1).cumsum(dim=1).type_as(lengths)
mask = mask <= rearrange(lengths, "b -> b 1 1")
return tensor * mask
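# --- Editorial sketch (added by the editor; not part of the original NeMo file). ---
# A minimal, hypothetical example of the masking helper above: positions past each
# sequence length are zeroed out while valid frames are left untouched. The helper
# name and sizes are illustrative only.
def _example_mask_3d_usage():
    tensor = torch.ones(2, 5, 3)              # [B, T, D]
    lengths = torch.tensor([5, 3])
    masked = _mask_3d(tensor, lengths)
    assert masked[1, 3:].abs().sum() == 0     # padded frames are zero
    assert masked[1, :3].abs().sum() == 9     # valid frames untouched
    return masked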
@experimental
class EuclideanCodebook(NeuralModule):
"""
Codebook with Euclidean distance.
Args:
codebook_size: Number of codes to use.
codebook_dim: Dimension of each code.
decay: Decay for exponential moving average over the codebooks.
threshold_ema_dead_code: Threshold for dead code expiration.
During every iteration, replace codes with exponential moving average cluster size less than threshold
with randomly selected values from the current batch.
kmeans_iters: Optional int, if provided codes will be initialized from the centroids learned from
kmeans_iters iterations of k-means clustering on the first training batch.
"""
def __init__(
self,
codebook_size: int,
codebook_dim: int,
decay: float = 0.99,
threshold_ema_dead_code: Optional[int] = 2,
kmeans_iters: Optional[int] = None,
):
super().__init__()
self.decay = decay
if kmeans_iters:
codes = nn.init.kaiming_uniform_(torch.empty(codebook_size, codebook_dim))
else:
codes = torch.zeros(codebook_size, codebook_dim)
self.codebook_size = codebook_size
self.kmeans_iters = kmeans_iters
self.threshold_ema_dead_code = threshold_ema_dead_code
self.register_buffer("initialized", Tensor([not kmeans_iters]))
self.register_buffer("cluster_size", torch.zeros(codebook_size))
self.register_buffer("codes", codes)
self.register_buffer("codes_avg", codes.clone())
@torch.jit.ignore
def _init_codes(self, data):
if self.initialized:
return
codes, cluster_size = _k_means(samples=data, num_clusters=self.codebook_size, num_iters=self.kmeans_iters)
self.codes.data.copy_(codes)
self.codes_avg.data.copy_(codes.clone())
self.cluster_size.data.copy_(cluster_size)
self.initialized.data.copy_(Tensor([True]))
broadcast_tensors(self.buffers())
def _expire_codes(self, inputs: Tensor) -> None:
if not self.threshold_ema_dead_code:
return
expired_codes = self.cluster_size < self.threshold_ema_dead_code
if not torch.any(expired_codes):
return
samples = _sample_vectors(samples=inputs, num_sample=self.codebook_size)
expired_codes = rearrange(expired_codes, "K -> K ()")
modified_codes = torch.where(expired_codes, samples, self.codes)
self.codes.data.copy_(modified_codes)
broadcast_tensors(self.buffers())
def _update_codes(self, inputs: Tensor, indices: Tensor) -> None:
code_onehot = F.one_hot(indices, self.codebook_size).type(inputs.dtype)
code_onehot = rearrange(code_onehot, "B N -> N B")
# [N]
code_counts = code_onehot.sum(1)
_ema_inplace(moving_avg=self.cluster_size, new=code_counts, decay=self.decay)
# [N, D]
code_sum = code_onehot @ inputs
_ema_inplace(moving_avg=self.codes_avg, new=code_sum, decay=self.decay)
cluster_size_smoothed = _laplace_smoothing(self.cluster_size, n_categories=self.codebook_size)
cluster_size_smoothed = rearrange(cluster_size_smoothed, "N -> N ()")
codes_normalized = self.codes_avg / cluster_size_smoothed
self.codes.data.copy_(codes_normalized)
def _quantize(self, inputs: Tensor) -> Tensor:
# [B, N]
dist = _compute_distances(inputs, self.codes)
# [B]
indices = dist.min(dim=1).indices
return indices
def _dequantize(self, indices: Tensor) -> Tensor:
# [B, D]
dequantized = F.embedding(indices, self.codes)
return dequantized
@property
def input_types(self):
return {
"inputs": NeuralType(('B', 'T', 'D'), EncodedRepresentation()),
"input_len": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {
"dequantized": NeuralType(('B', 'T', 'D'), EncodedRepresentation()),
"indices": NeuralType(('B', 'T'), Index()),
}
def forward(self, inputs, input_len):
input_flat = rearrange(inputs, "B T D -> (B T) D")
self._init_codes(input_flat)
# [(B T)]
indices_flat = self._quantize(inputs=input_flat)
# [B, T]
indices = indices_flat.view(*inputs.shape[:-1])
# [B, T, D]
dequantized = self._dequantize(indices=indices)
if self.training:
# We do expiry of codes here because buffers are in sync and all the workers will make the same decision.
self._expire_codes(inputs=input_flat)
self._update_codes(inputs=input_flat, indices=indices_flat)
dequantized = _mask_3d(dequantized, input_len)
indices = mask_sequence_tensor(indices, input_len)
return dequantized, indices
@typecheck(
input_types={
"inputs": NeuralType(('B', 'T', 'D'), EncodedRepresentation()),
"input_len": NeuralType(tuple('B'), LengthsType()),
},
output_types={"indices": NeuralType(('B', 'T'), Index())},
)
def encode(self, inputs, input_len):
input_flat = rearrange(inputs, "B T D -> (B T) D")
# [(B T)]
indices_flat = self._quantize(inputs=input_flat)
# [B, T]
indices = indices_flat.view(*inputs.shape[:-1])
indices = mask_sequence_tensor(indices, input_len)
return indices
@typecheck(
input_types={"indices": NeuralType(('B', 'T'), Index()), "input_len": NeuralType(tuple('B'), LengthsType()),},
output_types={"dequantized": NeuralType(('B', 'T', 'D'), EncodedRepresentation())},
)
def decode(self, indices, input_len):
# [B, T, D]
dequantized = self._dequantize(indices=indices)
dequantized = _mask_3d(dequantized, input_len)
return dequantized
class ResidualVectorQuantizer(NeuralModule):
"""
Residual vector quantization (RVQ) algorithm as described in https://arxiv.org/pdf/2107.03312.pdf.
Args:
num_codebooks: Number of codebooks to use.
commit_loss_scale: Loss scale for codebook commit loss.
codebook_size: Number of codes to use for each codebook.
codebook_dim: Dimension of each code.
decay: Decay for exponential moving average over the codebooks.
threshold_ema_dead_code: Threshold for dead code expiration.
During every iteration, replace codes with exponential moving average cluster size less than threshold
with randomly selected values from the current batch.
kmeans_iters: Optional int, if provided codes will be initialized from the centroids learned from
kmeans_iters iterations of k-means clustering on the first training batch.
"""
def __init__(
self,
num_codebooks: int,
commit_loss_scale: float = 1.0,
codebook_size: int = 1024,
codebook_dim: int = 128,
decay: float = 0.99,
threshold_ema_dead_code: Optional[int] = 2,
kmeans_iters: Optional[int] = 50,
):
super().__init__()
self.codebook_dim = codebook_dim
if commit_loss_scale:
self.commit_loss_fn = MaskedMSELoss(loss_scale=commit_loss_scale)
else:
self.commit_loss_fn = None
self.codebooks = nn.ModuleList(
[
EuclideanCodebook(
codebook_size=codebook_size,
codebook_dim=codebook_dim,
decay=decay,
threshold_ema_dead_code=threshold_ema_dead_code,
kmeans_iters=kmeans_iters,
)
for _ in range(num_codebooks)
]
)
def _commit_loss(self, input, target, input_len):
if not self.commit_loss_fn:
return 0.0
return self.commit_loss_fn(
predicted=rearrange(input, "B T D -> B D T"),
target=rearrange(target, "B T D -> B D T"),
target_len=input_len,
)
@property
def input_types(self):
return {
"inputs": NeuralType(('B', 'D', 'T'), EncodedRepresentation()),
"input_len": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {
"dequantized": NeuralType(('B', 'D', 'T'), EncodedRepresentation()),
"indices": NeuralType(('D', 'B', 'T'), Index()),
"commit_loss": NeuralType((), LossType()),
}
def forward(self, inputs: Tensor, input_len: Tensor) -> Tuple[Tensor, Tensor, float]:
commit_loss = 0.0
residual = rearrange(inputs, "B D T -> B T D")
index_list = []
dequantized = torch.zeros_like(residual)
for codebook in self.codebooks:
dequantized_i, indices_i = codebook(inputs=residual, input_len=input_len)
if self.training:
dequantized_i = residual + (dequantized_i - residual).detach()
dequantized_i_const = dequantized_i.detach()
commit_loss_i = self._commit_loss(input=residual, target=dequantized_i_const, input_len=input_len)
commit_loss = commit_loss + commit_loss_i
residual = residual - dequantized_i_const
else:
residual = residual - dequantized_i
dequantized = dequantized + dequantized_i
index_list.append(indices_i)
# [N, B, T], first dimension is number of codebooks
indices = torch.stack(index_list)
dequantized = rearrange(dequantized, "B T D -> B D T")
return dequantized, indices, commit_loss
@typecheck(
input_types={
"inputs": NeuralType(('B', 'D', 'T'), EncodedRepresentation()),
"input_len": NeuralType(tuple('B'), LengthsType()),
},
output_types={"indices": NeuralType(('D', 'B', 'T'), Index())},
)
def encode(self, inputs: Tensor, input_len: Tensor) -> Tensor:
residual = rearrange(inputs, "B D T -> B T D")
index_list = []
for codebook in self.codebooks:
# [B, T]
indices_i = codebook.encode(inputs=residual, input_len=input_len)
# [B, D, T]
dequantized_i = codebook.decode(indices=indices_i, input_len=input_len)
residual = residual - dequantized_i
index_list.append(indices_i)
# [N, B, T]
indices = torch.stack(index_list)
return indices
@typecheck(
input_types={
"indices": NeuralType(('D', 'B', 'T'), Index()),
"input_len": NeuralType(tuple('B'), LengthsType()),
},
output_types={"dequantized": NeuralType(('B', 'D', 'T'), EncodedRepresentation()),},
)
def decode(self, indices: Tensor, input_len: Tensor) -> Tensor:
# [B, T, D]
dequantized = torch.zeros([indices.shape[1], indices.shape[2], self.codebook_dim], device=indices.device)
for codebook_indices, codebook in zip(indices, self.codebooks):
dequantized_i = codebook.decode(indices=codebook_indices, input_len=input_len)
dequantized = dequantized + dequantized_i
dequantized = rearrange(dequantized, "B T D -> B D T")
return dequantized
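# --- Editorial sketch (added by the editor; not part of the original NeMo file). ---
# A minimal, hypothetical shape check for the RVQ module above, run in eval mode on a single
# process (k-means initialization and EMA updates are skipped, so no distributed sync is
# required). The hyperparameters and helper name below are illustrative only.
def _example_residual_vector_quantizer_usage():
    rvq = ResidualVectorQuantizer(num_codebooks=2, codebook_size=16, codebook_dim=8, kmeans_iters=None)
    rvq.eval()
    inputs = torch.randn(2, 8, 20)            # [B, D, T]
    input_len = torch.tensor([20, 15])
    dequantized, indices, commit_loss = rvq(inputs=inputs, input_len=input_len)
    assert dequantized.shape == (2, 8, 20)    # [B, D, T]
    assert indices.shape == (2, 2, 20)        # [num_codebooks, B, T]
    return dequantized, indices, commit_loss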
| NeMo-main | nemo/collections/tts/modules/encodec_modules.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from nemo.collections.tts.modules.submodules import ConditionalInput, ConditionalLayerNorm, LinearNorm
from nemo.collections.tts.parts.utils.helpers import get_mask_from_lengths
from nemo.core.classes import NeuralModule, adapter_mixins, typecheck
from nemo.core.neural_types.elements import EncodedRepresentation, LengthsType, MaskType, TokenIndex
from nemo.core.neural_types.neural_type import NeuralType
def mask_from_lens(lens, max_len: Optional[int] = None):
if max_len is None:
max_len = lens.max()
ids = torch.arange(0, max_len, device=lens.device, dtype=lens.dtype)
mask = torch.lt(ids, lens.unsqueeze(1))
return mask
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
# sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
sinusoid_inp = torch.matmul(torch.unsqueeze(pos_seq, -1), torch.unsqueeze(self.inv_freq, 0))
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1)
if bsz is not None:
return pos_emb[None, :, :].repeat(bsz, 1, 1)
else:
return pos_emb[None, :, :]
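# --- Editorial usage sketch (added by the editor; not part of the original NeMo file). ---
# A minimal, hypothetical example of the sinusoidal positional embedding above: for an
# embedding dimension demb, half of the channels hold sines and half hold cosines, and an
# optional batch size repeats the embedding along the batch axis. The helper name is
# illustrative only.
def _example_positional_embedding_usage():
    pos_emb = PositionalEmbedding(demb=8)
    pos_seq = torch.arange(5, dtype=torch.float)
    emb = pos_emb(pos_seq)
    assert emb.shape == (1, 5, 8)             # [1, T, demb]
    emb_batched = pos_emb(pos_seq, bsz=3)
    assert emb_batched.shape == (3, 5, 8)     # [bsz, T, demb]
    return emb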
class PositionwiseConvFF(nn.Module):
def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False, condition_types=[]):
super(PositionwiseConvFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
if type(kernel_size) is not tuple:
kernel_size = (kernel_size, kernel_size)
self.CoreNet = nn.Sequential(
nn.Conv1d(d_model, d_inner, kernel_size[0], 1, (kernel_size[0] // 2)),
nn.ReLU(),
# nn.Dropout(dropout), # worse convergence
nn.Conv1d(d_inner, d_model, kernel_size[1], 1, (kernel_size[1] // 2)),
nn.Dropout(dropout),
)
self.layer_norm = ConditionalLayerNorm(d_model, condition_dim=d_model, condition_types=condition_types)
self.pre_lnorm = pre_lnorm
def forward(self, inp, conditioning=None):
return self._forward(inp, conditioning)
def _forward(self, inp, conditioning=None):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(self.layer_norm(core_out, conditioning).to(inp.dtype))
core_out = core_out.transpose(1, 2)
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(core_out)
core_out = core_out.transpose(1, 2)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out, conditioning).to(inp.dtype)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1, pre_lnorm=False, condition_types=[]):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = ConditionalLayerNorm(d_model, condition_dim=d_model, condition_types=condition_types)
def forward(self, inp, attn_mask=None, conditioning=None):
return self._forward(inp, attn_mask, conditioning)
def _forward(self, inp, attn_mask=None, conditioning=None):
residual = inp
if self.pre_lnorm:
# layer normalization
inp = self.layer_norm(inp, conditioning)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1).to(attn_score.dtype)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask.to(torch.bool), -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(inp.size(0), inp.size(1), n_head * d_head)
# linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
# residual connection
output = residual + attn_out
else:
# residual connection + layer normalization
output = self.layer_norm(residual + attn_out, conditioning)
return output
class TransformerLayer(nn.Module, adapter_mixins.AdapterModuleMixin):
def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout, condition_types=[], **kwargs):
super(TransformerLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, condition_types=condition_types, **kwargs)
self.pos_ff = PositionwiseConvFF(
d_model, d_inner, kernel_size, dropout, pre_lnorm=kwargs.get('pre_lnorm'), condition_types=condition_types
)
def forward(self, dec_inp, mask=None, conditioning=None):
output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2), conditioning=conditioning)
output *= mask
output = self.pos_ff(output, conditioning)
output *= mask
if self.is_adapter_available():
output = self.forward_enabled_adapters(output)
output *= mask
return output
class FFTransformerDecoder(NeuralModule):
def __init__(
self,
n_layer,
n_head,
d_model,
d_head,
d_inner,
kernel_size,
dropout,
dropatt,
dropemb=0.0,
pre_lnorm=False,
condition_types=[],
):
super(FFTransformerDecoder, self).__init__()
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.pos_emb = PositionalEmbedding(self.d_model)
self.drop = nn.Dropout(dropemb)
self.layers = nn.ModuleList()
self.cond_input = ConditionalInput(d_model, d_model, condition_types)
for _ in range(n_layer):
self.layers.append(
TransformerLayer(
n_head,
d_model,
d_head,
d_inner,
kernel_size,
dropout,
dropatt=dropatt,
pre_lnorm=pre_lnorm,
condition_types=condition_types,
)
)
@property
def input_types(self):
return {
"input": NeuralType(('B', 'T', 'D'), EncodedRepresentation()),
"seq_lens": NeuralType(('B'), LengthsType()),
"conditioning": NeuralType(('B', 'T', 'D'), EncodedRepresentation(), optional=True),
}
@property
def output_types(self):
return {
"out": NeuralType(('B', 'T', 'D'), EncodedRepresentation()),
"mask": NeuralType(('B', 'T', 'D'), MaskType()),
}
@typecheck()
def forward(self, input, seq_lens, conditioning=None):
return self._forward(input, mask_from_lens(seq_lens).unsqueeze(2), conditioning)
def _forward(self, inp, mask, conditioning):
pos_seq = torch.arange(inp.size(1), device=inp.device).to(inp.dtype)
pos_emb = self.pos_emb(pos_seq) * mask
inp += pos_emb
inp = self.cond_input(inp, conditioning)
out = self.drop(inp)
for layer in self.layers:
out = layer(out, mask=mask, conditioning=conditioning)
# out = self.drop(out)
return out, mask
class FFTransformerEncoder(FFTransformerDecoder):
def __init__(
self,
n_layer,
n_head,
d_model,
d_head,
d_inner,
kernel_size,
dropout,
dropatt,
dropemb=0.0,
pre_lnorm=False,
n_embed=None,
d_embed=None,
padding_idx=0,
condition_types=[],
):
super(FFTransformerEncoder, self).__init__(
n_layer,
n_head,
d_model,
d_head,
d_inner,
kernel_size,
dropout,
dropatt,
dropemb,
pre_lnorm,
condition_types,
)
self.padding_idx = padding_idx
self.word_emb = nn.Embedding(n_embed, d_embed or d_model, padding_idx=self.padding_idx)
@property
def input_types(self):
return {
"input": NeuralType(('B', 'T'), TokenIndex()),
"conditioning": NeuralType(('B', 'T', 'D'), EncodedRepresentation(), optional=True),
}
def forward(self, input, conditioning=0):
return self._forward(self.word_emb(input), (input != self.padding_idx).unsqueeze(2), conditioning) # (B, L, 1)
class FFTransformer(nn.Module):
def __init__(
self,
in_dim,
out_dim=1,
n_layers=6,
n_head=1,
d_head=64,
d_inner=1024,
kernel_size=3,
dropout=0.1,
dropatt=0.1,
dropemb=0.0,
):
super(FFTransformer, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.n_head = n_head
self.d_head = d_head
self.pos_emb = PositionalEmbedding(self.in_dim)
self.drop = nn.Dropout(dropemb)
self.layers = nn.ModuleList()
for _ in range(n_layers):
self.layers.append(
TransformerLayer(n_head, in_dim, d_head, d_inner, kernel_size, dropout, dropatt=dropatt)
)
self.dense = LinearNorm(in_dim, out_dim)
def forward(self, dec_inp, in_lens):
# B, C, T --> B, T, C
inp = dec_inp.transpose(1, 2)
mask = get_mask_from_lengths(in_lens)[..., None]
pos_seq = torch.arange(inp.size(1), device=inp.device).to(inp.dtype)
pos_emb = self.pos_emb(pos_seq) * mask
out = self.drop(inp + pos_emb)
for layer in self.layers:
out = layer(out, mask=mask)
out = self.dense(out).transpose(1, 2)
return out
| NeMo-main | nemo/collections/tts/modules/transformer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# BSD 3-Clause License
#
# Copyright (c) 2019, Seungwon Park 박승원
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The following functions/classes were based on code from https://github.com/mindslab-ai/univnet:
# Generator, DiscriminatorP, MultiPeriodDiscriminator, DiscriminatorR, MultiScaleDiscriminator,
# KernelPredictor, LVCBlock
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from nemo.core.classes.common import typecheck
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types.elements import AudioSignal, MelSpectrogramType, VoidType
from nemo.core.neural_types.neural_type import NeuralType
class KernelPredictor(torch.nn.Module):
''' Kernel predictor for the location-variable convolutions'''
def __init__(
self,
cond_channels,
conv_in_channels,
conv_out_channels,
conv_layers,
conv_kernel_size=3,
kpnet_hidden_channels=64,
kpnet_conv_size=3,
kpnet_dropout=0.0,
kpnet_nonlinear_activation="LeakyReLU",
kpnet_nonlinear_activation_params={"negative_slope": 0.1},
):
'''
Args:
            cond_channels (int): number of channels of the conditioning sequence
            conv_in_channels (int): number of channels of the input sequence
            conv_out_channels (int): number of channels of the output sequence
            conv_layers (int): number of convolution layers
'''
super().__init__()
self.conv_in_channels = conv_in_channels
self.conv_out_channels = conv_out_channels
self.conv_kernel_size = conv_kernel_size
self.conv_layers = conv_layers
kpnet_kernel_channels = conv_in_channels * conv_out_channels * conv_kernel_size * conv_layers # l_w
kpnet_bias_channels = conv_out_channels * conv_layers # l_b
self.input_conv = nn.Sequential(
nn.utils.weight_norm(nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)),
getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
)
self.residual_convs = nn.ModuleList()
padding = (kpnet_conv_size - 1) // 2
for _ in range(3):
self.residual_convs.append(
nn.Sequential(
nn.Dropout(kpnet_dropout),
nn.utils.weight_norm(
nn.Conv1d(
kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding, bias=True
)
),
getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
nn.utils.weight_norm(
nn.Conv1d(
kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding, bias=True
)
),
getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
)
)
self.kernel_conv = nn.utils.weight_norm(
nn.Conv1d(kpnet_hidden_channels, kpnet_kernel_channels, kpnet_conv_size, padding=padding, bias=True)
)
self.bias_conv = nn.utils.weight_norm(
nn.Conv1d(kpnet_hidden_channels, kpnet_bias_channels, kpnet_conv_size, padding=padding, bias=True)
)
def forward(self, c):
'''
Args:
c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)
'''
batch, _, cond_length = c.shape
c = self.input_conv(c)
for residual_conv in self.residual_convs:
residual_conv.to(c.device)
c = c + residual_conv(c)
k = self.kernel_conv(c)
b = self.bias_conv(c)
kernels = k.contiguous().view(
batch, self.conv_layers, self.conv_in_channels, self.conv_out_channels, self.conv_kernel_size, cond_length,
)
bias = b.contiguous().view(batch, self.conv_layers, self.conv_out_channels, cond_length,)
return kernels, bias
def remove_weight_norm(self):
remove_weight_norm(self.input_conv[0])
remove_weight_norm(self.kernel_conv)
remove_weight_norm(self.bias_conv)
for block in self.residual_convs:
remove_weight_norm(block[1])
remove_weight_norm(block[3])
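# --- Editor's illustrative sketch (not part of the original source): shape contract of
# KernelPredictor. A conditioning mel (B, cond_channels, T_cond) is mapped to per-frame
# LVC kernels (B, conv_layers, C_in, C_out, kernel_size, T_cond) and biases
# (B, conv_layers, C_out, T_cond). The sizes below are arbitrary.
if __name__ == "__main__":
    _kp = KernelPredictor(cond_channels=80, conv_in_channels=32, conv_out_channels=64, conv_layers=4)
    _c = torch.randn(2, 80, 50)
    _kernels, _bias = _kp(_c)
    print(_kernels.shape)  # torch.Size([2, 4, 32, 64, 3, 50])
    print(_bias.shape)  # torch.Size([2, 4, 64, 50])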
class LVCBlock(torch.nn.Module):
'''the location-variable convolutions'''
def __init__(
self,
in_channels,
cond_channels,
stride,
dilations=[1, 3, 9, 27],
lReLU_slope=0.2,
conv_kernel_size=3,
cond_hop_length=256,
kpnet_hidden_channels=64,
kpnet_conv_size=3,
kpnet_dropout=0.0,
):
super().__init__()
self.cond_hop_length = cond_hop_length
self.conv_layers = len(dilations)
self.conv_kernel_size = conv_kernel_size
self.kernel_predictor = KernelPredictor(
cond_channels=cond_channels,
conv_in_channels=in_channels,
conv_out_channels=2 * in_channels,
conv_layers=len(dilations),
conv_kernel_size=conv_kernel_size,
kpnet_hidden_channels=kpnet_hidden_channels,
kpnet_conv_size=kpnet_conv_size,
kpnet_dropout=kpnet_dropout,
kpnet_nonlinear_activation_params={"negative_slope": lReLU_slope},
)
self.convt_pre = nn.Sequential(
nn.LeakyReLU(lReLU_slope),
nn.utils.weight_norm(
nn.ConvTranspose1d(
in_channels,
in_channels,
2 * stride,
stride=stride,
padding=stride // 2 + stride % 2,
output_padding=stride % 2,
)
),
)
self.conv_blocks = nn.ModuleList()
for dilation in dilations:
self.conv_blocks.append(
nn.Sequential(
nn.LeakyReLU(lReLU_slope),
nn.utils.weight_norm(
nn.Conv1d(
in_channels,
in_channels,
conv_kernel_size,
padding=dilation * (conv_kernel_size - 1) // 2,
dilation=dilation,
)
),
nn.LeakyReLU(lReLU_slope),
)
)
def forward(self, x, c):
''' forward propagation of the location-variable convolutions.
Args:
x (Tensor): the input sequence (batch, in_channels, in_length)
c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)
Returns:
Tensor: the output sequence (batch, in_channels, in_length)
'''
_, in_channels, _ = x.shape # (B, c_g, L')
x = self.convt_pre(x) # (B, c_g, stride * L')
kernels, bias = self.kernel_predictor(c)
for i, conv in enumerate(self.conv_blocks):
output = conv(x) # (B, c_g, stride * L')
k = kernels[:, i, :, :, :, :] # (B, 2 * c_g, c_g, kernel_size, cond_length)
b = bias[:, i, :, :] # (B, 2 * c_g, cond_length)
output = self.location_variable_convolution(
output, k, b, hop_size=self.cond_hop_length
) # (B, 2 * c_g, stride * L'): LVC
x = x + torch.sigmoid(output[:, :in_channels, :]) * torch.tanh(
output[:, in_channels:, :]
) # (B, c_g, stride * L'): GAU
return x
def location_variable_convolution(self, x, kernel, bias, dilation=1, hop_size=256):
''' perform location-variable convolution operation on the input sequence (x) using the local convolution kernel
Args:
x (Tensor): the input sequence (batch, in_channels, in_length).
kernel (Tensor): the local convolution kernel (batch, in_channel, out_channels, kernel_size, kernel_length)
bias (Tensor): the bias for the local convolution (batch, out_channels, kernel_length)
dilation (int): the dilation of convolution.
hop_size (int): the hop_size of the conditioning sequence.
Returns:
(Tensor): the output sequence after performing local convolution. (batch, out_channels, in_length).
'''
batch, _, in_length = x.shape
batch, _, out_channels, kernel_size, kernel_length = kernel.shape
        assert in_length == (kernel_length * hop_size), "lengths of x and kernel do not match"
padding = dilation * int((kernel_size - 1) / 2)
x = F.pad(x, (padding, padding), 'constant', 0) # (batch, in_channels, in_length + 2*padding)
x = x.unfold(2, hop_size + 2 * padding, hop_size) # (batch, in_channels, kernel_length, hop_size + 2*padding)
if hop_size < dilation:
x = F.pad(x, (0, dilation), 'constant', 0)
x = x.unfold(
3, dilation, dilation
) # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation)
x = x[:, :, :, :, :hop_size]
x = x.transpose(3, 4) # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation)
x = x.unfold(4, kernel_size, 1) # (batch, in_channels, kernel_length, dilation, _, kernel_size)
o = torch.einsum('bildsk,biokl->bolsd', x, kernel)
o = o.to(memory_format=torch.channels_last_3d)
bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d)
o = o + bias
o = o.contiguous().view(batch, out_channels, -1)
return o
def remove_weight_norm(self):
self.kernel_predictor.remove_weight_norm()
remove_weight_norm(self.convt_pre[1])
for block in self.conv_blocks:
remove_weight_norm(block[1])
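# --- Editor's illustrative sketch (not part of the original source). LVCBlock first
# upsamples the input by `stride` with the transposed conv, then applies, per dilation
# branch, a convolution whose kernels change every `cond_hop_length` samples (predicted
# from the conditioning mel) followed by a gated activation. With stride=8 and
# cond_hop_length=8, a (B, 32, T) input and a (B, 80, T) mel give a (B, 32, 8 * T) output.
# The sizes below are arbitrary.
if __name__ == "__main__":
    _block = LVCBlock(in_channels=32, cond_channels=80, stride=8, cond_hop_length=8)
    _x = torch.randn(2, 32, 20)
    _c = torch.randn(2, 80, 20)
    _y = _block(_x, _c)
    print(_y.shape)  # torch.Size([2, 32, 160])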
class Generator(NeuralModule):
__constants__ = ['lrelu_slope', 'num_kernels', 'num_upsamples']
def __init__(
self,
noise_dim,
channel_size,
dilations,
strides,
lrelu_slope,
kpnet_conv_size,
n_mel_channels=80,
hop_length=256,
):
super(Generator, self).__init__()
self.noise_dim = noise_dim
self.channel_size = channel_size
self.dilations = dilations
self.strides = strides
self.lrelu_slope = lrelu_slope
self.kpnet_conv_size = kpnet_conv_size
self.mel_channel = n_mel_channels
self.hop_length = hop_length
self.res_stack = nn.ModuleList()
hop_length_lvc = 1
for stride in self.strides:
hop_length_lvc = stride * hop_length_lvc
self.res_stack.append(
LVCBlock(
self.channel_size,
self.mel_channel,
stride=stride,
dilations=self.dilations,
lReLU_slope=self.lrelu_slope,
cond_hop_length=hop_length_lvc,
kpnet_conv_size=self.kpnet_conv_size,
)
)
assert (
hop_length_lvc == self.hop_length
), "multiplied value of strides {} should match n_window_stride {}".format(self.strides, self.hop_length)
self.conv_pre = nn.utils.weight_norm(
nn.Conv1d(self.noise_dim, self.channel_size, 7, padding=3, padding_mode='reflect')
)
self.conv_post = nn.Sequential(
nn.LeakyReLU(self.lrelu_slope),
nn.utils.weight_norm(nn.Conv1d(self.channel_size, 1, 7, padding=3, padding_mode='reflect')),
nn.Tanh(),
)
@property
def input_types(self):
return {
"x": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
}
@property
def output_types(self):
return {
"audio": NeuralType(('B', 'S', 'T'), AudioSignal()),
}
@typecheck()
def forward(self, x):
# UnivNet starts with Gaussian noise
z = torch.randn(x.size(0), self.noise_dim, x.size(2), dtype=x.dtype, device=x.device)
z = self.conv_pre(z) # (B, c_g, L)
for res_block in self.res_stack:
z = res_block(z, x) # (B, c_g, L * s_0 * ... * s_i)
z = self.conv_post(z) # (B, 1, L * 256)
return z
def remove_weight_norm(self):
print('Removing weight norm...')
remove_weight_norm(self.conv_pre)
for layer in self.conv_post:
if len(layer.state_dict()) != 0:
remove_weight_norm(layer)
for res_block in self.res_stack:
res_block.remove_weight_norm()
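# --- Editor's illustrative sketch (not part of the original source): end-to-end shape
# contract of the UnivNet generator. The product of `strides` must equal `hop_length`,
# so each mel frame is expanded into hop_length audio samples. The hyperparameters below
# are arbitrary (smaller than typical UnivNet configs) and chosen only to keep the
# example light.
if __name__ == "__main__":
    _gen = Generator(
        noise_dim=64,
        channel_size=16,
        dilations=[1, 3, 9, 27],
        strides=[8, 8, 4],  # 8 * 8 * 4 == 256 == hop_length
        lrelu_slope=0.2,
        kpnet_conv_size=3,
        n_mel_channels=80,
        hop_length=256,
    )
    _mel = torch.randn(1, 80, 8)
    _audio = _gen(x=_mel)  # forward is typechecked, so pass the input by keyword
    print(_audio.shape)  # torch.Size([1, 1, 2048])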
class DiscriminatorP(NeuralModule):
def __init__(self, lrelu_slope, period, kernel_size=5, stride=3, use_spectral_norm=False, debug=False):
super().__init__()
self.lrelu_slope = lrelu_slope
self.period = period
        norm_f = spectral_norm if use_spectral_norm else weight_norm
conv_ch = [64, 128, 256, 512, 1024] if not debug else [8, 12, 32, 64, 128]
self.convs = nn.ModuleList(
[
norm_f(Conv2d(1, conv_ch[0], (kernel_size, 1), (stride, 1), padding=(kernel_size // 2, 0))),
norm_f(Conv2d(conv_ch[0], conv_ch[1], (kernel_size, 1), (stride, 1), padding=(kernel_size // 2, 0))),
norm_f(Conv2d(conv_ch[1], conv_ch[2], (kernel_size, 1), (stride, 1), padding=(kernel_size // 2, 0))),
norm_f(Conv2d(conv_ch[2], conv_ch[3], (kernel_size, 1), (stride, 1), padding=(kernel_size // 2, 0))),
norm_f(Conv2d(conv_ch[3], conv_ch[4], (kernel_size, 1), 1, padding=(kernel_size // 2, 0))),
]
)
self.conv_post = norm_f(Conv2d(conv_ch[4], 1, (3, 1), 1, padding=(1, 0)))
@property
def input_types(self):
return {
"x": NeuralType(('B', 'S', 'T'), AudioSignal()),
}
@property
def output_types(self):
return {
"decision": NeuralType(('B', 'T'), VoidType()),
"feature_maps": [NeuralType(("B", "C", "H", "W"), VoidType())],
}
@typecheck()
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, self.lrelu_slope)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
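# --- Editor's note (not part of the original source): the reshape above folds a length-t
# waveform into a (t // period, period) grid so that the 2-D convolutions compare samples
# that are exactly `period` steps apart; inputs are reflection-padded up to a multiple of
# the period first. A minimal sketch with arbitrary sizes:
if __name__ == "__main__":
    _disc_p = DiscriminatorP(lrelu_slope=0.2, period=3)
    _score, _fmaps = _disc_p(x=torch.randn(2, 1, 4000))  # 4000 is padded to 4002 = 1334 * 3
    print(_score.shape, len(_fmaps))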
class MultiPeriodDiscriminator(NeuralModule):
def __init__(self, cfg, debug=False):
super().__init__()
self.lrelu_slope = cfg.lrelu_slope
self.periods = cfg.periods
assert len(self.periods) == 5, "MPD requires list of len=5, got {}".format(cfg.periods)
self.kernel_size = cfg.kernel_size
self.stride = cfg.stride
self.use_spectral_norm = cfg.use_spectral_norm
self.discriminators = nn.ModuleList(
[
DiscriminatorP(
self.lrelu_slope,
self.periods[0],
self.kernel_size,
self.stride,
self.use_spectral_norm,
debug=debug,
),
DiscriminatorP(
self.lrelu_slope,
self.periods[1],
self.kernel_size,
self.stride,
self.use_spectral_norm,
debug=debug,
),
DiscriminatorP(
self.lrelu_slope,
self.periods[2],
self.kernel_size,
self.stride,
self.use_spectral_norm,
debug=debug,
),
DiscriminatorP(
self.lrelu_slope,
self.periods[3],
self.kernel_size,
self.stride,
self.use_spectral_norm,
debug=debug,
),
DiscriminatorP(
self.lrelu_slope,
self.periods[4],
self.kernel_size,
self.stride,
self.use_spectral_norm,
debug=debug,
),
]
)
@property
def input_types(self):
return {
"y": NeuralType(('B', 'S', 'T'), AudioSignal()),
"y_hat": NeuralType(('B', 'S', 'T'), AudioSignal()),
}
@property
def output_types(self):
return {
"real_scores": [NeuralType(('B', 'T'), VoidType())],
"fake_scores": [NeuralType(('B', 'T'), VoidType())],
"real_feature_maps": [[NeuralType(("B", "C", "H", "W"), VoidType())]],
"fake_feature_maps": [[NeuralType(("B", "C", "H", "W"), VoidType())]],
}
@typecheck()
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(x=y)
y_d_g, fmap_g = d(x=y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorR(NeuralModule):
def __init__(self, cfg, resolution):
super().__init__()
self.resolution = resolution
assert len(self.resolution) == 3, "MRD layer requires list with len=3, got {}".format(self.resolution)
self.lrelu_slope = cfg.lrelu_slope
        norm_f = spectral_norm if cfg.use_spectral_norm else weight_norm
self.convs = nn.ModuleList(
[
norm_f(nn.Conv2d(1, 32, (3, 9), padding=(1, 4))),
norm_f(nn.Conv2d(32, 32, (3, 9), stride=(1, 2), padding=(1, 4))),
norm_f(nn.Conv2d(32, 32, (3, 9), stride=(1, 2), padding=(1, 4))),
norm_f(nn.Conv2d(32, 32, (3, 9), stride=(1, 2), padding=(1, 4))),
norm_f(nn.Conv2d(32, 32, (3, 3), padding=(1, 1))),
]
)
self.conv_post = norm_f(nn.Conv2d(32, 1, (3, 3), padding=(1, 1)))
@property
def input_types(self):
return {
"x": NeuralType(('B', 'S', 'T'), AudioSignal()),
}
@property
def output_types(self):
return {
"decision": NeuralType(('B', 'T'), VoidType()),
"feature_maps": [NeuralType(("B", "C", "T"), VoidType())],
}
def forward(self, x):
fmap = []
x = self.spectrogram(x)
x = x.unsqueeze(1)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, self.lrelu_slope)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
def spectrogram(self, x):
n_fft, hop_length, win_length = self.resolution
x = F.pad(x, (int((n_fft - hop_length) / 2), int((n_fft - hop_length) / 2)), mode='reflect')
x = x.squeeze(1)
x = torch.view_as_real(
torch.stft(x, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False, return_complex=True)
) # [B, F, TT, 2] (Note: torch.stft() returns complex tensor [B, F, TT]; converted via view_as_real)
mag = torch.norm(x, p=2, dim=-1) # [B, F, TT]
return mag
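# --- Editor's illustrative sketch (not part of the original source): each DiscriminatorR
# operates on the magnitude STFT of the waveform at one (n_fft, hop_length, win_length)
# resolution, then scores it with 2-D convolutions that stride along the time axis. The
# cfg object and the resolution below are arbitrary placeholders, not a NeMo config.
if __name__ == "__main__":
    from types import SimpleNamespace
    _cfg = SimpleNamespace(lrelu_slope=0.2, use_spectral_norm=False)
    _disc_r = DiscriminatorR(_cfg, resolution=[1024, 256, 1024])
    _score, _fmaps = _disc_r(torch.randn(2, 1, 8192))
    print(_score.shape, len(_fmaps))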
class MultiResolutionDiscriminator(NeuralModule):
def __init__(self, cfg, debug=False):
super().__init__()
self.resolutions = cfg.resolutions
assert (
len(self.resolutions) == 3
), "MRD requires list of list with len=3, each element having a list with len=3. got {}".format(
self.resolutions
)
self.discriminators = nn.ModuleList([DiscriminatorR(cfg, resolution) for resolution in self.resolutions])
@property
def input_types(self):
return {
"y": NeuralType(('B', 'S', 'T'), AudioSignal()),
"y_hat": NeuralType(('B', 'S', 'T'), AudioSignal()),
}
@property
def output_types(self):
return {
"real_scores": [NeuralType(('B', 'T'), VoidType())],
"fake_scores": [NeuralType(('B', 'T'), VoidType())],
"real_feature_maps": [[NeuralType(("B", "C", "T"), VoidType())]],
"fake_feature_maps": [[NeuralType(("B", "C", "T"), VoidType())]],
}
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(x=y)
y_d_g, fmap_g = d(x=y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
| NeMo-main | nemo/collections/tts/modules/univnet_modules.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from nemo.collections.tts.modules.submodules import Invertible1x1Conv, WaveNet
from nemo.collections.tts.parts.utils.helpers import OperationMode, remove, split_view
from nemo.core.classes import Exportable, NeuralModule, typecheck
from nemo.core.neural_types.elements import (
AudioSignal,
IntType,
MelSpectrogramType,
NormalDistributionSamplesType,
VoidType,
)
from nemo.core.neural_types.neural_type import NeuralType
class WaveGlowModule(NeuralModule, Exportable):
def __init__(
self,
n_mel_channels: int,
n_flows: int,
n_group: int,
n_early_every: int,
n_early_size: int,
n_wn_channels: int,
n_wn_layers: int,
wn_kernel_size: int,
):
"""
WaveGlow module
Args:
n_mel_channels (int): Number of mel channels to output.
n_flows (int): Number of flow layers
n_group (int): Number of groups to respace the inputs
n_early_every (int): Every n_early_every layers, n_early_size gets skip connected to the output
n_early_size (int): The size of the chunk to be skip connected
n_wn_channels (int): Number of channels for the non-invertible wavenet transformation
n_wn_layers (int): Number of layers for the non-invertible wavenet transformation
wn_kernel_size (int): Kernel size for the non-invertible wavenet transformation
"""
super().__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels, n_mel_channels, 1024, stride=256)
self.n_mel_channels = n_mel_channels
assert n_group % 2 == 0
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.wavenet = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
self.mode = OperationMode.infer
n_half = n_group // 2
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size / 2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.wavenet.append(
WaveNet(
n_half,
n_mel_channels * n_group,
n_layers=n_wn_layers,
n_channels=n_wn_channels,
kernel_size=wn_kernel_size,
)
)
self.n_remaining_channels = n_remaining_channels
self.time_cutoff = self.upsample.stride[0] - self.upsample.kernel_size[0]
# Pre-calculating the sizes of noise to use so it's not dynamic
n_halves = []
n_half = self.n_remaining_channels // 2
for k in reversed(range(self.n_flows)):
n_halves.append(n_half)
if k % self.n_early_every == 0 and k > 0:
n_half = n_half + int(self.n_early_size / 2)
n_halves.reverse()
self.n_halves = n_halves
self.removed_weightnorm = False
def _prepare_for_export(self, **kwargs):
"""
Override this method to prepare module for export. This is in-place operation.
Base version does common necessary module replacements (Apex etc)
"""
self.remove_weightnorm()
super()._prepare_for_export(**kwargs)
@typecheck()
def forward(self, spec, z=None, audio=None, run_inverse=True, sigma=1.0):
""" TODO
"""
if self.training and self.mode != OperationMode.training:
raise ValueError(f"{self} has self.training set to True but self.OperationMode was not set to training")
if not self.training and self.mode == OperationMode.training:
raise ValueError(f"{self} has self.training set to False but self.OperationMode was set to training")
audio_pred = torch.zeros((1, 1))
if run_inverse:
# norm_dist_to_audio is used to predict audio from spectrogram so only used in val or infer mode
# Could also log train audio but currently not done
audio_pred = self.norm_dist_to_audio(spec=spec, sigma=sigma, z=z)
if audio is not None and self.mode != OperationMode.infer:
            # audio_to_normal_dist is used to calculate loss so only run this in train or val mode
z1, log_s_list, log_det_W_list = self.audio_to_normal_dist(spec=spec, audio=audio)
return z1, log_s_list, log_det_W_list, audio_pred
return audio_pred
@property
def input_types(self):
if self.mode == OperationMode.infer:
return {
"spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"z": NeuralType(('B', 'D', 'T'), MelSpectrogramType(), optional=True),
"sigma": NeuralType(optional=True),
}
else:
return {
"spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
"z": NeuralType(('B', 'D', 'T'), MelSpectrogramType(), optional=True),
"audio": NeuralType(('B', 'T'), AudioSignal(), optional=True),
"run_inverse": NeuralType(elements_type=IntType(), optional=True),
"sigma": NeuralType(optional=True),
}
@property
def output_types(self):
if self.mode == OperationMode.training or self.mode == OperationMode.validation:
return {
"pred_normal_dist": NeuralType(('B', 'flowgroup', 'T'), NormalDistributionSamplesType()),
"log_s_list": [NeuralType(('B', 'flowgroup', 'T'), VoidType())], # TODO: Figure out a good typing
"log_det_W_list": [NeuralType(elements_type=VoidType())], # TODO: Figure out a good typing
"audio_pred": NeuralType(('B', 'T'), AudioSignal()),
}
else:
return {
"audio": NeuralType(('B', 'T'), AudioSignal()),
}
def input_example(self, max_batch=1, max_dim=256):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
par = next(self.parameters())
mel = torch.randn((max_batch, self.n_mel_channels, max_dim), device=par.device, dtype=par.dtype)
z = torch.randn(
(max_batch, self.n_mel_channels, max_dim * self.upsample.stride[0] // self.n_group),
device=par.device,
dtype=par.dtype,
)
return {"spec": mel, "z": z}
def audio_to_normal_dist(self, *, spec: torch.Tensor, audio: torch.Tensor) -> Tuple[torch.Tensor, list, list]:
# Upsample spectrogram to size of audio
spec = self.upsample(spec)
assert spec.size(2) >= audio.size(1)
if spec.size(2) > audio.size(1):
spec = spec[:, :, : audio.size(1)]
# logging.debug(f"spec: {spec.shape}. n_group: {self.n_group}")
spec = split_view(spec, self.n_group, 2).permute(0, 2, 1, 3)
spec = spec.contiguous().view(spec.size(0), spec.size(1), -1)
spec = spec.permute(0, 2, 1)
audio = split_view(audio, self.n_group, 1).permute(0, 2, 1)
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:, : self.n_early_size, :])
audio = audio[:, self.n_early_size :, :]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.wavenet[k]((audio_0, spec))
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s) * audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1], 1)
output_audio.append(audio)
return torch.cat(output_audio, 1), log_s_list, log_det_W_list
def norm_dist_to_audio(self, *, spec, z=None, sigma: float = 1.0):
spec = self.upsample(spec)
spec = spec.contiguous().view(spec.size(0), spec.size(1), -1)
# trim conv artifacts. maybe pad spec to kernel multiple
if self.time_cutoff != 0:
spec = spec[:, :, : self.time_cutoff]
spec = split_view(spec, self.n_group, 2).permute(0, 2, 1, 3)
spec = spec.contiguous().view(spec.size(0), spec.size(1), -1)
spec = spec.permute(0, 2, 1)
z_size = torch.Size([spec.size(0), self.n_group, spec.size(2)])
if z is None:
z = sigma * torch.randn(z_size, device=spec.device).to(spec.dtype)
audio, z = torch.split(z, [self.n_remaining_channels, z.size(1) - self.n_remaining_channels], 1)
for k in reversed(range(self.n_flows)):
n_half = self.n_halves[k]
audio_0, audio_1 = torch.split(audio, [n_half, audio.size(1) - n_half], 1)
output = self.wavenet[k]((audio_0, spec))
b, s = torch.split(output, [n_half, output.size(1) - n_half], 1)
audio_1 = audio_1 - b
audio_1 = audio_1 / torch.exp(s)
audio = torch.cat((audio_0, audio_1), 1)
audio = self.convinv[k](audio, reverse=True)
if k % self.n_early_every == 0 and k > 0:
z1, z = torch.split(z, [self.n_early_size, z.size(1) - self.n_early_size], 1)
audio = torch.cat((z1, audio), 1)
return audio.permute(0, 2, 1).contiguous().view(audio.size(0), -1)
def remove_weightnorm(self):
if self.removed_weightnorm:
return
for wavenet in self.wavenet:
wavenet.start = torch.nn.utils.remove_weight_norm(wavenet.start)
wavenet.in_layers = remove(wavenet.in_layers)
wavenet.cond_layer = torch.nn.utils.remove_weight_norm(wavenet.cond_layer)
wavenet.res_skip_layers = remove(wavenet.res_skip_layers)
self.removed_weightnorm = True
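# --- Editor's illustrative sketch (not part of the original NeMo source): inference-mode
# usage of WaveGlowModule. In the default OperationMode.infer, calling the module with a
# mel spectrogram samples a latent z and runs the inverse flow, returning audio of length
# T_mel * 256 after trimming the transposed-conv artifacts. The configuration below is
# arbitrary and much smaller than a real WaveGlow.
if __name__ == "__main__":
    _model = WaveGlowModule(
        n_mel_channels=80,
        n_flows=4,
        n_group=8,
        n_early_every=2,
        n_early_size=2,
        n_wn_channels=64,
        n_wn_layers=2,
        wn_kernel_size=3,
    ).eval()
    _mel = torch.randn(1, 80, 64)
    with torch.no_grad():
        _audio = _model(spec=_mel)  # typechecked forward: pass inputs by keyword
    print(_audio.shape)  # torch.Size([1, 16384])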
| NeMo-main | nemo/collections/tts/modules/waveglow.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Phil Wang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following is largely based on code from https://github.com/lucidrains/stylegan2-pytorch
import math
from functools import partial
from math import log2
from typing import List
import torch
import torch.nn.functional as F
from einops import rearrange
from kornia.filters import filter2d
from nemo.collections.tts.parts.utils.helpers import mask_sequence_tensor
class Blur(torch.nn.Module):
def __init__(self):
super().__init__()
f = torch.Tensor([1, 2, 1])
self.register_buffer("f", f)
def forward(self, x):
f = self.f
f = f[None, None, :] * f[None, :, None]
return filter2d(x, f, normalized=True)
class EqualLinear(torch.nn.Module):
def __init__(self, in_dim, out_dim, lr_mul=1, bias=True):
super().__init__()
self.weight = torch.nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = torch.nn.Parameter(torch.zeros(out_dim))
self.lr_mul = lr_mul
def forward(self, input):
return F.linear(input, self.weight * self.lr_mul, bias=self.bias * self.lr_mul)
class StyleMapping(torch.nn.Module):
def __init__(self, emb, depth, lr_mul=0.1):
super().__init__()
layers = []
for _ in range(depth):
layers.extend([EqualLinear(emb, emb, lr_mul), torch.nn.LeakyReLU(0.2, inplace=True)])
self.net = torch.nn.Sequential(*layers)
def forward(self, x):
x = F.normalize(x, dim=1)
return self.net(x)
class RGBBlock(torch.nn.Module):
def __init__(self, latent_dim, input_channel, upsample, channels=3):
super().__init__()
self.input_channel = input_channel
self.to_style = torch.nn.Linear(latent_dim, input_channel)
out_filters = channels
self.conv = Conv2DModulated(input_channel, out_filters, 1, demod=False)
self.upsample = (
torch.nn.Sequential(torch.nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False), Blur(),)
if upsample
else None
)
def forward(self, x, prev_rgb, istyle):
style = self.to_style(istyle)
x = self.conv(x, style)
if prev_rgb is not None:
x = x + prev_rgb
if self.upsample is not None:
x = self.upsample(x)
return x
class Conv2DModulated(torch.nn.Module):
"""
Modulated convolution.
For details refer to [1]
[1] Karras et. al. - Analyzing and Improving the Image Quality of StyleGAN (https://arxiv.org/abs/1912.04958)
"""
def __init__(
self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, eps=1e-8, **kwargs,
):
super().__init__()
self.filters = out_chan
self.demod = demod
self.kernel = kernel
self.stride = stride
self.dilation = dilation
self.weight = torch.nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel)))
self.eps = eps
torch.nn.init.kaiming_normal_(self.weight, a=0, mode="fan_in", nonlinearity="leaky_relu")
def _get_same_padding(self, size, kernel, dilation, stride):
return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2
def forward(self, x, y):
b, c, h, w = x.shape
w1 = y[:, None, :, None, None]
w2 = self.weight[None, :, :, :, :]
weights = w2 * (w1 + 1)
if self.demod:
d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps)
weights = weights * d
x = x.reshape(1, -1, h, w)
_, _, *ws = weights.shape
weights = weights.reshape(b * self.filters, *ws)
padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride)
x = F.conv2d(x, weights, padding=padding, groups=b)
x = x.reshape(-1, self.filters, h, w)
return x
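# --- Editor's illustrative sketch (not part of the original source): Conv2DModulated
# scales the shared kernel per sample with a style vector (weight * (style + 1)),
# optionally demodulates it to unit norm, and applies it as a grouped convolution so each
# batch element gets its own weights while spatial size is preserved. Sizes are arbitrary.
if __name__ == "__main__":
    _conv = Conv2DModulated(in_chan=8, out_chan=16, kernel=3)
    _x = torch.randn(2, 8, 32, 32)
    _style = torch.randn(2, 8)  # one modulation coefficient per input channel
    _out = _conv(_x, _style)
    print(_out.shape)  # torch.Size([2, 16, 32, 32])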
class GeneratorBlock(torch.nn.Module):
def __init__(
self, latent_dim, input_channels, filters, upsample=True, upsample_rgb=True, channels=1,
):
super().__init__()
self.upsample = torch.nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) if upsample else None
self.to_style1 = torch.nn.Linear(latent_dim, input_channels)
self.to_noise1 = torch.nn.Linear(1, filters)
self.conv1 = Conv2DModulated(input_channels, filters, 3)
self.to_style2 = torch.nn.Linear(latent_dim, filters)
self.to_noise2 = torch.nn.Linear(1, filters)
self.conv2 = Conv2DModulated(filters, filters, 3)
self.activation = torch.nn.LeakyReLU(0.2, inplace=True)
self.to_rgb = RGBBlock(latent_dim, filters, upsample_rgb, channels)
def forward(self, x, prev_rgb, istyle, inoise):
if self.upsample is not None:
x = self.upsample(x)
inoise = inoise[:, : x.shape[2], : x.shape[3], :]
noise1 = self.to_noise1(inoise).permute((0, 3, 1, 2))
noise2 = self.to_noise2(inoise).permute((0, 3, 1, 2))
style1 = self.to_style1(istyle)
x = self.conv1(x, style1)
x = self.activation(x + noise1)
style2 = self.to_style2(istyle)
x = self.conv2(x, style2)
x = self.activation(x + noise2)
rgb = self.to_rgb(x, prev_rgb, istyle)
return x, rgb
class DiscriminatorBlock(torch.nn.Module):
def __init__(self, input_channels, filters, downsample=True):
super().__init__()
self.conv_res = torch.nn.Conv2d(input_channels, filters, 1, stride=(2 if downsample else 1))
self.net = torch.nn.Sequential(
torch.nn.Conv2d(input_channels, filters, 3, padding=1),
torch.nn.LeakyReLU(0.2, inplace=True),
torch.nn.Conv2d(filters, filters, 3, padding=1),
torch.nn.LeakyReLU(0.2, inplace=True),
)
self.downsample = (
torch.nn.Sequential(Blur(), torch.nn.Conv2d(filters, filters, 3, padding=1, stride=2))
if downsample
else None
)
def forward(self, x):
res = self.conv_res(x)
x = self.net(x)
if self.downsample is not None:
x = self.downsample(x)
x = (x + res) * (1 / math.sqrt(2))
return x
class Generator(torch.nn.Module):
def __init__(
self, n_bands, latent_dim, style_depth, network_capacity=16, channels=1, fmap_max=512, start_from_zero=True
):
super().__init__()
self.image_size = n_bands
self.latent_dim = latent_dim
self.num_layers = int(log2(n_bands) - 1)
self.style_depth = style_depth
self.style_mapping = StyleMapping(self.latent_dim, self.style_depth, lr_mul=0.1)
filters = [network_capacity * (2 ** (i + 1)) for i in range(self.num_layers)][::-1]
set_fmap_max = partial(min, fmap_max)
filters = list(map(set_fmap_max, filters))
init_channels = filters[0]
filters = [init_channels, *filters]
in_out_pairs = zip(filters[:-1], filters[1:])
self.initial_conv = torch.nn.Conv2d(filters[0], filters[0], 3, padding=1)
self.blocks = torch.nn.ModuleList([])
for ind, (in_chan, out_chan) in enumerate(in_out_pairs):
not_first = ind != 0
not_last = ind != (self.num_layers - 1)
block = GeneratorBlock(
latent_dim, in_chan, out_chan, upsample=not_first, upsample_rgb=not_last, channels=channels,
)
self.blocks.append(block)
for m in self.modules():
if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
torch.nn.init.kaiming_normal_(m.weight, a=0, mode="fan_in", nonlinearity="leaky_relu")
for block in self.blocks:
torch.nn.init.zeros_(block.to_noise1.weight)
torch.nn.init.zeros_(block.to_noise1.bias)
torch.nn.init.zeros_(block.to_noise2.weight)
torch.nn.init.zeros_(block.to_noise2.bias)
        initial_block_size = (n_bands // self.upsample_factor, 1)
self.initial_block = torch.nn.Parameter(
torch.randn((1, init_channels, *initial_block_size)), requires_grad=False
)
if start_from_zero:
self.initial_block.data.zero_()
def add_scaled_condition(self, target: torch.Tensor, condition: torch.Tensor, condition_lengths: torch.Tensor):
*_, target_height, _ = target.shape
*_, height, _ = condition.shape
scale = height // target_height
# scale appropriately
condition = F.interpolate(condition, size=target.shape[-2:], mode="bilinear")
# add and mask
result = (target + condition) / 2
result = mask_sequence_tensor(result, (condition_lengths / scale).ceil().long())
return result
@property
def upsample_factor(self):
return 2 ** sum(1 for block in self.blocks if block.upsample)
def forward(self, condition: torch.Tensor, lengths: torch.Tensor, ws: List[torch.Tensor], noise: torch.Tensor):
batch_size, _, _, max_length = condition.shape
x = self.initial_block.expand(batch_size, -1, -1, max_length // self.upsample_factor)
rgb = None
x = self.initial_conv(x)
for style, block in zip(ws, self.blocks):
x, rgb = block(x, rgb, style, noise)
x = self.add_scaled_condition(x, condition, lengths)
rgb = self.add_scaled_condition(rgb, condition, lengths)
return rgb
class Discriminator(torch.nn.Module):
def __init__(
self, n_bands, network_capacity=16, channels=1, fmap_max=512,
):
super().__init__()
num_layers = int(log2(n_bands) - 1)
num_init_filters = channels
blocks = []
filters = [num_init_filters] + [(network_capacity * 4) * (2 ** i) for i in range(num_layers + 1)]
set_fmap_max = partial(min, fmap_max)
filters = list(map(set_fmap_max, filters))
chan_in_out = list(zip(filters[:-1], filters[1:]))
blocks = []
for ind, (in_chan, out_chan) in enumerate(chan_in_out):
is_not_last = ind != (len(chan_in_out) - 1)
block = DiscriminatorBlock(in_chan, out_chan, downsample=is_not_last)
blocks.append(block)
self.blocks = torch.nn.ModuleList(blocks)
channel_last = filters[-1]
latent_dim = channel_last
self.final_conv = torch.nn.Conv2d(channel_last, channel_last, 3, padding=1)
self.to_logit = torch.nn.Linear(latent_dim, 1)
for m in self.modules():
if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
torch.nn.init.kaiming_normal_(m.weight, a=0, mode="fan_in", nonlinearity="leaky_relu")
def forward(self, x, condition: torch.Tensor, lengths: torch.Tensor):
for block in self.blocks:
x = block(x)
scale = condition.shape[-1] // x.shape[-1]
x = mask_sequence_tensor(x, (lengths / scale).ceil().long())
x = self.final_conv(x)
scale = condition.shape[-1] // x.shape[-1]
x = mask_sequence_tensor(x, (lengths / scale).ceil().long())
x = x.mean(axis=-2)
x = (x / rearrange(lengths / scale, "b -> b 1 1")).sum(axis=-1)
x = self.to_logit(x)
return x.squeeze()
| NeMo-main | nemo/collections/tts/modules/spectrogram_enhancer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# ResBlock1, ResBlock2, Generator, DiscriminatorP, DiscriminatorS, MultiScaleDiscriminator,
# MultiPeriodDiscriminator, init_weights, get_padding
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from nemo.core.classes.common import typecheck
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types.elements import AudioSignal, MelSpectrogramType, VoidType
from nemo.core.neural_types.neural_type import NeuralType
LRELU_SLOPE = 0.1
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def get_padding(kernel_size, dilation=1):
return int((kernel_size * dilation - dilation) / 2)
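# --- Editor's note (not part of the original source): get_padding implements "same"
# padding for stride-1 dilated convolutions, padding = dilation * (kernel_size - 1) / 2,
# so the output length matches the input length.
if __name__ == "__main__":
    assert get_padding(3, 1) == 1
    assert get_padding(3, 3) == 3
    assert get_padding(11, 1) == 5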
class ResBlock1(torch.nn.Module):
__constants__ = ['lrelu_slope']
def __init__(self, channels, kernel_size, dilation):
super().__init__()
self.lrelu_slope = LRELU_SLOPE
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2]),
)
),
]
)
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList(
[
weight_norm(
Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))
),
weight_norm(
Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))
),
weight_norm(
Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))
),
]
)
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c1(xt)
xt = F.leaky_relu(xt, self.lrelu_slope)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
__constants__ = ['lrelu_slope']
def __init__(self, channels, kernel_size, dilation):
super().__init__()
self.convs = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
]
)
self.convs.apply(init_weights)
self.lrelu_slope = LRELU_SLOPE
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(NeuralModule):
__constants__ = ['lrelu_slope', 'num_kernels', 'num_upsamples']
def __init__(
self,
resblock,
upsample_rates,
upsample_kernel_sizes,
upsample_initial_channel,
resblock_kernel_sizes,
resblock_dilation_sizes,
initial_input_size=80,
apply_weight_init_conv_pre=False,
):
super().__init__()
self.num_kernels = len(resblock_kernel_sizes)
self.num_upsamples = len(upsample_rates)
self.conv_pre = weight_norm(Conv1d(initial_input_size, upsample_initial_channel, 7, 1, padding=3))
self.lrelu_slope = LRELU_SLOPE
resblock = ResBlock1 if resblock == 1 else ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
self.ups.append(
weight_norm(
ConvTranspose1d(
upsample_initial_channel // (2 ** i),
upsample_initial_channel // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
resblock_list = nn.ModuleList()
ch = upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
resblock_list.append(resblock(ch, k, d))
self.resblocks.append(resblock_list)
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
if apply_weight_init_conv_pre:
self.conv_pre.apply(init_weights)
@property
def input_types(self):
return {
"x": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
}
@property
def output_types(self):
return {
"audio": NeuralType(('B', 'S', 'T'), AudioSignal()),
}
@typecheck()
def forward(self, x):
x = self.conv_pre(x)
for upsample_layer, resblock_group in zip(self.ups, self.resblocks):
x = F.leaky_relu(x, self.lrelu_slope)
x = upsample_layer(x)
xs = torch.zeros(x.shape, dtype=x.dtype, device=x.device)
for resblock in resblock_group:
xs += resblock(x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for group in self.resblocks:
for block in group:
block.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
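# --- Editor's illustrative sketch (not part of the original source): the HiFi-GAN
# generator upsamples a mel spectrogram by the product of `upsample_rates` (256 here), so
# a (B, 80, T) input yields (B, 1, 256 * T) audio. The hyperparameters below are
# arbitrary (smaller than the published HiFi-GAN V1 config) and only illustrate shapes.
if __name__ == "__main__":
    _gen = Generator(
        resblock=1,
        upsample_rates=[8, 8, 2, 2],
        upsample_kernel_sizes=[16, 16, 4, 4],
        upsample_initial_channel=128,
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        initial_input_size=80,
    )
    _mel = torch.randn(1, 80, 16)
    _audio = _gen(x=_mel)  # typechecked forward: pass the input by keyword
    print(_audio.shape)  # torch.Size([1, 1, 4096])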
class DiscriminatorP(NeuralModule):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False, debug=False):
super().__init__()
self.period = period
        norm_f = spectral_norm if use_spectral_norm else weight_norm
conv_ch = [32, 128, 512, 1024] if not debug else [8, 12, 32, 64]
self.convs = nn.ModuleList(
[
norm_f(Conv2d(1, conv_ch[0], (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(conv_ch[0], conv_ch[1], (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(conv_ch[1], conv_ch[2], (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(conv_ch[2], conv_ch[3], (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(conv_ch[3], conv_ch[3], (kernel_size, 1), 1, padding=(2, 0))),
]
)
self.conv_post = norm_f(Conv2d(conv_ch[3], 1, (3, 1), 1, padding=(1, 0)))
@property
def input_types(self):
return {
"x": NeuralType(('B', 'S', 'T'), AudioSignal()),
}
@property
def output_types(self):
return {
"decision": NeuralType(('B', 'T'), VoidType()),
"feature_maps": [NeuralType(("B", "C", "H", "W"), VoidType())],
}
@typecheck()
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiPeriodDiscriminator(NeuralModule):
def __init__(self, debug=False):
super().__init__()
self.discriminators = nn.ModuleList(
[
DiscriminatorP(2, debug=debug),
DiscriminatorP(3, debug=debug),
DiscriminatorP(5, debug=debug),
DiscriminatorP(7, debug=debug),
DiscriminatorP(11, debug=debug),
]
)
@property
def input_types(self):
return {
"y": NeuralType(('B', 'S', 'T'), AudioSignal()),
"y_hat": NeuralType(('B', 'S', 'T'), AudioSignal()),
}
@property
def output_types(self):
return {
"real_scores": [NeuralType(('B', 'T'), VoidType())],
"fake_scores": [NeuralType(('B', 'T'), VoidType())],
"real_feature_maps": [[NeuralType(("B", "C", "H", "W"), VoidType())]],
"fake_feature_maps": [[NeuralType(("B", "C", "H", "W"), VoidType())]],
}
@typecheck()
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(x=y)
y_d_g, fmap_g = d(x=y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(NeuralModule):
def __init__(self, use_spectral_norm=False, debug=False):
super().__init__()
        norm_f = spectral_norm if use_spectral_norm else weight_norm
conv_ch = [128, 256, 512, 1024] if not debug else [16, 32, 32, 64]
self.convs = nn.ModuleList(
[
norm_f(Conv1d(1, conv_ch[0], 15, 1, padding=7)),
norm_f(Conv1d(conv_ch[0], conv_ch[0], 41, 2, groups=4, padding=20)),
norm_f(Conv1d(conv_ch[0], conv_ch[1], 41, 2, groups=16, padding=20)),
norm_f(Conv1d(conv_ch[1], conv_ch[2], 41, 4, groups=16, padding=20)),
norm_f(Conv1d(conv_ch[2], conv_ch[3], 41, 4, groups=16, padding=20)),
norm_f(Conv1d(conv_ch[3], conv_ch[3], 41, 1, groups=16, padding=20)),
norm_f(Conv1d(conv_ch[3], conv_ch[3], 5, 1, padding=2)),
]
)
self.conv_post = norm_f(Conv1d(conv_ch[3], 1, 3, 1, padding=1))
@property
def input_types(self):
return {
"x": NeuralType(('B', 'S', 'T'), AudioSignal()),
}
@property
def output_types(self):
return {
"decision": NeuralType(('B', 'T'), VoidType()),
"feature_maps": [NeuralType(("B", "C", "T"), VoidType())],
}
@typecheck()
def forward(self, x):
fmap = []
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(NeuralModule):
def __init__(self, debug=False):
super().__init__()
self.discriminators = nn.ModuleList(
[
DiscriminatorS(use_spectral_norm=True, debug=debug),
DiscriminatorS(debug=debug),
DiscriminatorS(debug=debug),
]
)
self.meanpools = nn.ModuleList([AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2)])
@property
def input_types(self):
return {
"y": NeuralType(('B', 'S', 'T'), AudioSignal()),
"y_hat": NeuralType(('B', 'S', 'T'), AudioSignal()),
}
@property
def output_types(self):
return {
"real_scores": [NeuralType(('B', 'T'), VoidType())],
"fake_scores": [NeuralType(('B', 'T'), VoidType())],
"real_feature_maps": [[NeuralType(("B", "C", "T"), VoidType())]],
"fake_feature_maps": [[NeuralType(("B", "C", "T"), VoidType())]],
}
@typecheck()
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if i != 0:
y = self.meanpools[i - 1](y)
y_hat = self.meanpools[i - 1](y_hat)
y_d_r, fmap_r = d(x=y)
y_d_g, fmap_g = d(x=y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
| NeMo-main | nemo/collections/tts/modules/hifigan_modules.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, Optional, Tuple
import torch
import torch.nn as nn
from einops import rearrange
from nemo.collections.tts.parts.utils.helpers import mask_sequence_tensor
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types.elements import AudioSignal, EncodedRepresentation, LengthsType, VoidType
from nemo.core.neural_types.neural_type import NeuralType
def get_padding(kernel_size: int, dilation: int = 1) -> int:
return (kernel_size * dilation - dilation) // 2
def get_padding_2d(kernel_size: Tuple[int, int], dilation: Tuple[int, int]) -> Tuple[int, int]:
paddings = (get_padding(kernel_size[0], dilation[0]), get_padding(kernel_size[1], dilation[1]))
return paddings
def get_down_sample_padding(kernel_size: int, stride: int) -> int:
return (kernel_size - stride + 1) // 2
def get_up_sample_padding(kernel_size: int, stride: int) -> Tuple[int, int]:
output_padding = (kernel_size - stride) % 2
padding = (kernel_size - stride + 1) // 2
return padding, output_padding
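# --- Editor's note (not part of the original source): for nn.ConvTranspose1d,
# L_out = (L_in - 1) * stride - 2 * padding + kernel_size + output_padding, so the values
# returned by get_up_sample_padding make the layer upsample by exactly `stride`.
if __name__ == "__main__":
    _padding, _output_padding = get_up_sample_padding(kernel_size=16, stride=8)
    assert (_padding, _output_padding) == (4, 0)
    assert (10 - 1) * 8 - 2 * _padding + 16 + _output_padding == 8 * 10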
class Conv1dNorm(NeuralModule):
def __init__(
self, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, padding: Optional[int] = None
):
super().__init__()
if not padding:
padding = get_padding(kernel_size)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
padding_mode="reflect",
)
self.conv = nn.utils.weight_norm(conv)
@property
def input_types(self):
return {
"inputs": NeuralType(('B', 'C', 'T'), VoidType()),
"lengths": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {
"out": [NeuralType(('B', 'C', 'T'), VoidType())],
}
def remove_weight_norm(self):
nn.utils.remove_weight_norm(self.conv)
def forward(self, inputs, lengths):
out = self.conv(inputs)
out = mask_sequence_tensor(out, lengths)
return out
class ConvTranspose1dNorm(NeuralModule):
def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1):
super().__init__()
padding, output_padding = get_up_sample_padding(kernel_size, stride)
conv = nn.ConvTranspose1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding,
padding_mode="zeros",
)
self.conv = nn.utils.weight_norm(conv)
@property
def input_types(self):
return {
"inputs": NeuralType(('B', 'C', 'T'), VoidType()),
"lengths": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self):
return {
"out": [NeuralType(('B', 'C', 'T'), VoidType())],
}
def remove_weight_norm(self):
nn.utils.remove_weight_norm(self.conv)
def forward(self, inputs, lengths):
out = self.conv(inputs)
out = mask_sequence_tensor(out, lengths)
return out
class Conv2dNorm(NeuralModule):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Tuple[int, int],
stride: Tuple[int, int] = (1, 1),
dilation: Tuple[int, int] = (1, 1),
):
super().__init__()
assert len(kernel_size) == len(dilation)
padding = get_padding_2d(kernel_size, dilation)
conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding,
padding_mode="reflect",
)
self.conv = nn.utils.weight_norm(conv)
@property
def input_types(self):
return {
"inputs": NeuralType(('B', 'C', 'H', 'T'), VoidType()),
}
@property
def output_types(self):
return {
"out": [NeuralType(('B', 'C', 'H', 'T'), VoidType())],
}
def remove_weight_norm(self):
nn.utils.remove_weight_norm(self.conv)
def forward(self, inputs):
return self.conv(inputs)
| NeMo-main | nemo/collections/tts/modules/audio_codec_modules.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2021 Jaehyeon Kim
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from nemo.collections.tts.modules.hifigan_modules import ResBlock1, ResBlock2, get_padding, init_weights
from nemo.collections.tts.modules.monotonic_align import maximum_path
from nemo.collections.tts.parts.utils.helpers import (
convert_pad_shape,
generate_path,
get_mask_from_lengths,
rand_slice_segments,
)
from nemo.collections.tts.parts.utils.splines import piecewise_rational_quadratic_transform
LRELU_SLOPE = 0.1
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
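# --- Editor's illustrative sketch (not part of the original source): the fused op above
# is the WaveNet-style gated activation, tanh of the first n_channels channels multiplied
# by sigmoid of the remaining channels of (input_a + input_b). n_channels is passed as a
# one-element IntTensor because the function is TorchScript-compiled. Sizes are arbitrary.
if __name__ == "__main__":
    _hidden = 192
    _a = torch.randn(2, 2 * _hidden, 50)
    _b = torch.randn(2, 2 * _hidden, 50)
    _acts = fused_add_tanh_sigmoid_multiply(_a, _b, torch.IntTensor([_hidden]))
    print(_acts.shape)  # torch.Size([2, 192, 50])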
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 1."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
for _ in range(n_layers - 1):
self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
Dilated and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size ** i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(
nn.Conv1d(channels, channels, kernel_size, groups=channels, dilation=dilation, padding=padding)
)
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
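# Illustrative usage sketch (not from the original NeMo source; all tensor sizes
# below are arbitrary): DDSConv keeps the [batch, channels, time] layout while the
# dilation grows as kernel_size ** i, so the receptive field expands without
# changing the output shape.
def _example_dds_conv_shapes():
    conv = DDSConv(channels=64, kernel_size=3, n_layers=4, p_dropout=0.1)
    x = torch.randn(2, 64, 100)        # [b, c, t]
    x_mask = torch.ones(2, 1, 100)     # all frames valid
    y = conv(x, x_mask)
    assert y.shape == x.shape          # shape is preserved
    return y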
class WN(torch.nn.Module):
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = dilation_rate ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding
)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:, : self.hidden_channels, :]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:, self.hidden_channels :, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
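# Illustrative usage sketch (not from the original NeMo source; sizes are arbitrary):
# WN is the WaveNet-style core shared by the posterior encoder and the coupling
# layers. When gin_channels > 0, a single 1x1 conv projects the conditioning vector
# g, and a per-layer slice of it is added inside every gated unit.
def _example_wn_conditioning():
    wn = WN(hidden_channels=192, kernel_size=5, dilation_rate=1, n_layers=4, gin_channels=256)
    x = torch.randn(2, 192, 50)
    x_mask = torch.ones(2, 1, 50)
    g = torch.randn(2, 256, 1)         # one conditioning vector per utterance
    out = wn(x, x_mask, g=g)
    assert out.shape == (2, 192, 50)
    return out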
class Log(nn.Module):
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
logdet = torch.sum(-y, [1, 2])
return y, logdet
else:
x = torch.exp(x) * x_mask
return x
class Flip(nn.Module):
def forward(self, x, *args, reverse=False, **kwargs):
x = torch.flip(x, [1])
if not reverse:
logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
return x, logdet
else:
return x
class ElementwiseAffine(nn.Module):
def __init__(self, channels):
super().__init__()
self.channels = channels
self.m = nn.Parameter(torch.zeros(channels, 1))
self.logs = nn.Parameter(torch.zeros(channels, 1))
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = self.m + torch.exp(self.logs) * x
y = y * x_mask
logdet = torch.sum(self.logs * x_mask, [1, 2])
return y, logdet
else:
x = (x - self.m) * torch.exp(-self.logs) * x_mask
return x
class ResidualCouplingLayer(nn.Module):
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False,
):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.enc = WN(
hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels
)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
h = self.pre(x0) * x_mask
h = self.enc(h, x_mask, g=g)
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1, 2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
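# Illustrative usage sketch (not from the original NeMo source; sizes are arbitrary):
# the affine coupling layer is invertible, so applying it forward and then with
# reverse=True recovers the input up to floating-point error.
def _example_coupling_roundtrip():
    layer = ResidualCouplingLayer(
        channels=192, hidden_channels=192, kernel_size=5, dilation_rate=1, n_layers=4, mean_only=True
    )
    x = torch.randn(2, 192, 40)
    x_mask = torch.ones(2, 1, 40)
    y, _logdet = layer(x, x_mask)
    x_rec = layer(y, x_mask, reverse=True)
    assert torch.allclose(x, x_rec, atol=1e-4)
    return x_rec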
class ConvFlow(nn.Module):
def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.num_bins = num_bins
self.tail_bound = tail_bound
self.half_channels = in_channels // 2
self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
h = self.pre(x0)
h = self.convs(h, x_mask, g=g)
h = self.proj(h) * x_mask
b, c, t = x0.shape
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c * ?, t] -> [b, c, t, ?]
unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_derivatives = h[..., 2 * self.num_bins :]
x1, logabsdet = piecewise_rational_quadratic_transform(
x1,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=reverse,
tails='linear',
tail_bound=self.tail_bound,
)
x = torch.cat([x0, x1], 1) * x_mask
logdet = torch.sum(logabsdet * x_mask, [1, 2])
if not reverse:
return x, logdet
else:
return x
class StochasticDurationPredictor(nn.Module):
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
super().__init__()
        filter_channels = in_channels  # TODO: this override needs to be removed in a future version.
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.n_flows = n_flows
self.gin_channels = gin_channels
self.log_flow = Log()
self.flows = nn.ModuleList()
self.flows.append(ElementwiseAffine(2))
for i in range(n_flows):
self.flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3))
self.flows.append(Flip())
self.post_pre = nn.Conv1d(1, filter_channels, 1)
self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
self.post_convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
self.post_flows = nn.ModuleList()
self.post_flows.append(ElementwiseAffine(2))
for i in range(4):
self.post_flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3))
self.post_flows.append(Flip())
self.pre = nn.Conv1d(in_channels, filter_channels, 1)
self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
self.convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
if gin_channels != 0:
self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
x = torch.detach(x)
x = self.pre(x)
if g is not None:
g = torch.detach(g)
x = x + self.cond(g)
x = self.convs(x, x_mask)
x = self.proj(x) * x_mask
# torch.manual_seed(1)
# torch.cuda.manual_seed(1)
if not reverse:
flows = self.flows
assert w is not None
logdet_tot_q = 0
h_w = self.post_pre(w)
h_w = self.post_convs(h_w, x_mask)
h_w = self.post_proj(h_w) * x_mask
e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
z_q = e_q
for flow in self.post_flows:
z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
logdet_tot_q += logdet_q
z_u, z1 = torch.split(z_q, [1, 1], 1)
u = torch.sigmoid(z_u) * x_mask
z0 = (w - u) * x_mask
logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q
logdet_tot = 0
z0, logdet = self.log_flow(z0, x_mask)
logdet_tot += logdet
z = torch.cat([z0, z1], 1)
for flow in flows:
z, logdet = flow(z, x_mask, g=x, reverse=reverse)
logdet_tot = logdet_tot + logdet
nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
return nll + logq # [b]
else:
flows = list(reversed(self.flows))
flows = flows[:-2] + [flows[-1]] # remove a useless vflow
z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
for flow in flows:
z = flow(z, x_mask, g=x, reverse=reverse)
z0, z1 = torch.split(z, [1, 1], 1)
logw = z0
return logw
class DurationPredictor(nn.Module):
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.gin_channels = gin_channels
self.drop = nn.Dropout(p_dropout)
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
self.norm_1 = LayerNorm(filter_channels)
self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
self.norm_2 = LayerNorm(filter_channels)
self.proj = nn.Conv1d(filter_channels, 1, 1)
if gin_channels != 0:
self.cond = nn.Conv1d(gin_channels, in_channels, 1)
def forward(self, x, x_mask, g=None):
x = torch.detach(x)
if g is not None:
g = torch.detach(g)
x = x + self.cond(g)
x = self.conv_1(x * x_mask)
x = torch.relu(x)
x = self.norm_1(x)
x = self.drop(x)
x = self.conv_2(x * x_mask)
x = torch.relu(x)
x = self.norm_2(x)
x = self.drop(x)
x = self.proj(x * x_mask)
return x * x_mask
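# Illustrative usage sketch (not from the original NeMo source; sizes are arbitrary):
# the deterministic DurationPredictor regresses log-durations per input token;
# frame counts are recovered with exp(), as done in SynthesizerTrn.infer.
def _example_duration_predictor():
    dp = DurationPredictor(in_channels=192, filter_channels=256, kernel_size=3, p_dropout=0.5)
    x = torch.randn(2, 192, 30)        # text encoder output [b, h, t_text]
    x_mask = torch.ones(2, 1, 30)
    logw = dp(x, x_mask)               # [b, 1, t_text]
    w = torch.exp(logw) * x_mask       # predicted durations in frames
    return w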
class TextEncoder(nn.Module):
def __init__(
self,
n_vocab,
out_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
padding_idx,
):
super().__init__()
self.n_vocab = n_vocab
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.emb = nn.Embedding(n_vocab, hidden_channels, padding_idx=padding_idx)
nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)
self.encoder = AttentionEncoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, x, x_lengths):
x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
x = torch.transpose(x, 1, -1) # [b, h, t]
x_mask = torch.unsqueeze(get_mask_from_lengths(x_lengths, x), 1).to(x.dtype)
x = self.encoder(x * x_mask, x_mask)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
return x, m, logs, x_mask
class ResidualCouplingBlock(nn.Module):
def __init__(self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, n_flows=4, gin_channels=0):
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.n_flows = n_flows
self.gin_channels = gin_channels
self.flows = nn.ModuleList()
for i in range(n_flows):
self.flows.append(
ResidualCouplingLayer(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(Flip())
def forward(self, x, x_mask, g=None, reverse=False):
if not reverse:
for flow in self.flows:
x, _ = flow(x, x_mask, g=g, reverse=reverse)
else:
for flow in reversed(self.flows):
x = flow(x, x_mask, g=g, reverse=reverse)
return x
class PosteriorEncoder(nn.Module):
def __init__(
self, in_channels, out_channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, x, x_lengths, g=None):
x_mask = torch.unsqueeze(get_mask_from_lengths(x_lengths, x), 1).to(x.dtype).to(device=x.device)
x = self.pre(x) * x_mask
x = self.enc(x, x_mask, g=g)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
return z, m, logs, x_mask
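# Illustrative usage sketch (not from the original NeMo source; sizes are arbitrary):
# the posterior encoder maps a linear spectrogram to a latent z with the
# reparameterization trick (z = m + eps * exp(logs)), masked to the true
# per-utterance frame lengths.
def _example_posterior_encoder():
    enc_q = PosteriorEncoder(
        in_channels=513, out_channels=192, hidden_channels=192, kernel_size=5, dilation_rate=1, n_layers=16
    )
    spec = torch.randn(2, 513, 80)     # [b, spec_channels, t_frames]
    spec_len = torch.tensor([80, 64])
    z, m, logs, mask = enc_q(spec, spec_len)
    assert z.shape == (2, 192, 80)
    return z, mask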
class Generator(torch.nn.Module):
def __init__(
self,
initial_channel,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=0,
):
super(Generator, self).__init__()
self.num_kernels = len(resblock_kernel_sizes)
self.num_upsamples = len(upsample_rates)
self.conv_pre = nn.Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
resblock = ResBlock1 if resblock == '1' else ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
self.ups.append(
weight_norm(
nn.ConvTranspose1d(
upsample_initial_channel // (2 ** i),
upsample_initial_channel // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
self.resblocks.append(resblock(ch, k, d))
self.conv_post = nn.Conv1d(ch, 1, 7, 1, padding=3, bias=False)
self.ups.apply(init_weights)
if gin_channels != 0:
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
def forward(self, x, g=None):
x = self.conv_pre(x)
if g is not None:
x = x + self.cond(g)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
xs = torch.zeros(x.shape, dtype=x.dtype, device=x.device)
for j in range(self.num_kernels):
xs += self.resblocks[i * self.num_kernels + j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
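# Illustrative usage sketch (not from the original NeMo source; the hyperparameters
# are example values): the HiFi-GAN style Generator upsamples the latent by the
# product of upsample_rates, here 8 * 8 * 2 * 2 = 256 samples per input frame.
def _example_generator_upsampling():
    gen = Generator(
        initial_channel=192,
        resblock='1',
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[8, 8, 2, 2],
        upsample_initial_channel=512,
        upsample_kernel_sizes=[16, 16, 4, 4],
    )
    z = torch.randn(1, 192, 20)
    audio = gen(z)                     # [1, 1, 20 * 256]
    assert audio.shape[-1] == 20 * 256
    return audio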
class DiscriminatorP(torch.nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.period = period
self.use_spectral_norm = use_spectral_norm
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList(
[
norm_f(nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(nn.Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(nn.Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(nn.Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(nn.Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
]
)
self.dropout = nn.Dropout(0.3)
self.conv_post = norm_f(nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = self.dropout(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class DiscriminatorS(torch.nn.Module):
def __init__(self, use_spectral_norm=False):
super(DiscriminatorS, self).__init__()
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList(
[
norm_f(nn.Conv1d(1, 16, 15, 1, padding=7)),
norm_f(nn.Conv1d(16, 64, 41, 4, groups=4, padding=20)),
norm_f(nn.Conv1d(64, 256, 41, 4, groups=16, padding=20)),
norm_f(nn.Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
norm_f(nn.Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
norm_f(nn.Conv1d(1024, 1024, 5, 1, padding=2)),
]
)
self.dropout = nn.Dropout(0.3)
self.conv_post = norm_f(nn.Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = []
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self, use_spectral_norm=False):
super(MultiPeriodDiscriminator, self).__init__()
periods = [2, 3, 5, 7, 11]
discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
self.discriminators = nn.ModuleList(discs)
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
y_d_gs.append(y_d_g)
fmap_rs.append(fmap_r)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
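# Illustrative usage sketch (not from the original NeMo source; sizes are arbitrary):
# the multi-period discriminator returns one score tensor and one feature-map list
# per sub-discriminator (1 scale + 5 periods = 6 of each) for both the real and the
# generated waveform.
def _example_discriminator_outputs():
    mpd = MultiPeriodDiscriminator()
    y = torch.randn(2, 1, 8192)        # real audio
    y_hat = torch.randn(2, 1, 8192)    # generated audio
    y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(y, y_hat)
    assert len(y_d_rs) == len(y_d_gs) == len(fmap_rs) == len(fmap_gs) == 6
    return y_d_rs, y_d_gs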
class SynthesizerTrn(nn.Module):
"""
Synthesizer for Training
"""
def __init__(
self,
n_vocab,
spec_channels,
segment_size,
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
padding_idx,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
n_speakers=0,
gin_channels=0,
use_sdp=True,
**kwargs
):
super().__init__()
self.n_vocab = n_vocab
self.spec_channels = spec_channels
self.inter_channels = inter_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.padding_idx = padding_idx
self.resblock = resblock
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.upsample_rates = upsample_rates
self.upsample_initial_channel = upsample_initial_channel
self.upsample_kernel_sizes = upsample_kernel_sizes
self.segment_size = segment_size
self.n_speakers = n_speakers
self.gin_channels = gin_channels
self.use_sdp = use_sdp
self.enc_p = TextEncoder(
n_vocab,
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
padding_idx,
)
self.dec = Generator(
inter_channels,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=gin_channels,
)
self.enc_q = PosteriorEncoder(
spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels
)
self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
if use_sdp:
self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
else:
self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
if n_speakers > 1:
self.emb_g = nn.Embedding(n_speakers, gin_channels)
def forward(self, text, text_len, spec, spec_len, speakers=None):
x, mean_prior, logscale_prior, text_mask = self.enc_p(text, text_len)
if self.n_speakers > 1:
g = self.emb_g(speakers).unsqueeze(-1) # [b, h, 1]
else:
g = None
z, mean_posterior, logscale_posterior, spec_mask = self.enc_q(spec, spec_len, g=g)
z_p = self.flow(z, spec_mask, g=g)
with torch.no_grad():
# negative cross-entropy
s_p_sq_r = torch.exp(-2 * logscale_prior) # [b, d, t]
neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logscale_prior, [1], keepdim=True) # [b, 1, t_s]
neg_cent2 = torch.matmul(
-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r
) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
neg_cent3 = torch.matmul(
z_p.transpose(1, 2), (mean_prior * s_p_sq_r)
) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
neg_cent4 = torch.sum(-0.5 * (mean_prior ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
attn_mask = torch.unsqueeze(text_mask, 2) * torch.unsqueeze(spec_mask, -1)
attn = maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
w = attn.sum(2)
if self.use_sdp:
l_length = self.dp(x, text_mask, w, g=g)
l_length = l_length / torch.sum(text_mask)
else:
logw_ = torch.log(w + 1e-6) * text_mask
logw = self.dp(x, text_mask, g=g)
l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(text_mask) # for averaging
# expand prior
mean_prior = torch.matmul(attn.squeeze(1), mean_prior.transpose(1, 2)).transpose(
1, 2
) # [b, t', t], [b, t, d] -> [b, d, t']
logscale_prior = torch.matmul(attn.squeeze(1), logscale_prior.transpose(1, 2)).transpose(
1, 2
) # [b, t', t], [b, t, d] -> [b, d, t']
z_slice, ids_slice = rand_slice_segments(z, spec_len, self.segment_size)
audio = self.dec(z_slice, g=g)
return (
audio,
l_length,
attn,
ids_slice,
text_mask,
spec_mask,
(z, z_p, mean_prior, logscale_prior, mean_posterior, logscale_posterior),
)
def infer(self, text, text_len, speakers=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=None):
x, mean_prior, logscale_prior, text_mask = self.enc_p(text, text_len)
if self.n_speakers > 1 and speakers is not None:
g = self.emb_g(speakers).unsqueeze(-1) # [b, h, 1]
else:
g = None
if self.use_sdp:
logw = self.dp(x, text_mask, g=g, reverse=True, noise_scale=noise_scale_w)
else:
logw = self.dp(x, text_mask, g=g)
w = torch.exp(logw) * text_mask * length_scale
w_ceil = torch.ceil(w)
audio_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
audio_mask = torch.unsqueeze(get_mask_from_lengths(audio_lengths, None), 1).to(text_mask.dtype)
attn_mask = torch.unsqueeze(text_mask, 2) * torch.unsqueeze(audio_mask, -1)
attn = generate_path(w_ceil, attn_mask)
mean_prior = torch.matmul(attn.squeeze(1), mean_prior.transpose(1, 2)).transpose(
1, 2
) # [b, t', t], [b, t, d] -> [b, d, t']
logscale_prior = torch.matmul(attn.squeeze(1), logscale_prior.transpose(1, 2)).transpose(
1, 2
) # [b, t', t], [b, t, d] -> [b, d, t']
z_p = mean_prior + torch.randn_like(mean_prior) * torch.exp(logscale_prior) * noise_scale
z = self.flow(z_p, audio_mask, g=g, reverse=True)
audio = self.dec((z * audio_mask)[:, :, :max_len], g=g)
return audio, attn, audio_mask, (z, z_p, mean_prior, logscale_prior)
# Can be used for emotions
def voice_conversion(self, y, y_lengths, speaker_src, speaker_tgt):
        assert self.n_speakers > 1, "n_speakers has to be larger than 1."
g_src = self.emb_g(speaker_src).unsqueeze(-1)
g_tgt = self.emb_g(speaker_tgt).unsqueeze(-1)
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
z_p = self.flow(z, y_mask, g=g_src)
z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
o_hat = self.dec(z_hat * y_mask, g=g_tgt)
return o_hat, y_mask, (z, z_p, z_hat)
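# Illustrative usage sketch (not from the original NeMo source; every hyperparameter
# below is an example value, not a recommended configuration): single-speaker
# inference with SynthesizerTrn maps token ids directly to a waveform; noise_scale
# and length_scale trade off variability against speaking rate.
def _example_vits_inference():
    model = SynthesizerTrn(
        n_vocab=100,
        spec_channels=513,
        segment_size=32,
        inter_channels=192,
        hidden_channels=192,
        filter_channels=768,
        n_heads=2,
        n_layers=6,
        kernel_size=3,
        p_dropout=0.1,
        padding_idx=0,
        resblock='1',
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[8, 8, 2, 2],
        upsample_initial_channel=512,
        upsample_kernel_sizes=[16, 16, 4, 4],
    ).eval()
    tokens = torch.randint(1, 100, (1, 25))
    token_len = torch.tensor([25])
    with torch.no_grad():
        audio, attn, audio_mask, _ = model.infer(tokens, token_len, noise_scale=0.667, length_scale=1.0)
    return audio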
##############
# Attentions #
##############
class AttentionEncoder(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
window_size=4,
**kwargs
):
super().__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.drop = nn.Dropout(p_dropout)
self.attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for _ in range(self.n_layers):
self.attn_layers.append(
MultiHeadAttention(
hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size
)
)
self.norm_layers_1.append(LayerNorm(hidden_channels))
self.ffn_layers.append(
FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)
)
self.norm_layers_2.append(LayerNorm(hidden_channels))
def forward(self, x, x_mask):
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
x = x * x_mask
for i in range(self.n_layers):
y = self.attn_layers[i](x, x, attn_mask)
y = self.drop(y)
x = self.norm_layers_1[i](x + y)
y = self.ffn_layers[i](x, x_mask)
y = self.drop(y)
x = self.norm_layers_2[i](x + y)
x = x * x_mask
return x
class MultiHeadAttention(nn.Module):
def __init__(
self,
channels,
out_channels,
n_heads,
p_dropout=0.0,
window_size=None,
heads_share=True,
block_length=None,
proximal_bias=False,
proximal_init=False,
):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.p_dropout = p_dropout
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.proximal_init = proximal_init
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
nn.init.xavier_uniform_(self.conv_v.weight)
if proximal_init:
with torch.no_grad():
self.conv_k.weight.copy_(self.conv_q.weight)
self.conv_k.bias.copy_(self.conv_q.bias)
def forward(self, x, c, attn_mask=None):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)
x, self.attn = self.attention(q, k, v, mask=attn_mask)
x = self.conv_o(x)
return x
def attention(self, query, key, value, mask=None):
# reshape [b, d, t] -> [b, n_h, t, d_k]
b, d, t_s, t_t = key.size(0), key.size(1), key.size(2), query.size(2)
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
if self.window_size is not None:
assert t_s == t_t, "Relative attention is only available for self-attention."
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
scores_local = self._relative_position_to_absolute_position(rel_logits)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, "Proximal bias is only available for self-attention."
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e4)
if self.block_length is not None:
assert t_s == t_t, "Local attention is only available for self-attention."
block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
scores = scores.masked_fill(block_mask == 0, -1e4)
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
return output, p_attn
def _matmul_with_relative_values(self, x, y):
"""
x: [b, h, l, m]
y: [h or 1, m, d]
ret: [b, h, l, d]
"""
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
"""
x: [b, h, l, d]
y: [h or 1, m, d]
ret: [b, h, l, m]
"""
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
# Pad first before slice to avoid using cond ops.
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max((self.window_size + 1) - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = F.pad(
relative_embeddings, convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])
)
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
"""
x: [b, h, l, 2*l-1]
ret: [b, h, l, l]
"""
batch, heads, length, _ = x.size()
# Concat columns of pad to shift from relative to absolute indexing.
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
# Concat extra elements so to add up to shape (len+1, 2*len-1).
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
# Reshape and slice out the padded elements.
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1 :]
return x_final
def _absolute_position_to_relative_position(self, x):
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
batch, heads, length, _ = x.size()
        # pad along column
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
# add 0's in the beginning that will skew the elements after reshape
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
"""
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
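# Illustrative usage sketch (not from the original NeMo source; sizes are arbitrary):
# with window_size set, MultiHeadAttention adds learned relative-position terms to
# the attention scores, which restricts it to self-attention (query and key/value
# sequences must have the same length).
def _example_relative_self_attention():
    attn = MultiHeadAttention(channels=192, out_channels=192, n_heads=2, window_size=4)
    x = torch.randn(2, 192, 30)
    attn_mask = torch.ones(2, 1, 30, 30)   # [b, 1, t_t, t_s]
    y = attn(x, x, attn_mask=attn_mask)
    assert y.shape == (2, 192, 30)
    return y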
class FFN(nn.Module):
def __init__(
self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0, activation=None, causal=False
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.activation = activation
self.causal = causal
if causal:
self.padding = self._causal_padding
else:
self.padding = self._same_padding
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
self.drop = nn.Dropout(p_dropout)
def forward(self, x, x_mask):
x = self.conv_1(self.padding(x * x_mask))
if self.activation == "gelu":
x = x * torch.sigmoid(1.702 * x)
else:
x = torch.relu(x)
x = self.drop(x)
x = self.conv_2(self.padding(x * x_mask))
return x * x_mask
def _causal_padding(self, x):
if self.kernel_size == 1:
return x
pad_l = self.kernel_size - 1
pad_r = 0
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
x = F.pad(x, convert_pad_shape(padding))
return x
def _same_padding(self, x):
if self.kernel_size == 1:
return x
pad_l = (self.kernel_size - 1) // 2
pad_r = self.kernel_size // 2
padding = [[0, 0], [0, 0], [pad_l, pad_r]]
x = F.pad(x, convert_pad_shape(padding))
return x
| NeMo-main | nemo/collections/tts/modules/vits_modules.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from nemo.collections.tts.modules.common import ConvLSTMLinear
from nemo.collections.tts.modules.submodules import ConvNorm, MaskedInstanceNorm1d
from nemo.collections.tts.modules.transformer import FFTransformer
from nemo.collections.tts.parts.utils.helpers import get_mask_from_lengths
def get_attribute_prediction_model(config):
name = config['name']
hparams = config['hparams']
if name == 'dap':
model = DAP(**hparams)
else:
raise Exception("{} model is not supported".format(name))
return model
class AttributeProcessing(nn.Module):
def __init__(self, take_log_of_input=False):
super(AttributeProcessing, self).__init__()
self.take_log_of_input = take_log_of_input
def normalize(self, x):
if self.take_log_of_input:
x = torch.log(x + 1)
return x
def denormalize(self, x):
if self.take_log_of_input:
x = torch.exp(x) - 1
return x
class BottleneckLayerLayer(nn.Module):
def __init__(self, in_dim, reduction_factor, norm='weightnorm', non_linearity='relu', use_pconv=False):
super(BottleneckLayerLayer, self).__init__()
self.reduction_factor = reduction_factor
reduced_dim = int(in_dim / reduction_factor)
self.out_dim = reduced_dim
if self.reduction_factor > 1:
if norm == 'weightnorm':
norm_args = {"use_weight_norm": True}
elif norm == 'instancenorm':
norm_args = {"norm_fn": MaskedInstanceNorm1d}
else:
norm_args = {}
fn = ConvNorm(in_dim, reduced_dim, kernel_size=3, **norm_args)
self.projection_fn = fn
self.non_linearity = non_linearity
def forward(self, x, lens):
if self.reduction_factor > 1:
            # borisf: here, float() instead of to(x.dtype) to work around an ONNX exporter bug
mask = get_mask_from_lengths(lens, x).unsqueeze(1).float()
x = self.projection_fn(x, mask)
if self.non_linearity == 'relu':
x = F.relu(x)
elif self.non_linearity == 'leakyrelu':
x = F.leaky_relu(x)
return x
class DAP(AttributeProcessing):
def __init__(self, n_speaker_dim, bottleneck_hparams, take_log_of_input, arch_hparams, use_transformer=False):
super(DAP, self).__init__(take_log_of_input)
self.bottleneck_layer = BottleneckLayerLayer(**bottleneck_hparams)
arch_hparams['in_dim'] = self.bottleneck_layer.out_dim + n_speaker_dim
if use_transformer:
self.feat_pred_fn = FFTransformer(**arch_hparams)
else:
self.feat_pred_fn = ConvLSTMLinear(**arch_hparams)
def forward(self, txt_enc, spk_emb, x, lens):
if x is not None:
x = self.normalize(x)
txt_enc = self.bottleneck_layer(txt_enc, lens)
spk_emb_expanded = spk_emb[..., None].expand(-1, -1, txt_enc.shape[2])
context = torch.cat((txt_enc, spk_emb_expanded), 1)
x_hat = self.feat_pred_fn(context, lens)
outputs = {'x_hat': x_hat, 'x': x}
return outputs
def infer(self, txt_enc, spk_emb, lens=None):
x_hat = self.forward(txt_enc, spk_emb, x=None, lens=lens)['x_hat']
x_hat = self.denormalize(x_hat)
return x_hat
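# Illustrative usage sketch (not from the original NeMo source; values are arbitrary):
# AttributeProcessing optionally moves attribute targets (e.g. pitch or energy) into
# log space for prediction and back again for synthesis; normalize and denormalize
# are inverses of each other.
def _example_attribute_processing_roundtrip():
    proc = AttributeProcessing(take_log_of_input=True)
    x = torch.rand(2, 1, 50) * 100.0   # strictly positive attribute values
    x_rec = proc.denormalize(proc.normalize(x))
    assert torch.allclose(x, x_rec, rtol=1e-4, atol=1e-4)
    return x_rec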
| NeMo-main | nemo/collections/tts/modules/attribute_prediction_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from omegaconf import DictConfig
from nemo.collections.asr.parts.utils import adapter_utils
from nemo.collections.tts.modules.aligner import AlignmentEncoder
from nemo.collections.tts.modules.fastpitch import TemporalPredictor
from nemo.collections.tts.modules.transformer import FFTransformerDecoder, FFTransformerEncoder
from nemo.core.classes import adapter_mixins
class FFTransformerDecoderAdapter(FFTransformerDecoder, adapter_mixins.AdapterModuleMixin):
""" Inherit from FFTransformerDecoder and add support for adapter"""
def add_adapter(self, name: str, cfg: dict):
cfg = self._update_adapter_cfg_input_dim(cfg)
for fft_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
fft_layer.add_adapter(name, cfg)
def is_adapter_available(self) -> bool:
return any([FFT_layer.is_adapter_available() for FFT_layer in self.layers])
def set_enabled_adapters(self, name: Optional[str] = None, enabled: bool = True):
for FFT_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
FFT_layer.set_enabled_adapters(name=name, enabled=enabled)
def get_enabled_adapters(self) -> List[str]:
names = set([])
for FFT_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
names.update(FFT_layer.get_enabled_adapters())
names = sorted(list(names))
return names
def _update_adapter_cfg_input_dim(self, cfg: DictConfig):
cfg = adapter_utils.update_adapter_cfg_input_dim(self, cfg, module_dim=self.d_model)
return cfg
class FFTransformerEncoderAdapter(
FFTransformerDecoderAdapter, FFTransformerEncoder, adapter_mixins.AdapterModuleMixin
):
""" Inherit from FFTransformerEncoder and add support for adapter"""
pass
class AlignmentEncoderAdapter(AlignmentEncoder, adapter_mixins.AdapterModuleMixin):
""" Inherit from AlignmentEncoder and add support for adapter"""
def add_adapter(self, name: str, cfg: dict):
for i, conv_layer in enumerate(self.key_proj):
if i % 2 == 0:
cfg = self._update_adapter_cfg_input_dim(cfg, conv_layer.conv.out_channels)
conv_layer.add_adapter(name, cfg)
for i, conv_layer in enumerate(self.query_proj):
if i % 2 == 0:
cfg = self._update_adapter_cfg_input_dim(cfg, conv_layer.conv.out_channels)
conv_layer.add_adapter(name, cfg)
def is_adapter_available(self) -> bool:
return any(
[conv_layer.is_adapter_available() for i, conv_layer in enumerate(self.key_proj) if i % 2 == 0]
+ [conv_layer.is_adapter_available() for i, conv_layer in enumerate(self.query_proj) if i % 2 == 0]
)
def set_enabled_adapters(self, name: Optional[str] = None, enabled: bool = True):
for i, conv_layer in enumerate(self.key_proj):
if i % 2 == 0:
conv_layer.set_enabled_adapters(name=name, enabled=enabled)
for i, conv_layer in enumerate(self.query_proj):
if i % 2 == 0:
conv_layer.set_enabled_adapters(name=name, enabled=enabled)
def get_enabled_adapters(self) -> List[str]:
names = set([])
for i, conv_layer in enumerate(self.key_proj):
if i % 2 == 0:
names.update(conv_layer.get_enabled_adapters())
for i, conv_layer in enumerate(self.query_proj):
if i % 2 == 0:
names.update(conv_layer.get_enabled_adapters())
names = sorted(list(names))
return names
def _update_adapter_cfg_input_dim(self, cfg: DictConfig, module_dim: int):
cfg = adapter_utils.update_adapter_cfg_input_dim(self, cfg, module_dim=module_dim)
return cfg
class TemporalPredictorAdapter(TemporalPredictor, adapter_mixins.AdapterModuleMixin):
""" Inherit from TemporalPredictor and add support for adapter"""
def add_adapter(self, name: str, cfg: dict):
cfg = self._update_adapter_cfg_input_dim(cfg)
for conv_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
conv_layer.add_adapter(name, cfg)
def is_adapter_available(self) -> bool:
return any([conv_layer.is_adapter_available() for conv_layer in self.layers])
def set_enabled_adapters(self, name: Optional[str] = None, enabled: bool = True):
for conv_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
conv_layer.set_enabled_adapters(name=name, enabled=enabled)
def get_enabled_adapters(self) -> List[str]:
names = set([])
for conv_layer in self.layers: # type: adapter_mixins.AdapterModuleMixin
names.update(conv_layer.get_enabled_adapters())
names = sorted(list(names))
return names
def _update_adapter_cfg_input_dim(self, cfg: DictConfig):
cfg = adapter_utils.update_adapter_cfg_input_dim(self, cfg, module_dim=self.filter_size)
return cfg
"""Register any additional information"""
if adapter_mixins.get_registered_adapter(FFTransformerEncoder) is None:
adapter_mixins.register_adapter(base_class=FFTransformerEncoder, adapter_class=FFTransformerEncoderAdapter)
if adapter_mixins.get_registered_adapter(FFTransformerDecoder) is None:
adapter_mixins.register_adapter(base_class=FFTransformerDecoder, adapter_class=FFTransformerDecoderAdapter)
if adapter_mixins.get_registered_adapter(AlignmentEncoder) is None:
adapter_mixins.register_adapter(base_class=AlignmentEncoder, adapter_class=AlignmentEncoderAdapter)
if adapter_mixins.get_registered_adapter(TemporalPredictor) is None:
adapter_mixins.register_adapter(base_class=TemporalPredictor, adapter_class=TemporalPredictorAdapter)
| NeMo-main | nemo/collections/tts/modules/adapters.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2021 Jaehyeon Kim
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .numba_core import maximum_path
| NeMo-main | nemo/collections/tts/modules/monotonic_align/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numba
import numpy as np
import torch
def maximum_path(neg_cent, mask):
""" Numba version.
neg_cent: [b, t_t, t_s]
mask: [b, t_t, t_s]
"""
device = neg_cent.device
dtype = neg_cent.dtype
neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
path = np.zeros(neg_cent.shape, dtype=np.int32)
t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
maximum_path_c(path, neg_cent, t_t_max, t_s_max)
return torch.from_numpy(path).to(device=device, dtype=dtype)
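# Illustrative usage sketch (not from the original NeMo source; sizes are arbitrary):
# monotonic alignment search returns a hard 0/1 alignment of shape
# [b, t_frames, t_text] in which each text position is covered by a contiguous run
# of frames, provided t_frames >= t_text.
def _example_maximum_path():
    neg_cent = torch.randn(1, 12, 5)   # [b, t_frames, t_text]
    mask = torch.ones(1, 12, 5)
    path = maximum_path(neg_cent, mask)
    assert path.shape == neg_cent.shape
    assert torch.all(path.sum(1) >= 1)  # every text token receives at least one frame
    return path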
@numba.jit(nopython=True, boundscheck=False, parallel=True)
def maximum_path_each(path, value, t_y: int, t_x: int, max_neg_val=-1e9):
"""
Args:
path: int32[:, :]
value: float32[:, :]
t_y: int
t_x: int
max_neg_val: float
"""
index: int = t_x - 1
for y in range(t_y):
for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
if x == y:
v_cur = max_neg_val
else:
v_cur = value[y - 1, x]
if x == 0:
if y == 0:
v_prev = 0.0
else:
v_prev = max_neg_val
else:
v_prev = value[y - 1, x - 1]
value[y, x] += max(v_prev, v_cur)
for y in range(t_y - 1, -1, -1):
path[y, index] = 1
if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
index = index - 1
@numba.jit(nopython=True, boundscheck=False, parallel=True)
def maximum_path_c(paths, values, t_ys, t_xs):
"""
Args:
paths: int32[:, :, :]
values: float32[:, :, :]
t_ys: int[:]
t_xs: int[:]
"""
b: int = paths.shape[0]
for i in numba.prange(b):
maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
if __name__ == '__main__':
pass
| NeMo-main | nemo/collections/tts/modules/monotonic_align/numba_core.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import librosa
import torch.utils.data
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest
from nemo.collections.tts.parts.preprocessing.feature_processors import FeatureProcessor
from nemo.collections.tts.parts.utils.tts_dataset_utils import (
filter_dataset_by_duration,
get_abs_rel_paths,
get_weighted_sampler,
stack_tensors,
)
from nemo.core.classes import Dataset
from nemo.utils import logging
from nemo.utils.decorators import experimental
@dataclass
class DatasetMeta:
manifest_path: Path
audio_dir: Path
sample_weight: float = 1.0
@dataclass
class DatasetSample:
manifest_entry: dict
audio_dir: Path
@experimental
class VocoderDataset(Dataset):
"""
Class for processing and loading Vocoder training examples.
Args:
dataset_meta: Dict of dataset names (string) to dataset metadata.
sample_rate: Sample rate to load audio as. If the audio is stored at a different sample rate, then it will
be resampled.
n_samples: Optional int, if provided then n_samples samples will be randomly sampled from the full
audio file.
        weighted_sampling_steps_per_epoch: Optional int, if provided then data will be sampled (with replacement) based
            on the sample weights provided in the dataset metadata. If None, then sample weights will be ignored.
feature_processors: Optional, list of feature processors to run on training examples.
min_duration: Optional float, if provided audio files in the training manifest shorter than 'min_duration'
will be ignored.
max_duration: Optional float, if provided audio files in the training manifest longer than 'max_duration'
will be ignored.
        trunc_duration: Optional float, if provided audio will be truncated to at most 'trunc_duration' seconds.
        num_audio_retries: Number of read attempts to make when sampling an audio file, so that training does not
            fail on sporadic IO errors.
"""
def __init__(
self,
dataset_meta: Dict,
sample_rate: int,
n_samples: Optional[int] = None,
weighted_sampling_steps_per_epoch: Optional[int] = None,
feature_processors: Optional[Dict[str, FeatureProcessor]] = None,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
trunc_duration: Optional[float] = None,
num_audio_retries: int = 5,
):
super().__init__()
self.sample_rate = sample_rate
self.n_samples = n_samples
self.weighted_sampling_steps_per_epoch = weighted_sampling_steps_per_epoch
self.num_audio_retries = num_audio_retries
self.load_precomputed_mel = False
if trunc_duration:
self.trunc_samples = int(trunc_duration * self.sample_rate)
else:
self.trunc_samples = None
if feature_processors:
logging.info(f"Found feature processors {feature_processors.keys()}")
self.feature_processors = list(feature_processors.values())
else:
self.feature_processors = []
self.data_samples = []
self.sample_weights = []
for dataset_name, dataset_info in dataset_meta.items():
dataset = DatasetMeta(**dataset_info)
samples, weights = self._preprocess_manifest(
dataset_name=dataset_name, dataset=dataset, min_duration=min_duration, max_duration=max_duration,
)
self.data_samples += samples
self.sample_weights += weights
def get_sampler(self, batch_size: int) -> Optional[torch.utils.data.Sampler]:
if not self.weighted_sampling_steps_per_epoch:
return None
sampler = get_weighted_sampler(
sample_weights=self.sample_weights, batch_size=batch_size, num_steps=self.weighted_sampling_steps_per_epoch
)
return sampler
def _segment_audio(self, audio_filepath: Path) -> AudioSegment:
# Retry file read multiple times as file seeking can produce random IO errors.
for _ in range(self.num_audio_retries):
try:
audio_segment = AudioSegment.segment_from_file(
audio_filepath, target_sr=self.sample_rate, n_segments=self.n_samples,
)
return audio_segment
except Exception:
traceback.print_exc()
raise ValueError(f"Failed to read audio {audio_filepath}")
def _sample_audio(self, audio_filepath: Path) -> Tuple[torch.Tensor, torch.Tensor]:
if not self.n_samples:
audio_array, _ = librosa.load(audio_filepath, sr=self.sample_rate)
else:
audio_segment = self._segment_audio(audio_filepath)
audio_array = audio_segment.samples
if self.trunc_samples:
audio_array = audio_array[: self.trunc_samples]
audio = torch.tensor(audio_array)
audio_len = torch.tensor(audio.shape[0])
return audio, audio_len
@staticmethod
def _preprocess_manifest(
dataset_name: str, dataset: DatasetMeta, min_duration: float, max_duration: float,
):
entries = read_manifest(dataset.manifest_path)
filtered_entries, total_hours, filtered_hours = filter_dataset_by_duration(
entries=entries, min_duration=min_duration, max_duration=max_duration
)
logging.info(dataset_name)
logging.info(f"Original # of files: {len(entries)}")
logging.info(f"Filtered # of files: {len(filtered_entries)}")
logging.info(f"Original duration: {total_hours:.2f} hours")
logging.info(f"Filtered duration: {filtered_hours:.2f} hours")
samples = []
sample_weights = []
for entry in filtered_entries:
sample = DatasetSample(manifest_entry=entry, audio_dir=Path(dataset.audio_dir),)
samples.append(sample)
sample_weights.append(dataset.sample_weight)
return samples, sample_weights
def __len__(self):
return len(self.data_samples)
def __getitem__(self, index):
data = self.data_samples[index]
audio_filepath = Path(data.manifest_entry["audio_filepath"])
audio_filepath_abs, audio_filepath_rel = get_abs_rel_paths(input_path=audio_filepath, base_path=data.audio_dir)
audio, audio_len = self._sample_audio(audio_filepath_abs)
example = {"audio_filepath": audio_filepath_rel, "audio": audio, "audio_len": audio_len}
for processor in self.feature_processors:
processor.process(example)
return example
def collate_fn(self, batch: List[dict]):
audio_filepath_list = []
audio_list = []
audio_len_list = []
for example in batch:
audio_filepath_list.append(example["audio_filepath"])
audio_list.append(example["audio"])
audio_len_list.append(example["audio_len"])
batch_audio_len = torch.IntTensor(audio_len_list)
audio_max_len = int(batch_audio_len.max().item())
batch_audio = stack_tensors(audio_list, max_lens=[audio_max_len])
batch_dict = {
"audio_filepaths": audio_filepath_list,
"audio": batch_audio,
"audio_lens": batch_audio_len,
}
return batch_dict
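# Illustrative usage sketch (not from the original NeMo source; the manifest path,
# audio directory, and all numeric settings below are hypothetical): wiring a
# VocoderDataset into a torch DataLoader. The manifest is the usual NeMo JSON-lines
# file with "audio_filepath" and "duration" fields.
def _example_vocoder_dataloader(manifest_path: str, audio_dir: str):
    dataset = VocoderDataset(
        dataset_meta={"train_set": {"manifest_path": manifest_path, "audio_dir": audio_dir, "sample_weight": 1.0}},
        sample_rate=22050,
        n_samples=8192,                # random fixed-length crops, as commonly used for GAN vocoder training
        min_duration=0.5,
        max_duration=20.0,
    )
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=16, sampler=dataset.get_sampler(batch_size=16), collate_fn=dataset.collate_fn,
    )
    return loader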
| NeMo-main | nemo/collections/tts/data/vocoder_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/tts/data/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional
import librosa
import torch.utils.data
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest
from nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers import BaseTokenizer
from nemo.collections.tts.parts.preprocessing.feature_processors import FeatureProcessor
from nemo.collections.tts.parts.preprocessing.features import Featurizer
from nemo.collections.tts.parts.utils.tts_dataset_utils import (
beta_binomial_prior_distribution,
filter_dataset_by_duration,
get_abs_rel_paths,
get_weighted_sampler,
stack_tensors,
)
from nemo.core.classes import Dataset
from nemo.utils import logging
from nemo.utils.decorators import experimental
@dataclass
class DatasetMeta:
manifest_path: Path
audio_dir: Path
feature_dir: Path
sample_weight: float = 1.0
@dataclass
class DatasetSample:
manifest_entry: Dict[str, Any]
audio_dir: Path
feature_dir: Path
text: str
speaker: str
speaker_index: int = None
@experimental
class TextToSpeechDataset(Dataset):
"""
Class for processing and loading text to speech training examples.
Args:
dataset_meta: Dict of dataset names (string) to dataset metadata.
sample_rate: Sample rate to load audio as. If the audio is stored at a different sample rate, then it will
be resampled.
text_tokenizer: Tokenizer to apply to the text field.
weighted_sampling_steps_per_epoch: Optional int. If provided, then data will be sampled (with replacement) based on
the sample weights provided in the dataset metadata. If None, then sample weights will be ignored.
speaker_path: Optional, path to JSON file with speaker indices, for multi-speaker training. Can be created with
scripts.dataset_processing.tts.create_speaker_map.py
featurizers: Optional, list of featurizers to load feature data from. Should be the same config provided
when running scripts.dataset_processing.tts.compute_features.py before training.
feature_processors: Optional, list of feature processors to run on training examples.
align_prior_hop_length: Optional int, hop length of audio features.
If provided, the alignment prior will be calculated and included in the batch output. Must match the hop length
of audio features used for training.
min_duration: Optional float, if provided audio files in the training manifest shorter than 'min_duration'
will be ignored.
max_duration: Optional float, if provided audio files in the training manifest longer than 'max_duration'
will be ignored.
"""
def __init__(
self,
dataset_meta: Dict,
sample_rate: int,
text_tokenizer: BaseTokenizer,
weighted_sampling_steps_per_epoch: Optional[int] = None,
speaker_path: Optional[Path] = None,
featurizers: Optional[Dict[str, Featurizer]] = None,
feature_processors: Optional[Dict[str, FeatureProcessor]] = None,
align_prior_hop_length: Optional[int] = None,
min_duration: Optional[float] = None,
max_duration: Optional[float] = None,
):
super().__init__()
self.sample_rate = sample_rate
self.text_tokenizer = text_tokenizer
self.weighted_sampling_steps_per_epoch = weighted_sampling_steps_per_epoch
self.align_prior_hop_length = align_prior_hop_length
self.include_align_prior = self.align_prior_hop_length is not None
if speaker_path:
self.include_speaker = True
with open(speaker_path, 'r', encoding="utf-8") as speaker_f:
speaker_index_map = json.load(speaker_f)
else:
self.include_speaker = False
speaker_index_map = None
if featurizers:
logging.info(f"Found featurizers {featurizers.keys()}")
self.featurizers = list(featurizers.values())
else:
self.featurizers = []
if feature_processors:
logging.info(f"Found featurize processors {feature_processors.keys()}")
self.feature_processors = list(feature_processors.values())
else:
self.feature_processors = []
self.data_samples = []
self.sample_weights = []
for dataset_name, dataset_info in dataset_meta.items():
dataset = DatasetMeta(**dataset_info)
samples, weights = self._preprocess_manifest(
dataset_name=dataset_name,
dataset=dataset,
min_duration=min_duration,
max_duration=max_duration,
speaker_index_map=speaker_index_map,
)
self.data_samples += samples
self.sample_weights += weights
def get_sampler(self, batch_size: int) -> Optional[torch.utils.data.Sampler]:
if not self.weighted_sampling_steps_per_epoch:
return None
sampler = get_weighted_sampler(
sample_weights=self.sample_weights, batch_size=batch_size, num_steps=self.weighted_sampling_steps_per_epoch
)
return sampler
def _preprocess_manifest(
self,
dataset_name: str,
dataset: DatasetMeta,
min_duration: float,
max_duration: float,
speaker_index_map: Dict[str, int],
):
entries = read_manifest(dataset.manifest_path)
filtered_entries, total_hours, filtered_hours = filter_dataset_by_duration(
entries=entries, min_duration=min_duration, max_duration=max_duration
)
logging.info(dataset_name)
logging.info(f"Original # of files: {len(entries)}")
logging.info(f"Filtered # of files: {len(filtered_entries)}")
logging.info(f"Original duration: {total_hours:.2f} hours")
logging.info(f"Filtered duration: {filtered_hours:.2f} hours")
samples = []
sample_weights = []
for entry in filtered_entries:
if "normalized_text" in entry:
text = entry["normalized_text"]
else:
text = entry["text"]
if self.include_speaker:
speaker = entry["speaker"]
speaker_index = speaker_index_map[speaker]
else:
speaker = None
speaker_index = 0
sample = DatasetSample(
manifest_entry=entry,
audio_dir=Path(dataset.audio_dir),
feature_dir=Path(dataset.feature_dir),
text=text,
speaker=speaker,
speaker_index=speaker_index,
)
samples.append(sample)
sample_weights.append(dataset.sample_weight)
return samples, sample_weights
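# Illustrative sketch (not part of the original file): the manifest fields read by
# the preprocessing above. Values are hypothetical placeholders; "normalized_text"
# falls back to "text" when absent, and "speaker" is only required when a
# speaker_path is provided.
#
#     {"audio_filepath": "wavs/utt_0001.wav", "duration": 3.2,
#      "text": "hello world", "normalized_text": "hello world", "speaker": "spk_01"}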
def __len__(self):
return len(self.data_samples)
def __getitem__(self, index):
data = self.data_samples[index]
audio_filepath = Path(data.manifest_entry["audio_filepath"])
audio_filepath_abs, audio_filepath_rel = get_abs_rel_paths(input_path=audio_filepath, base_path=data.audio_dir)
audio, _ = librosa.load(audio_filepath_abs, sr=self.sample_rate)
tokens = self.text_tokenizer(data.text)
example = {"audio_filepath": audio_filepath_rel, "audio": audio, "tokens": tokens}
if data.speaker is not None:
example["speaker"] = data.speaker
example["speaker_index"] = data.speaker_index
if self.include_align_prior:
text_len = len(tokens)
spec_len = 1 + librosa.core.samples_to_frames(audio.shape[0], hop_length=self.align_prior_hop_length)
align_prior = beta_binomial_prior_distribution(phoneme_count=text_len, mel_count=spec_len)
align_prior = torch.tensor(align_prior, dtype=torch.float32)
example["align_prior"] = align_prior
for featurizer in self.featurizers:
feature_dict = featurizer.load(
manifest_entry=data.manifest_entry, audio_dir=data.audio_dir, feature_dir=data.feature_dir
)
example.update(feature_dict)
for processor in self.feature_processors:
processor.process(example)
return example
def collate_fn(self, batch: List[dict]):
audio_filepath_list = []
audio_list = []
audio_len_list = []
token_list = []
token_len_list = []
speaker_list = []
prior_list = []
for example in batch:
audio_filepath_list.append(example["audio_filepath"])
audio_tensor = torch.tensor(example["audio"], dtype=torch.float32)
audio_list.append(audio_tensor)
audio_len_list.append(audio_tensor.shape[0])
token_tensor = torch.tensor(example["tokens"], dtype=torch.int32)
token_list.append(token_tensor)
token_len_list.append(token_tensor.shape[0])
if self.include_speaker:
speaker_list.append(example["speaker_index"])
if self.include_align_prior:
prior_list.append(example["align_prior"])
batch_audio_len = torch.IntTensor(audio_len_list)
audio_max_len = int(batch_audio_len.max().item())
batch_token_len = torch.IntTensor(token_len_list)
token_max_len = int(batch_token_len.max().item())
batch_audio = stack_tensors(audio_list, max_lens=[audio_max_len])
batch_tokens = stack_tensors(token_list, max_lens=[token_max_len], pad_value=self.text_tokenizer.pad)
batch_dict = {
"audio_filepaths": audio_filepath_list,
"audio": batch_audio,
"audio_lens": batch_audio_len,
"text": batch_tokens,
"text_lens": batch_token_len,
}
if self.include_speaker:
batch_dict["speaker_id"] = torch.IntTensor(speaker_list)
if self.include_align_prior:
spec_max_len = max([prior.shape[0] for prior in prior_list])
text_max_len = max([prior.shape[1] for prior in prior_list])
batch_dict["align_prior_matrix"] = stack_tensors(prior_list, max_lens=[text_max_len, spec_max_len],)
for featurizer in self.featurizers:
feature_dict = featurizer.collate_fn(batch)
batch_dict.update(feature_dict)
return batch_dict
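# Illustrative usage sketch (not part of the original file): combining the optional
# weighted sampler with collate_fn in a DataLoader. The `dataset` variable and
# batch size are hypothetical placeholders; shuffle must be left off when a sampler
# is supplied.
#
#     import torch.utils.data
#     sampler = dataset.get_sampler(batch_size=16)
#     loader = torch.utils.data.DataLoader(
#         dataset, batch_size=16, sampler=sampler, collate_fn=dataset.collate_fn
#     )
#     for batch in loader:
#         tokens, token_lens = batch["text"], batch["text_lens"]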
| NeMo-main | nemo/collections/tts/data/text_to_speech_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import os
import pickle
import random
from collections import defaultdict
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import librosa
import numpy as np
import torch
from einops import rearrange
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers import (
BaseTokenizer,
EnglishCharsTokenizer,
EnglishPhonemesTokenizer,
)
from nemo.collections.tts.parts.utils.tts_dataset_utils import (
BetaBinomialInterpolator,
beta_binomial_prior_distribution,
general_padding,
get_base_dir,
)
from nemo.collections.tts.torch.tts_data_types import (
DATA_STR2DATA_CLASS,
MAIN_DATA_TYPES,
AlignPriorMatrix,
Durations,
Energy,
LMTokens,
LogMel,
P_voiced,
Pitch,
ReferenceAudio,
SpeakerID,
TTSDataType,
Voiced_mask,
WithLens,
)
from nemo.core.classes import Dataset
from nemo.utils import logging
try:
from nemo_text_processing.text_normalization.normalize import Normalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
Normalizer = None
PYNINI_AVAILABLE = False
EPSILON = 1e-9
WINDOW_FN_SUPPORTED = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}
class TTSDataset(Dataset):
def __init__(
self,
manifest_filepath: Union[str, Path, List[str], List[Path]],
sample_rate: int,
text_tokenizer: Union[BaseTokenizer, Callable[[str], List[int]]],
tokens: Optional[List[str]] = None,
text_normalizer: Optional[Union[Normalizer, Callable[[str], str]]] = None,
text_normalizer_call_kwargs: Optional[Dict] = None,
text_tokenizer_pad_id: Optional[int] = None,
sup_data_types: Optional[List[str]] = None,
sup_data_path: Optional[Union[Path, str]] = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
ignore_file: Optional[Union[str, Path]] = None,
trim: bool = False,
trim_ref: Optional[float] = None,
trim_top_db: Optional[int] = None,
trim_frame_length: Optional[int] = None,
trim_hop_length: Optional[int] = None,
n_fft: int = 1024,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
window: str = "hann",
n_mels: int = 80,
lowfreq: int = 0,
highfreq: Optional[int] = None,
segment_max_duration: Optional[int] = None,
pitch_augment: bool = False,
cache_pitch_augment: bool = True,
pad_multiple: int = 1,
**kwargs,
):
"""Dataset which can be used for training spectrogram generators and end-to-end TTS models.
It loads main data types (audio, text) and specified supplementary data types (log mel, durations, align prior matrix, pitch, energy, speaker id).
Some supplementary data types will be computed on the fly and saved in the sup_data_path if they do not exist yet.
The save folder can be changed for some supplementary data types (see the keyword args section).
Arguments for supplementary data should also be specified in this class, and they will be used from kwargs (see the keyword args section).
Args:
manifest_filepath (Union[str, Path, List[str], List[Path]]): Path(s) to the .json manifests containing information on the
dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid
json. Each line should contain the following:
"audio_filepath": <PATH_TO_WAV>,
"text": <THE_TRANSCRIPT>,
"normalized_text": <NORMALIZED_TRANSCRIPT> (Optional),
"mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional),
"duration": <Duration of audio clip in seconds> (Optional),
sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to.
text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer.
tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer.
text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer.
text_normalizer_call_kwargs (Optional[Dict]): Additional arguments for text_normalizer function.
text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer.
sup_data_types (Optional[List[str]]): List of supplementary data types.
sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch).
max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
ignore_file (Optional[Union[str, Path]]): The location of a pickle-saved list of audio paths
that will be pruned prior to training. Defaults to None which does not prune.
trim (bool): Whether to apply `librosa.effects.trim` to trim leading and trailing silence from an audio
signal. Defaults to False.
trim_ref (Optional[float]): the reference amplitude. By default, it uses `np.max` and compares to the peak
amplitude in the signal.
trim_top_db (Optional[int]): the threshold (in decibels) below reference to consider as silence.
Defaults to 60.
trim_frame_length (Optional[int]): the number of samples per analysis frame. Defaults to 2048.
trim_hop_length (Optional[int]): the number of samples between analysis frames. Defaults to 512.
n_fft (int): The number of fft samples. Defaults to 1024
win_length (Optional[int]): The length of the stft windows. Defaults to None which uses n_fft.
hop_length (Optional[int]): The hop length between fft computations. Defaults to None which uses n_fft//4.
window (str): One of 'hann', 'hamming', 'blackman','bartlett', 'none'. Which corresponds to the
equivalent torch window function.
n_mels (int): The number of mel filters. Defaults to 80.
lowfreq (int): The lowfreq input to the mel filter calculation. Defaults to 0.
highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None.
Keyword Args:
log_mel_folder (Optional[Union[Path, str]]): The folder that contains or will contain log mel spectrograms.
pitch_folder (Optional[Union[Path, str]]): The folder that contains or will contain pitch.
voiced_mask_folder (Optional[Union[Path, str]]): The folder that contains or will contain the voiced mask of the pitch.
p_voiced_folder (Optional[Union[Path, str]]): The folder that contains or will contain the p_voiced (probability) of the pitch.
energy_folder (Optional[Union[Path, str]]): The folder that contains or will contain energy.
durs_file (Optional[str]): String path to pickled durations location.
durs_type (Optional[str]): Type of durations. Currently, only "aligner-based" is supported.
use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator for calculating alignment prior matrix. Defaults to False.
pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2').
pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7').
pitch_mean (Optional[float]): The mean that we use to normalize the pitch.
pitch_std (Optional[float]): The std that we use to normalize the pitch.
segment_max_duration (Optional[float]): If audio length is greater than segment_max_duration, take a random segment of segment_max_duration (Used for SV task in SSLDisentangler)
pitch_augment (bool): Whether to apply pitch-shift transform and return a pitch-shifted audio. If set as False, audio_shifted will be None (used in SSLDisentangler)
cache_pitch_augment (bool): Whether to cache pitch-augmented audio or not. Defaults to True (used in SSLDisentangler)
pad_multiple (int): If audio length is not divisible by pad_multiple, pad the audio with zeros to make it divisible by pad_multiple (used in SSLDisentangler)
pitch_norm (Optional[bool]): Whether to normalize pitch or not. If True, requires providing either
pitch_stats_path or (pitch_mean and pitch_std).
pitch_stats_path (Optional[Path, str]): Path to file containing speaker level pitch statistics.
"""
super().__init__()
# Initialize text tokenizer
self.text_tokenizer = text_tokenizer
self.phoneme_probability = None
if isinstance(self.text_tokenizer, BaseTokenizer):
self.text_tokenizer_pad_id = text_tokenizer.pad
self.phoneme_probability = getattr(self.text_tokenizer, "phoneme_probability", None)
else:
if text_tokenizer_pad_id is None:
raise ValueError(f"text_tokenizer_pad_id must be specified if text_tokenizer is not BaseTokenizer")
if tokens is None:
raise ValueError(f"tokens must be specified if text_tokenizer is not BaseTokenizer")
self.text_tokenizer_pad_id = text_tokenizer_pad_id
self.cache_text = True if self.phoneme_probability is None else False
# Initialize text normalizer if specified
self.text_normalizer = text_normalizer
if self.text_normalizer is None:
self.text_normalizer_call = None
elif not PYNINI_AVAILABLE:
raise ImportError(
"`nemo_text_processing` is not installed, see https://github.com/NVIDIA/NeMo-text-processing for details"
)
else:
self.text_normalizer_call = (
self.text_normalizer.normalize
if isinstance(self.text_normalizer, Normalizer)
else self.text_normalizer
)
self.text_normalizer_call_kwargs = (
text_normalizer_call_kwargs if text_normalizer_call_kwargs is not None else {}
)
# Initialize and read manifest file(s), filter out data by duration and ignore_file, compute base dir
if isinstance(manifest_filepath, str):
manifest_filepath = [manifest_filepath]
self.manifest_filepath = manifest_filepath
self.lengths = [] # Needed for BucketSampling
data = []
total_duration = 0
for manifest_file in self.manifest_filepath:
with open(Path(manifest_file).expanduser(), 'r') as f:
logging.info(f"Loading dataset from {manifest_file}.")
for line in tqdm(f):
item = json.loads(line)
file_info = {
"audio_filepath": item["audio_filepath"],
"original_text": item["text"],
"mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None,
"duration": item["duration"] if "duration" in item else None,
"speaker_id": item["speaker"] if "speaker" in item else None,
}
if "normalized_text" in item:
file_info["normalized_text"] = item["normalized_text"]
elif "text_normalized" in item:
file_info["normalized_text"] = item["text_normalized"]
else:
text = item["text"]
if self.text_normalizer is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_kwargs)
file_info["normalized_text"] = text
if self.cache_text:
file_info["text_tokens"] = self.text_tokenizer(file_info["normalized_text"])
data.append(file_info)
# Calculating length of spectrogram from input audio for batch sampling
self.lengths.append(os.path.getsize(item["audio_filepath"]) // (n_fft // 2))
if file_info["duration"] is None:
logging.info(
"Not all audio files have duration information. Duration logging will be disabled."
)
total_duration = None
if total_duration is not None:
total_duration += item["duration"]
logging.info(f"Loaded dataset with {len(data)} files.")
if total_duration is not None:
logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.")
self.data = TTSDataset.filter_files(data, ignore_file, min_duration, max_duration, total_duration)
self.base_data_dir = get_base_dir([item["audio_filepath"] for item in self.data])
# Initialize audio and mel related parameters
self.sample_rate = sample_rate
self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate)
self.trim = trim
self.trim_ref = trim_ref if trim_ref is not None else np.max
self.trim_top_db = trim_top_db if trim_top_db is not None else 60
self.trim_frame_length = trim_frame_length if trim_frame_length is not None else 2048
self.trim_hop_length = trim_hop_length if trim_hop_length is not None else 512
self.segment_max_duration = segment_max_duration
self.pitch_augment = pitch_augment
self.cache_pitch_augment = cache_pitch_augment
self.n_fft = n_fft
self.n_mels = n_mels
self.lowfreq = lowfreq
self.highfreq = highfreq
self.window = window
self.win_length = win_length or self.n_fft
self.hop_length = hop_length
self.hop_len = self.hop_length or self.n_fft // 4
self.fb = torch.tensor(
librosa.filters.mel(
sr=self.sample_rate, n_fft=self.n_fft, n_mels=self.n_mels, fmin=self.lowfreq, fmax=self.highfreq
),
dtype=torch.float,
).unsqueeze(0)
try:
window_fn = WINDOW_FN_SUPPORTED[self.window]
except KeyError:
raise NotImplementedError(
f"Current implementation doesn't support {self.window} window. "
f"Please choose one from {list(WINDOW_FN_SUPPORTED.keys())}."
)
self.stft = lambda x: torch.stft(
input=x,
n_fft=self.n_fft,
hop_length=self.hop_len,
win_length=self.win_length,
window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None,
return_complex=True,
)
# Initialize sup_data_path, sup_data_types and run preprocessing methods for every supplementary data type
if sup_data_path is not None:
Path(sup_data_path).mkdir(parents=True, exist_ok=True)
self.sup_data_path = sup_data_path
self.sup_data_types = []
if sup_data_types is not None:
for d_as_str in sup_data_types:
try:
sup_data_type = DATA_STR2DATA_CLASS[d_as_str]
except KeyError:
raise NotImplementedError(f"Current implementation doesn't support {d_as_str} type.")
self.sup_data_types.append(sup_data_type)
if ("voiced_mask" in sup_data_types or "p_voiced" in sup_data_types) and ("pitch" not in sup_data_types):
raise ValueError(
"Please add 'pitch' to sup_data_types in YAML because 'pitch' is required when using either "
"'voiced_mask' or 'p_voiced' or both."
)
self.sup_data_types_set = set(self.sup_data_types)
for data_type in self.sup_data_types:
getattr(self, f"add_{data_type.name}")(**kwargs)
self.pad_multiple = pad_multiple
@staticmethod
def filter_files(data, ignore_file, min_duration, max_duration, total_duration):
if ignore_file:
logging.info(f"Using {ignore_file} to prune dataset.")
with open(Path(ignore_file).expanduser(), "rb") as f:
wavs_to_ignore = set(pickle.load(f))
filtered_data: List[Dict] = []
pruned_duration = 0 if total_duration is not None else None
pruned_items = 0
for item in data:
audio_path = item['audio_filepath']
# Prune data according to min/max_duration & the ignore file
if total_duration is not None:
if (min_duration and item["duration"] < min_duration) or (
max_duration and item["duration"] > max_duration
):
pruned_duration += item["duration"]
pruned_items += 1
continue
if ignore_file and (audio_path in wavs_to_ignore):
pruned_items += 1
pruned_duration += item["duration"]
wavs_to_ignore.remove(audio_path)
continue
filtered_data.append(item)
logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(filtered_data)} files")
if pruned_duration is not None:
logging.info(
f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains "
f"{(total_duration - pruned_duration) / 3600:.2f} hours."
)
return filtered_data
def add_log_mel(self, **kwargs):
self.log_mel_folder = kwargs.pop('log_mel_folder', None)
if self.log_mel_folder is None:
self.log_mel_folder = Path(self.sup_data_path) / LogMel.name
elif isinstance(self.log_mel_folder, str):
self.log_mel_folder = Path(self.log_mel_folder)
self.log_mel_folder.mkdir(exist_ok=True, parents=True)
def add_durations(self, **kwargs):
durs_file = kwargs.pop('durs_file')
durs_type = kwargs.pop('durs_type')
audio_stem2durs = torch.load(durs_file)
self.durs = []
for tag in [Path(d["audio_filepath"]).stem for d in self.data]:
durs = audio_stem2durs[tag]
if durs_type == "aligner-based":
self.durs.append(durs)
else:
raise NotImplementedError(
f"{durs_type} duration type is not supported. Only aligner-based is supported at this moment."
)
def add_align_prior_matrix(self, **kwargs):
self.use_beta_binomial_interpolator = kwargs.pop('use_beta_binomial_interpolator', False)
if not self.cache_text:
if 'use_beta_binomial_interpolator' in kwargs and not self.use_beta_binomial_interpolator:
logging.warning(
"phoneme_probability is not None, but use_beta_binomial_interpolator=False, we"
" set use_beta_binomial_interpolator=True manually to use phoneme_probability."
)
self.use_beta_binomial_interpolator = True
if self.use_beta_binomial_interpolator:
self.beta_binomial_interpolator = BetaBinomialInterpolator()
def add_pitch(self, **kwargs):
self.pitch_folder = kwargs.pop('pitch_folder', None)
if self.pitch_folder is None:
self.pitch_folder = Path(self.sup_data_path) / Pitch.name
elif isinstance(self.pitch_folder, str):
self.pitch_folder = Path(self.pitch_folder)
self.pitch_folder.mkdir(exist_ok=True, parents=True)
self.pitch_fmin = kwargs.pop("pitch_fmin", librosa.note_to_hz('C2'))
self.pitch_fmax = kwargs.pop("pitch_fmax", librosa.note_to_hz('C7'))
self.pitch_mean = kwargs.pop("pitch_mean", None)
self.pitch_std = kwargs.pop("pitch_std", None)
self.pitch_norm = kwargs.pop("pitch_norm", False)
pitch_stats_path = kwargs.pop("pitch_stats_path", None)
if self.pitch_norm:
# Validate that either both or neither of pitch_mean and pitch_std are provided
assert (self.pitch_mean is None) == (
self.pitch_std is None
), f"Found only 1 of (pitch_mean, pitch_std): ({self.pitch_mean}, {self.pitch_std})"
# XOR to validate that exactly 1 of (pitch_mean, pitch_std) or pitch_stats_path is provided.
assert (self.pitch_mean is None) != (pitch_stats_path is None), (
f"pitch_norm requires exactly 1 of (pitch_mean, pitch_std) or pitch_stats_path. "
f"Provided: ({self.pitch_mean}, {self.pitch_std}) and {pitch_stats_path}"
)
if pitch_stats_path is not None:
with open(Path(pitch_stats_path), 'r', encoding="utf-8") as pitch_f:
self.pitch_stats = json.load(pitch_f)
# saving voiced_mask and p_voiced with pitch
def add_voiced_mask(self, **kwargs):
self.voiced_mask_folder = kwargs.pop('voiced_mask_folder', None)
if self.voiced_mask_folder is None:
self.voiced_mask_folder = Path(self.sup_data_path) / Voiced_mask.name
self.voiced_mask_folder.mkdir(exist_ok=True, parents=True)
def add_p_voiced(self, **kwargs):
self.p_voiced_folder = kwargs.pop('p_voiced_folder', None)
if self.p_voiced_folder is None:
self.p_voiced_folder = Path(self.sup_data_path) / P_voiced.name
self.p_voiced_folder.mkdir(exist_ok=True, parents=True)
def add_energy(self, **kwargs):
self.energy_folder = kwargs.pop('energy_folder', None)
if self.energy_folder is None:
self.energy_folder = Path(self.sup_data_path) / Energy.name
elif isinstance(self.energy_folder, str):
self.energy_folder = Path(self.energy_folder)
self.energy_folder.mkdir(exist_ok=True, parents=True)
def add_speaker_id(self, **kwargs):
pass
def add_reference_audio(self, **kwargs):
"""Add a mapping for each speaker to their manifest indexes."""
assert SpeakerID in self.sup_data_types, "Please add speaker_id in sup_data_types."
self.speaker_to_index_map = defaultdict(set)
for i, d in enumerate(self.data):
self.speaker_to_index_map[d['speaker_id']].add(i)
def get_spec(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.stft(audio)
if spec.dtype in [torch.cfloat, torch.cdouble]:
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1) + EPSILON)
return spec
def get_log_mel(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.get_spec(audio)
mel = torch.matmul(self.fb.to(spec.dtype), spec)
log_mel = torch.log(torch.clamp(mel, min=torch.finfo(mel.dtype).tiny))
return log_mel
def pitch_shift(self, audio, sr, rel_audio_path_as_text_id):
audio_shifted_path = Path(self.sup_data_path) / f"{rel_audio_path_as_text_id}_pitch_shift.pt"
if audio_shifted_path.exists() and self.cache_pitch_augment:
audio_shifted = torch.load(audio_shifted_path)
return audio_shifted
else:
choice1 = np.random.uniform(-4, -1)
choice2 = np.random.uniform(1, 4)
shift_val = random.choice([choice1, choice2])
audio_shifted = librosa.effects.pitch_shift(audio, sr=sr, n_steps=shift_val)
# save audio_shifted
audio_shifted = torch.tensor(audio_shifted)
if self.cache_pitch_augment:
torch.save(audio_shifted, audio_shifted_path)
return audio_shifted
def _pad_wav_to_multiple(self, wav):
if self.pad_multiple > 1:
if wav.shape[0] % self.pad_multiple != 0:
wav = torch.cat(
[wav, torch.zeros(self.pad_multiple - wav.shape[0] % self.pad_multiple, dtype=torch.float)]
)
return wav
# Random sample a reference index from the same speaker
def sample_reference_index(self, speaker_id):
reference_pool = self.speaker_to_index_map[speaker_id]
reference_index = random.sample(reference_pool, 1)[0]
return reference_index
def __getitem__(self, index):
sample = self.data[index]
# Let's keep audio name and all internal directories in rel_audio_path_as_text_id to avoid any collisions
rel_audio_path = Path(sample["audio_filepath"]).relative_to(self.base_data_dir).with_suffix("")
rel_audio_path_as_text_id = str(rel_audio_path).replace("/", "_")
if (
self.segment_max_duration is not None
and 'duration' in sample
and sample['duration'] > self.segment_max_duration
):
# this case has been added for segmenting audio for speaker verification task of SSLDisentangler
n_segments = int(self.segment_max_duration * self.sample_rate)
features = AudioSegment.segment_from_file(
sample["audio_filepath"], target_sr=self.sample_rate, n_segments=n_segments, trim=self.trim
)
audio_shifted = None
# should not have pitch shift augmented data for speaker verification
assert not self.pitch_augment
features = torch.tensor(features.samples)
if self.pad_multiple > 1:
features = self._pad_wav_to_multiple(features)
audio, audio_length = features, torch.tensor(features.shape[0]).long()
else:
features = self.featurizer.process(
sample["audio_filepath"],
trim=self.trim,
trim_ref=self.trim_ref,
trim_top_db=self.trim_top_db,
trim_frame_length=self.trim_frame_length,
trim_hop_length=self.trim_hop_length,
)
if self.pad_multiple > 1:
features = self._pad_wav_to_multiple(features)
audio_shifted = None
if self.pitch_augment:
audio_shifted = self.pitch_shift(
features.cpu().detach().numpy(), self.sample_rate, rel_audio_path_as_text_id
)
assert audio_shifted.size() == features.size(), "{} != {}".format(
audio_shifted.size(), features.size()
)
audio, audio_length = features, torch.tensor(features.shape[0]).long()
if "text_tokens" in sample:
text = torch.tensor(sample["text_tokens"]).long()
text_length = torch.tensor(len(text)).long()
else:
tokenized = self.text_tokenizer(sample["normalized_text"])
text = torch.tensor(tokenized).long()
text_length = torch.tensor(len(tokenized)).long()
# Load mel if needed
log_mel, log_mel_length = None, None
if LogMel in self.sup_data_types_set:
mel_path = sample["mel_filepath"]
if mel_path is not None and Path(mel_path).exists():
log_mel = torch.load(mel_path)
else:
mel_path = self.log_mel_folder / f"{rel_audio_path_as_text_id}.pt"
if mel_path.exists():
log_mel = torch.load(mel_path)
else:
log_mel = self.get_log_mel(audio)
torch.save(log_mel, mel_path)
log_mel = log_mel.squeeze(0)
log_mel_length = torch.tensor(log_mel.shape[1]).long()
# Load durations if needed
durations = None
if Durations in self.sup_data_types_set:
durations = self.durs[index]
# Load alignment prior matrix if needed
align_prior_matrix = None
if AlignPriorMatrix in self.sup_data_types_set:
mel_len = self.get_log_mel(audio).shape[2]
if self.use_beta_binomial_interpolator:
align_prior_matrix = torch.from_numpy(self.beta_binomial_interpolator(mel_len, text_length.item()))
else:
align_prior_matrix = torch.from_numpy(beta_binomial_prior_distribution(text_length, mel_len))
non_exist_voiced_index = []
my_var = locals()
for i, voiced_item in enumerate([Pitch, Voiced_mask, P_voiced]):
if voiced_item in self.sup_data_types_set:
voiced_folder = getattr(self, f"{voiced_item.name}_folder")
voiced_filepath = voiced_folder / f"{rel_audio_path_as_text_id}.pt"
if voiced_filepath.exists():
my_var.__setitem__(voiced_item.name, torch.load(voiced_filepath).float())
else:
non_exist_voiced_index.append((i, voiced_item.name, voiced_filepath))
if len(non_exist_voiced_index) != 0:
voiced_tuple = librosa.pyin(
audio.numpy(),
fmin=self.pitch_fmin,
fmax=self.pitch_fmax,
frame_length=self.win_length,
sr=self.sample_rate,
fill_na=0.0,
)
for (i, voiced_name, voiced_filepath) in non_exist_voiced_index:
my_var.__setitem__(voiced_name, torch.from_numpy(voiced_tuple[i]).float())
torch.save(my_var.get(voiced_name), voiced_filepath)
pitch = my_var.get('pitch', None)
pitch_length = my_var.get('pitch_length', None)
voiced_mask = my_var.get('voiced_mask', None)
p_voiced = my_var.get('p_voiced', None)
# normalize pitch if requested.
if pitch is not None:
pitch_length = torch.tensor(len(pitch)).long()
if self.pitch_norm:
if self.pitch_mean is not None and self.pitch_std is not None:
sample_pitch_mean = self.pitch_mean
sample_pitch_std = self.pitch_std
elif self.pitch_stats:
if "speaker_id" in sample and str(sample["speaker_id"]) in self.pitch_stats:
pitch_stats = self.pitch_stats[str(sample["speaker_id"])]
elif "default" in self.pitch_stats:
pitch_stats = self.pitch_stats["default"]
else:
raise ValueError(f"Could not find pitch stats for {sample}.")
sample_pitch_mean = pitch_stats["pitch_mean"]
sample_pitch_std = pitch_stats["pitch_std"]
else:
raise ValueError(f"Missing statistics for pitch normalization.")
pitch -= sample_pitch_mean
pitch[pitch == -sample_pitch_mean] = 0.0 # Zero out values that were previously zero
pitch /= sample_pitch_std
# Load energy if needed
energy, energy_length = None, None
if Energy in self.sup_data_types_set:
energy_path = self.energy_folder / f"{rel_audio_path_as_text_id}.pt"
if energy_path.exists():
energy = torch.load(energy_path).float()
else:
spec = self.get_spec(audio)
energy = torch.linalg.norm(spec.squeeze(0), axis=0).float()
torch.save(energy, energy_path)
energy_length = torch.tensor(len(energy)).long()
# Load speaker id if needed
speaker_id = None
if SpeakerID in self.sup_data_types_set:
speaker_id = torch.tensor(sample["speaker_id"]).long()
reference_audio, reference_audio_length = None, None
if ReferenceAudio in self.sup_data_types_set:
reference_index = self.sample_reference_index(sample["speaker_id"])
reference_audio = self.featurizer.process(
self.data[reference_index]["audio_filepath"],
trim=self.trim,
trim_ref=self.trim_ref,
trim_top_db=self.trim_top_db,
trim_frame_length=self.trim_frame_length,
trim_hop_length=self.trim_hop_length,
)
reference_audio_length = torch.tensor(reference_audio.shape[0]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
align_prior_matrix,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
voiced_mask,
p_voiced,
audio_shifted,
reference_audio,
reference_audio_length,
)
def __len__(self):
return len(self.data)
def join_data(self, data_dict):
result = []
for data_type in MAIN_DATA_TYPES + self.sup_data_types:
result.append(data_dict[data_type.name])
if issubclass(data_type, TTSDataType) and issubclass(data_type, WithLens):
result.append(data_dict[f"{data_type.name}_lens"])
return tuple(result)
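# Note added for clarity (not in the original file): join_data flattens the batch
# dict into a tuple ordered as MAIN_DATA_TYPES followed by the configured
# sup_data_types, appending "<name>_lens" immediately after every type that
# subclasses WithLens. Consumers indexing into the collated tuple therefore depend
# on the sup_data_types order given at construction time.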
def general_collate_fn(self, batch):
(
_,
audio_lengths,
_,
tokens_lengths,
_,
log_mel_lengths,
durations_list,
align_prior_matrices_list,
pitches,
pitches_lengths,
energies,
energies_lengths,
_,
voiced_masks,
p_voiceds,
_,
_,
reference_audio_lengths,
) = zip(*batch)
max_audio_len = max(audio_lengths).item()
max_tokens_len = max(tokens_lengths).item()
max_log_mel_len = max(log_mel_lengths) if LogMel in self.sup_data_types_set else None
max_durations_len = max([len(i) for i in durations_list]) if Durations in self.sup_data_types_set else None
max_pitches_len = max(pitches_lengths).item() if Pitch in self.sup_data_types_set else None
max_energies_len = max(energies_lengths).item() if Energy in self.sup_data_types_set else None
max_reference_audio_len = (
max(reference_audio_lengths).item() if ReferenceAudio in self.sup_data_types_set else None
)
if LogMel in self.sup_data_types_set:
log_mel_pad = torch.finfo(batch[0][4].dtype).tiny
align_prior_matrices = (
torch.zeros(
len(align_prior_matrices_list),
max([prior_i.shape[0] for prior_i in align_prior_matrices_list]),
max([prior_i.shape[1] for prior_i in align_prior_matrices_list]),
)
if AlignPriorMatrix in self.sup_data_types_set
else []
)
(
audios,
tokens,
log_mels,
durations_list,
pitches,
energies,
speaker_ids,
voiced_masks,
p_voiceds,
audios_shifted,
reference_audios,
) = (
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
[],
)
for i, sample_tuple in enumerate(batch):
(
audio,
audio_len,
token,
token_len,
log_mel,
log_mel_len,
durations,
align_prior_matrix,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
voiced_mask,
p_voiced,
audio_shifted,
reference_audio,
reference_audios_length,
) = sample_tuple
audio = general_padding(audio, audio_len.item(), max_audio_len)
audios.append(audio)
token = general_padding(token, token_len.item(), max_tokens_len, pad_value=self.text_tokenizer_pad_id)
tokens.append(token)
if audio_shifted is not None:
audio_shifted = general_padding(audio_shifted, audio_len.item(), max_audio_len)
audios_shifted.append(audio_shifted)
if LogMel in self.sup_data_types_set:
log_mels.append(general_padding(log_mel, log_mel_len, max_log_mel_len, pad_value=log_mel_pad))
if Durations in self.sup_data_types_set:
durations_list.append(general_padding(durations, len(durations), max_durations_len))
if AlignPriorMatrix in self.sup_data_types_set:
align_prior_matrices[
i, : align_prior_matrix.shape[0], : align_prior_matrix.shape[1]
] = align_prior_matrix
if Pitch in self.sup_data_types_set:
pitches.append(general_padding(pitch, pitch_length.item(), max_pitches_len))
if Voiced_mask in self.sup_data_types_set:
voiced_masks.append(general_padding(voiced_mask, pitch_length.item(), max_pitches_len))
if P_voiced in self.sup_data_types_set:
p_voiceds.append(general_padding(p_voiced, pitch_length.item(), max_pitches_len))
if Energy in self.sup_data_types_set:
energies.append(general_padding(energy, energy_length.item(), max_energies_len))
if SpeakerID in self.sup_data_types_set:
speaker_ids.append(speaker_id)
if ReferenceAudio in self.sup_data_types_set:
reference_audios.append(
general_padding(reference_audio, reference_audios_length.item(), max_reference_audio_len)
)
data_dict = {
"audio": torch.stack(audios),
"audio_lens": torch.stack(audio_lengths),
"text": torch.stack(tokens),
"text_lens": torch.stack(tokens_lengths),
"log_mel": torch.stack(log_mels) if LogMel in self.sup_data_types_set else None,
"log_mel_lens": torch.stack(log_mel_lengths) if LogMel in self.sup_data_types_set else None,
"durations": torch.stack(durations_list) if Durations in self.sup_data_types_set else None,
"align_prior_matrix": align_prior_matrices if AlignPriorMatrix in self.sup_data_types_set else None,
"pitch": torch.stack(pitches) if Pitch in self.sup_data_types_set else None,
"pitch_lens": torch.stack(pitches_lengths) if Pitch in self.sup_data_types_set else None,
"energy": torch.stack(energies) if Energy in self.sup_data_types_set else None,
"energy_lens": torch.stack(energies_lengths) if Energy in self.sup_data_types_set else None,
"speaker_id": torch.stack(speaker_ids) if SpeakerID in self.sup_data_types_set else None,
"voiced_mask": torch.stack(voiced_masks) if Voiced_mask in self.sup_data_types_set else None,
"p_voiced": torch.stack(p_voiceds) if P_voiced in self.sup_data_types_set else None,
"audio_shifted": torch.stack(audios_shifted) if audio_shifted is not None else None,
"reference_audio": torch.stack(reference_audios) if ReferenceAudio in self.sup_data_types_set else None,
"reference_audio_lens": torch.stack(reference_audio_lengths)
if ReferenceAudio in self.sup_data_types_set
else None,
}
return data_dict
def _collate_fn(self, batch):
data_dict = self.general_collate_fn(batch)
joined_data = self.join_data(data_dict)
return joined_data
class MixerTTSXDataset(TTSDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _albert(self):
from transformers import AlbertTokenizer # noqa pylint: disable=import-outside-toplevel
self.lm_model_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
self.lm_padding_value = self.lm_model_tokenizer._convert_token_to_id('<pad>')
space_value = self.lm_model_tokenizer._convert_token_to_id('▁')
self.id2lm_tokens = {}
for i, d in enumerate(self.data):
normalized_text = d["normalized_text"]
assert isinstance(self.text_tokenizer, EnglishPhonemesTokenizer) or isinstance(
self.text_tokenizer, EnglishCharsTokenizer
)
preprocess_text_as_tts_input = self.text_tokenizer.text_preprocessing_func(normalized_text)
lm_tokens_as_ids = self.lm_model_tokenizer.encode(preprocess_text_as_tts_input, add_special_tokens=False)
if self.text_tokenizer.pad_with_space:
lm_tokens_as_ids = [space_value] + lm_tokens_as_ids + [space_value]
self.id2lm_tokens[i] = lm_tokens_as_ids
def add_lm_tokens(self, **kwargs):
lm_model = kwargs.pop('lm_model')
if lm_model == "albert":
self._albert()
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
def __getitem__(self, index):
(
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
align_prior_matrix,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
voiced_mask,
p_voiced,
_, # audio_shifted (only needed for SSLDisentangler)
) = super().__getitem__(index)
lm_tokens = None
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.tensor(self.id2lm_tokens[index]).long()
# Note: Please change the indices in _collate_fn if any items are added/removed.
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
align_prior_matrix,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
voiced_mask,
p_voiced,
lm_tokens,
)
def _collate_fn(self, batch):
batch = list(zip(*batch))
data_dict = self.general_collate_fn(list(zip(*batch[:15])))
lm_tokens_list = batch[15]
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.full(
(len(lm_tokens_list), max([lm_tokens.shape[0] for lm_tokens in lm_tokens_list])),
fill_value=self.lm_padding_value,
)
for i, lm_tokens_i in enumerate(lm_tokens_list):
lm_tokens[i, : lm_tokens_i.shape[0]] = lm_tokens_i
data_dict[LMTokens.name] = lm_tokens
joined_data = self.join_data(data_dict)
return joined_data
class VocoderDataset(Dataset):
def __init__(
self,
manifest_filepath: Union[str, Path, List[str], List[Path]],
sample_rate: int,
n_segments: Optional[int] = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
ignore_file: Optional[Union[str, Path]] = None,
trim: Optional[bool] = False,
load_precomputed_mel: bool = False,
hop_length: Optional[int] = None,
):
"""Dataset which can be used for training and fine-tuning vocoder with pre-computed mel-spectrograms.
Args:
manifest_filepath (Union[str, Path, List[str], List[Path]]): Path(s) to the .json manifests containing
information on the dataset. Each line in the .json file should be valid json. Note: the .json file itself
is not valid json. Each line should contain the following:
"audio_filepath": <PATH_TO_WAV>,
"duration": <Duration of audio clip in seconds> (Optional),
"mel_filepath": <PATH_TO_LOG_MEL> (Optional, can be in .npy (numpy.save) or .pt (torch.save) format)
sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to.
n_segments (int): The length of audio in samples to load. For example, given a sample rate of 16kHz, and
n_segments=16000, a random 1-second section of audio from the clip will be loaded. The section will
be randomly sampled every time the audio is batched. Can be set to None to load the entire audio.
Must be specified if load_precomputed_mel is True.
max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
ignore_file (Optional[Union[str, Path]]): The location of a pickle-saved list of audio paths
that will be pruned prior to training. Defaults to None which does not prune.
trim (bool): Whether to apply librosa.effects.trim to the audio file. Defaults to False.
load_precomputed_mel (bool): Whether to load precomputed mel (useful for fine-tuning).
Note: Requires "mel_filepath" to be set in the manifest file.
hop_length (Optional[int]): The hop length between fft computations. Must be specified if load_precomputed_mel is True.
"""
super().__init__()
if load_precomputed_mel:
if hop_length is None:
raise ValueError("hop_length must be specified when load_precomputed_mel is True")
if n_segments is None:
raise ValueError("n_segments must be specified when load_precomputed_mel is True")
# Initialize and read manifest file(s), filter out data by duration and ignore_file
if isinstance(manifest_filepath, str):
manifest_filepath = [manifest_filepath]
self.manifest_filepath = manifest_filepath
data = []
total_duration = 0
for manifest_file in self.manifest_filepath:
with open(Path(manifest_file).expanduser(), 'r') as f:
logging.info(f"Loading dataset from {manifest_file}.")
for line in tqdm(f):
item = json.loads(line)
if "mel_filepath" not in item and load_precomputed_mel:
raise ValueError(f"mel_filepath is missing in {manifest_file}")
file_info = {
"audio_filepath": item["audio_filepath"],
"mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None,
"duration": item["duration"] if "duration" in item else None,
}
data.append(file_info)
if file_info["duration"] is None:
logging.info(
"Not all audio files have duration information. Duration logging will be disabled."
)
total_duration = None
if total_duration is not None:
total_duration += item["duration"]
logging.info(f"Loaded dataset with {len(data)} files.")
if total_duration is not None:
logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.")
self.data = TTSDataset.filter_files(data, ignore_file, min_duration, max_duration, total_duration)
self.base_data_dir = get_base_dir([item["audio_filepath"] for item in self.data])
# Initialize audio and mel related parameters
self.load_precomputed_mel = load_precomputed_mel
self.featurizer = WaveformFeaturizer(sample_rate=sample_rate)
self.sample_rate = sample_rate
self.n_segments = n_segments
self.hop_length = hop_length
self.trim = trim
def _collate_fn(self, batch):
if self.load_precomputed_mel:
return torch.utils.data.dataloader.default_collate(batch)
audio_lengths = [audio_len for _, audio_len in batch]
audio_signal = torch.zeros(len(batch), max(audio_lengths), dtype=torch.float)
for i, sample in enumerate(batch):
audio_signal[i].narrow(0, 0, sample[0].size(0)).copy_(sample[0])
return audio_signal, torch.tensor(audio_lengths, dtype=torch.long)
def __getitem__(self, index):
sample = self.data[index]
if not self.load_precomputed_mel:
features = AudioSegment.segment_from_file(
sample["audio_filepath"],
target_sr=self.sample_rate,
n_segments=self.n_segments if self.n_segments is not None else -1,
trim=self.trim,
)
features = torch.tensor(features.samples)
audio, audio_length = features, torch.tensor(features.shape[0]).long()
return audio, audio_length
else:
features = self.featurizer.process(sample["audio_filepath"], trim=self.trim)
audio, audio_length = features, torch.tensor(features.shape[0]).long()
if Path(sample["mel_filepath"]).suffix == ".npy":
mel = torch.from_numpy(np.load(sample["mel_filepath"]))
else:
mel = torch.load(sample["mel_filepath"])
frames = math.ceil(self.n_segments / self.hop_length)
if len(audio) >= self.n_segments:
start = random.randint(0, mel.shape[1] - frames - 1)
mel = mel[:, start : start + frames]
audio = audio[start * self.hop_length : (start + frames) * self.hop_length]
else:
mel = torch.nn.functional.pad(mel, (0, frames - mel.shape[1]))
audio = torch.nn.functional.pad(audio, (0, self.n_segments - len(audio)))
return audio, len(audio), mel
def __len__(self):
return len(self.data)
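# Illustrative usage sketch (not part of the original file): batching with the
# dataset's _collate_fn. The `dataset` variable and batch size are hypothetical
# placeholders. Without precomputed mels each batch is (audio, audio_lens); with
# load_precomputed_mel=True the default collate yields (audio, audio_lens, mel).
#
#     import torch.utils.data
#     loader = torch.utils.data.DataLoader(
#         dataset, batch_size=16, shuffle=True, collate_fn=dataset._collate_fn
#     )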
class PairedRealFakeSpectrogramsDataset(Dataset):
def __init__(
self, manifest_filepath: Union[str, Path],
):
manifest_filepath = Path(manifest_filepath)
with Path(manifest_filepath).open() as f:
logging.info(f"Loading paired spectrogram dataset from {manifest_filepath}")
self.manifest = []
for line in f:
entry = json.loads(line.strip())
assert "mel_filepath" in entry
assert "mel_gt_filepath" in entry
self.manifest.append(entry)
logging.info(f"Manifest describes {len(self)} spectrogram pairs")
def __len__(self):
return len(self.manifest)
def __getitem__(self, index):
entry = self.manifest[index]
pred_spec = np.load(entry["mel_filepath"])
true_spec = np.load(entry["mel_gt_filepath"])
return torch.from_numpy(pred_spec.T), torch.from_numpy(true_spec.T)
def _collate_fn(self, batch):
pred_specs, true_specs = zip(*batch)
lengths = [spec.shape[0] for spec in true_specs]
pred_specs = torch.nn.utils.rnn.pad_sequence(pred_specs, batch_first=True)
true_specs = torch.nn.utils.rnn.pad_sequence(true_specs, batch_first=True)
lengths = torch.LongTensor(lengths)
return rearrange(pred_specs, "b l c -> b c l"), rearrange(true_specs, "b l c -> b c l"), lengths
class FastPitchSSLDataset(Dataset):
def __init__(
self,
manifest_filepath: Union[str, Path, List[str], List[Path]],
sample_rate: int,
ssl_content_emb_type: str,
pad_multiple: Optional[int] = 1024,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
ignore_file: Optional[Union[str, Path]] = None,
trim: Optional[bool] = False,
pitch_conditioning: Optional[bool] = False,
pitch_mean: Optional[float] = None,
pitch_std: Optional[float] = None,
pitch_normalization: Optional[str] = None,
sup_data_dir: Optional[Union[str, Path]] = None,
speaker_stats_pitch_fp: Optional[Union[str, Path]] = None,
speaker_conditioning_type: Optional[str] = "per_sample", # per_sample, mean, interpolate,
):
"""Dataset used for training FastPitchModel_SSL model.
Requires supplementary data created using scripts/ssl_tts/make_supdata.py
Args:
manifest_filepath (Union[str, Path, List[str], List[Path]]): Path(s) to the .json manifests containing
information on the dataset. Each line in the .json file should be valid json. Note: the .json file itself
is not valid json. Each line should contain the following:
"audio_filepath": <PATH_TO_WAV>,
"speaker" : <SPEAKER NUM>
"duration": <Duration of audio clip in seconds> (Optional)
sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to.
ssl_content_emb_type (str): One of ["probs", "embedding", "log_probs", "embedding_and_probs"].
Indicated which output to use as content embedding.
max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
ignore_file (Optional[Union[str, Path]]): The location of a pickle-saved list of audio paths
that will be pruned prior to training. Defaults to None which does not prune.
trim (bool): Whether to apply `librosa.effects.trim` to trim leading and trailing silence from an audio
signal. Defaults to False.
pitch_conditioning (bool): Whether to load pitch contour or not
pitch_mean (Optional[float]): If using global normalization, normalize using these statistics.
Also used if speaker stats are not available for the given speaker
pitch_std (Optional[float]): If using global normalization, normalize using these statistics.
Also used if speaker stats are not available for the given speaker
pitch_normalization (str): Can be one of ['speaker_wise', 'global', 'none']. Indicates the kind of pitch normalization.
sup_data_dir (Optional[Union[str, Path]]): Data directory containing pre-computed embeddings/statistics. If set as
None, defaults to a "sup_data" directory inside the base data directory.
speaker_stats_pitch_fp (Optional[Union[str, Path]]): Path to the json containing speaker pitch stats.
If set as None, tries to look up a default filename (speaker_pitch_stats.json) in sup_data_dir.
Needed if pitch_normalization is "speaker_wise".
speaker_conditioning_type (Optional[str]): Can be one of ["per_sample", "mean", "interpolate"]. Defaults to "per_sample"
per_sample: Speaker embedding computed from the same utterance
mean: Speaker embedding for all utterances of a given speaker is the same and equal to the mean speaker embedding.
interpolate: Interpolate between the per_sample and mean speaker embeddings.
"""
assert ssl_content_emb_type in ["probs", "embedding", "log_probs", "embedding_and_probs"]
if isinstance(manifest_filepath, str):
manifest_filepath = [manifest_filepath]
self.manifest_filepath = manifest_filepath
data = []
total_duration = 0
# TODO: Reuse code for reading manifests across all tts datasets
for manifest_file in self.manifest_filepath:
with open(Path(manifest_file).expanduser(), 'r') as f:
logging.info(f"Loading dataset from {manifest_file}.")
for line in tqdm(f):
item = json.loads(line)
if "speaker" not in item:
item["speaker"] = 0
file_info = {
"audio_filepath": item["audio_filepath"],
"duration": item["duration"] if "duration" in item else None,
"speaker": item["speaker"] if "speaker" in item else 0,
"dataset_id": item["dataset_id"] if "dataset_id" in item else 0,
}
data.append(file_info)
if file_info["duration"] is None:
logging.info(
"Not all audio files have duration information. Duration logging will be disabled."
)
total_duration = None
if total_duration is not None:
total_duration += item["duration"]
logging.info(f"Loaded dataset with {len(data)} files.")
if total_duration is not None:
logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.")
self.data = TTSDataset.filter_files(data, ignore_file, min_duration, max_duration, total_duration)
self.base_data_dir = get_base_dir([item["audio_filepath"] for item in self.data])
self.featurizer = WaveformFeaturizer(sample_rate=sample_rate)
self.sample_rate = sample_rate
self.trim = trim
self.pad_multiple = pad_multiple
self.pitch_normalization = pitch_normalization
self.pitch_mean = pitch_mean
self.pitch_std = pitch_std
self.pitch_conditioning = pitch_conditioning
self.speaker_conditioning_type = speaker_conditioning_type
self.ssl_content_emb_type = ssl_content_emb_type
if sup_data_dir is None:
sup_data_dir = os.path.join(self.base_data_dir, "sup_data")
self.sup_data_dir = sup_data_dir
if self.pitch_normalization == "speaker_wise":
self.speaker_stats = {}
if speaker_stats_pitch_fp is None:
speaker_stats_pitch_fp = os.path.join(sup_data_dir, "speaker_pitch_stats.json")
assert os.path.exists(
speaker_stats_pitch_fp
), "speaker_stats_pitch_fp {} does not exist. Make sure to run scripts/ssl_tts/make_supdata.py before training.".format(
speaker_stats_pitch_fp
)
with open(speaker_stats_pitch_fp, "r") as f:
speaker_stats_raw = json.load(f)
for key in speaker_stats_raw:
self.speaker_stats[int(key)] = speaker_stats_raw[key]
def _get_wav_from_filepath(self, audio_filepath):
features = AudioSegment.segment_from_file(
audio_filepath, target_sr=self.sample_rate, n_segments=-1, trim=self.trim,
)
audio_samples = features.samples
audio, audio_length = torch.tensor(audio_samples), torch.tensor(audio_samples.shape[0]).long()
# pad audio to a multiple of self.pad_multiple
if audio.shape[0] % self.pad_multiple != 0:
audio = torch.cat(
[audio, torch.zeros(self.pad_multiple - audio.shape[0] % self.pad_multiple, dtype=torch.float)]
)
audio_length = torch.tensor(audio.shape[0]).long()
return audio, audio_length
def get_ssl_features(self, wav_text_id):
content_emb_fn = f"{self.ssl_content_emb_type}_content_embedding_{wav_text_id}.pt"
speaker_emb_fn = f"speaker_embedding_{wav_text_id}.pt"
duration_fn = f"duration_embedding_{wav_text_id}.pt" # embedding just for namesake
content_emb_fp = os.path.join(self.sup_data_dir, content_emb_fn)
speaker_emb_fp = os.path.join(self.sup_data_dir, speaker_emb_fn)
duration_fp = os.path.join(self.sup_data_dir, duration_fn)
if os.path.exists(content_emb_fp):
content_embedding = torch.load(content_emb_fp)
else:
raise ValueError(
f"Content embedding file {content_emb_fp} does not exist. Make sure to run scripts/ssl_tts/make_supdata.py before training."
)
if os.path.exists(speaker_emb_fp):
speaker_embedding = torch.load(speaker_emb_fp)
else:
raise ValueError(
f"Speaker embedding file {speaker_emb_fp} does not exist. Make sure to run scripts/ssl_tts/make_supdata.py before training."
)
if os.path.exists(duration_fp):
duration = torch.load(duration_fp)
else:
raise ValueError(
f"Duration file {duration_fp} does not exist. Make sure to run scripts/ssl_tts/make_supdata.py before training."
)
encoded_len = torch.tensor(content_embedding.shape[1]).long()
return content_embedding, speaker_embedding, encoded_len, duration
def get_pitch_contour(self, wav_text_id):
pitch_contour_fn = f"pitch_contour_{wav_text_id}.pt"
pitch_contour_fp = os.path.join(self.sup_data_dir, pitch_contour_fn)
if os.path.exists(pitch_contour_fp):
return torch.load(pitch_contour_fp)
else:
raise ValueError(
f"Pitch contour file {pitch_contour_fp} does not exist. Make sure to run scripts/ssl_tts/make_supdata.py before training."
)
def get_mel_spectrogram(self, wav_text_id):
mel_spec_fn = f"mel_spec_{wav_text_id}.pt"
mel_spec_fp = os.path.join(self.sup_data_dir, mel_spec_fn)
if os.path.exists(mel_spec_fp):
return torch.load(mel_spec_fp)
else:
raise ValueError(
f"Mel spectrogram file {mel_spec_fp} does not exist. Make sure to run scripts/ssl_tts/make_supdata.py before training."
)
def pad_collate_fn(self, batch):
"""
Collate function for FastPitchModel_SSL.
        Pads the tensors in the batch with zeros to match the length of the longest sequence in the batch.
Used in fastpitch_ssl.py
"""
final_batch = defaultdict(list)
for row in batch:
for key in row:
final_batch[key].append(row[key])
max_audio_len = max([_audio_len.item() for _audio_len in final_batch["audio_len"]])
max_mel_len = max([_mel_len.item() for _mel_len in final_batch["mel_len"]])
max_encoded_len = max([_encoded_len.item() for _encoded_len in final_batch["encoded_len"]])
audios_padded = []
for audio in final_batch["audio"]:
audio_padded = torch.nn.functional.pad(audio, (0, max_audio_len - audio.size(0)), value=0)
audios_padded.append(audio_padded)
mels_padded = []
for mel in final_batch["mel_spectrogram"]:
mel_padded = torch.nn.functional.pad(mel, (0, max_mel_len - mel.size(1)), value=0)
mels_padded.append(mel_padded)
pitch_contours_padded = []
for pitch_contour in final_batch["pitch_contour"]:
pitch_contour_padded = torch.nn.functional.pad(
pitch_contour, (0, max_mel_len - pitch_contour.size(0)), value=0
)
pitch_contours_padded.append(pitch_contour_padded)
content_embeddings_padded = []
for encoded in final_batch["content_embedding"]:
encoded_padded = torch.nn.functional.pad(encoded, (0, max_encoded_len - encoded.size(1)), value=0)
content_embeddings_padded.append(encoded_padded)
durations_padded = []
for duration in final_batch["duration"]:
duration_padded = torch.nn.functional.pad(duration, (0, max_encoded_len - duration.size(0)), value=0.0)
durations_padded.append(duration_padded)
final_batch["audio"] = audios_padded
final_batch["mel_spectrogram"] = mels_padded
final_batch["pitch_contour"] = pitch_contours_padded
final_batch["content_embedding"] = content_embeddings_padded
final_batch["duration"] = durations_padded
for key in final_batch:
final_batch[key] = torch.stack(final_batch[key])
return final_batch
def __getitem__(self, index):
sample = self.data[index]
rel_audio_path = Path(sample["audio_filepath"]).relative_to(self.base_data_dir).with_suffix("")
rel_audio_path_as_text_id = str(rel_audio_path).replace("/", "_")
speaker = torch.tensor(sample["speaker"]).long()
dataset_id = torch.tensor(sample["dataset_id"]).long()
audio, audio_length = self._get_wav_from_filepath(sample["audio_filepath"])
pitch_contour = None
if self.pitch_conditioning:
pitch_contour = self.get_pitch_contour(rel_audio_path_as_text_id)
content_embedding, speaker_embedding, encoded_len, duration = self.get_ssl_features(rel_audio_path_as_text_id)
if self.speaker_conditioning_type == "mean":
assert sample["speaker"] in self.mean_speaker_embeddings, "{} not in speaker emb".format(sample['speaker'])
speaker_embedding = self.mean_speaker_embeddings[sample["speaker"]]
elif self.speaker_conditioning_type == "interpolate":
assert sample["speaker"] in self.mean_speaker_embeddings, "{} not in speaker emb".format(sample['speaker'])
e1 = self.mean_speaker_embeddings[sample["speaker"]]
e2 = speaker_embedding
interpolate_factor = np.random.uniform(0, 1)
speaker_embedding = e1 * (1 - interpolate_factor) + e2 * interpolate_factor
l2_norm = torch.norm(speaker_embedding, p=2)
speaker_embedding = speaker_embedding / l2_norm
mel_spectrogram = None
mel_len = None
mel_spectrogram = self.get_mel_spectrogram(rel_audio_path_as_text_id)
mel_len = torch.tensor(mel_spectrogram.shape[1]).long()
if pitch_contour is not None:
if self.pitch_normalization in ["speaker_wise", "global"]:
mean, std = self.pitch_mean, self.pitch_std
if self.pitch_normalization == "speaker_wise":
mean = self.speaker_stats[sample["speaker"]]["pitch_mean"]
std = self.speaker_stats[sample["speaker"]]["pitch_std"]
if np.isnan(mean) or np.isnan(std) or mean == 0 or std == 0:
logging.warning("NaN found in pitch mean/std for speaker {}".format(sample["speaker"]))
mean = self.pitch_mean
std = self.pitch_std
elif self.pitch_normalization == "global":
mean = self.pitch_mean
std = self.pitch_std
pitch_contour = pitch_contour - mean
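                # values equal to -mean were exactly 0 before the shift (typically unvoiced frames);
                # reset them to 0 so that normalization leaves them untouched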
pitch_contour[pitch_contour == -mean] = 0.0
pitch_contour = pitch_contour / std
if pitch_contour.dtype != torch.float32:
logging.warning("invalid pitch contour for {}".format(sample["audio_filepath"]))
logging.warning("Setting pitch contour to 0")
pitch_contour = torch.zeros(mel_spectrogram.shape[1])
item = {
'audio': audio,
'audio_len': audio_length,
'content_embedding': content_embedding,
'speaker_embedding': speaker_embedding,
'encoded_len': encoded_len,
'pitch_contour': pitch_contour,
'speaker': speaker,
'mel_spectrogram': mel_spectrogram,
'mel_len': mel_len,
'dataset_id': dataset_id,
'duration': duration,
}
return item
def __len__(self):
return len(self.data)
class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
"""
    Maintain similar input lengths in a batch.
    Length groups are specified by boundaries.
    Ex) boundaries = [b1, b2, b3] -> each batch contains only samples x with either b1 < length(x) <= b2 or b2 < length(x) <= b3.
    Samples whose lengths fall outside the boundaries are discarded.
    Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded.
"""
def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
self.lengths = dataset.lengths
self.batch_size = batch_size
self.boundaries = boundaries
self.buckets, self.num_samples_per_bucket = self._create_buckets()
self.total_size = sum(self.num_samples_per_bucket)
self.num_samples = self.total_size // self.num_replicas
def _create_buckets(self):
buckets = [[] for _ in range(len(self.boundaries) - 1)]
for i in range(len(self.lengths)):
length = self.lengths[i]
idx_bucket = self._bisect(length)
if idx_bucket != -1:
buckets[idx_bucket].append(i)
for i in range(len(buckets) - 1, 0, -1):
if len(buckets[i]) == 0:
buckets.pop(i)
self.boundaries.pop(i + 1)
num_samples_per_bucket = []
total_batch_size = self.num_replicas * self.batch_size
for i in range(len(buckets)):
len_bucket = len(buckets[i])
rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
num_samples_per_bucket.append(len_bucket + rem)
return buckets, num_samples_per_bucket
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = []
if self.shuffle:
for bucket in self.buckets:
indices.append(torch.randperm(len(bucket), generator=g).tolist())
else:
for bucket in self.buckets:
indices.append(list(range(len(bucket))))
batches = []
for i in range(len(self.buckets)):
bucket = self.buckets[i]
len_bucket = len(bucket)
ids_bucket = indices[i]
num_samples_bucket = self.num_samples_per_bucket[i]
# add extra samples to make it evenly divisible
rem = num_samples_bucket - len_bucket
ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[: (rem % len_bucket)]
# subsample
ids_bucket = ids_bucket[self.rank :: self.num_replicas]
# batching
for j in range(len(ids_bucket) // self.batch_size):
batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size : (j + 1) * self.batch_size]]
batches.append(batch)
if self.shuffle:
batch_ids = torch.randperm(len(batches), generator=g).tolist()
batches = [batches[i] for i in batch_ids]
self.batches = batches
assert len(self.batches) * self.batch_size == self.num_samples
return iter(self.batches)
def _bisect(self, x, lo=0, hi=None):
if hi is None:
hi = len(self.boundaries) - 1
if hi > lo:
mid = (hi + lo) // 2
if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
return mid
elif x <= self.boundaries[mid]:
return self._bisect(x, lo, mid)
else:
return self._bisect(x, mid + 1, hi)
else:
return -1
def __len__(self):
return self.num_samples // self.batch_size
def set_epoch(self, epoch: int) -> None:
"""
Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
use a different random ordering for each epoch. Otherwise, the next iteration of this
sampler will yield the same ordering.
Args:
epoch (int): Epoch number.
"""
self.epoch = epoch
| NeMo-main | nemo/collections/tts/data/dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp import data, losses, models, modules
from nemo.package_info import __version__
# Set collection version equal to NeMo version.
__version = __version__
# Authorship.
__author__ = "NVIDIA Corporation"
# Set collection name.
__description__ = "Natural Language Processing collection"
| NeMo-main | nemo/collections/nlp/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from collections import Counter
import numpy as np
from sacrebleu import corpus_bleu
class DialogueGenerationMetrics(object):
@staticmethod
def save_predictions(
filename, generated_field, ground_truth_field, inputs,
):
"""
Save predictions as a jsonl file
Args:
Each arg is a list of strings (all args have the same length)
"""
docs = []
for i in range(len(inputs)):
docs.append(
{"input": inputs[i], "ground_truth": ground_truth_field[i], "generated": generated_field[i],}
)
with open(filename, 'w', encoding="UTF-8") as f:
for item in docs:
f.write(json.dumps(item) + "\n")
@staticmethod
def _get_one_f1(generated_field, ground_truth_field):
"""
Get precision, recall, f1 based on token overlap between generated and ground_truth sequence
"""
generated_tokens = generated_field.split()
ground_truth_tokens = ground_truth_field.split()
common = Counter(generated_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0, 0, 0
precision = 1.0 * num_same / len(generated_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return np.array([precision * 100, recall * 100, f1 * 100])
@staticmethod
def get_f1(generated_fields, ground_truth_fields):
total_p_r_f1 = np.array(
[
DialogueGenerationMetrics._get_one_f1(generated_fields[i], ground_truth_fields[i])
for i in range(len(ground_truth_fields))
]
)
avg_p_r_f1 = np.mean(total_p_r_f1, axis=0)
return avg_p_r_f1
@staticmethod
def get_bleu(generated_field, ground_truth_field):
"""
        Referenced from NMT evaluation.
        Note: 13a is the default tokenizer for English for WMT.
        Known issue: it does not handle the edge case of None or ''
        https://github.com/mjpost/sacrebleu/issues/161
"""
valid_indices = [i for i in range(len(generated_field)) if generated_field[i] and ground_truth_field[i]]
generated_field = [generated_field[i] for i in valid_indices]
ground_truth_field = [ground_truth_field[i] for i in valid_indices]
sacre_bleu = corpus_bleu(generated_field, [ground_truth_field], tokenize="13a")
return sacre_bleu.score
class DialogueClassificationMetrics(object):
@staticmethod
def save_predictions(
filename,
generated_labels,
generated_slots,
ground_truth_labels,
ground_truth_slots,
generated_field,
ground_truth_field,
inputs,
):
"""
Save predictions as a jsonl file
Args:
Each arg is a list of strings (all args have the same length)
"""
docs = []
for i in range(len(inputs)):
docs.append(
{
"input": inputs[i],
"ground_truth": ground_truth_field[i],
"ground_truth_slots": ground_truth_slots[i],
"ground_truth_labels": ground_truth_labels[i],
"generated": generated_field[i],
"generated_slots": generated_slots[i],
"generated_labels": generated_labels[i],
}
)
with open(filename, 'w', encoding="UTF-8") as f:
for item in docs:
f.write(json.dumps(item) + "\n")
@staticmethod
def split_label_and_slots(fields, with_slots=False):
"""
        Split target into label and slots when doing joint label (i.e. intent) classification and slot filling.
For instance, split "reserve_restaurant\nslots: time_of_day(7pm), number_of_people(3)" into
label = "reserve_restaurant" and slots = ["time_of_day(7pm)", "number_of_people(3)"]
Args:
fields: list of strings
"""
labels = []
slots_list = []
for field in fields:
if with_slots:
combo = [i.strip() for i in field.split('slots:', 1)]
label = 'none'
if len(combo) == 2:
label, slots = combo
elif len(combo) == 1:
slots = combo[0]
label = 'none'
if isinstance(slots, str):
# temporary patch for purnendu model output
if 'possible intents:' in slots:
slots = slots.split('possible intents:')[0]
slots = slots.split(', ')
else:
slots = ['None']
else:
label = field
slots = []
slots_list.append(slots)
labels.append(label)
return labels, slots_list
@staticmethod
def get_slot_filling_metrics(generated_slots, ground_truth_slots):
"""
Args:
generated_slots: list of list of strings.
Each string is slot-name and slot-value pair e.g. location(Seattle)
ground_truth_slots: list of list of strings
"""
all_recall = []
all_precision = []
all_joint_goal_accuracy = []
for i in range(len(generated_slots)):
            # deduplicate and sort
ground_truth = sorted(list(set(ground_truth_slots[i])))
predicted = sorted(list(set(generated_slots[i])))
correct = [item for item in predicted if item in ground_truth]
recall = len(correct) / len(ground_truth) if len(ground_truth) > 0 else 0
precision = len(correct) / len(predicted) if len(predicted) > 0 else 0
joint_goal_accuracy = int(ground_truth == predicted)
all_recall.append(recall)
all_precision.append(precision)
all_joint_goal_accuracy.append(joint_goal_accuracy)
avg_joint_goal_accuracy = np.mean(all_joint_goal_accuracy) * 100
avg_precision = np.mean(all_precision) * 100
avg_recall = np.mean(all_recall) * 100
avg_f1 = 2 * (avg_recall * avg_precision) / (avg_recall + avg_precision + 1e-20)
return avg_precision, avg_recall, avg_f1, avg_joint_goal_accuracy
| NeMo-main | nemo/collections/nlp/metrics/dialogue_metrics.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torchmetrics import Metric
__all__ = ['SequencePerplexity']
class SequencePerplexity(Metric):
"""
This class computes mean perplexity across the batches of sequences.
You have to provide ``log_probs`` (float tensor of shape [batch_size x seq_length x vocab_size]) and
``labels`` (int tensor of shape [batch_size x seq_length] with values from the range [0, vocab_size-1])
to the :meth:`update` method. If some of the sequences are shorter than seq_length, you can also provide
an optional argument ``mask`` (bool tensor of shape [batch_size x seq_length]) which masks out tokens
not participating in perplexity computation.
See :doc:`PyTorch Lightning Metrics<pytorch-lightning:metrics>` for the metric usage instructions.
Args:
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()`` before returning the value at the step.
process_group:
Specify the process group on which synchronization is called. default: ``None`` (which selects the entire
world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state. When ``None``, DDP will be used
to perform the allgather.
"""
def __init__(self, dist_sync_on_step=False, process_group=None, dist_sync_fn=None):
super().__init__(
dist_sync_on_step=dist_sync_on_step, process_group=process_group, dist_sync_fn=dist_sync_fn,
)
# Total sum of exponentiated average negative log likelihoods
self.add_state('perplexities_sum', default=torch.tensor(0.0, dtype=torch.float64), dist_reduce_fx='sum')
# Total number of sequences in all batches
self.add_state('num_sequences', default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx='sum')
def update(self, log_probs: torch.Tensor, labels: torch.Tensor, mask=None):
if mask is None:
mask = torch.ones_like(labels)
if mask.dtype is not log_probs.dtype:
mask = mask.to(log_probs.dtype)
target_log_probs = log_probs.gather(2, labels.unsqueeze(2)).squeeze(2)
avg_neg_ll = -(target_log_probs * mask).sum(dim=-1) / mask.sum(dim=-1)
ppl = avg_neg_ll.exp()
self.num_sequences += ppl.numel()
self.perplexities_sum += ppl.sum()
def compute(self):
"""
Returns perplexity across all workers and resets to 0 :attr:`perplexities_sum` and :attr:`num_sequences`.
"""
if self.num_sequences.eq(0):
return None
return self.perplexities_sum / self.num_sequences
| NeMo-main | nemo/collections/nlp/metrics/sequence_perplexity.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.metrics.classification_report import ClassificationReport, MultiLabelClassificationReport
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueClassificationMetrics
from nemo.collections.nlp.metrics.qa_metrics import QAMetrics
from nemo.collections.nlp.metrics.sequence_perplexity import SequencePerplexity
| NeMo-main | nemo/collections/nlp/metrics/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import re
import string
import torch
from tqdm import tqdm
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.utils import logging
class QAMetrics(object):
@staticmethod
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
@staticmethod
def white_space_fix(text):
return " ".join(text.split())
@staticmethod
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
@staticmethod
def normalize_answer(s: str):
""" Lower text and remove punctuation, articles and extra whitespace """
return QAMetrics.white_space_fix(QAMetrics.remove_articles(QAMetrics.remove_punc(s.lower())))
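    # Example: normalize_answer("The Cat, sat!") -> "cat sat"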
@staticmethod
def _get_normalized_tokens(s: str):
""" Get normalized tokens """
if not s:
return []
return QAMetrics.normalize_answer(s).split()
@staticmethod
def get_one_f1(prediction: str, ground_truth: str):
""" Computes f1 score between prediction and ground truth """
prediction_tokens = QAMetrics._get_normalized_tokens(prediction)
ground_truth_tokens = QAMetrics._get_normalized_tokens(ground_truth)
common = collections.Counter(prediction_tokens) & collections.Counter(ground_truth_tokens)
num_same = sum(common.values())
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
if len(ground_truth_tokens) == 0 or len(prediction_tokens) == 0:
return int(ground_truth_tokens == prediction_tokens)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
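    # Example: get_one_f1("the cat sat", "a cat sat down") -> precision 1.0, recall 2/3, f1 0.8
    # after normalization and article removal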
@staticmethod
def get_one_exact_match(prediction: str, ground_truth: str):
""" Computes exact match between prediction and ground truth """
return int(QAMetrics.normalize_answer(prediction) == QAMetrics.normalize_answer(ground_truth))
@staticmethod
def convert_dict_outputs_to_lists(outputs, keys):
output_lists = [[] for _ in range(len(keys))]
for output in outputs:
for i, key in enumerate(keys):
if isinstance(output[key], torch.Tensor):
output_lists[i].extend(tensor2list(output[key]))
else:
output_lists[i].extend(output[key])
return output_lists
@staticmethod
def get_exact_match_and_f1(examples, preds, question_id_filter=[]):
"""
Returns a dictionary of question id: exact match/f1 score
Questions with ids *not* present in `question_id_filter` are excluded
"""
exact_scores = {}
f1_scores = {}
for example in examples:
question_id = example.qas_id
if question_id not in question_id_filter:
continue
gold_answers = [answer["text"] for answer in example.answers if QAMetrics.normalize_answer(answer["text"])]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = [""]
pred = preds[question_id]
exact_scores[question_id] = max(QAMetrics.get_one_exact_match(pred, a) for a in gold_answers)
f1_scores[question_id] = max(QAMetrics.get_one_f1(pred, a) for a in gold_answers)
return exact_scores, f1_scores
@staticmethod
def make_eval_dict(exact_scores, f1_scores, prefix=""):
""" Returns dictionary with formatted evaluation scores """
total = len(exact_scores)
return collections.OrderedDict(
[
(f"{prefix}exact", (100.0 * sum(exact_scores.values()) / total) if total != 0 else 0.0),
(f"{prefix}f1", (100.0 * sum(f1_scores.values()) / total) if total != 0 else 0.0),
(f"{prefix}total", float(total)),
]
)
@staticmethod
def merge_eval_dicts(eval_dicts):
"""
Combines multiple evaluation dict outputs into one dict
Ex: combines eval dicts for HasAns F1, NoAnsF1, and Total F1
"""
merged_dict = collections.OrderedDict()
for eval_dict in eval_dicts:
for key in eval_dict:
merged_dict[key] = eval_dict[key]
return merged_dict
@staticmethod
def evaluate_predictions(examples, all_predictions):
"""
Calculates exact match and f1 scores for all predictions,
questions with answers, and no answer questions
"""
qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples[: len(all_predictions)]}
has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer]
no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer]
filters_and_prefixes = [
(list(qas_id_to_has_answer), ""),
(has_answer_qids, "HasAns_"),
(no_answer_qids, "NoAns_"),
]
eval_dicts = []
for qas_id_filter, prefix in filters_and_prefixes:
curr_exact, curr_f1 = QAMetrics.get_exact_match_and_f1(examples, all_predictions, qas_id_filter)
curr_eval_dict = QAMetrics.make_eval_dict(curr_exact, curr_f1, prefix=prefix)
eval_dicts.append(curr_eval_dict)
merged_eval_dict = QAMetrics.merge_eval_dicts(eval_dicts)
return merged_eval_dict
@staticmethod
def dump_predicted_answers_to_file(output_filename, examples, predictions):
logging.info(f"Writing predictions to {output_filename}")
with open(output_filename, "w") as writer:
for ex in tqdm(examples):
output_item = {
"id": ex.qas_id,
"context": ex.context_text,
"question": ex.question_text,
"predicted_answer": predictions[ex.qas_id],
}
writer.write(json.dumps(output_item) + "\n")
@staticmethod
def dump_nbest_predictions_to_file(output_filename, examples, nbest_predictions, keys_to_dump=[]):
logging.info(f"Writing nbest predictions to {output_filename}")
with open(output_filename, "w") as writer:
for ex in tqdm(examples):
output_item = {
"id": ex.qas_id,
"context": ex.context_text,
"question": ex.question_text,
"nbest_predictions": [],
}
for pred in nbest_predictions[ex.qas_id]:
output_item["nbest_predictions"].append({key: pred[key] for key in keys_to_dump})
writer.write(json.dumps(output_item) + "\n")
| NeMo-main | nemo/collections/nlp/metrics/qa_metrics.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
import numpy as np
import torch
from rouge_score import rouge_scorer
from sacrebleu import BLEU
class ValMetric(ABC):
@abstractmethod
def get_score(self, ground_truth, predicted_text):
pass
class AccuracyScore(ValMetric):
def get_score(self, ground_truth, predicted_text):
corrects = 0
for (pred, label) in zip(predicted_text, ground_truth):
if pred == label:
corrects += 1
val_acc = corrects / len(ground_truth)
return {'accuracy': torch.tensor(val_acc)}
class BLEUScore(ValMetric):
def __init__(self):
self.scorer = BLEU()
def get_score(self, ground_truth, predicted_text):
return {
'bleu_score': torch.tensor(self.scorer.corpus_score(predicted_text, [[i] for i in ground_truth],).score)
}
class ROUGEScores(ValMetric):
def __init__(self):
self.rscorers = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rouge3', 'rougeL'], use_stemmer=True)
def get_score(self, ground_truth, predicted_text):
all_rouges = []
for i in range(len(ground_truth)):
scores = self.rscorers.score(predicted_text[i], ground_truth[i])
all_rouges.append(
[
scores['rouge1'].fmeasure,
scores['rouge2'].fmeasure,
scores['rouge3'].fmeasure,
scores['rougeL'].fmeasure,
]
)
all_rouges = np.mean(np.array(all_rouges), axis=0).tolist()
return {
'rouge_1_score': torch.tensor(all_rouges[0]),
'rouge_2_score': torch.tensor(all_rouges[1]),
'rouge_3_score': torch.tensor(all_rouges[2]),
'rouge_L_score': torch.tensor(all_rouges[3]),
}
| NeMo-main | nemo/collections/nlp/metrics/prompt_learning_metrics.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation metrics for Schema-guided dialogue.
This library provides functions for calculating the evaluation metrics for a
single dialogue. The following metrics are defined:
(1) Active intent accuracy: The fraction of user turns for which the active
intent has been correctly predicted.
(2) Slot tagging F1: The macro-averaged F1 score for tagging slot values for
non-categorical slots. This metric is optional to report in the final paper
if participants decide not to use slot tagging.
(3) Requested slots F1: The macro-averaged F1 score for requested slots over the
turns. For a turn, if there are no requested slots in both the ground truth
and the prediction, that turn is skipped. The reported number is the average
F1 score for all un-skipped user turns. This metric is optional to report in
the final paper.
(4) Average goal accuracy: For each turn, participants must predict a single
value for each slot present in the dialogue state. The slots which have a
non-empty assignment in the ground truth dialogue state are only considered.
This is the average accuracy of predicting the value of a slot correctly. A
fuzzy matching based score is used for non-categorical slots.
(5) Joint goal accuracy: This is the average accuracy of predicting all slot
assignments for a turn correctly. A fuzzy matching based score is used for
non-categorical slots. This is the primary evaluation metric used for ranking
submissions. More details to follow with the evaluation script.
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst/metrics.py
"""
import collections
import numpy as np
from rapidfuzz import fuzz
F1Scores = collections.namedtuple("F1Scores", ["f1", "precision", "recall"])
# Evaluation and other relevant metrics for DSTC8/SGD Schema-guided DST.
# (1) Active intent accuracy.
ACTIVE_INTENT_ACCURACY = "active_intent_accuracy"
# (2) Slot tagging F1.
SLOT_TAGGING_F1 = "slot_tagging_f1"
SLOT_TAGGING_PRECISION = "slot_tagging_precision"
SLOT_TAGGING_RECALL = "slot_tagging_recall"
# (3) Requested slots F1.
REQUESTED_SLOTS_F1 = "requested_slots_f1"
REQUESTED_SLOTS_PRECISION = "requested_slots_precision"
REQUESTED_SLOTS_RECALL = "requested_slots_recall"
# (4) Average goal accuracy.
AVERAGE_GOAL_ACCURACY = "average_goal_accuracy"
AVERAGE_CAT_ACCURACY = "average_cat_accuracy"
AVERAGE_NONCAT_ACCURACY = "average_noncat_accuracy"
# (5) Joint goal accuracy.
JOINT_GOAL_ACCURACY = "joint_goal_accuracy"
JOINT_CAT_ACCURACY = "joint_cat_accuracy"
JOINT_NONCAT_ACCURACY = "joint_noncat_accuracy"
AVERAGE_CAT_STATUS_ACCURACY = "average_cat_status_accuracy"
AVERAGE_CAT_VALUE_ACCURACY = "average_cat_value_accuracy"
AVERAGE_NONCAT_STATUS_ACCURACY = "average_noncat_status_accuracy"
AVERAGE_NONCAT_VALUE_ACCURACY = "average_noncat_value_accuracy"
JOINT_CAT_STATUS_ACCURACY = "joint_cat_status_accuracy"
JOINT_CAT_VALUE_ACCURACY = "joint_cat_value_accuracy"
JOINT_NONCAT_STATUS_ACCURACY = "joint_noncat_status_accuracy"
JOINT_NONCAT_VALUE_ACCURACY = "joint_noncat_value_accuracy"
NAN_VAL = "NA"
def compute_f1(list_ref, list_hyp):
    Compute F1 score from reference (ground truth) list and hypothesis list.
Args:
list_ref: List of true elements.
        list_hyp: List of positive (retrieved) elements.
Returns:
A F1Scores object containing F1, precision, and recall scores.
"""
ref = collections.Counter(list_ref)
hyp = collections.Counter(list_hyp)
true = sum(ref.values())
positive = sum(hyp.values())
true_positive = sum((ref & hyp).values())
precision = float(true_positive) / positive if positive else 1.0
recall = float(true_positive) / true if true else 1.0
if precision + recall > 0.0:
f1 = 2.0 * precision * recall / (precision + recall)
else: # The F1-score is defined to be 0 if both precision and recall are 0.
f1 = 0.0
return F1Scores(f1=f1, precision=precision, recall=recall)
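# Example: compute_f1(["a", "b", "b"], ["b", "c"]) -> precision 0.5, recall 1/3, f1 0.4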
def fuzzy_string_match(str_ref, str_hyp):
"""Returns fuzzy string similarity score in range [0.0, 1.0].
Args:
str_ref: reference string
str_hyp: hypothesis string
Returns:
fuzzy string similarity
"""
# The higher the score, the higher the similarity between the two strings.
return fuzz.token_sort_ratio(str_ref, str_hyp) / 100.0
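# Example: fuzzy_string_match("7 pm", "pm 7") -> 1.0, since token_sort_ratio sorts tokens before comparing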
def noncat_slot_value_match(str_ref_list, str_hyp, use_fuzzy_match):
"""Calculate non-categorical slots correctness.
Args:
str_ref_list: a list of reference strings.
str_hyp: the hypothesis string.
use_fuzzy_match: whether to use fuzzy string matching.
Returns:
        score: The highest fuzzy string match score between the references and the hypothesis.
"""
score = 0.0
for str_ref in str_ref_list:
if use_fuzzy_match:
match_score = fuzzy_string_match(str_ref, str_hyp)
else:
match_score = float(str_ref == str_hyp)
score = max(score, match_score)
return score
def compare_slot_values(slot_values_ref, slot_values_hyp, service, use_fuzzy_match):
"""Compare and get correctness of goal state's slot_values.
Args:
slot_values_ref: goal state slot_values from reference (ground truth).
slot_values_hyp: goal state slot_values from hypothesis (prediction).
service: a service data structure in the schema. We use it to obtain the
list of slots in the service and infer whether a slot is categorical.
use_fuzzy_match: whether to use fuzzy string matching for non-categorical
slot values
Returns:
        list_cor: list of correctness scores, each corresponding to one slot in the
service. The score is a float either 0.0 or 1.0 for categorical slot,
and in range [0.0, 1.0] for non-categorical slot.
slot_active: list indicating whether the element in list_cor corresponds to
an active ground-truth slot.
slot_cat: list indicating whether the element in list_cor corresponds to a
categorical slot.
list_cor_status: list of correct slot statuses
        list_cor_value: list of correctness scores only for active slots. Inactive slots are assigned -1.
"""
list_cor = []
list_cor_status = []
list_cor_value = []
slot_active = []
slot_cat = []
for slot in service["slots"]:
slot_name = slot["name"]
slot_cat.append(slot["is_categorical"])
if slot_name in slot_values_ref: # REF=active
slot_active.append(True)
if slot_name in slot_values_hyp: # HYP=active, apply matching
value_ref_list = slot_values_ref[slot_name]
value_hyp = slot_values_hyp[slot_name][0]
if slot["is_categorical"]:
cor = float(value_ref_list[0] == value_hyp)
else:
cor = noncat_slot_value_match(value_ref_list, value_hyp, use_fuzzy_match)
list_cor.append(cor)
list_cor_status.append(1.0)
list_cor_value.append(cor)
else: # HYP=off
list_cor.append(0.0)
list_cor_status.append(0.0)
list_cor_value.append(-1.0)
else: # REF=off
slot_active.append(False)
if slot_name in slot_values_hyp: # HYP=active
list_cor.append(0.0)
list_cor_status.append(0.0)
else: # HYP=off
list_cor.append(1.0)
list_cor_status.append(1.0)
list_cor_value.append(-1.0)
assert len(list_cor) == len(service["slots"])
assert len(slot_active) == len(service["slots"])
assert len(slot_cat) == len(service["slots"])
return list_cor, slot_active, slot_cat, list_cor_status, list_cor_value
def get_active_intent_accuracy(frame_ref, frame_hyp):
"""Get active intent accuracy of a frame.
Args:
frame_ref: single semantic frame from reference (ground truth) file.
frame_hyp: single semantic frame from hypothesis (prediction) file.
Returns:
1.0 if the intent prediction is correct, otherwise 0.0.
"""
return float(frame_ref["state"]["active_intent"] == frame_hyp["state"]["active_intent"])
def get_slot_tagging_f1(frame_ref, frame_hyp, utt, service):
"""Get slot tagging (non-categorical slots only) F1 scores of a frame.
Args:
frame_ref: single semantic frame from reference (ground truth) file.
frame_hyp: single semantic frame from hypothesis (prediction) file.
utt: user utterance. Slot tagging annotations are the character positions in
the utterance.
service: a service data structure in the schema. We use it to infer whether
a slot is non-categorical.
Returns:
A F1Scores object containing F1, precision, and recall scores.
"""
list_noncat_slots = [s["name"] for s in service["slots"] if not s["is_categorical"]]
if "slots" not in frame_hyp:
return None
else:
list_ref = [
(s["slot"], utt[s["start"] : s["exclusive_end"]])
for s in frame_ref["slots"]
if s["slot"] in list_noncat_slots
]
list_hyp = [
(s["slot"], utt[s["start"] : s["exclusive_end"]])
for s in frame_hyp["slots"]
if s["slot"] in list_noncat_slots
]
return compute_f1(list_ref, list_hyp)
def get_requested_slots_f1(frame_ref, frame_hyp):
"""Get requested slots F1 scores of a frame.
Args:
frame_ref: single semantic frame from reference (ground truth) file.
frame_hyp: single semantic frame from hypothesis (prediction) file.
Returns:
A F1Scores object containing F1, precision, and recall scores.
"""
return compute_f1(frame_ref["state"]["requested_slots"], frame_hyp["state"]["requested_slots"])
def get_average_and_joint_goal_accuracy(frame_ref, frame_hyp, service, use_fuzzy_match):
"""Get average and joint goal accuracies of a frame.
Args:
frame_ref: single semantic frame from reference (ground truth) file.
frame_hyp: single semantic frame from hypothesis (prediction) file.
service: a service data structure in the schema. We use it to obtain the
list of slots in the service and infer whether a slot is categorical.
use_fuzzy_match: whether to use fuzzy string matching for comparing
non-categorical slot values.
Returns:
goal_acc: a dict whose values are average / joint
all-goal / categorical-goal / non-categorical-goal accuracies.
"""
goal_acc = {}
list_acc, slot_active, slot_cat, list_status_acc, list_value_acc = compare_slot_values(
frame_ref["state"]["slot_values"], frame_hyp["state"]["slot_values"], service, use_fuzzy_match
)
# (4) Average goal accuracy.
active_acc = [acc for acc, active in zip(list_acc, slot_active) if active]
goal_acc[AVERAGE_GOAL_ACCURACY] = np.mean(active_acc) if active_acc else NAN_VAL
# (4-a) categorical.
active_cat_acc = [acc for acc, active, cat in zip(list_acc, slot_active, slot_cat) if active and cat]
goal_acc[AVERAGE_CAT_ACCURACY] = np.mean(active_cat_acc) if active_cat_acc else NAN_VAL
# (4-b) non-categorical.
active_noncat_acc = [acc for acc, active, cat in zip(list_acc, slot_active, slot_cat) if active and not cat]
goal_acc[AVERAGE_NONCAT_ACCURACY] = np.mean(active_noncat_acc) if active_noncat_acc else NAN_VAL
# (5) Joint goal accuracy.
goal_acc[JOINT_GOAL_ACCURACY] = np.prod(list_acc) if list_acc else NAN_VAL
# (5-a) categorical.
cat_acc = [acc for acc, cat in zip(list_acc, slot_cat) if cat]
goal_acc[JOINT_CAT_ACCURACY] = np.prod(cat_acc) if cat_acc else NAN_VAL
# (5-b) non-categorical.
noncat_acc = [acc for acc, cat in zip(list_acc, slot_cat) if not cat]
goal_acc[JOINT_NONCAT_ACCURACY] = np.prod(noncat_acc) if noncat_acc else NAN_VAL
    # Status- and value-level accuracy breakdowns.
    # Average categorical slot status accuracy (over active ground-truth slots).
active_cat_status_acc = [acc for acc, active, cat in zip(list_status_acc, slot_active, slot_cat) if cat and active]
goal_acc[AVERAGE_CAT_STATUS_ACCURACY] = np.mean(active_cat_status_acc) if active_cat_status_acc else NAN_VAL
    # Joint categorical slot status accuracy (over all categorical slots).
cat_status_acc = [acc for acc, cat in zip(list_status_acc, slot_cat) if cat]
goal_acc[JOINT_CAT_STATUS_ACCURACY] = np.prod(cat_status_acc) if cat_status_acc else NAN_VAL
    # Average non-categorical slot status accuracy (over active ground-truth slots).
active_noncat_status_acc = [
acc for acc, active, cat in zip(list_status_acc, slot_active, slot_cat) if not cat and active
]
goal_acc[AVERAGE_NONCAT_STATUS_ACCURACY] = (
np.mean(active_noncat_status_acc) if active_noncat_status_acc else NAN_VAL
)
    # Joint non-categorical slot status accuracy (over all non-categorical slots).
noncat_status_acc = [acc for acc, cat in zip(list_status_acc, slot_cat) if not cat]
goal_acc[JOINT_NONCAT_STATUS_ACCURACY] = np.prod(noncat_status_acc) if noncat_status_acc else NAN_VAL
    # Average categorical slot value accuracy (over active slots with a recorded value score).
active_cat_val_acc = [
acc for acc, active, cat in zip(list_value_acc, slot_active, slot_cat) if cat and acc > -0.5 and active
]
goal_acc[AVERAGE_CAT_VALUE_ACCURACY] = np.mean(active_cat_val_acc) if active_cat_val_acc else NAN_VAL
    # Joint categorical slot value accuracy.
cat_val_acc = [acc for acc, cat in zip(list_value_acc, slot_cat) if cat and acc > -0.5]
goal_acc[JOINT_CAT_VALUE_ACCURACY] = np.prod(cat_val_acc) if cat_val_acc else NAN_VAL
    # Average non-categorical slot value accuracy (over active slots with a recorded value score).
active_noncat_val_acc = [
acc for acc, active, cat in zip(list_value_acc, slot_active, slot_cat) if not cat and acc > -0.5 and active
]
goal_acc[AVERAGE_NONCAT_VALUE_ACCURACY] = np.mean(active_noncat_val_acc) if active_noncat_val_acc else NAN_VAL
    # Joint non-categorical slot value accuracy.
noncat_val_acc = [acc for acc, cat in zip(list_value_acc, slot_cat) if not cat and acc > -0.5]
goal_acc[JOINT_NONCAT_VALUE_ACCURACY] = np.prod(noncat_val_acc) if noncat_val_acc else NAN_VAL
return goal_acc
| NeMo-main | nemo/collections/nlp/metrics/sgd_metrics.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Tuple
import torch
from torchmetrics import Metric
from torchmetrics.utilities.data import METRIC_EPS
__all__ = ['ClassificationReport', 'MultiLabelClassificationReport']
class ClassificationReport(Metric):
"""
This metric computes the number of True Positive, False Negative, and False Positive examples per class.
When doing distributed training/evaluation the result of res=ClassificationReport(predictions, labels) calls
will be all-reduced between all workers using SUM operations.
If used with PytorchLightning LightningModule, include TPs, FNs, and FPs inside validation_step results.
Then aggregate them at the end of validation epoch to correctly compute validation precision, recall, f1
using get_precision_recall_f1().
Example:
def validation_step(self, batch, batch_idx):
...
tp, fn, fp, _ = self.classification_report(preds, labels)
return {'val_loss': val_loss, 'tp': tp, 'fn': fn, 'fp': fp}
def on_validation_epoch_end(self):
...
# calculate metrics and classification report
precision, recall, f1, report = self.classification_report.compute()
logging.info(report)
self.log('val_loss', avg_loss, prog_bar=True)
self.log('precision', precision)
self.log('f1', f1)
self.log('recall', recall)
Args:
num_classes: number of classes in the dataset
label_ids (optional): label name to label id mapping
mode: how to compute the average
dist_sync_on_step: sync across ddp
process_group: which processes to sync across
Return:
aggregated precision, recall, f1, report
"""
full_state_update = True
def __init__(
self,
num_classes: int,
label_ids: Dict[str, int] = None,
mode: str = 'macro',
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
super().__init__(dist_sync_on_step=dist_sync_on_step, process_group=process_group)
self.num_classes = num_classes
if label_ids:
self.ids_to_labels = {v: k for k, v in label_ids.items()}
else:
self.ids_to_labels = None
self.mode = mode
self.add_state("tp", default=torch.zeros(num_classes), dist_reduce_fx='sum', persistent=False)
self.add_state("fn", default=torch.zeros(num_classes), dist_reduce_fx='sum', persistent=False)
self.add_state("fp", default=torch.zeros(num_classes), dist_reduce_fx='sum', persistent=False)
self.add_state(
"num_examples_per_class", default=torch.zeros(num_classes), dist_reduce_fx='sum', persistent=False
)
def update(self, predictions: torch.Tensor, labels: torch.Tensor) -> None:
"""
        Updates attributes needed for the classification report (true positives, false negatives, false positives, examples per class)
Args:
predictions: predicted labels
labels: actual labels
Return:
None
"""
TP = []
FN = []
FP = []
for label_id in range(self.num_classes):
current_label = labels == label_id
label_predicted = predictions == label_id
TP.append((label_predicted == current_label)[label_predicted].sum())
FP.append((label_predicted != current_label)[label_predicted].sum())
FN.append((label_predicted != current_label)[current_label].sum())
tp = torch.tensor(TP).to(predictions.device)
fn = torch.tensor(FN).to(predictions.device)
fp = torch.tensor(FP).to(predictions.device)
num_examples_per_class = tp + fn
self.tp += tp
self.fn += fn
self.fp += fp
self.num_examples_per_class += num_examples_per_class
def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
        Aggregates the accumulated statistics and calculates a classification report similar to sklearn.metrics.classification_report.
Typically used during epoch_end.
Return:
aggregated precision, recall, f1, report
"""
total_examples = torch.sum(self.num_examples_per_class)
num_non_empty_classes = torch.nonzero(self.num_examples_per_class).size(0)
precision = torch.true_divide(self.tp * 100, (self.tp + self.fp + METRIC_EPS))
recall = torch.true_divide(self.tp * 100, (self.tp + self.fn + METRIC_EPS))
f1 = torch.true_divide(2 * precision * recall, (precision + recall + METRIC_EPS))
report = '\n{:50s} {:10s} {:10s} {:10s} {:10s}'.format('label', 'precision', 'recall', 'f1', 'support')
for i in range(len(self.tp)):
label = f'label_id: {i}'
if self.ids_to_labels and i in self.ids_to_labels:
label = f'{self.ids_to_labels[i]} ({label})'
report += '\n{:50s} {:8.2f} {:8.2f} {:8.2f} {:8.0f}'.format(
label, precision[i], recall[i], f1[i], self.num_examples_per_class[i]
)
micro_precision = torch.true_divide(torch.sum(self.tp) * 100, torch.sum(self.tp + self.fp) + METRIC_EPS)
micro_recall = torch.true_divide(torch.sum(self.tp) * 100, torch.sum(self.tp + self.fn) + METRIC_EPS)
micro_f1 = torch.true_divide(2 * micro_precision * micro_recall, (micro_precision + micro_recall + METRIC_EPS))
macro_precision = torch.sum(precision) / num_non_empty_classes
macro_recall = torch.sum(recall) / num_non_empty_classes
macro_f1 = torch.sum(f1) / num_non_empty_classes
weighted_precision = torch.sum(precision * self.num_examples_per_class) / total_examples
weighted_recall = torch.sum(recall * self.num_examples_per_class) / total_examples
weighted_f1 = torch.sum(f1 * self.num_examples_per_class) / total_examples
report += "\n-------------------"
report += '\n{:50s} {:8.2f} {:8.2f} {:8.2f} {:8.0f}'.format(
'micro avg', micro_precision, micro_recall, micro_f1, total_examples
)
report += '\n{:50s} {:8.2f} {:8.2f} {:8.2f} {:8.0f}'.format(
'macro avg', macro_precision, macro_recall, macro_f1, total_examples
)
report += (
'\n{:50s} {:8.2f} {:8.2f} {:8.2f} {:8.0f}'.format(
'weighted avg', weighted_precision, weighted_recall, weighted_f1, total_examples
)
+ '\n'
)
self.total_examples = total_examples
if self.mode == 'macro':
return macro_precision, macro_recall, macro_f1, report
elif self.mode == 'weighted':
return weighted_precision, weighted_recall, weighted_f1, report
elif self.mode == 'micro':
return micro_precision, micro_recall, micro_f1, report
elif self.mode == 'all':
return precision, recall, f1, report
else:
raise ValueError(
f'{self.mode} mode is not supported. Choose "macro" to get aggregated numbers \
or "all" to get values for each class.'
)
class MultiLabelClassificationReport(ClassificationReport):
"""
This metric computes the number of True Positive, False Negative, and False Positive examples per class for
a multi-label dataset.
When doing distributed training/evaluation the result of res=ClassificationReport(predictions, labels) calls
will be all-reduced between all workers using SUM operations.
If used with PytorchLightning LightningModule, include TPs, FNs, and FPs inside validation_step results.
Then aggregate them at the end of validation epoch to correctly compute validation precision, recall, f1
using get_precision_recall_f1().
Example:
def validation_step(self, batch, batch_idx):
...
tp, fn, fp, _ = self.classification_report(preds, labels)
return {'val_loss': val_loss, 'tp': tp, 'fn': fn, 'fp': fp}
def on_validation_epoch_end(self):
...
# calculate metrics and classification report
precision, recall, f1, report = self.classification_report.compute()
logging.info(report)
self.log('val_loss', avg_loss, prog_bar=True)
self.log('precision', precision)
self.log('f1', f1)
self.log('recall', recall)
Args:
num_classes: number of classes in the dataset
label_ids (optional): label name to label id mapping
mode: how to compute the average
dist_sync_on_step: sync across ddp
process_group: which processes to sync across
Return:
aggregated precision, recall, f1, report
"""
def update(self, predictions: torch.Tensor, labels: torch.Tensor) -> None:
"""
        Updates attributes needed for the classification report (true positives, false negatives, false positives, examples per class)
Args:
predictions: predicted labels
labels: actual labels
Return:
None
"""
predictions = predictions.t()
TP = []
FN = []
FP = []
for label_id in range(self.num_classes):
current_label = labels[label_id]
labels_predicted = predictions[label_id]
TP.append((labels_predicted == current_label)[labels_predicted == 1].sum())
FP.append((labels_predicted != current_label)[labels_predicted == 1].sum())
FN.append((labels_predicted != current_label)[current_label == 1].sum())
tp = torch.tensor(TP).to(predictions.device)
fn = torch.tensor(FN).to(predictions.device)
fp = torch.tensor(FP).to(predictions.device)
num_examples_per_class = tp + fn
self.tp += tp
self.fn += fn
self.fp += fp
self.num_examples_per_class += num_examples_per_class
| NeMo-main | nemo/collections/nlp/metrics/classification_report.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/train_and_predict.py
'''
import torch
from nemo.core.classes import Loss, Typing, typecheck
from nemo.core.neural_types import ChannelType, LabelsType, LogitsType, LossType, NeuralType
from nemo.utils import logging
__all__ = ['SGDDialogueStateLoss']
class SGDDialogueStateLoss(Loss, Typing):
"""
Neural module which implements loss for SGD model.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
logit_intent_status: Output of SGD model
intent_status: intent label
logit_req_slot_status: Output of SGD model
requested_slot_status: Takes value 1 if the corresponding slot is requested, 0 otherwise
logit_cat_slot_status: Output of SGD model
categorical_slot_status: The status of each categorical slot in the service
logit_cat_slot_value_status: Output of SGD model
categorical_slot_value_status: Takes value 1 if the corresponding slot value is correct, 0 otherwise
logit_noncat_slot_status: Output of SGD model
        noncategorical_slot_status: The status of each noncategorical slot in the service
logit_spans: Output of SGD model
noncategorical_slot_value_start: The index of the starting subword corresponding to the slot span for a non-categorical slot value
noncategorical_slot_value_end: The index of the ending (inclusive) subword corresponding to the slot span for a non-categorical slot value
task_mask: Mask contains 1 if its the current task, 0 otherwise
"""
return {
"logit_intent_status": NeuralType(('B', 'T'), LogitsType()),
"intent_status": NeuralType(('B'), LabelsType()),
"logit_req_slot_status": NeuralType(('B', 'T'), LogitsType()),
"requested_slot_status": NeuralType(('B'), LabelsType()),
"logit_cat_slot_status": NeuralType(('B', 'T'), LogitsType()),
"categorical_slot_status": NeuralType(('B'), LabelsType()),
"logit_cat_slot_value_status": NeuralType(('B', 'T'), LogitsType()),
"categorical_slot_value_status": NeuralType(('B'), LabelsType()),
"logit_noncat_slot_status": NeuralType(('B', 'T'), LogitsType()),
"noncategorical_slot_status": NeuralType(('B'), LabelsType()),
"logit_spans": NeuralType(('B', 'T', 'D'), LogitsType()),
"noncategorical_slot_value_start": NeuralType(('B'), LabelsType()),
"noncategorical_slot_value_end": NeuralType(('B'), LabelsType()),
"task_mask": NeuralType(('B', 'T'), ChannelType()),
}
@property
def output_types(self):
"""
Returns definitions of module output ports.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, reduction: str = 'mean'):
"""
Args:
reduction: specifies the reduction to apply to the final loss, choose 'mean' or 'sum'
"""
super().__init__()
if reduction not in ['mean', 'sum']:
logging.warning(f'{reduction} reduction is not supported. Setting reduction to "mean"')
reduction = 'mean'
self.reduction = reduction
self._cross_entropy = torch.nn.CrossEntropyLoss(reduction=self.reduction)
self._cross_entropy_bin = torch.nn.BCEWithLogitsLoss(reduction=self.reduction)
def _helper(self, logits, labels, loss_mask=None):
"""
        Flattens logits and labels according to the loss mask.
Args:
logits: logits
labels: labels
loss_mask: loss mask
Returns:
logits_flatten: flattened logits where loss mask is true
labels_flatten: flattened labels where loss mask is true
"""
logits_flatten = torch.flatten(logits, start_dim=0, end_dim=-2)
labels_flatten = torch.flatten(labels, start_dim=0, end_dim=-1)
if loss_mask is not None:
if loss_mask.dtype is not torch.bool:
loss_mask = loss_mask > 0.5
loss_mask_flatten = torch.flatten(loss_mask, start_dim=0, end_dim=-1)
logits_flatten = logits_flatten[loss_mask_flatten]
labels_flatten = labels_flatten[loss_mask_flatten]
return logits_flatten, labels_flatten
@typecheck()
def forward(
self,
logit_intent_status,
intent_status,
logit_req_slot_status,
requested_slot_status,
logit_cat_slot_status,
categorical_slot_status,
logit_cat_slot_value_status,
categorical_slot_value_status,
logit_noncat_slot_status,
noncategorical_slot_status,
logit_spans,
noncategorical_slot_value_start,
noncategorical_slot_value_end,
task_mask,
):
# Intent loss
old_logit_intent_status = logit_intent_status
logit_intent_status, intent_status = self._helper(logit_intent_status, intent_status, task_mask[:, 0])
if len(intent_status) == 0:
intent_loss = torch.clamp(torch.max(old_logit_intent_status.view(-1)), 0, 0)
else:
intent_loss = self._cross_entropy_bin(logit_intent_status.squeeze(dim=-1), intent_status)
old_logit_req_slot_status = logit_req_slot_status
logit_req_slot_status, requested_slot_status = self._helper(
logit_req_slot_status, requested_slot_status, task_mask[:, 1]
)
if len(requested_slot_status) == 0:
requested_slot_loss = torch.clamp(torch.max(old_logit_req_slot_status.view(-1)), 0, 0)
else:
requested_slot_loss = self._cross_entropy_bin(logit_req_slot_status.squeeze(dim=-1), requested_slot_status)
old_logit_cat_slot_status = logit_cat_slot_status
logit_cat_slot_status, categorical_slot_status = self._helper(
logit_cat_slot_status, categorical_slot_status, task_mask[:, 2]
)
if len(categorical_slot_status) == 0:
cat_slot_status_loss = torch.clamp(torch.max(old_logit_cat_slot_status.view(-1)), 0, 0)
else:
cat_slot_status_loss = self._cross_entropy(logit_cat_slot_status, categorical_slot_status,)
old_logit_cat_slot_value_status = logit_cat_slot_value_status
logit_cat_slot_value_status, categorical_slot_value_status = self._helper(
logit_cat_slot_value_status, categorical_slot_value_status, task_mask[:, 3]
)
if len(categorical_slot_value_status) == 0:
cat_slot_value_status_loss = torch.clamp(torch.max(old_logit_cat_slot_value_status.view(-1)), 0, 0)
else:
cat_slot_value_status_loss = self._cross_entropy_bin(
logit_cat_slot_value_status.squeeze(dim=-1), categorical_slot_value_status
)
old_logit_noncat_slot_status = logit_noncat_slot_status
logit_noncat_slot_status, noncategorical_slot_status = self._helper(
logit_noncat_slot_status, noncategorical_slot_status, task_mask[:, 4]
)
if len(noncategorical_slot_status) == 0:
noncat_slot_status_loss = torch.clamp(torch.max(old_logit_noncat_slot_status.view(-1)), 0, 0)
else:
noncat_slot_status_loss = self._cross_entropy(logit_noncat_slot_status, noncategorical_slot_status,)
logit_noncat_slot_start, logit_noncat_slot_end = torch.unbind(logit_spans, dim=-1)
_, max_num_tokens = logit_noncat_slot_start.size()
old_logit_noncat_slot_start = logit_noncat_slot_start
logit_noncat_slot_start, noncategorical_slot_value_start = self._helper(
logit_noncat_slot_start, noncategorical_slot_value_start, task_mask[:, 5]
)
if len(noncategorical_slot_value_start) == 0:
span_start_loss = torch.clamp(torch.max(old_logit_noncat_slot_start.view(-1)), 0, 0)
else:
span_start_loss = self._cross_entropy(logit_noncat_slot_start, noncategorical_slot_value_start)
old_logit_noncat_slot_end = logit_noncat_slot_end
logit_noncat_slot_end, noncategorical_slot_value_end = self._helper(
logit_noncat_slot_end, noncategorical_slot_value_end, task_mask[:, 5]
)
if len(noncategorical_slot_value_end) == 0:
span_end_loss = torch.clamp(torch.max(old_logit_noncat_slot_end.view(-1)), 0, 0)
else:
span_end_loss = self._cross_entropy(logit_noncat_slot_end, noncategorical_slot_value_end)
losses = {
"intent_loss": intent_loss,
"requested_slot_loss": requested_slot_loss,
"cat_slot_status_loss": cat_slot_status_loss,
"cat_slot_value_status_loss": cat_slot_value_status_loss,
"noncat_slot_status_loss": noncat_slot_status_loss,
"span_start_loss": span_start_loss,
"span_end_loss": span_end_loss,
}
total_loss = sum(losses.values())
if self.reduction == 'mean':
total_loss = total_loss / len(losses)
else:
batch_size = logit_intent_status.shape[0]
total_loss = total_loss / batch_size
return total_loss
| NeMo-main | nemo/collections/nlp/losses/sgd_loss.py |
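# [Illustrative sketch, not part of the NeMo repository.] The _helper method in the loss
# above flattens logits/labels and keeps only the positions where the task mask is true;
# when the mask selects nothing, the loss falls back to torch.clamp(torch.max(logits), 0, 0),
# a zero that stays connected to the autograd graph. A minimal standalone reproduction:
import torch

def masked_flatten(logits: torch.Tensor, labels: torch.Tensor, mask: torch.Tensor):
    # flatten everything except the final class dimension, then keep masked positions
    logits_flat = torch.flatten(logits, start_dim=0, end_dim=-2)
    labels_flat = torch.flatten(labels)
    mask_flat = torch.flatten(mask > 0.5)
    return logits_flat[mask_flat], labels_flat[mask_flat]

logits = torch.randn(4, 3, requires_grad=True)  # (batch, num_classes)
labels = torch.tensor([0, 2, 1, 0])
mask = torch.tensor([1, 0, 1, 1])
sel_logits, sel_labels = masked_flatten(logits, labels, mask)
loss = torch.nn.functional.cross_entropy(sel_logits, sel_labels)
# an all-zero mask yields an empty selection, so fall back to a graph-connected zero loss
empty_logits, _ = masked_flatten(logits, labels, torch.zeros(4))
if empty_logits.numel() == 0:
    loss = torch.clamp(torch.max(logits.view(-1)), 0, 0)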
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.losses.sgd_loss import SGDDialogueStateLoss
| NeMo-main | nemo/collections/nlp/losses/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any
from omegaconf.omegaconf import MISSING
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common.decoder_module import DecoderModule
from nemo.collections.nlp.modules.common.encoder_module import EncoderModule
from nemo.collections.nlp.modules.common.tokenizer_utils import TokenizerConfig
from nemo.core.config.modelPT import ModelConfig
@dataclass
class EncDecNLPModelConfig(ModelConfig):
encoder_tokenizer: TokenizerConfig = MISSING
decoder_tokenizer: TokenizerConfig = MISSING
encoder: Any = MISSING
decoder: Any = MISSING
head: Any = MISSING
class EncDecNLPModel(NLPModel):
"""Base class for encoder-decoder NLP models.
"""
def __init__(self, cfg: EncDecNLPModelConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
@property
def encoder_vocab_size(self):
return self.encoder_tokenizer.vocab_size
@property
def decoder_vocab_size(self):
return self.decoder_tokenizer.vocab_size
@property
def encoder_tokenizer(self):
return self._encoder_tokenizer
@encoder_tokenizer.setter
def encoder_tokenizer(self, tokenizer):
self._encoder_tokenizer = tokenizer
@property
def decoder_tokenizer(self):
return self._decoder_tokenizer
@decoder_tokenizer.setter
def decoder_tokenizer(self, tokenizer):
self._decoder_tokenizer = tokenizer
@property
def encoder(self) -> EncoderModule:
return self._encoder
@encoder.setter
def encoder(self, encoder):
self._encoder = encoder
@property
def decoder(self) -> DecoderModule:
return self._decoder
@decoder.setter
def decoder(self, decoder):
self._decoder = decoder
| NeMo-main | nemo/collections/nlp/models/enc_dec_nlp_model.py |
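# [Illustrative sketch, not part of the NeMo repository.] EncDecNLPModelConfig above marks
# its fields as MISSING so OmegaConf refuses to hand them out until they are filled in.
# The hypothetical ToyEncDecConfig below (a name invented for this example) shows that
# behaviour in isolation, assuming only that omegaconf is installed:
from dataclasses import dataclass
from typing import Any

from omegaconf import MISSING, OmegaConf
from omegaconf.errors import MissingMandatoryValue

@dataclass
class ToyEncDecConfig:
    encoder: Any = MISSING
    decoder: Any = MISSING

cfg = OmegaConf.structured(ToyEncDecConfig)
try:
    _ = cfg.encoder  # still MISSING -> omegaconf raises
except MissingMandatoryValue:
    pass
cfg.encoder = {"hidden_size": 512}  # fill the mandatory field before use
assert cfg.encoder.hidden_size == 512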
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import hashlib
import json
import os
from typing import Any, Mapping, Optional
from lightning_fabric.utilities.cloud_io import _load as pl_load
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.core.saving import _load_state as ptl_load_state
from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.migration import pl_legacy_patch
from transformers import TRANSFORMERS_CACHE
from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer
from nemo.collections.nlp.modules import BertModule
from nemo.collections.nlp.modules.common.huggingface.huggingface_utils import VOCAB_FILE_NAME
from nemo.collections.nlp.modules.common.lm_utils import get_lm_model
from nemo.collections.nlp.modules.common.megatron.megatron_utils import (
MEGATRON_CONFIG_MAP,
get_megatron_pretrained_bert_models,
)
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.core.classes import ModelPT
from nemo.core.classes.exportable import Exportable
from nemo.utils import AppState, logging
try:
from megatron.core import dist_checkpointing, parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = ['NLPModel']
NEMO_NLP_TMP = os.path.join(os.path.dirname(str(TRANSFORMERS_CACHE)), "nemo_nlp_tmp")
os.makedirs(NEMO_NLP_TMP, exist_ok=True)
class NLPModel(ModelPT, Exportable):
"""Base class for NLP Models.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None, no_lm_init=False):
self.hidden_size = None
self.bert_model = None
vocab_file = None
nemo_file = None
config_dict = None
config_file = None
# tokenizer needs to get initialized before the super.__init__()
# as dataloaders and datasets need it to process the data
pretrain_model_name = ''
if cfg.get('language_model') and cfg.language_model.get('pretrained_model_name', ''):
pretrain_model_name = cfg.language_model.get('pretrained_model_name', '')
all_pretrained_megatron_bert_models = get_megatron_pretrained_bert_models()
if cfg.get('tokenizer'):
# Some models have their own tokenizer setup
if (
not hasattr(self, 'tokenizer')
and cfg.tokenizer.get('tokenizer_name')
and pretrain_model_name not in all_pretrained_megatron_bert_models
):
self.setup_tokenizer(cfg.tokenizer)
elif pretrain_model_name in all_pretrained_megatron_bert_models:
copy_cfg = copy.deepcopy(cfg)
bert_model = get_lm_model(
config_file=config_file,
config_dict=config_dict,
vocab_file=vocab_file,
trainer=trainer,
cfg=copy_cfg,
)
# set the tokenizer if it is not initialized explicitly
if (
(hasattr(self, 'tokenizer') and self.tokenizer is None) or not hasattr(self, 'tokenizer')
) and hasattr(bert_model, 'tokenizer'):
self.tokenizer = bert_model.tokenizer
if (
cfg.get('tokenizer')
and hasattr(cfg.get('tokenizer'), 'vocab_file')
and cfg.get('tokenizer').get('vocab_file')
):
vocab_file = self.register_artifact('tokenizer.vocab_file', cfg.tokenizer.vocab_file)
super().__init__(cfg, trainer)
# handles model parallel save and restore logic
self._save_restore_connector = NLPSaveRestoreConnector()
if cfg.get('language_model') and not no_lm_init:
if cfg.get('language_model').get('nemo_file'):
nemo_file = self.register_artifact('language_model.nemo_file', cfg.language_model.nemo_file)
if cfg.get('language_model').get('config'):
config_dict = OmegaConf.to_container(cfg.language_model.config)
if cfg.get('language_model').get('config_file'):
config_file = self.register_artifact('language_model.config_file', cfg.language_model.config_file)
bert_model = get_lm_model(
config_file=config_file, config_dict=config_dict, vocab_file=vocab_file, trainer=trainer, cfg=cfg,
)
# set the tokenizer if it is not initialized explicitly
if ((hasattr(self, 'tokenizer') and self.tokenizer is None) or not hasattr(self, 'tokenizer')) and hasattr(
bert_model, 'tokenizer'
):
self.tokenizer = bert_model.tokenizer
# Required to pull up the config for MegatronBert models
self.pretrained_model_name = cfg.language_model.pretrained_model_name
if (
cfg.tokenizer is not None
and cfg.tokenizer.get("tokenizer_name", "") is not None
and "megatron" in cfg.tokenizer.get("tokenizer_name", "")
) or pretrain_model_name in all_pretrained_megatron_bert_models:
self.hidden_size = bert_model.cfg.hidden_size
else:
self.hidden_size = bert_model.config.hidden_size
if cfg.get('language_model') and not no_lm_init:
self.bert_model = bert_model
# register encoder config
self.register_bert_model()
def register_artifact(
self, config_path: str, src: str, verify_src_exists: bool = False,
):
""" Overrides ModelPT register_artifact default behavior.
NLP models usually need artifacts that are optional."""
return super().register_artifact(config_path, src, verify_src_exists=verify_src_exists)
@rank_zero_only
def register_bert_model(self):
"""Adds encoder config to .nemo archive for Jarvis.
"""
# check if there is an encoder, warn if not
if self.bert_model is not None:
# get encoder config and create source for artifact
if isinstance(self.bert_model, BertModule):
# HuggingFace Transformer Config
pretrained_model_name = self.bert_model.name_or_path
# Some HF names have "/" in them so we replace with _
pretrained_model_name = pretrained_model_name.replace("/", "_")
encoder_config_path = pretrained_model_name + '_encoder_config'
encoder_config_src = os.path.join(NEMO_NLP_TMP, encoder_config_path + '.json')
self.bert_model.config.to_json_file(encoder_config_src) # name requested by jarvis team
self.register_artifact('language_model.config_file', encoder_config_src) # for .nemo
            # MegatronBertModel's superclass is NLPModel, hence can't check for isinstance of self.bert_model
elif hasattr(self, 'pretrained_model_name') and 'megatron' in self.pretrained_model_name:
if self.pretrained_model_name in MEGATRON_CONFIG_MAP:
output_config = MEGATRON_CONFIG_MAP[self.pretrained_model_name]["config"]
if output_config is not None:
encoder_config_path = self.pretrained_model_name + '_encoder_config'
encoder_config_src = os.path.join(NEMO_NLP_TMP, encoder_config_path + '.json')
with open(encoder_config_src, 'w', encoding='utf-8') as f:
f.write(json.dumps(output_config, indent=2, sort_keys=True) + '\n')
self.register_artifact('language_model.config_file', encoder_config_src) # for .nemo
else:
# No defaults as this case can be any possible hyper-parameter combination of MegatronBert config
logging.info(f'For {self.pretrained_model_name}, set the config_file in the YAML file')
else:
logging.info(
f'Registering MegatronBERT model config for {self.pretrained_model_name} is not yet supported. \
Please override this method if needed.'
)
else:
logging.info(
f'Registering BERT model config for {self.bert_model} is not yet supported. Please override this method if needed.'
)
def setup_tokenizer(self, cfg: DictConfig):
"""Instantiates tokenizer based on config and registers tokenizer artifacts.
If model is being restored from .nemo file then the tokenizer.vocab_file will
be used (if it exists).
Otherwise, we will use the vocab file provided in the config (if it exists).
Finally, if no vocab file is given (this happens frequently when using HF),
we will attempt to extract the vocab from the tokenizer object and then register it.
Args:
cfg (DictConfig): Tokenizer config
"""
vocab_file = None
if cfg.get('vocab_file'):
vocab_file = self.register_artifact(config_path='tokenizer.vocab_file', src=cfg.vocab_file)
        # only create a tokenizer if a tokenizer_name, vocab_file, or tokenizer_model is provided
if cfg.tokenizer_name or vocab_file or cfg.tokenizer_model:
self.tokenizer = get_tokenizer(
tokenizer_name=cfg.tokenizer_name,
vocab_file=vocab_file,
special_tokens=OmegaConf.to_container(cfg.special_tokens) if cfg.special_tokens else None,
tokenizer_model=self.register_artifact(
config_path='tokenizer.tokenizer_model', src=cfg.tokenizer_model
),
)
if vocab_file is None:
# when there is no vocab file we try to get the vocab from the tokenizer and register it
self._register_vocab_from_tokenizer(vocab_file_config_path='tokenizer.vocab_file', cfg=cfg)
@rank_zero_only
def _register_vocab_from_tokenizer(
self,
vocab_file_config_path: str = 'tokenizer.vocab_file',
vocab_dict_config_path: str = 'tokenizer_vocab_dict',
cfg: DictConfig = None,
):
"""Creates vocab file from tokenizer if vocab file is None.
Args:
vocab_file_config_path: path to the vocab_file in the config
vocab_dict_config_path: path to the vocab_dict in the config
cfg: tokenizer config
"""
if self.tokenizer is None:
raise ValueError('Instantiate self.tokenizer before registering vocab from it.')
else:
if isinstance(self.tokenizer, AutoTokenizer):
# extract vocab from tokenizer
vocab_dict = self.tokenizer.tokenizer.get_vocab()
# for fast and slow tokenizer vocabularies compatibility
vocab_dict = dict(sorted(vocab_dict.items(), key=lambda item: item[1]))
# get hash of vocab_dict to create a unique directory to write vocab_dict and vocab_file
m = hashlib.md5()
if 'tokenizer_name' in cfg:
if cfg.tokenizer_name is not None:
# different pretrained models with the same vocab will have different hash
m.update(cfg.tokenizer_name.encode())
# get string representation of vocab_dict
vocab_dict_str = json.dumps(vocab_dict, sort_keys=True).encode()
m.update(vocab_dict_str)
vocab_dict_hash = m.hexdigest()
hash_path = os.path.join(NEMO_NLP_TMP, vocab_dict_hash)
os.makedirs(hash_path, exist_ok=True)
vocab_json_src = os.path.join(hash_path, vocab_dict_config_path)
with open(vocab_json_src, 'w', encoding='utf-8') as f:
f.write(json.dumps(vocab_dict, indent=2, sort_keys=True) + '\n')
self.register_artifact(config_path=vocab_dict_config_path, src=vocab_json_src)
tokenizer_name = self.tokenizer.tokenizer.__class__.__name__
# save vocab file
# depending on the HuggingFace model, vocab file could mean different things, see VOCAB_FILE_NAME
self.tokenizer.save_vocabulary(hash_path)
# create vocab file
vocab_file_src = os.path.join(hash_path, VOCAB_FILE_NAME[tokenizer_name])
cfg.vocab_file = vocab_file_src
self.register_artifact(config_path=vocab_file_config_path, src=vocab_file_src)
else:
logging.info(
f'Registering tokenizer vocab for {self.tokenizer} is not yet supported. Please override this method if needed.'
)
@staticmethod
def _unpack_nemo_file(path2file: str, out_folder: str) -> str:
return super(NLPModel, NLPModel)._unpack_nemo_file(path2file, out_folder)
@staticmethod
def _make_nemo_file_from_folder(filename, source_dir):
return super(NLPModel, NLPModel)._make_nemo_file_from_folder(filename, source_dir)
@property
def input_module(self):
return self.bert_model
@property
def output_module(self):
return self.classifier
@property
def is_model_parallel_initialized(self):
app_state = AppState()
if app_state.model_parallel_group is not None:
return True
else:
return False
@classmethod
def load_from_checkpoint(
cls,
checkpoint_path: str,
map_location: Any = None,
hparams_file: Optional[str] = None,
strict: bool = True,
**kwargs,
):
"""
Loads ModelPT from checkpoint, with some maintenance of restoration.
        For documentation, please refer to LightningModule.load_from_checkpoint().
"""
checkpoint = None
try:
cls._set_model_restore_state(is_being_restored=True)
# dist checkpoint is a dir
checkpoint_dir = None
if os.path.isdir(checkpoint_path):
# store dir for later use
checkpoint_dir = checkpoint_path
# metadata is stored in common.pt
checkpoint_path = os.path.join(checkpoint_path, 'common.pt')
# we defer loading the state_dict until the class has been initialized
# we need to set this for ptl_load_state
strict = False
# TODO: replace with proper PTL API
with pl_legacy_patch():
if map_location is not None:
checkpoint = pl_load(checkpoint_path, map_location=map_location)
else:
checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)
if hparams_file is not None:
extension = hparams_file.split(".")[-1]
if extension.lower() == "csv":
hparams = load_hparams_from_tags_csv(hparams_file)
elif extension.lower() in ("yml", "yaml"):
hparams = load_hparams_from_yaml(hparams_file)
else:
raise ValueError(".csv, .yml or .yaml is required for `hparams_file`")
hparams["on_gpu"] = False
# overwrite hparams by the given file
checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY] = hparams
# for past checkpoint need to add the new key
if cls.CHECKPOINT_HYPER_PARAMS_KEY not in checkpoint:
checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY] = {}
# override the hparams with values that were passed in
cfg = checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].get('cfg', checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY])
# TODO: can we do this without overriding?
config_kwargs = kwargs.copy()
if 'trainer' in config_kwargs:
config_kwargs.pop('trainer')
cfg.update(config_kwargs)
if cfg.get('megatron_amp_O2', False) and checkpoint_dir is None:
new_state_dict = {}
for key in checkpoint['state_dict'].keys():
new_key = key.replace('model.', 'model.module.', 1)
new_state_dict[new_key] = checkpoint['state_dict'][key]
checkpoint['state_dict'] = new_state_dict
if 'cfg' in kwargs:
model = ptl_load_state(cls, checkpoint, strict=strict, **kwargs)
else:
model = ptl_load_state(cls, checkpoint, strict=strict, cfg=cfg, **kwargs)
# cfg = checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].cfg
# if the checkpoint is distributed, we deferred loading the state_dict until now
if checkpoint_dir is not None:
sharded_state_dict = model.sharded_state_dict()
checkpoint['state_dict'] = sharded_state_dict
# dist checkpointing needs torch.distributed to load the checkpoint
if parallel_state.is_unitialized():
def dummy():
return
if model.trainer.strategy.launcher is not None:
model.trainer.strategy.launcher.launch(dummy, trainer=model.trainer)
model.trainer.strategy.setup_environment()
# load the checkpoint from disk
checkpoint = dist_checkpointing.load(sharded_state_dict=checkpoint, checkpoint_dir=checkpoint_dir)
# restore the weights
model.on_load_checkpoint(checkpoint)
if hasattr(model, 'setup_transformer_engine_tp_groups'):
model.setup_transformer_engine_tp_groups()
# NMT models do not have a `tokenizer` attribute, they instead have an encoder_tokenizer and decoder_tokenizer attribute.
if hasattr(cfg, "tokenizer"):
if cfg.tokenizer.get("tokenizer_model") is not None:
model.register_artifact("tokenizer.tokenizer_model", cfg.tokenizer.tokenizer_model)
if cfg.tokenizer.get("vocab_file") is not None:
model.register_artifact("tokenizer.vocab_file", cfg.tokenizer.vocab_file)
if cfg.tokenizer.get("merge_file") is not None:
model.register_artifact("tokenizer.merge_file", cfg.tokenizer.merge_file)
if hasattr(cfg, "encoder_tokenizer"):
if cfg.encoder_tokenizer.get("tokenizer_model") is not None:
model.register_artifact("encoder_tokenizer.tokenizer_model", cfg.encoder_tokenizer.tokenizer_model)
if cfg.encoder_tokenizer.get("vocab_file") is not None:
model.register_artifact("encoder_tokenizer.vocab_file", cfg.encoder_tokenizer.vocab_file)
if cfg.encoder_tokenizer.get("merge_file") is not None:
model.register_artifact("encoder_tokenizer.merge_file", cfg.encoder_tokenizer.merge_file)
if hasattr(cfg, "decoder_tokenizer"):
if cfg.decoder_tokenizer.get("tokenizer_model") is not None:
model.register_artifact("decoder_tokenizer.tokenizer_model", cfg.decoder_tokenizer.tokenizer_model)
if cfg.decoder_tokenizer.get("vocab_file") is not None:
model.register_artifact("decoder_tokenizer.vocab_file", cfg.decoder_tokenizer.vocab_file)
if cfg.decoder_tokenizer.get("merge_file") is not None:
model.register_artifact("decoder_tokenizer.merge_file", cfg.decoder_tokenizer.merge_file)
checkpoint = model
finally:
cls._set_model_restore_state(is_being_restored=False)
return checkpoint
def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):
        # starting with transformers v4.31.0, the buffer for position_ids is persistent=False
if (
self.bert_model is not None
and "position_ids" not in self.bert_model.embeddings._modules
and "bert_model.embeddings.position_ids" in state_dict
):
del state_dict["bert_model.embeddings.position_ids"]
results = super(NLPModel, self).load_state_dict(state_dict, strict=strict)
return results
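
# [Illustrative sketch, not part of the NeMo repository.] For megatron_amp_O2 checkpoints,
# load_from_checkpoint above rewrites every state_dict key's first 'model.' prefix to
# 'model.module.' before the weights are loaded. The same remapping, reduced to a
# standalone dictionary transform on invented example keys:
def remap_amp_o2_keys(state_dict: dict) -> dict:
    # replace only the first occurrence, matching key.replace('model.', 'model.module.', 1)
    return {key.replace('model.', 'model.module.', 1): value for key, value in state_dict.items()}

example = {'model.encoder.weight': 1, 'model.decoder.bias': 2}
assert remap_amp_o2_keys(example) == {
    'model.module.encoder.weight': 1,
    'model.module.decoder.bias': 2,
}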
| NeMo-main | nemo/collections/nlp/models/nlp_model.py |
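# [Illustrative sketch, not part of the NeMo repository.] _register_vocab_from_tokenizer
# above derives a cache directory from an md5 over the tokenizer name plus the id-sorted
# vocab, so identical vocabs from the same pretrained model always land in the same path.
# A minimal reproduction of that hashing scheme using only the standard library:
import hashlib
import json
import os
import tempfile

def vocab_cache_dir(tokenizer_name: str, vocab: dict, root: str) -> str:
    m = hashlib.md5()
    m.update(tokenizer_name.encode())
    # sort by token id so fast and slow tokenizer vocabularies hash identically
    vocab_sorted = dict(sorted(vocab.items(), key=lambda item: item[1]))
    m.update(json.dumps(vocab_sorted, sort_keys=True).encode())
    path = os.path.join(root, m.hexdigest())
    os.makedirs(path, exist_ok=True)
    return path

root = tempfile.mkdtemp()
d1 = vocab_cache_dir("bert-base-uncased", {"[PAD]": 0, "hello": 1}, root)
d2 = vocab_cache_dir("bert-base-uncased", {"hello": 1, "[PAD]": 0}, root)
assert d1 == d2  # same vocab content -> same cache directory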
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.duplex_text_normalization import (
DuplexDecoderModel,
DuplexTaggerModel,
DuplexTextNormalizationModel,
)
from nemo.collections.nlp.models.entity_linking.entity_linking_model import EntityLinkingModel
from nemo.collections.nlp.models.glue_benchmark.glue_benchmark_model import GLUEModel
from nemo.collections.nlp.models.information_retrieval import BertDPRModel, BertJointIRModel
from nemo.collections.nlp.models.intent_slot_classification import (
IntentSlotClassificationModel,
MultiLabelIntentSlotClassificationModel,
)
from nemo.collections.nlp.models.language_modeling import MegatronGPTPromptLearningModel
from nemo.collections.nlp.models.language_modeling.bert_lm_model import BERTLMModel
from nemo.collections.nlp.models.language_modeling.transformer_lm_model import TransformerLMModel
from nemo.collections.nlp.models.machine_translation import MTEncDecModel
from nemo.collections.nlp.models.question_answering.qa_model import QAModel
from nemo.collections.nlp.models.spellchecking_asr_customization import SpellcheckingAsrCustomizationModel
from nemo.collections.nlp.models.text2sparql.text2sparql_model import Text2SparqlModel
from nemo.collections.nlp.models.text_classification import TextClassificationModel
from nemo.collections.nlp.models.text_normalization_as_tagging import ThutmoseTaggerModel
from nemo.collections.nlp.models.token_classification import (
PunctuationCapitalizationLexicalAudioModel,
PunctuationCapitalizationModel,
TokenClassificationModel,
)
from nemo.collections.nlp.models.zero_shot_intent_recognition import ZeroShotIntentModel
| NeMo-main | nemo/collections/nlp/models/__init__.py |