import argparse
import logging
import os
import unittest
from interactive_asr.utils import setup_asr, transcribe_file
class ASRTest(unittest.TestCase):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
arguments_dict = {
"path": "/scratch/jamarshon/downloads/model.pt",
"input_file": "/scratch/jamarshon/audio/examples/interactive_asr/data/sample.wav",
"data": "/scratch/jamarshon/downloads",
"user_dir": "/scratch/jamarshon/fairseq-py/examples/speech_recognition",
"no_progress_bar": False,
"log_interval": 1000,
"log_format": None,
"tensorboard_logdir": "",
"tbmf_wrapper": False,
"seed": 1,
"cpu": True,
"fp16": False,
"memory_efficient_fp16": False,
"fp16_init_scale": 128,
"fp16_scale_window": None,
"fp16_scale_tolerance": 0.0,
"min_loss_scale": 0.0001,
"threshold_loss_scale": None,
"criterion": "cross_entropy",
"tokenizer": None,
"bpe": None,
"optimizer": "nag",
"lr_scheduler": "fixed",
"task": "speech_recognition",
"num_workers": 0,
"skip_invalid_size_inputs_valid_test": False,
"max_tokens": 10000000,
"max_sentences": None,
"required_batch_size_multiple": 8,
"dataset_impl": None,
"gen_subset": "test",
"num_shards": 1,
"shard_id": 0,
"remove_bpe": None,
"quiet": False,
"model_overrides": "{}",
"results_path": None,
"beam": 40,
"nbest": 1,
"max_len_a": 0,
"max_len_b": 200,
"min_len": 1,
"match_source_len": False,
"no_early_stop": False,
"unnormalized": False,
"no_beamable_mm": False,
"lenpen": 1,
"unkpen": 0,
"replace_unk": None,
"sacrebleu": False,
"score_reference": False,
"prefix_size": 0,
"no_repeat_ngram_size": 0,
"sampling": False,
"sampling_topk": -1,
"sampling_topp": -1.0,
"temperature": 1.0,
"diverse_beam_groups": -1,
"diverse_beam_strength": 0.5,
"print_alignment": False,
"ctc": False,
"rnnt": False,
"kspmodel": None,
"wfstlm": None,
"rnnt_decoding_type": "greedy",
"lm_weight": 0.2,
"rnnt_len_penalty": -0.5,
"momentum": 0.99,
"weight_decay": 0.0,
"force_anneal": None,
"lr_shrink": 0.1,
"warmup_updates": 0,
}
arguments_dict["path"] = os.environ.get("ASR_MODEL_PATH", None)
arguments_dict["input_file"] = os.environ.get("ASR_INPUT_FILE", None)
arguments_dict["data"] = os.environ.get("ASR_DATA_PATH", None)
arguments_dict["user_dir"] = os.environ.get("ASR_USER_DIR", None)
args = argparse.Namespace(**arguments_dict)
def test_transcribe_file(self):
task, generator, models, sp, tgt_dict = setup_asr(self.args, self.logger)
_, transcription = transcribe_file(
self.args, task, generator, models, sp, tgt_dict
)
expected_transcription = [["THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG"]]
self.assertEqual(transcription, expected_transcription, msg=str(transcription))
if __name__ == "__main__":
unittest.main()
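# A minimal sketch of how this test can be run (the env var names come from the
# overrides above; the paths and the file name are placeholders):
#
#   ASR_MODEL_PATH=/path/to/model.pt \
#   ASR_INPUT_FILE=/path/to/sample.wav \
#   ASR_DATA_PATH=/path/to/data \
#   ASR_USER_DIR=/path/to/fairseq/examples/speech_recognition \
#   python asr_test.py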
|
#!/usr/bin/env python3
"""This is the preprocessing script for HuBERT model training.
The script includes:
- File list creation
- MFCC/HuBERT feature extraction
- KMeans clustering model training
- Pseudo-label generation
"""
import logging
from argparse import ArgumentParser, RawTextHelpFormatter
from multiprocessing import Pool
from pathlib import Path
import torch
from utils import (
create_tsv,
dump_features,
learn_kmeans,
get_km_label,
)
def _init_logger(debug=False):
message_fmt = (
"%(levelname)5s: %(funcName)10s: %(message)s" if debug else "%(message)s"
)
logging.basicConfig(
level=logging.DEBUG if debug else logging.INFO,
format=f"%(asctime)s: {message_fmt}",
)
def _parse_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawTextHelpFormatter,
)
parser.add_argument("--debug", action="store_true", help="Enable debug log")
parser.add_argument("--dataset", default="librispeech", type=str, choices=["librispeech", "librilight"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``LibriSpeech`` or ``LibriLight`` is stored.",
)
parser.add_argument("--num-rank", default=5, type=int)
parser.add_argument("--feat-type", default="mfcc", type=str)
parser.add_argument("--use-gpu", default=False, type=bool)
parser.add_argument(
"--exp-dir",
type=Path,
help="The directory to store the experiment outputs.",
)
parser.add_argument(
"--num-cluster",
default=100,
type=int,
help="The number of clusters for KMeans clustering.",
)
args = parser.parse_args()
return args
def main(args):
_init_logger(args.debug)
if not args.exp_dir.exists():
args.exp_dir.mkdir()
tsv_dir = args.exp_dir / "tsv"
feat_dir = args.exp_dir / args.feat_type
km_dir = args.exp_dir / "km_model"
label_dir = args.exp_dir / "label"
if args.use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
# Create file lists for training and validation (optional)
create_tsv(args.root_dir, tsv_dir)
# Extract features for KMeans clustering
if not feat_dir.exists():
feat_dir.mkdir()
for split in ["train", "valid"]:
p = Pool(args.num_rank)
inputs = [(
tsv_dir / f"{args.dataset}_{split}.tsv",
feat_dir,
split,
rank,
args.num_rank,
device,
args.feat_type,
16_000,)
for rank in range(args.num_rank)
]
_ = p.starmap(dump_features, inputs)
p.close()
p.join()
# Fit KMeans clustering model
learn_kmeans(
feat_dir,
"train",
args.num_rank,
km_dir,
args.num_cluster,
)
# Predict labels for MFCC features
for split in ["train", "valid"]:
get_km_label(
feat_dir,
km_dir,
label_dir,
split,
args.num_rank,
device,
)
if __name__ == "__main__":
main(_parse_args())
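# A minimal sketch of a command line for this script (the script file name and the
# paths are placeholders; the flags are the ones defined in _parse_args above):
#
#   python preprocess.py \
#       --root-dir /path/to/dataset/parent \
#       --exp-dir ./exp \
#       --dataset librispeech \
#       --feat-type mfcc \
#       --num-rank 5 \
#       --num-cluster 100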
|
from .common_utils import create_tsv
from .feature_utils import dump_features
from .kmeans import learn_kmeans, get_km_label
__all__ = [
"create_tsv",
"dump_features",
"learn_kmeans",
"get_km_label",
]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
import logging
from pathlib import Path
from typing import (
Tuple,
)
import joblib
import torch
from sklearn.cluster import MiniBatchKMeans
from torch import Tensor
from .common_utils import _get_feat_lens_paths, _get_model_path
_LG = logging.getLogger(__name__)
def load_feature(
feat_dir: Path,
split: str,
num_rank: int,
) -> Tuple[Tensor, Tensor]:
r"""Loading features from pre-saved `.pt` files.
Args:
feat_dir (Path): The directory that stores the feature files.
split (str): The split of data. Options: [``train``, ``valid``].
num_rank (int): The number of ranks for multi-processing in feature extraction.
Returns:
(Tensor, Tensor)
Tensor: The concatenated feature tensor of shape `(frame, feature_dim)`.
Tensor: The lengths tensor of shape `(num_utterance,)`.
"""
feats = []
lens = []
for rank in range(num_rank):
feat_path, len_path = _get_feat_lens_paths(feat_dir, split, rank, num_rank)
feat = torch.load(feat_path)
length = torch.load(len_path)
feats.append(feat)
lens.append(length)
feats = torch.cat(feats)
lens = torch.cat(lens)
return feats, lens
def learn_kmeans(
feat_dir: Path,
split: str,
num_rank: int,
km_dir: Path,
n_clusters: int,
init: str = "k-means++",
max_iter: int = 100,
batch_size: int = 10000,
tol: float = 0.0,
n_init: int = 20,
reassignment_ratio: float = 0.0,
max_no_improvement: int = 100,
) -> None:
r"""Build and train the KMeans clustering model. The model is saved in "{km_dir}/model.pt"
Args:
feat_dir (Path): The directory that stores the feature files.
split (str): The split of data. Options: [``train``, ``valid``].
num_rank (int): The number of ranks for multi-processing in feature extraction.
km_dir (Path): The directory to store the KMeans clustering model.
n_clusters (int): The number of clusters.
init (str, optional): Method for initialization. Options: [``k-means++``, ``random``].
(Default: ``k-means++``)
max_iter (int, optional): Maximum number of iterations over the complete dataset. (Default: 100)
batch_size (int, optional): Batch size for training the KMeans clustering model. (Default: 10000)
tol (float, optional): Control early stopping based on the relative center changes as measured by a smoothed,
variance-normalized mean of the squared center position changes. (Default: 0.0)
n_init (int, optional): Number of random initializations that are tried. (Default: 20)
reassignment_ratio (float, optional): Control the fraction of the maximum number of counts for a center
to be reassigned. A higher value means that low count centers are more easily reassigned. (Default: 0.0)
max_no_improvement (int, optional): Control early stopping based on the consecutive number of mini batches
that do not yield an improvement on the smoothed inertia. (Default: 100)
Returns:
None
"""
if not km_dir.exists():
km_dir.mkdir()
km_model = MiniBatchKMeans(
n_clusters=n_clusters,
init=init,
max_iter=max_iter,
batch_size=batch_size,
verbose=0,
compute_labels=False,
tol=tol,
max_no_improvement=max_no_improvement,
init_size=None,
n_init=n_init,
reassignment_ratio=reassignment_ratio,
)
feats, _ = load_feature(
feat_dir,
split,
num_rank,
)
feats = feats.numpy()
km_model.fit(feats)
km_path = _get_model_path(km_dir)
joblib.dump(km_model, km_path)
inertia = -km_model.score(feats) / len(feats)
_LG.info("Total intertia: %.5f", inertia)
_LG.info("Finished training the KMeans clustering model successfully")
class ApplyKmeans(object):
def __init__(self, km_path, device):
self.km_model = joblib.load(km_path)
self.C_np = self.km_model.cluster_centers_.transpose()
self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True)
self.C = torch.from_numpy(self.C_np).to(device)
self.Cnorm = torch.from_numpy(self.Cnorm_np).to(device)
def __call__(self, x):
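# Squared Euclidean distance from every frame in x to every centroid, expanded as
# ||x||^2 - 2 * x @ C + ||C||^2; argmin over the centroid axis gives the cluster id.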
dist = (
x.pow(2).sum(1, keepdim=True)
- 2 * torch.matmul(x, self.C)
+ self.Cnorm
)
return dist.argmin(dim=1).cpu().numpy()
def get_km_label(
feat_dir: Path,
km_dir: Path,
label_dir: Path,
split: str,
num_rank: int,
device: torch.device,
) -> None:
r"""Predict the labels by the KMeans clustering model.
Args:
feat_dir (Path): The directory that stores the dumped features.
km_dir (Path): The directory that stores the KMeans model.
label_dir (Path): The directory to save the predicted labels.
split (str): The split of data. Options: [``train``, ``valid``].
num_rank (int): The number of ranks for multi-processing in feature extraction.
device (torch.device): The location to allocate for PyTorch Tensors.
Options: [``torch.device('cpu')``, ``torch.device('cuda')``].
Returns:
None
"""
if not label_dir.exists():
label_dir.mkdir()
km_path = _get_model_path(km_dir)
label_path = label_dir / f"label_{split}.pt"
apply_kmeans = ApplyKmeans(km_path, device)
feats, lens = load_feature(
feat_dir,
split,
num_rank,
)
lens = lens.long()
offset = 0
assert feats.shape[0] == lens.sum()
with open(label_path, "w") as f:
for i in range(lens.shape[0]):
feat = feats[offset:offset + lens[i]].to(device)
offset += lens[i]
label = apply_kmeans(feat).tolist()
f.write(" ".join(map(str, label)) + "\n")
_LG.info("Finished predicting labels successfully")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
import logging
from pathlib import Path
from typing import (
Tuple,
Union,
)
import torch
import torchaudio
from torch import Tensor
from .common_utils import _get_feat_lens_paths
_LG = logging.getLogger(__name__)
def get_shard_range(
num_lines: int,
num_rank: int,
rank: int
) -> Tuple[int, int]:
r"""Get the range of indices for the current rank in multi-processing.
Args:
num_lines (int): The number of lines to process.
num_rank (int): The number of ranks for multi-processing in feature extraction.
rank (int): The rank in the multi-processing.
Returns:
(int, int):
int: The start index for the current rank.
int: The end index for the current rank.
"""
assert 0 <= rank < num_rank, f"invalid rank/num_rank {rank}/{num_rank}"
assert num_lines > 0, f"Found {num_lines} files, make sure you specify the correct root directory"
start = round(num_lines / num_rank * rank)
end = round(num_lines / num_rank * (rank + 1))
_LG.info(
f"rank {rank} of {num_rank}, process {end-start} "
f"({start}-{end}) out of {num_lines}"
)
return start, end
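# Worked example (not part of the pipeline): with num_lines=10 and num_rank=4 the
# shards are (0, 2), (2, 5), (5, 8) and (8, 10); note that Python's round() rounds
# halves to the nearest even integer, so the boundaries are not always ceil/floor.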
def extract_feature(
path: str,
device: torch.device,
feature_type: str,
sample_rate: int,
) -> Tensor:
r"""Extract features for KMeans clustering and pseudo label prediction.
Args:
path (str): The file path of the audio.
device (torch.device): The location to allocate for PyTorch Tensors.
Options: [``torch.device('cpu')``, ``torch.device('cuda')``].
feature_type (str): The type of the desired feature. Options: [``mfcc``, ``hubert``].
sample_rate (int): The sample rate of the audio.
Returns:
Tensor: The desired feature tensor of the given audio file.
"""
waveform, sr = torchaudio.load(path)
assert sr == sample_rate
waveform = waveform[0].to(device)
if feature_type == "mfcc":
feature_extractor = torchaudio.transforms.MFCC(
sample_rate=sample_rate,
n_mfcc=13,
melkwargs={'n_fft': 400, 'hop_length': 160, 'center': False}
).to(device)
mfccs = feature_extractor(waveform) # (freq, time)
# mfccs = torchaudio.compliance.kaldi.mfcc(
# waveform=waveform,
# sample_frequency=sample_rate,
# use_energy=False,
# ) # (time, freq)
# mfccs = mfccs.transpose(0, 1) # (freq, time)
deltas = torchaudio.functional.compute_deltas(mfccs)
ddeltas = torchaudio.functional.compute_deltas(deltas)
concat = torch.cat([mfccs, deltas, ddeltas], dim=0)
concat = concat.transpose(0, 1) # (time, freq)
return concat
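# For the "mfcc" branch above, the returned tensor stacks 13 MFCCs with their deltas
# and delta-deltas, i.e. shape (time, 39) for a single-channel input waveform.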
def dump_features(
tsv_file: Union[str, Path],
out_dir: Union[str, Path],
split: str,
rank: int,
num_rank: int,
device: torch.device,
feature_type: str = "mfcc",
sample_rate: int = 16_000,
) -> None:
r"""Dump the feature tensors given a ``.tsv`` file list. The feature and lengths tensors
will be stored under ``out_dir`` directory.
Args:
tsv_file (str or Path): The path of the tsv file.
out_dir (str or Path): The directory to store the feature tensors.
split (str): The split of data. Options: [``train``, ``valid``].
rank (int): The rank in the multi-processing.
num_rank (int): The number of ranks for multi-processing in feature extraction.
device (torch.device): The location to allocate for PyTorch Tensors.
Options: [``torch.device('cpu')``, ``torch.device('cuda')``].
feature_type (str, optional): The type of the desired feature. Options: [``mfcc``, ``hubert``].
(Default: ``mfcc``)
sample_rate (int, optional): The sample rate of the audio. (Default: 16000)
Returns:
None
"""
if feature_type not in ["mfcc", "hubert"]:
raise ValueError("Unexpected feature type.")
features = []
lens = []
out_dir = Path(out_dir)
feat_path, len_path = _get_feat_lens_paths(out_dir, split, rank, num_rank)
with open(tsv_file, "r") as f:
root = f.readline().rstrip()
lines = [line.rstrip() for line in f]
start, end = get_shard_range(len(lines), num_rank, rank)
lines = lines[start:end]
for line in lines:
path, nsample = line.split("\t")
path = f"{root}/{path}"
nsample = int(nsample)
feature = extract_feature(path, device, feature_type, sample_rate)
features.append(feature.cpu())
lens.append(feature.shape[0])
features = torch.cat(features)
lens = torch.Tensor(lens)
torch.save(features, feat_path)
torch.save(lens, len_path)
_LG.info(f"Finished dumping features for rank {rank} of {num_rank} successfully")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
"""
Data pre-processing: create tsv files for training (and validation).
"""
import logging
import re
from pathlib import Path
from typing import (
Tuple,
Union,
)
import torch
import torchaudio
_LG = logging.getLogger(__name__)
def create_tsv(
root_dir: Union[str, Path],
out_dir: Union[str, Path],
dataset: str = "librispeech",
valid_percent: float = 0.01,
seed: int = 0,
extension: str = "flac",
) -> None:
"""Create file lists for training and validation.
Args:
root_dir (str or Path): The directory of the dataset.
out_dir (str or Path): The directory to store the file lists.
dataset (str, optional): The dataset to use. Options:
[``librispeech``, ``librilight``]. (Default: ``librispeech``)
valid_percent (float, optional): The percentage of data for validation. (Default: 0.01)
seed (int, optional): The seed for randomly selecting the validation files. (Default: 0)
extension (str, optional): The extension of audio files. (Default: ``flac``)
Returns:
None
"""
assert valid_percent >= 0 and valid_percent <= 1.0
torch.manual_seed(seed)
root_dir = Path(root_dir)
out_dir = Path(out_dir)
if not out_dir.exists():
out_dir.mkdir()
valid_f = (
open(out_dir / f"{dataset}_valid.tsv", "w")
if valid_percent > 0
else None
)
search_pattern = ".*train.*"
with open(out_dir / f"{dataset}_train.tsv", "w") as train_f:
print(root_dir, file=train_f)
if valid_f is not None:
print(root_dir, file=valid_f)
for fname in root_dir.glob(f"**/*.{extension}"):
if re.match(search_pattern, str(fname)):
frames = torchaudio.info(fname).num_frames
dest = train_f if torch.rand(1) > valid_percent else valid_f
print(
f"{fname.relative_to(root_dir)}\t{frames}", file=dest
)
if valid_f is not None:
valid_f.close()
_LG.info("Finished creating the file lists successfully")
def _get_feat_lens_paths(
feat_dir: Path,
split: str,
rank: int,
num_rank: int
) -> Tuple[Path, Path]:
r"""Get the feature and lengths paths based on feature directory,
data split, rank, and number of ranks.
Args:
feat_dir (Path): The directory that stores the feature and lengths tensors.
split (str): The split of data. Options: [``train``, ``valid``].
rank (int): The rank in the multi-processing.
num_rank (int): The number of ranks for multi-processing in feature extraction.
Returns:
(Path, Path)
Path: The file path of the feature tensor for the current rank.
Path: The file path of the lengths tensor for the current rank.
"""
feat_path = feat_dir / f"{split}_{rank}_{num_rank}.pt"
len_path = feat_dir / f"len_{split}_{rank}_{num_rank}.pt"
return feat_path, len_path
def _get_model_path(
km_dir: Path
) -> Path:
r"""Get the file path of the KMeans clustering model
Args:
km_dir (Path): The directory to store the KMeans clustering model.
Returns:
Path: The file path of the model.
"""
return km_dir / "model.pt"
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from typing import Tuple, Callable, List
import torch
from torch import Tensor
from torch.utils.data.dataset import random_split
from torchaudio.datasets import LJSPEECH
class SpectralNormalization(torch.nn.Module):
def forward(self, input):
return torch.log(torch.clamp(input, min=1e-5))
class InverseSpectralNormalization(torch.nn.Module):
def forward(self, input):
return torch.exp(input)
class MapMemoryCache(torch.utils.data.Dataset):
r"""Wrap a dataset so that, whenever a new item is returned, it is saved to memory.
"""
def __init__(self, dataset):
self.dataset = dataset
self._cache = [None] * len(dataset)
def __getitem__(self, n):
if self._cache[n] is not None:
return self._cache[n]
item = self.dataset[n]
self._cache[n] = item
return item
def __len__(self):
return len(self.dataset)
class Processed(torch.utils.data.Dataset):
def __init__(self, dataset, transforms, text_preprocessor):
self.dataset = dataset
self.transforms = transforms
self.text_preprocessor = text_preprocessor
def __getitem__(self, key):
item = self.dataset[key]
return self.process_datapoint(item)
def __len__(self):
return len(self.dataset)
def process_datapoint(self, item):
melspec = self.transforms(item[0])
text_norm = torch.IntTensor(self.text_preprocessor(item[2]))
return text_norm, torch.squeeze(melspec, 0)
def split_process_dataset(dataset: str,
file_path: str,
val_ratio: float,
transforms: Callable,
text_preprocessor: Callable[[str], List[int]],
) -> Tuple[torch.utils.data.Dataset, torch.utils.data.Dataset]:
"""Returns the Training and validation datasets.
Args:
dataset (str): The dataset to use. Available options: [`'ljspeech'`]
file_path (str): Path to the data.
val_ratio (float): The ratio of waveforms reserved for validation.
transforms (callable): A function/transform that takes in a waveform and
returns a transformed waveform (mel spectrogram in this example).
text_preprocessor (callable): A function that takes in a string and
returns a list of integers representing each symbol in the string.
Returns:
train_dataset (`torch.utils.data.Dataset`): The training set.
val_dataset (`torch.utils.data.Dataset`): The validation set.
"""
if dataset == 'ljspeech':
data = LJSPEECH(root=file_path, download=False)
val_length = int(len(data) * val_ratio)
lengths = [len(data) - val_length, val_length]
train_dataset, val_dataset = random_split(data, lengths)
else:
raise ValueError(f"Expected datasets: `ljspeech`, but found {dataset}")
train_dataset = Processed(train_dataset, transforms, text_preprocessor)
val_dataset = Processed(val_dataset, transforms, text_preprocessor)
train_dataset = MapMemoryCache(train_dataset)
val_dataset = MapMemoryCache(val_dataset)
return train_dataset, val_dataset
def text_mel_collate_fn(batch: List[Tuple[Tensor, Tensor]],
n_frames_per_step: int = 1) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""The collate function that pads the batch and adjusts the mel spectrogram length based on ``n_frames_per_step``.
Modified from https://github.com/NVIDIA/DeepLearningExamples
Args:
batch (list of tuples): Each element is a tuple of a text tensor with shape (n_symbols, )
and a mel spectrogram tensor with shape (n_mels, n_frames).
n_frames_per_step (int, optional): The number of frames to advance every step.
Returns:
text_padded (Tensor): The input text to Tacotron2 with shape (n_batch, max of ``text_lengths``).
text_lengths (Tensor): The length of each text with shape (n_batch).
mel_specgram_padded (Tensor): The target mel spectrogram
with shape (n_batch, n_mels, max of ``mel_specgram_lengths``)
mel_specgram_lengths (Tensor): The length of each mel spectrogram with shape (n_batch).
gate_padded (Tensor): The ground truth gate output
with shape (n_batch, max of ``mel_specgram_lengths``)
"""
text_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]), dim=0, descending=True)
max_input_len = text_lengths[0]
text_padded = torch.zeros((len(batch), max_input_len), dtype=torch.int64)
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
if max_target_len % n_frames_per_step != 0:
max_target_len += n_frames_per_step - max_target_len % n_frames_per_step
assert max_target_len % n_frames_per_step == 0
# include mel padded and gate padded
mel_specgram_padded = torch.zeros((len(batch), num_mels, max_target_len), dtype=torch.float32)
gate_padded = torch.zeros((len(batch), max_target_len), dtype=torch.float32)
mel_specgram_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_specgram_padded[i, :, :mel.size(1)] = mel
mel_specgram_lengths[i] = mel.size(1)
gate_padded[i, mel.size(1) - 1:] = 1
return text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths, gate_padded
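# Shape sketch (hypothetical batch, n_frames_per_step=1): for two items whose text
# lengths are 5 and 3 and whose mel spectrograms have 100 and 80 frames, the function
# returns text_padded (2, 5), text_lengths [5, 3], mel_specgram_padded (2, n_mels, 100),
# mel_specgram_lengths [100, 80], and gate_padded (2, 100) with ones from each item's
# last frame onward (items are ordered by decreasing text length).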
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from typing import Tuple
from torch import nn, Tensor
class Tacotron2Loss(nn.Module):
"""Tacotron2 loss function modified from:
https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/tacotron2/loss_function.py
"""
def __init__(self):
super().__init__()
self.mse_loss = nn.MSELoss(reduction="mean")
self.bce_loss = nn.BCEWithLogitsLoss(reduction="mean")
def forward(
self,
model_outputs: Tuple[Tensor, Tensor, Tensor],
targets: Tuple[Tensor, Tensor],
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Pass the input through the Tacotron2 loss.
The original implementation was introduced in
*Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions*
[:footcite:`shen2018natural`].
Args:
model_outputs (tuple of three Tensors): The outputs of the
Tacotron2. These outputs should include three items:
(1) the predicted mel spectrogram before the postnet (``mel_specgram``)
with shape (batch, mel, time).
(2) predicted mel spectrogram after the postnet (``mel_specgram_postnet``)
with shape (batch, mel, time), and
(3) the stop token prediction (``gate_out``) with shape (batch, ).
targets (tuple of two Tensors): The ground truth mel spectrogram (batch, mel, time) and
stop token with shape (batch, ).
Returns:
mel_loss (Tensor): The mean MSE of the mel_specgram and ground truth mel spectrogram
with shape ``torch.Size([])``.
mel_postnet_loss (Tensor): The mean MSE of the mel_specgram_postnet and
ground truth mel spectrogram with shape ``torch.Size([])``.
gate_loss (Tensor): The mean binary cross entropy loss of
the prediction on the stop token with shape ``torch.Size([])``.
"""
mel_target, gate_target = targets[0], targets[1]
gate_target = gate_target.view(-1, 1)
mel_specgram, mel_specgram_postnet, gate_out = model_outputs
gate_out = gate_out.view(-1, 1)
mel_loss = self.mse_loss(mel_specgram, mel_target)
mel_postnet_loss = self.mse_loss(mel_specgram_postnet, mel_target)
gate_loss = self.bce_loss(gate_out, gate_target)
return mel_loss, mel_postnet_loss, gate_loss
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import logging
import os
import shutil
from typing import List, Tuple, Callable
import torch
from torch import Tensor
def save_checkpoint(state, is_best, filename):
r"""Save the model to a temporary file first, then copy it to filename,
in case signals interrupt the torch.save() process.
"""
torch.save(state, filename)
logging.info(f"Checkpoint saved to {filename}")
if is_best:
path, best_filename = os.path.split(filename)
best_filename = os.path.join(path, "best_" + best_filename)
shutil.copyfile(filename, best_filename)
logging.info(f"Current best checkpoint saved to {best_filename}")
def pad_sequences(batch: List[Tensor]) -> Tuple[Tensor, Tensor]:
r"""Right zero-pad all one-hot text sequences to max input length.
Modified from https://github.com/NVIDIA/DeepLearningExamples.
"""
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x) for x in batch]), dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]]
text_padded[i, :text.size(0)] = text
return text_padded, input_lengths
def prepare_input_sequence(texts: List[str],
text_processor: Callable[[str], List[int]]) -> Tuple[Tensor, Tensor]:
d = []
for text in texts:
d.append(torch.IntTensor(text_processor(text)[:]))
text_padded, input_lengths = pad_sequences(d)
return text_padded, input_lengths
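# Shape sketch (hypothetical inputs): for two texts whose processed symbol sequences
# have lengths 11 and 5, text_padded has shape (2, 11) with the longer sequence first
# and zeros padding the shorter one, and input_lengths is tensor([11, 5]).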
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
"""
Modified from
https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/train.py
"""
import argparse
from datetime import datetime
from functools import partial
import logging
import random
import os
from time import time
import torch
import torchaudio
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch.optim import Adam
from torchaudio.models import Tacotron2
from tqdm import tqdm
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from datasets import text_mel_collate_fn, split_process_dataset, SpectralNormalization
from utils import save_checkpoint
from loss import Tacotron2Loss
from text.text_preprocessing import (
available_symbol_set,
available_phonemizers,
get_symbol_list,
text_to_sequence,
)
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(os.path.basename(__file__))
def parse_args(parser):
"""Parse commandline arguments."""
parser.add_argument("--dataset", default="ljspeech", choices=["ljspeech"], type=str,
help="select dataset to train with")
parser.add_argument('--logging-dir', type=str, default=None,
help='directory to save the log files')
parser.add_argument('--dataset-path', type=str, default='./',
help='path to dataset')
parser.add_argument("--val-ratio", default=0.1, type=float,
help="the ratio of waveforms for validation")
parser.add_argument('--anneal-steps', nargs='*',
help='epochs after which decrease learning rate')
parser.add_argument('--anneal-factor', type=float, choices=[0.1, 0.3], default=0.1,
help='factor for annealing learning rate')
parser.add_argument('--master-addr', default=None, type=str,
help='the address to use for distributed training')
parser.add_argument('--master-port', default=None, type=str,
help='the port to use for distributed training')
preprocessor = parser.add_argument_group('text preprocessor setup')
preprocessor.add_argument('--text-preprocessor', default='english_characters', type=str,
choices=available_symbol_set,
help='select text preprocessor to use.')
preprocessor.add_argument('--phonemizer', type=str, choices=available_phonemizers,
help='select phonemizer to use, only used when text-preprocessor is "english_phonemes"')
preprocessor.add_argument('--phonemizer-checkpoint', type=str,
help='the path or name of the checkpoint for the phonemizer, '
'only used when text-preprocessor is "english_phonemes"')
preprocessor.add_argument('--cmudict-root', default="./", type=str,
help='the root directory for storing cmudictionary files')
# training
training = parser.add_argument_group('training setup')
training.add_argument('--epochs', type=int, required=True,
help='number of total epochs to run')
training.add_argument('--checkpoint-path', type=str, default='',
help='checkpoint path. If a file exists, '
'the program will load it and resume training.')
training.add_argument('--workers', default=8, type=int,
help="number of data loading workers")
training.add_argument("--validate-and-checkpoint-freq", default=10, type=int, metavar="N",
help="validation and saving checkpoint frequency in epochs",)
training.add_argument("--logging-freq", default=10, type=int, metavar="N",
help="logging frequency in epochs")
optimization = parser.add_argument_group('optimization setup')
optimization.add_argument('--learning-rate', default=1e-3, type=float,
help='initial learning rate')
optimization.add_argument('--weight-decay', default=1e-6, type=float,
help='weight decay')
optimization.add_argument('--batch-size', default=32, type=int,
help='batch size per GPU')
optimization.add_argument('--grad-clip', default=5.0, type=float,
help='clipping gradient with maximum gradient norm value')
# model parameters
model = parser.add_argument_group('model parameters')
model.add_argument('--mask-padding', action='store_true', default=False,
help='use mask padding')
model.add_argument('--symbols-embedding-dim', default=512, type=int,
help='input embedding dimension')
# encoder
model.add_argument('--encoder-embedding-dim', default=512, type=int,
help='encoder embedding dimension')
model.add_argument('--encoder-n-convolution', default=3, type=int,
help='number of encoder convolutions')
model.add_argument('--encoder-kernel-size', default=5, type=int,
help='encoder kernel size')
# decoder
model.add_argument('--n-frames-per-step', default=1, type=int,
help='number of frames processed per step (currently only 1 is supported)')
model.add_argument('--decoder-rnn-dim', default=1024, type=int,
help='number of units in decoder LSTM')
model.add_argument('--decoder-dropout', default=0.1, type=float,
help='dropout probability for decoder LSTM')
model.add_argument('--decoder-max-step', default=2000, type=int,
help='maximum number of output mel spectrograms')
model.add_argument('--decoder-no-early-stopping', action='store_true', default=False,
help='stop decoding only when all samples are finished')
# attention model
model.add_argument('--attention-hidden-dim', default=128, type=int,
help='dimension of attention hidden representation')
model.add_argument('--attention-rnn-dim', default=1024, type=int,
help='number of units in attention LSTM')
model.add_argument('--attention-location-n-filter', default=32, type=int,
help='number of filters for location-sensitive attention')
model.add_argument('--attention-location-kernel-size', default=31, type=int,
help='kernel size for location-sensitive attention')
model.add_argument('--attention-dropout', default=0.1, type=float,
help='dropout probability for attention LSTM')
model.add_argument('--prenet-dim', default=256, type=int,
help='number of ReLU units in prenet layers')
# mel-post processing network parameters
model.add_argument('--postnet-n-convolution', default=5, type=int,
help='number of postnet convolutions')
model.add_argument('--postnet-kernel-size', default=5, type=int,
help='postnet kernel size')
model.add_argument('--postnet-embedding-dim', default=512, type=int,
help='postnet embedding dimension')
model.add_argument('--gate-threshold', default=0.5, type=float,
help='probability threshold for stop token')
# audio parameters
audio = parser.add_argument_group('audio parameters')
audio.add_argument('--sample-rate', default=22050, type=int,
help='Sampling rate')
audio.add_argument('--n-fft', default=1024, type=int,
help='Filter length for STFT')
audio.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
audio.add_argument('--win-length', default=1024, type=int,
help='Window length')
audio.add_argument('--n-mels', default=80, type=int,
help='Number of mel bins')
audio.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
audio.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
return parser
def adjust_learning_rate(epoch, optimizer, learning_rate,
anneal_steps, anneal_factor):
"""Adjust learning rate base on the initial setting."""
p = 0
if anneal_steps is not None:
for _, a_step in enumerate(anneal_steps):
if epoch >= int(a_step):
p = p + 1
if anneal_factor == 0.3:
lr = learning_rate * ((0.1 ** (p // 2)) * (1.0 if p % 2 == 0 else 0.3))
else:
lr = learning_rate * (anneal_factor ** p)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
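# For example, adjust_learning_rate(epoch, optimizer, lr0, ["500", "1000"], 0.1)
# keeps lr0 for epoch < 500, uses lr0 * 0.1 for 500 <= epoch < 1000, and lr0 * 0.01
# from epoch 1000 onward (anneal_factor=0.3 switches to the staircase rule above).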
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return x
def batch_to_gpu(batch):
text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths, gate_padded = batch
text_padded = to_gpu(text_padded).long()
text_lengths = to_gpu(text_lengths).long()
mel_specgram_padded = to_gpu(mel_specgram_padded).float()
gate_padded = to_gpu(gate_padded).float()
mel_specgram_lengths = to_gpu(mel_specgram_lengths).long()
x = (text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths)
y = (mel_specgram_padded, gate_padded)
return x, y
def training_step(model, train_batch, batch_idx):
(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths), y = batch_to_gpu(train_batch)
y_pred = model(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths)
y[0].requires_grad = False
y[1].requires_grad = False
losses = Tacotron2Loss()(y_pred[:3], y)
return losses[0] + losses[1] + losses[2], losses
def validation_step(model, val_batch, batch_idx):
(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths), y = batch_to_gpu(val_batch)
y_pred = model(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths)
losses = Tacotron2Loss()(y_pred[:3], y)
return losses[0] + losses[1] + losses[2], losses
def reduce_tensor(tensor, world_size):
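# All-reduce (sum) the value across workers, then divide by world_size so the
# returned tensor is the average; integer tensors fall back to floor division.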
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
if rt.is_floating_point():
rt = rt / world_size
else:
rt = rt // world_size
return rt
def log_additional_info(writer, model, loader, epoch):
model.eval()
data = next(iter(loader))
with torch.no_grad():
(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths), _ = batch_to_gpu(data)
y_pred = model(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths)
mel_out, mel_out_postnet, gate_out, alignment = y_pred
fig = plt.figure()
ax = plt.gca()
ax.imshow(mel_out[0].cpu().numpy())
writer.add_figure("trn/mel_out", fig, epoch)
fig = plt.figure()
ax = plt.gca()
ax.imshow(mel_out_postnet[0].cpu().numpy())
writer.add_figure("trn/mel_out_postnet", fig, epoch)
writer.add_image("trn/gate_out", torch.tile(gate_out[:1], (10, 1)), epoch, dataformats="HW")
writer.add_image("trn/alignment", alignment[0], epoch, dataformats="HW")
def get_datasets(args):
text_preprocessor = partial(
text_to_sequence,
symbol_list=args.text_preprocessor,
phonemizer=args.phonemizer,
checkpoint=args.phonemizer_checkpoint,
cmudict_root=args.cmudict_root,
)
transforms = torch.nn.Sequential(
torchaudio.transforms.MelSpectrogram(
sample_rate=args.sample_rate,
n_fft=args.n_fft,
win_length=args.win_length,
hop_length=args.hop_length,
f_min=args.mel_fmin,
f_max=args.mel_fmax,
n_mels=args.n_mels,
mel_scale='slaney',
normalized=False,
power=1,
norm='slaney',
),
SpectralNormalization()
)
trainset, valset = split_process_dataset(
args.dataset, args.dataset_path, args.val_ratio, transforms, text_preprocessor)
return trainset, valset
def train(rank, world_size, args):
dist.init_process_group("nccl", rank=rank, world_size=world_size)
if rank == 0 and args.logging_dir:
if not os.path.isdir(args.logging_dir):
os.makedirs(args.logging_dir)
filehandler = logging.FileHandler(os.path.join(args.logging_dir, 'train.log'))
filehandler.setLevel(logging.INFO)
logger.addHandler(filehandler)
writer = SummaryWriter(log_dir=args.logging_dir)
else:
writer = None
torch.manual_seed(0)
torch.cuda.set_device(rank)
symbols = get_symbol_list(args.text_preprocessor)
model = Tacotron2(
mask_padding=args.mask_padding,
n_mels=args.n_mels,
n_symbol=len(symbols),
n_frames_per_step=args.n_frames_per_step,
symbol_embedding_dim=args.symbols_embedding_dim,
encoder_embedding_dim=args.encoder_embedding_dim,
encoder_n_convolution=args.encoder_n_convolution,
encoder_kernel_size=args.encoder_kernel_size,
decoder_rnn_dim=args.decoder_rnn_dim,
decoder_max_step=args.decoder_max_step,
decoder_dropout=args.decoder_dropout,
decoder_early_stopping=(not args.decoder_no_early_stopping),
attention_rnn_dim=args.attention_rnn_dim,
attention_hidden_dim=args.attention_hidden_dim,
attention_location_n_filter=args.attention_location_n_filter,
attention_location_kernel_size=args.attention_location_kernel_size,
attention_dropout=args.attention_dropout,
prenet_dim=args.prenet_dim,
postnet_n_convolution=args.postnet_n_convolution,
postnet_kernel_size=args.postnet_kernel_size,
postnet_embedding_dim=args.postnet_embedding_dim,
gate_threshold=args.gate_threshold,
).cuda(rank)
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
optimizer = Adam(model.parameters(), lr=args.learning_rate)
best_loss = float("inf")
start_epoch = 0
if args.checkpoint_path and os.path.isfile(args.checkpoint_path):
logger.info(f"Checkpoint: loading '{args.checkpoint_path}'")
map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}
checkpoint = torch.load(args.checkpoint_path, map_location=map_location)
start_epoch = checkpoint["epoch"]
best_loss = checkpoint["best_loss"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
logger.info(
f"Checkpoint: loaded '{args.checkpoint_path}' at epoch {checkpoint['epoch']}"
)
trainset, valset = get_datasets(args)
train_sampler = torch.utils.data.distributed.DistributedSampler(
trainset,
shuffle=True,
num_replicas=world_size,
rank=rank,
)
val_sampler = torch.utils.data.distributed.DistributedSampler(
valset,
shuffle=False,
num_replicas=world_size,
rank=rank,
)
loader_params = {
"batch_size": args.batch_size,
"num_workers": args.workers,
"prefetch_factor": 1024,
'persistent_workers': True,
"shuffle": False,
"pin_memory": True,
"drop_last": False,
"collate_fn": partial(text_mel_collate_fn, n_frames_per_step=args.n_frames_per_step),
}
train_loader = DataLoader(trainset, sampler=train_sampler, **loader_params)
val_loader = DataLoader(valset, sampler=val_sampler, **loader_params)
dist.barrier()
for epoch in range(start_epoch, args.epochs):
start = time()
model.train()
trn_loss, counts = 0, 0
if rank == 0:
iterator = tqdm(enumerate(train_loader), desc=f"Epoch {epoch}", total=len(train_loader))
else:
iterator = enumerate(train_loader)
for i, batch in iterator:
adjust_learning_rate(epoch, optimizer, args.learning_rate,
args.anneal_steps, args.anneal_factor)
model.zero_grad()
loss, losses = training_step(model, batch, i)
loss.backward()
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip)
optimizer.step()
if rank == 0 and writer:
global_iters = epoch * len(train_loader) + i
writer.add_scalar("trn/mel_loss", losses[0], global_iters)
writer.add_scalar("trn/mel_postnet_loss", losses[1], global_iters)
writer.add_scalar("trn/gate_loss", losses[2], global_iters)
trn_loss += loss * len(batch[0])
counts += len(batch[0])
trn_loss = trn_loss / counts
trn_loss = reduce_tensor(trn_loss, world_size)
if rank == 0:
logger.info(f"[Epoch: {epoch}] time: {time()-start}; trn_loss: {trn_loss}")
if writer:
writer.add_scalar("trn_loss", trn_loss, epoch)
if ((epoch + 1) % args.validate_and_checkpoint_freq == 0) or (epoch == args.epochs - 1):
val_start_time = time()
model.eval()
val_loss, counts = 0, 0
iterator = tqdm(enumerate(val_loader), desc=f"[Rank: {rank}; Epoch: {epoch}; Eval]", total=len(val_loader))
with torch.no_grad():
for val_batch_idx, val_batch in iterator:
val_loss = val_loss + validation_step(model, val_batch, val_batch_idx)[0] * len(val_batch[0])
counts = counts + len(val_batch[0])
val_loss = val_loss / counts
val_loss = reduce_tensor(val_loss, world_size)
if rank == 0 and writer:
writer.add_scalar("val_loss", val_loss, epoch)
log_additional_info(writer, model, val_loader, epoch)
if rank == 0:
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
logger.info(f"[Rank: {rank}, Epoch: {epoch}; Eval] time: {time()-val_start_time}; val_loss: {val_loss}")
logger.info(f"[Epoch: {epoch}] Saving checkpoint to {args.checkpoint_path}")
save_checkpoint(
{
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
},
is_best,
args.checkpoint_path,
)
dist.destroy_process_group()
def main(args):
logger.info("Start time: {}".format(str(datetime.now())))
torch.manual_seed(0)
random.seed(0)
if args.master_addr is not None:
os.environ['MASTER_ADDR'] = args.master_addr
elif 'MASTER_ADDR' not in os.environ:
os.environ['MASTER_ADDR'] = 'localhost'
if args.master_port is not None:
os.environ['MASTER_PORT'] = args.master_port
elif 'MASTER_PORT' not in os.environ:
os.environ['MASTER_PORT'] = '17778'
device_counts = torch.cuda.device_count()
logger.info(f"# available GPUs: {device_counts}")
# download dataset if not already downloaded
if args.dataset == 'ljspeech':
if not os.path.exists(os.path.join(args.dataset_path, 'LJSpeech-1.1')):
from torchaudio.datasets import LJSPEECH
LJSPEECH(root=args.dataset_path, download=True)
if device_counts == 1:
train(0, 1, args)
else:
mp.spawn(train, args=(device_counts, args, ),
nprocs=device_counts, join=True)
logger.info(f"End time: {datetime.now()}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Training')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
main(args)
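# A minimal sketch of a single-node launch (script name and values are placeholders;
# the flags are defined in parse_args above):
#
#   python train.py \
#       --dataset ljspeech --dataset-path ./ \
#       --epochs 1501 --batch-size 32 --learning-rate 1e-3 \
#       --anneal-steps 500 1000 1500 --anneal-factor 0.1 \
#       --checkpoint-path ./ckpt.pth --logging-dir ./logs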
|
"""
Text-to-speech pipeline using Tacotron2.
"""
from functools import partial
import argparse
import os
import random
import sys
import torch
import torchaudio
import numpy as np
from torchaudio.models import Tacotron2
from torchaudio.models import tacotron2 as pretrained_tacotron2
from utils import prepare_input_sequence
from datasets import InverseSpectralNormalization
from text.text_preprocessing import (
available_symbol_set,
available_phonemizers,
get_symbol_list,
text_to_sequence,
)
def parse_args():
r"""
Parse commandline arguments.
"""
from torchaudio.models.tacotron2 import _MODEL_CONFIG_AND_URLS as tacotron2_config_and_urls
from torchaudio.models.wavernn import _MODEL_CONFIG_AND_URLS as wavernn_config_and_urls
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--checkpoint-name',
type=str,
default=None,
choices=list(tacotron2_config_and_urls.keys()),
help='[string] The name of the checkpoint to load.'
)
parser.add_argument(
'--checkpoint-path',
type=str,
default=None,
help='[string] Path to the checkpoint file.'
)
parser.add_argument(
'--output-path',
type=str,
default="./audio.wav",
help='[string] Path to the output .wav file.'
)
parser.add_argument(
'--input-text',
'-i',
type=str,
default="Hello world",
help='[string] Type in something here and TTS will generate it!'
)
parser.add_argument(
'--vocoder',
default='nvidia_waveglow',
choices=['griffin_lim', 'wavernn', 'nvidia_waveglow'],
type=str,
help="Select the vocoder to use.",
)
parser.add_argument(
"--jit",
default=False,
action="store_true",
help="If used, the model and inference function is jitted."
)
preprocessor = parser.add_argument_group('text preprocessor setup')
preprocessor.add_argument(
'--text-preprocessor',
default='english_characters',
type=str,
choices=available_symbol_set,
help='select text preprocessor to use.'
)
preprocessor.add_argument(
'--phonemizer',
default="DeepPhonemizer",
type=str,
choices=available_phonemizers,
help='select phonemizer to use, only used when text-preprocessor is "english_phonemes"'
)
preprocessor.add_argument(
'--phonemizer-checkpoint',
default="./en_us_cmudict_forward.pt",
type=str,
help='the path or name of the checkpoint for the phonemizer, '
'only used when text-preprocessor is "english_phonemes"'
)
preprocessor.add_argument(
'--cmudict-root',
default="./",
type=str,
help='the root directory for storing CMU dictionary files'
)
audio = parser.add_argument_group('audio parameters')
audio.add_argument(
'--sample-rate',
default=22050,
type=int,
help='Sampling rate'
)
audio.add_argument(
'--n-fft',
default=1024,
type=int,
help='Filter length for STFT'
)
audio.add_argument(
'--n-mels',
default=80,
type=int,
help='Number of mel bins'
)
audio.add_argument(
'--mel-fmin',
default=0.0,
type=float,
help='Minimum mel frequency'
)
audio.add_argument(
'--mel-fmax',
default=8000.0,
type=float,
help='Maximum mel frequency'
)
# parameters for WaveRNN
wavernn = parser.add_argument_group('WaveRNN parameters')
wavernn.add_argument(
'--wavernn-checkpoint-name',
default="wavernn_10k_epochs_8bits_ljspeech",
choices=list(wavernn_config_and_urls.keys()),
help="Select the WaveRNN checkpoint."
)
wavernn.add_argument(
"--wavernn-loss",
default="crossentropy",
choices=["crossentropy"],
type=str,
help="The type of loss the WaveRNN pretrained model is trained on.",
)
wavernn.add_argument(
"--wavernn-no-batch-inference",
default=False,
action="store_true",
help="Don't use batch inference for WaveRNN inference."
)
wavernn.add_argument(
"--wavernn-no-mulaw",
default=False,
action="store_true",
help="Don't use mulaw decoder to decode the signal."
)
wavernn.add_argument(
"--wavernn-batch-timesteps",
default=11000,
type=int,
help="The time steps for each batch. Only used when batch inference is used",
)
wavernn.add_argument(
"--wavernn-batch-overlap",
default=550,
type=int,
help="The overlapping time steps between batches. Only used when batch inference is used",
)
return parser
def unwrap_distributed(state_dict):
r"""torch.distributed.DistributedDataParallel wraps the model with an additional "module.".
This function unwraps this layer so that the weights can be loaded on models with a single GPU.
Args:
state_dict: Original state_dict.
Return:
unwrapped_state_dict: Unwrapped state_dict.
"""
return {k.replace('module.', ''): v for k, v in state_dict.items()}
def nvidia_waveglow_vocode(mel_specgram, device, jit=False):
waveglow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow', model_math='fp16')
waveglow = waveglow.remove_weightnorm(waveglow)
waveglow = waveglow.to(device)
waveglow.eval()
if jit:
raise ValueError("Vocoder option `nvidia_waveglow` is not jittable.")
with torch.no_grad():
waveform = waveglow.infer(mel_specgram).cpu()
return waveform
def wavernn_vocode(mel_specgram, wavernn_checkpoint_name, wavernn_loss, wavernn_no_mulaw,
wavernn_no_batch_inference, wavernn_batch_timesteps, wavernn_batch_overlap,
device, jit):
from torchaudio.models import wavernn
sys.path.append(os.path.join(os.path.dirname(__file__), "../pipeline_wavernn"))
from wavernn_inference_wrapper import WaveRNNInferenceWrapper
from processing import NormalizeDB
wavernn_model = wavernn(wavernn_checkpoint_name).eval().to(device)
wavernn_inference_model = WaveRNNInferenceWrapper(wavernn_model)
if jit:
wavernn_inference_model = torch.jit.script(wavernn_inference_model)
# WaveRNN spectro setting for default checkpoint
# n_fft = 2048
# n_mels = 80
# win_length = 1100
# hop_length = 275
# f_min = 40
# f_max = 11025
transforms = torch.nn.Sequential(
InverseSpectralNormalization(),
NormalizeDB(min_level_db=-100, normalization=True),
)
mel_specgram = transforms(mel_specgram.cpu())
with torch.no_grad():
waveform = wavernn_inference_model(mel_specgram.to(device),
loss_name=wavernn_loss,
mulaw=(not wavernn_no_mulaw),
batched=(not wavernn_no_batch_inference),
timesteps=wavernn_batch_timesteps,
overlap=wavernn_batch_overlap,)
return waveform.unsqueeze(0)
def griffin_lim_vocode(mel_specgram, n_fft, n_mels, sample_rate, mel_fmin, mel_fmax, jit, ):
from torchaudio.transforms import GriffinLim, InverseMelScale
inv_norm = InverseSpectralNormalization()
inv_mel = InverseMelScale(
n_stft=(n_fft // 2 + 1),
n_mels=n_mels,
sample_rate=sample_rate,
f_min=mel_fmin,
f_max=mel_fmax,
mel_scale="slaney",
norm='slaney',
)
griffin_lim = GriffinLim(
n_fft=n_fft,
power=1,
hop_length=256,
win_length=1024,
)
vocoder = torch.nn.Sequential(
inv_norm,
inv_mel,
griffin_lim
)
if jit:
vocoder = torch.jit.script(vocoder)
waveform = vocoder(mel_specgram.cpu())
return waveform
def main(args):
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
device = "cuda" if torch.cuda.is_available() else "cpu"
if args.checkpoint_path is None and args.checkpoint_name is None:
raise ValueError("Either --checkpoint-path or --checkpoint-name must be specified.")
elif args.checkpoint_path is not None and args.checkpoint_name is not None:
raise ValueError("Both --checkpoint-path and --checkpoint-name are specified, "
"can only specify one.")
n_symbols = len(get_symbol_list(args.text_preprocessor))
text_preprocessor = partial(
text_to_sequence,
symbol_list=args.text_preprocessor,
phonemizer=args.phonemizer,
checkpoint=args.phonemizer_checkpoint,
cmudict_root=args.cmudict_root,
)
if args.checkpoint_path is not None:
tacotron2 = Tacotron2(n_symbol=n_symbols)
tacotron2.load_state_dict(
unwrap_distributed(torch.load(args.checkpoint_path, map_location=device)['state_dict']))
tacotron2 = tacotron2.to(device).eval()
elif args.checkpoint_name is not None:
tacotron2 = pretrained_tacotron2(args.checkpoint_name).to(device).eval()
if n_symbols != tacotron2.n_symbols:
raise ValueError("the number of symbols for text_preprocessor ({n_symbols}) "
"should match the number of symbols for the"
"pretrained tacotron2 ({tacotron2.n_symbols}).")
if args.jit:
tacotron2 = torch.jit.script(tacotron2)
sequences, lengths = prepare_input_sequence([args.input_text],
text_processor=text_preprocessor)
sequences, lengths = sequences.long().to(device), lengths.long().to(device)
with torch.no_grad():
mel_specgram, _, _ = tacotron2.infer(sequences, lengths)
if args.vocoder == "nvidia_waveglow":
waveform = nvidia_waveglow_vocode(mel_specgram=mel_specgram, device=device, jit=args.jit)
elif args.vocoder == "wavernn":
waveform = wavernn_vocode(mel_specgram=mel_specgram,
wavernn_checkpoint_name=args.wavernn_checkpoint_name,
wavernn_loss=args.wavernn_loss,
wavernn_no_mulaw=args.wavernn_no_mulaw,
wavernn_no_batch_inference=args.wavernn_no_batch_inference,
wavernn_batch_timesteps=args.wavernn_batch_timesteps,
wavernn_batch_overlap=args.wavernn_batch_overlap,
device=device,
jit=args.jit)
elif args.vocoder == "griffin_lim":
waveform = griffin_lim_vocode(mel_specgram=mel_specgram,
n_fft=args.n_fft,
n_mels=args.n_mels,
sample_rate=args.sample_rate,
mel_fmin=args.mel_fmin,
mel_fmax=args.mel_fmax,
jit=args.jit)
torchaudio.save(args.output_path, waveform, args.sample_rate)
if __name__ == "__main__":
parser = parse_args()
args, _ = parser.parse_known_args()
main(args)
|
# *****************************************************************************
# Copyright (c) 2017 Keith Ito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# *****************************************************************************
"""
Modified from https://github.com/keithito/tacotron
"""
from typing import List, Union, Optional
import re
from unidecode import unidecode
from torchaudio.datasets import CMUDict
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'abcdefghijklmnopqrstuvwxyz'
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters)
_phonemizer = None
available_symbol_set = set(["english_characters", "english_phonemes"])
available_phonemizers = set(["DeepPhonemizer"])
def get_symbol_list(symbol_list: str = "english_characters",
cmudict_root: Optional[str] = "./") -> List[str]:
if symbol_list == "english_characters":
return [_pad] + list(_special) + list(_punctuation) + list(_letters)
elif symbol_list == "english_phonemes":
return [_pad] + list(_special) + list(_punctuation) + CMUDict(cmudict_root).symbols
else:
raise ValueError(f"The `symbol_list` {symbol_list} is not supported."
f"Supported `symbol_list` includes {available_symbol_set}.")
def word_to_phonemes(sent: str, phonemizer: str, checkpoint: str) -> List[str]:
if phonemizer == "DeepPhonemizer":
from dp.phonemizer import Phonemizer
global _phonemizer
_other_symbols = ''.join(list(_special) + list(_punctuation))
_phone_symbols_re = r'(\[[A-Z]+?\]|' + '[' + _other_symbols + '])' # [\[([A-Z]+?)\]|[-!'(),.:;? ]]
if _phonemizer is None:
# using a global variable so that we don't have to reload the checkpoint
# every time this function is called
_phonemizer = Phonemizer.from_checkpoint(checkpoint)
# Example:
# sent = "hello world!"
# '[HH][AH][L][OW] [W][ER][L][D]!'
sent = _phonemizer(sent, lang='en_us')
# ['[HH]', '[AH]', '[L]', '[OW]', ' ', '[W]', '[ER]', '[L]', '[D]', '!']
ret = re.findall(_phone_symbols_re, sent)
# ['HH', 'AH', 'L', 'OW', ' ', 'W', 'ER', 'L', 'D', '!']
ret = [r.replace("[", "").replace("]", "") for r in ret]
return ret
else:
raise ValueError(f"The `phonemizer` {phonemizer} is not supported. "
"Supported `symbol_list` includes `'DeepPhonemizer'`.")
def text_to_sequence(sent: str,
symbol_list: Union[str, List[str]] = "english_characters",
phonemizer: Optional[str] = "DeepPhonemizer",
checkpoint: Optional[str] = "./en_us_cmudict_forward.pt",
cmudict_root: Optional[str] = "./") -> List[int]:
r'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
Args:
sent (str): The input sentence to convert to a sequence.
symbol_list (str or List of string, optional): When the input is a string, available options include
"english_characters" and "english_phonemes". When the input is a list of string, ``symbol_list`` will
directly be used as the symbol to encode. (Default: "english_characters")
phonemizer (str or None, optional): The phonemizer to use. Only used when ``symbol_list`` is "english_phonemes".
Available options include "DeepPhonemizer". (Default: "DeepPhonemizer")
checkpoint (str or None, optional): The path to the checkpoint of the phonemizer. Only used when
``symbol_list`` is "english_phonemes". (Default: "./en_us_cmudict_forward.pt")
cmudict_root (str or None, optional): The path to the directory where the CMUDict dataset is found or
downloaded. Only used when ``symbol_list`` is "english_phonemes". (Default: "./")
Returns:
List of integers corresponding to the symbols in the sentence.
Examples:
>>> text_to_sequence("hello world!", "english_characters")
[19, 16, 23, 23, 26, 11, 34, 26, 29, 23, 15, 2]
>>> text_to_sequence("hello world!", "english_phonemes")
[54, 20, 65, 69, 11, 92, 44, 65, 38, 2]
'''
if symbol_list == "english_phonemes":
if any(param is None for param in [phonemizer, checkpoint, cmudict_root]):
raise ValueError(
"When `symbol_list` is 'english_phonemes', "
"all of `phonemizer`, `checkpoint`, and `cmudict_root` must be provided.")
sent = unidecode(sent) # convert to ascii
sent = sent.lower() # lower case
sent = normalize_numbers(sent) # expand numbers
for regex, replacement in _abbreviations: # expand abbreviations
sent = re.sub(regex, replacement, sent)
sent = re.sub(_whitespace_re, ' ', sent) # collapse whitespace
if isinstance(symbol_list, list):
symbols = symbol_list
elif isinstance(symbol_list, str):
symbols = get_symbol_list(symbol_list, cmudict_root=cmudict_root)
if symbol_list == "english_phonemes":
sent = word_to_phonemes(sent, phonemizer=phonemizer, checkpoint=checkpoint)
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
return [_symbol_to_id[s] for s in sent if s in _symbol_to_id]
|
# *****************************************************************************
# Copyright (c) 2017 Keith Ito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# *****************************************************************************
"""
Modified from https://github.com/keithito/tacotron
"""
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(text: str) -> str:
return re.sub(_comma_number_re, lambda m: m.group(1).replace(',', ''), text)
def _expand_pounds(text: str) -> str:
return re.sub(_pounds_re, r'\1 pounds', text)
def _expand_dollars_repl_fn(m):
"""The replacement function for expanding dollars."""
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
if len(parts) > 1 and parts[1]:
if len(parts[1]) == 1:
# handle the case where we have one digit after the decimal point
cents = int(parts[1]) * 10
else:
cents = int(parts[1])
else:
cents = 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_dollars(text: str) -> str:
return re.sub(_dollars_re, _expand_dollars_repl_fn, text)
def _expand_decimal_point(text: str) -> str:
return re.sub(_decimal_number_re, lambda m: m.group(1).replace('.', ' point '), text)
def _expand_ordinal(text: str) -> str:
return re.sub(_ordinal_re, lambda m: _inflect.number_to_words(m.group(0)), text)
def _expand_number_repl_fn(m):
"""The replacement function for expanding number."""
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
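# Illustrative outputs (assuming the inflect defaults): 2000 -> 'two thousand',
# 2005 -> 'two thousand five', 13 -> 'thirteen'.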
def _expand_number(text: str) -> str:
return re.sub(_number_re, _expand_number_repl_fn, text)
def normalize_numbers(text: str) -> str:
text = _remove_commas(text)
text = _expand_pounds(text)
text = _expand_dollars(text)
text = _expand_decimal_point(text)
text = _expand_ordinal(text)
text = _expand_number(text)
return text
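# Illustrative example (a minimal sketch, assuming the inflect defaults):
# normalize_numbers("I paid $1.50 for 2 books")
#   -> "I paid one dollar, fifty cents for two books"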
|
from . import utils, vad
__all__ = ['utils', 'vad']
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Run inference for pre-processed data with a trained model.
"""
import datetime as dt
import logging
from fairseq import options
from interactive_asr.utils import add_asr_eval_argument, setup_asr, get_microphone_transcription, transcribe_file
def main(args):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
task, generator, models, sp, tgt_dict = setup_asr(args, logger)
print("READY!")
if args.input_file:
transcription_time, transcription = transcribe_file(args, task, generator, models, sp, tgt_dict)
print("transcription:", transcription)
print("transcription_time:", transcription_time)
else:
for transcription in get_microphone_transcription(args, task, generator, models, sp, tgt_dict):
print(
"{}: {}".format(
dt.datetime.now().strftime("%H:%M:%S"), transcription[0][0]
)
)
def cli_main():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os
import sys
import time
import torch
import torchaudio
import sentencepiece as spm
from fairseq import tasks
from fairseq.utils import load_ensemble_for_inference, import_user_module
from interactive_asr.vad import get_microphone_chunks
def add_asr_eval_argument(parser):
parser.add_argument("--input_file", help="input file")
parser.add_argument("--ctc", action="store_true", help="decode a ctc model")
parser.add_argument("--rnnt", default=False, help="decode a rnnt model")
parser.add_argument("--kspmodel", default=None, help="sentence piece model")
parser.add_argument(
"--wfstlm", default=None, help="wfstlm on dictonary output units"
)
parser.add_argument(
"--rnnt_decoding_type",
default="greedy",
help="wfstlm on dictonary output units",
)
parser.add_argument(
"--lm_weight",
default=0.2,
help="weight for wfstlm while interpolating with neural score",
)
parser.add_argument(
"--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
)
return parser
def check_args(args):
assert args.path is not None, "--path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
def process_predictions(args, hypos, sp, tgt_dict):
res = []
device = torch.device("cuda:0" if torch.cuda.is_available() and not args.cpu else "cpu")
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().to(device))
hyp_words = sp.DecodePieces(hyp_pieces.split())
res.append(hyp_words)
return res
def optimize_models(args, use_cuda, models):
"""Optimize ensemble for generation
"""
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
def calc_mean_invstddev(feature):
if len(feature.shape) != 2:
raise ValueError("We expect the input feature to be 2-D tensor")
mean = torch.mean(feature, dim=0)
var = torch.var(feature, dim=0)
# avoid division by ~zero
if (var < sys.float_info.epsilon).any():
return mean, 1.0 / (torch.sqrt(var) + sys.float_info.epsilon)
return mean, 1.0 / torch.sqrt(var)
def calcMN(features):
mean, invstddev = calc_mean_invstddev(features)
res = (features - mean) * invstddev
return res
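# Note (descriptive): calcMN applies per-utterance CMVN. For a (num_frames, 80) fbank
# tensor `feat`, calcMN(feat) has ~zero mean and ~unit variance along each mel bin,
# up to the epsilon guard used when a bin has near-zero variance.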
def transcribe(waveform, args, task, generator, models, sp, tgt_dict):
num_features = 80
output = torchaudio.compliance.kaldi.fbank(waveform, num_mel_bins=num_features)
device = torch.device("cuda:0" if torch.cuda.is_available() and not args.cpu else "cpu")
output_cmvn = calcMN(output.to(device).detach())
# size (m, n)
source = output_cmvn
frames_lengths = torch.LongTensor([source.size(0)])
# size (1, m, n). In general, if source is (x, m, n), then hypos is (x, ...)
source.unsqueeze_(0)
sample = {"net_input": {"src_tokens": source, "src_lengths": frames_lengths}}
hypos = task.inference_step(generator, models, sample)
assert len(hypos) == 1
transcription = []
for i in range(len(hypos)):
# Process top predictions
hyp_words = process_predictions(args, hypos[i], sp, tgt_dict)
transcription.append(hyp_words)
return transcription
def setup_asr(args, logger):
check_args(args)
import_user_module(args)
if args.max_tokens is None and args.batch_size is None:
args.max_tokens = 30000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
# Set dictionary
tgt_dict = task.target_dictionary
if args.ctc or args.rnnt:
tgt_dict.add_symbol("<ctc_blank>")
if args.ctc:
logger.info("| decoding a ctc model")
if args.rnnt:
logger.info("| decoding a rnnt model")
# Load ensemble
logger.info("| loading model(s) from {}".format(args.path))
models, _model_args = load_ensemble_for_inference(
args.path.split(":"),
task,
model_arg_overrides=eval(args.model_overrides), # noqa
)
optimize_models(args, use_cuda, models)
# Initialize generator
generator = task.build_generator(models, args)
sp = spm.SentencePieceProcessor()
sp.Load(os.path.join(args.data, "spm.model"))
return task, generator, models, sp, tgt_dict
def transcribe_file(args, task, generator, models, sp, tgt_dict):
path = args.input_file
if not os.path.exists(path):
raise FileNotFoundError("Audio file not found: {}".format(path))
waveform, sample_rate = torchaudio.load_wav(path)
waveform = waveform.mean(0, True)
waveform = torchaudio.transforms.Resample(
orig_freq=sample_rate, new_freq=16000
)(waveform)
start = time.time()
transcription = transcribe(
waveform, args, task, generator, models, sp, tgt_dict
)
transcription_time = time.time() - start
return transcription_time, transcription
def get_microphone_transcription(args, task, generator, models, sp, tgt_dict):
for (waveform, sample_rate) in get_microphone_chunks():
waveform = torchaudio.transforms.Resample(
orig_freq=sample_rate, new_freq=16000
)(waveform.reshape(1, -1))
transcription = transcribe(
waveform, args, task, generator, models, sp, tgt_dict
)
yield transcription
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Following `a simple but efficient real-time voice activity detection algorithm
<https://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569192958.pdf>`__.
There are three criteria to decide if a frame contains speech: energy, most
dominant frequency, and spectral flatness. If any two of those are higher than
a minimum plus a threshold, then the frame contains speech. In the offline
case, the list of frames is postprocessed to remove too short silence and
speech sequences. In the online case here, inertia is added before switching
from speech to silence or vice versa.
"""
from collections import deque
import numpy as np
import torch
import queue
import librosa
import pyaudio
import torchaudio
def compute_spectral_flatness(frame, epsilon=0.01):
# epsilon protects against log(0)
geometric_mean = torch.exp((frame + epsilon).log().mean(-1)) - epsilon
arithmetic_mean = frame.mean(-1)
return -10 * torch.log10(epsilon + geometric_mean / arithmetic_mean)
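# Note (descriptive): for a flat spectrum (e.g. white noise) the geometric and arithmetic
# means are close, so the returned value is near 0 dB; strongly tonal frames (such as
# voiced speech) have a much smaller geometric mean, giving a larger positive value.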
class VoiceActivityDetection:
def __init__(
self,
num_init_frames=30,
ignore_silent_count=4,
ignore_speech_count=1,
energy_prim_thresh=60,
frequency_prim_thresh=10,
spectral_flatness_prim_thresh=3,
verbose=False,
):
self.num_init_frames = num_init_frames
self.ignore_silent_count = ignore_silent_count
self.ignore_speech_count = ignore_speech_count
self.energy_prim_thresh = energy_prim_thresh
self.frequency_prim_thresh = frequency_prim_thresh
self.spectral_flatness_prim_thresh = spectral_flatness_prim_thresh
self.verbose = verbose
self.speech_mark = True
self.silence_mark = False
self.silent_count = 0
self.speech_count = 0
self.n = 0
if self.verbose:
self.energy_list = []
self.frequency_list = []
self.spectral_flatness_list = []
def iter(self, frame):
frame_fft = torch.rfft(frame, 1)
amplitudes = torchaudio.functional.complex_norm(frame_fft)
# Compute frame energy
energy = frame.pow(2).sum(-1)
# Most dominant frequency component
frequency = amplitudes.argmax()
# Spectral flatness measure
spectral_flatness = compute_spectral_flatness(amplitudes)
if self.verbose:
self.energy_list.append(energy)
self.frequency_list.append(frequency)
self.spectral_flatness_list.append(spectral_flatness)
if self.n == 0:
self.min_energy = energy
self.min_frequency = frequency
self.min_spectral_flatness = spectral_flatness
elif self.n < self.num_init_frames:
self.min_energy = min(energy, self.min_energy)
self.min_frequency = min(frequency, self.min_frequency)
self.min_spectral_flatness = min(
spectral_flatness, self.min_spectral_flatness
)
self.n += 1
# Add 1. to avoid log(0)
thresh_energy = self.energy_prim_thresh * torch.log(1.0 + self.min_energy)
thresh_frequency = self.frequency_prim_thresh
thresh_spectral_flatness = self.spectral_flatness_prim_thresh
# Check all three conditions
counter = 0
if energy - self.min_energy >= thresh_energy:
counter += 1
if frequency - self.min_frequency >= thresh_frequency:
counter += 1
if spectral_flatness - self.min_spectral_flatness >= thresh_spectral_flatness:
counter += 1
# Detection
if counter > 1:
# Speech detected
self.speech_count += 1
# Inertia against switching
if (
self.n >= self.num_init_frames
and self.speech_count <= self.ignore_speech_count
):
# Too soon to change
return self.silence_mark
else:
self.silent_count = 0
return self.speech_mark
else:
# Silence detected
self.min_energy = ((self.silent_count * self.min_energy) + energy) / (
self.silent_count + 1
)
self.silent_count += 1
# Inertia against switching
if (
self.n >= self.num_init_frames
and self.silent_count <= self.ignore_silent_count
):
# Too soon to change
return self.speech_mark
else:
self.speech_count = 0
return self.silence_mark
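# Illustrative usage (a minimal sketch; the frame size and sample rate are assumptions):
# vad = VoiceActivityDetection()
# for frame in frames:                      # e.g. 2205-sample mono chunks at 22050 Hz
#     if vad.iter(torch.as_tensor(frame)):
#         ...                               # frame classified as speech; the first
#                                           # `num_init_frames` frames calibrate the minima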
class MicrophoneStream:
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(self, device=None, rate=22050, chunk=2205):
"""
The 22050 Hz sample rate is the librosa default, which is what our models were
trained on. The ratio [chunk / rate] is the amount of time between
processed audio chunks - for example, with these defaults,
an audio fragment will be processed every tenth of a second.
"""
self._rate = rate
self._chunk = chunk
self._device = device
# Create a thread-safe buffer of audio data
self._buff = queue.Queue()
self.closed = True
def __enter__(self):
self._audio_interface = pyaudio.PyAudio()
self._audio_stream = self._audio_interface.open(
# format=pyaudio.paInt16,
format=pyaudio.paFloat32,
# The API currently only supports 1-channel (mono) audio
# https://goo.gl/z757pE
channels=1,
rate=self._rate,
input=True,
frames_per_buffer=self._chunk,
input_device_index=self._device,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't
# overflow while the calling thread makes network requests, etc.
stream_callback=self._fill_buffer,
)
self.closed = False
return self
def __exit__(self, type, value, traceback):
self._audio_stream.stop_stream()
self._audio_stream.close()
self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
self._audio_interface.terminate()
def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
"""Continuously collect data from the audio stream, into the buffer."""
self._buff.put(in_data)
return None, pyaudio.paContinue
def generator(self):
while not self.closed:
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self._buff.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
except queue.Empty:
break
ans = np.frombuffer(b"".join(data), dtype=np.float32)
# yield uniform-sized chunks
ans = np.split(ans, np.shape(ans)[0] / self._chunk)
# Resample the audio to 22050, librosa default
for chunk in ans:
yield librosa.core.resample(chunk, self._rate, 22050)
def get_microphone_chunks(
min_to_cumulate=5, # 0.5 seconds
max_to_cumulate=100, # 10 seconds
precumulate=5,
max_to_visualize=100,
):
vad = VoiceActivityDetection()
cumulated = []
precumulated = deque(maxlen=precumulate)
with MicrophoneStream() as stream:
audio_generator = stream.generator()
chunk_length = stream._chunk
waveform = torch.zeros(max_to_visualize * chunk_length)
for chunk in audio_generator:
# Is speech?
chunk = torch.tensor(chunk)
is_speech = vad.iter(chunk)
# Cumulate speech
if is_speech or cumulated:
cumulated.append(chunk)
else:
precumulated.append(chunk)
if (not is_speech and len(cumulated) >= min_to_cumulate) or (
len(cumulated) > max_to_cumulate
):
waveform = torch.cat(list(precumulated) + cumulated, -1)
yield (waveform * stream._rate, stream._rate)
cumulated = []
precumulated = deque(maxlen=precumulate)
|
#!/usr/bin/env python3
"""Launch souce separation training.
This script runs training in Distributed Data Parallel (DDP) framework and has two major
operation modes. This behavior depends on if `--worker-id` argument is given or not.
1. (`--worker-id` is not given) Launchs worker subprocesses that performs the actual training.
2. (`--worker-id` is given) Performs the training as a part of distributed training.
When launching the script without any distributed trainig parameters (operation mode 1),
this script will check the number of GPUs available on the local system and spawns the same
number of training subprocesses (as operaiton mode 2). You can reduce the number of GPUs with
`--num-workers`. If there is no GPU available, only one subprocess is launched.
When launching the script as a worker process of a distributed training, you need to configure
the coordination of the workers.
"""
import sys
import logging
import argparse
import subprocess
import torch
from utils import dist_utils
_LG = dist_utils.getLogger(__name__)
def _parse_args(args=None):
max_world_size = torch.cuda.device_count() or 1
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument("--debug", action="store_true", help="Enable debug log")
group = parser.add_argument_group("Distributed Training")
group.add_argument(
"--worker-id",
type=int,
help=(
"If not provided, the launched process serves as a master process of "
"single-node, multi-worker training and spawns the worker subprocesses. "
"If provided, the launched process serves as a worker process, which "
"performs the actual training. The valid value is [0, --num-workers)."
),
)
group.add_argument(
"--device-id",
type=int,
help="The CUDA device ID. Allowed only when --worker-id is provided.",
)
group.add_argument(
"--num-workers",
type=int,
default=max_world_size,
help=(
"The size of distributed trainig workers. "
"If launching a training as single-node, multi-worker training, "
"(i.e. --worker-id is not provided) then this value should not exceed "
"the number of available GPUs. "
"If launching the training process as a multi-node, multi-gpu training, "
"(i.e. --worker-id is provided) then the value has to match "
f"the number of workers across nodes. (default: {max_world_size})"
),
)
group.add_argument(
"--sync-protocol",
type=str,
default="env://",
help=(
"Synchronization protocol for distributed training. "
"This value is passed as `init_method` argument of "
"`torch.distributed.init_process_group` function."
'If you are using `"env://"`, you can additionally configure '
'environment variables "MASTER_ADDR" and "MASTER_PORT". '
'If you are using `"file://..."`, then the process has to have '
"the access to the designated file. "
"See the documentation for `torch.distributed` for the detail. "
'If you are running the training in a single node, `"env://"` '
"should do. If you are running the training in multiple nodes, "
"you need to provide the file location where all the nodes have "
'access, using `"file://..."` protocol. (default: "env://")'
),
)
group.add_argument(
"--random-seed",
type=int,
help="Set random seed value. (default: None)",
)
parser.add_argument(
"rest", nargs=argparse.REMAINDER, help="Model-specific arguments."
)
namespace = parser.parse_args(args)
if namespace.worker_id is None:
if namespace.device_id is not None:
raise ValueError(
"`--device-id` cannot be provided when runing as master process."
)
if namespace.num_workers > max_world_size:
raise ValueError(
"--num-workers ({num_workers}) cannot exceed {device_count}."
)
if namespace.rest[:1] == ["--"]:
namespace.rest = namespace.rest[1:]
return namespace
def _main(cli_args):
args = _parse_args(cli_args)
if any(arg in ["--help", "-h"] for arg in args.rest):
_run_training(args.rest)
_init_logger(args.worker_id, args.debug)
if args.worker_id is None:
_run_training_subprocesses(args.num_workers, cli_args)
else:
dist_utils.setup_distributed(
world_size=args.num_workers,
rank=args.worker_id,
local_rank=args.device_id,
backend='nccl' if torch.cuda.is_available() else 'gloo',
init_method=args.sync_protocol,
)
if args.random_seed is not None:
torch.manual_seed(args.random_seed)
if torch.cuda.is_available():
torch.cuda.set_device(args.device_id)
_LG.info("CUDA device set to %s", args.device_id)
_run_training(args.rest)
def _run_training_subprocesses(num_workers, original_args):
workers = []
_LG.info("Spawning %s workers", num_workers)
for i in range(num_workers):
worker_arg = ["--worker-id", f"{i}", "--num-workers", f"{num_workers}"]
device_arg = ["--device-id", f"{i}"] if torch.cuda.is_available() else []
command = (
[sys.executable, "-u", sys.argv[0]]
+ worker_arg
+ device_arg
+ original_args
)
_LG.info("Launching worker %s: `%s`", i, " ".join(command))
worker = subprocess.Popen(command)
workers.append(worker)
num_failed = 0
for worker in workers:
worker.wait()
if worker.returncode != 0:
num_failed += 1
sys.exit(num_failed)
def _run_training(args):
import conv_tasnet.train
conv_tasnet.train.train(args)
def _init_logger(rank=None, debug=False):
worker_fmt = "[master]" if rank is None else f"[worker {rank:2d}]"
message_fmt = (
"%(levelname)5s: %(funcName)10s: %(message)s" if debug else "%(message)s"
)
logging.basicConfig(
level=logging.DEBUG if debug else logging.INFO,
format=f"%(asctime)s: {worker_fmt} {message_fmt}",
)
if __name__ == "__main__":
_main(sys.argv[1:])
|
#!/usr/bin/env python3
# pyre-strict
from pathlib import Path
from argparse import ArgumentParser
from typing import (
Any,
Callable,
Dict,
Mapping,
List,
Optional,
Tuple,
TypedDict,
Union,
)
import torch
import torchaudio
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.plugins import DDPPlugin
from torch import nn
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import DataLoader
from utils import metrics
from utils.dataset import utils as dataset_utils
class Batch(TypedDict):
mix: torch.Tensor # (batch, time)
src: torch.Tensor # (batch, source, time)
mask: torch.Tensor # (batch, source, time)
def sisdri_metric(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: torch.Tensor
) -> torch.Tensor:
"""Compute the improvement of scale-invariant SDR. (SI-SDRi).
Args:
estimate (torch.Tensor): Estimated source signals.
Tensor of dimension (batch, speakers, time)
reference (torch.Tensor): Reference (original) source signals.
Tensor of dimension (batch, speakers, time)
mix (torch.Tensor): Mixed source signals, from which the estimated signals were generated.
Tensor of dimension (batch, speakers == 1, time)
mask (torch.Tensor): Mask to indicate padded value (0) or valid value (1).
Tensor of dimension (batch, 1, time)
Returns:
torch.Tensor: Improved SI-SDR. Tensor of dimension (batch, )
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
"""
with torch.no_grad():
estimate = estimate - estimate.mean(axis=2, keepdim=True)
reference = reference - reference.mean(axis=2, keepdim=True)
mix = mix - mix.mean(axis=2, keepdim=True)
si_sdri = metrics.sdri(estimate, reference, mix, mask=mask)
return si_sdri.mean().item()
def sdri_metric(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: torch.Tensor,
) -> torch.Tensor:
"""Compute the improvement of SDR. (SDRi).
Args:
estimate (torch.Tensor): Estimated source signals.
Tensor of dimension (batch, speakers, time)
reference (torch.Tensor): Reference (original) source signals.
Tensor of dimension (batch, speakers, time)
mix (torch.Tensor): Mixed source signals, from which the estimated signals were generated.
Tensor of dimension (batch, speakers == 1, time)
mask (torch.Tensor): Mask to indicate padded value (0) or valid value (1).
Tensor of dimension (batch, 1, time)
Returns:
torch.Tensor: Improved SDR. Tensor of dimension (batch, )
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
"""
with torch.no_grad():
sdri = metrics.sdri(estimate, reference, mix, mask=mask)
return sdri.mean().item()
def si_sdr_loss(
estimate: torch.Tensor,
reference: torch.Tensor,
mask: torch.Tensor
) -> torch.Tensor:
"""Compute the Si-SDR loss.
Args:
estimate (torch.Tensor): Estimated source signals.
Tensor of dimension (batch, speakers, time)
reference (torch.Tensor): Reference (original) source signals.
Tensor of dimension (batch, speakers, time)
mask (torch.Tensor): Mask to indicate padded value (0) or valid value (1).
Tensor of dimension (batch, 1, time)
Returns:
torch.Tensor: Si-SDR loss. Tensor of dimension (batch, )
"""
estimate = estimate - estimate.mean(axis=2, keepdim=True)
reference = reference - reference.mean(axis=2, keepdim=True)
si_sdri = metrics.sdr_pit(estimate, reference, mask=mask)
return -si_sdri.mean()
class ConvTasNetModule(LightningModule):
"""
The Lightning Module for speech separation.
Args:
model (Any): The model to use for the source separation task.
train_loader (DataLoader): the training dataloader.
val_loader (DataLoader or None): the validation dataloader.
loss (Any): The loss function to use.
optim (Any): The optimizer to use.
metrics (List of methods): The metrics to track, which will be used for both train and validation.
lr_scheduler (Any or None): The LR Scheduler.
"""
def __init__(
self,
model: Any,
train_loader: DataLoader,
val_loader: Optional[DataLoader],
loss: Any,
optim: Any,
metrics: List[Any],
lr_scheduler: Optional[Any] = None,
) -> None:
super().__init__()
self.model: nn.Module = model
self.loss: nn.Module = loss
self.optim: torch.optim.Optimizer = optim
self.lr_scheduler: Optional[_LRScheduler] = None
if lr_scheduler:
self.lr_scheduler = lr_scheduler
self.metrics: Mapping[str, Callable] = metrics
self.train_metrics: Dict = {}
self.val_metrics: Dict = {}
self.test_metrics: Dict = {}
self.save_hyperparameters()
self.train_loader = train_loader
self.val_loader = val_loader
def setup(self, stage: Optional[str] = None) -> None:
if stage == "fit":
self.train_metrics.update(self.metrics)
self.val_metrics.update(self.metrics)
else:
self.test_metrics.update(self.metrics)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward defines the prediction/inference actions.
"""
return self.model(x)
def training_step(
self, batch: Batch, batch_idx: int, *args: Any, **kwargs: Any
) -> Dict[str, Any]:
return self._step(batch, batch_idx, "train")
def validation_step(
self, batch: Batch, batch_idx: int, *args: Any, **kwargs: Any
) -> Dict[str, Any]:
"""
Operates on a single batch of data from the validation set.
"""
return self._step(batch, batch_idx, "val")
def test_step(
self, batch: Batch, batch_idx: int, *args: Any, **kwargs: Any
) -> Optional[Dict[str, Any]]:
"""
Operates on a single batch of data from the test set.
"""
return self._step(batch, batch_idx, "test")
def _step(self, batch: Batch, batch_idx: int, phase_type: str) -> Dict[str, Any]:
"""
Common step for training, validation, and testing.
"""
mix, src, mask = batch
pred = self.model(mix)
loss = self.loss(pred, src, mask)
self.log(f"Losses/{phase_type}_loss", loss.item(), on_step=True, on_epoch=True)
metrics_result = self._compute_metrics(pred, src, mix, mask, phase_type)
self.log_dict(metrics_result, on_epoch=True)
return loss
def configure_optimizers(
self,
) -> Tuple[Any]:
lr_scheduler = self.lr_scheduler
if not lr_scheduler:
return self.optim
epoch_schedulers = {
'scheduler': lr_scheduler,
'monitor': 'Losses/val_loss',
'interval': 'epoch'
}
return [self.optim], [epoch_schedulers]
def _compute_metrics(
self,
pred: torch.Tensor,
label: torch.Tensor,
inputs: torch.Tensor,
mask: torch.Tensor,
phase_type: str,
) -> Dict[str, torch.Tensor]:
metrics_dict = getattr(self, f"{phase_type}_metrics")
metrics_result = {}
for name, metric in metrics_dict.items():
metrics_result[f"Metrics/{phase_type}/{name}"] = metric(pred, label, inputs, mask)
return metrics_result
def train_dataloader(self):
"""Training dataloader"""
return self.train_loader
def val_dataloader(self):
"""Validation dataloader"""
return self.val_loader
def _get_model(
num_sources,
enc_kernel_size=16,
enc_num_feats=512,
msk_kernel_size=3,
msk_num_feats=128,
msk_num_hidden_feats=512,
msk_num_layers=8,
msk_num_stacks=3,
msk_activate="relu",
):
model = torchaudio.models.ConvTasNet(
num_sources=num_sources,
enc_kernel_size=enc_kernel_size,
enc_num_feats=enc_num_feats,
msk_kernel_size=msk_kernel_size,
msk_num_feats=msk_num_feats,
msk_num_hidden_feats=msk_num_hidden_feats,
msk_num_layers=msk_num_layers,
msk_num_stacks=msk_num_stacks,
msk_activate=msk_activate,
)
return model
def _get_dataloader(
dataset_type: str,
root_dir: Union[str, Path],
num_speakers: int = 2,
sample_rate: int = 8000,
batch_size: int = 6,
num_workers: int = 4,
librimix_task: Optional[str] = None,
librimix_tr_split: Optional[str] = None,
) -> Tuple[DataLoader]:
"""Get dataloaders for training, validation, and testing.
Args:
dataset_type (str): the dataset to use.
root_dir (str or Path): the root directory of the dataset.
num_speakers (int, optional): the number of speakers in the mixture. (Default: 2)
sample_rate (int, optional): the sample rate of the audio. (Default: 8000)
batch_size (int, optional): the batch size of the dataset. (Default: 6)
num_workers (int, optional): the number of workers for each dataloader. (Default: 4)
librimix_task (str or None, optional): the task in LibriMix dataset.
librimix_tr_split (str or None, optional): the training split in LibriMix dataset.
Returns:
tuple: (train_loader, valid_loader, eval_loader)
"""
train_dataset, valid_dataset, eval_dataset = dataset_utils.get_dataset(
dataset_type, root_dir, num_speakers, sample_rate, librimix_task, librimix_tr_split
)
train_collate_fn = dataset_utils.get_collate_fn(
dataset_type, mode='train', sample_rate=sample_rate, duration=3
)
test_collate_fn = dataset_utils.get_collate_fn(dataset_type, mode='test', sample_rate=sample_rate)
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=train_collate_fn,
num_workers=num_workers,
drop_last=True,
)
valid_loader = DataLoader(
valid_dataset,
batch_size=batch_size,
collate_fn=test_collate_fn,
num_workers=num_workers,
drop_last=True,
)
eval_loader = DataLoader(
eval_dataset,
batch_size=batch_size,
collate_fn=test_collate_fn,
num_workers=num_workers,
)
return train_loader, valid_loader, eval_loader
def cli_main():
parser = ArgumentParser()
parser.add_argument("--batch-size", default=6, type=int)
parser.add_argument("--dataset", default="librimix", type=str, choices=["wsj0-mix", "librimix"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``Libri2Mix`` or ``Libri3Mix`` is stored.",
)
parser.add_argument(
"--librimix-tr-split",
default="train-360",
choices=["train-360", "train-100"],
help="The training partition of librimix dataset. (default: ``train-360``)",
)
parser.add_argument(
"--librimix-task",
default="sep_clean",
type=str,
choices=["sep_clean", "sep_noisy", "enh_single", "enh_both"],
help="The task to perform (separation or enhancement, noisy or clean). (default: ``sep_clean``)",
)
parser.add_argument(
"--num-speakers", default=2, type=int, help="The number of speakers in the mixture. (default: 2)"
)
parser.add_argument(
"--sample-rate",
default=8000,
type=int,
help="Sample rate of audio files in the given dataset. (default: 8000)",
)
parser.add_argument(
"--exp-dir",
default=Path("./exp"),
type=Path,
help="The directory to save checkpoints and logs."
)
parser.add_argument(
"--epochs",
metavar="NUM_EPOCHS",
default=200,
type=int,
help="The number of epochs to train. (default: 200)",
)
parser.add_argument(
"--learning-rate",
default=1e-3,
type=float,
help="Initial learning rate. (default: 1e-3)",
)
parser.add_argument(
"--num-gpu",
default=1,
type=int,
help="The number of GPUs for training. (default: 1)",
)
parser.add_argument(
"--num-node",
default=1,
type=int,
help="The number of nodes in the cluster for training. (default: 1)",
)
parser.add_argument(
"--num-workers",
default=4,
type=int,
help="The number of workers for dataloader. (default: 4)",
)
args = parser.parse_args()
model = _get_model(num_sources=args.num_speakers)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode="min", factor=0.5, patience=5
)
train_loader, valid_loader, eval_loader = _get_dataloader(
args.dataset,
args.root_dir,
args.num_speakers,
args.sample_rate,
args.batch_size,
args.num_workers,
args.librimix_task,
args.librimix_tr_split,
)
loss = si_sdr_loss
metric_dict = {
"sdri": sdri_metric,
"sisdri": sisdri_metric,
}
model = ConvTasNetModule(
model=model,
train_loader=train_loader,
val_loader=valid_loader,
loss=loss,
optim=optimizer,
metrics=metric_dict,
lr_scheduler=lr_scheduler,
)
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=True,
verbose=True
)
callbacks = [
checkpoint,
EarlyStopping(monitor="Losses/val_loss", mode="min", patience=30, verbose=True),
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
gpus=args.num_gpu,
num_nodes=args.num_node,
accelerator="ddp",
plugins=DDPPlugin(find_unused_parameters=False), # make sure there is no unused params
limit_train_batches=1.0, # Useful for fast experiment
gradient_clip_val=5.0,
callbacks=callbacks,
)
trainer.fit(model)
model.load_from_checkpoint(checkpoint.best_model_path)
state_dict = torch.load(checkpoint.best_model_path, map_location="cpu")
state_dict = {k.replace("model.", ""): v for k, v in state_dict["state_dict"].items()}
torch.save(state_dict, args.exp_dir / "best_model.pth")
trainer.test(model, eval_loader)
if __name__ == "__main__":
cli_main()
|
from argparse import ArgumentParser
from pathlib import Path
from lightning_train import _get_model, _get_dataloader, sisdri_metric
import mir_eval
import torch
def _eval(model, data_loader, device):
results = torch.zeros(4)
with torch.no_grad():
for _, batch in enumerate(data_loader):
mix, src, mask = batch
mix, src, mask = mix.to(device), src.to(device), mask.to(device)
est = model(mix)
sisdri = sisdri_metric(est, src, mix, mask)
src = src.cpu().detach().numpy()
est = est.cpu().detach().numpy()
mix = mix.repeat(1, src.shape[1], 1).cpu().detach().numpy()
sdr, sir, sar, _ = mir_eval.separation.bss_eval_sources(src[0], est[0])
sdr_mix, sir_mix, sar_mix, _ = mir_eval.separation.bss_eval_sources(src[0], mix[0])
results += torch.tensor([
sdr.mean() - sdr_mix.mean(),
sisdri,
sir.mean() - sir_mix.mean(),
sar.mean() - sar_mix.mean()
])
results /= len(data_loader)
print("SDR improvement: ", results[0].item())
print("Si-SDR improvement: ", results[1].item())
print("SIR improvement: ", results[2].item())
print("SAR improvement: ", results[3].item())
def cli_main():
parser = ArgumentParser()
parser.add_argument("--dataset", default="librimix", type=str, choices=["wsj0-mix", "librimix"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``Libri2Mix`` or ``Libri3Mix`` is stored.",
)
parser.add_argument(
"--librimix-tr-split",
default="train-360",
choices=["train-360", "train-100"],
help="The training partition of librimix dataset. (default: ``train-360``)",
)
parser.add_argument(
"--librimix-task",
default="sep_clean",
type=str,
choices=["sep_clean", "sep_noisy", "enh_single", "enh_both"],
help="The task to perform (separation or enhancement, noisy or clean). (default: ``sep_clean``)",
)
parser.add_argument(
"--num-speakers", default=2, type=int, help="The number of speakers in the mixture. (default: 2)"
)
parser.add_argument(
"--sample-rate",
default=8000,
type=int,
help="Sample rate of audio files in the given dataset. (default: 8000)",
)
parser.add_argument(
"--exp-dir",
default=Path("./exp"),
type=Path,
help="The directory to save checkpoints and logs."
)
parser.add_argument(
"--gpu-device",
default=-1,
type=int,
help="The gpu device for model inference. (default: -1)"
)
args = parser.parse_args()
model = _get_model(num_sources=2)
state_dict = torch.load(args.exp_dir / 'best_model.pth')
model.load_state_dict(state_dict)
if args.gpu_device != -1:
device = torch.device('cuda:' + str(args.gpu_device))
else:
device = torch.device('cpu')
model = model.to(device)
_, _, eval_loader = _get_dataloader(
args.dataset,
args.root_dir,
args.num_speakers,
args.sample_rate,
1, # batch size is set to 1 to avoid masking
0, # set num_workers to 0
args.librimix_task,
args.librimix_tr_split,
)
_eval(model, eval_loader, device)
if __name__ == "__main__":
cli_main()
|
import math
from typing import Optional
from itertools import permutations
import torch
def sdr(
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8
) -> torch.Tensor:
"""Computes source-to-distortion ratio.
1. scale the reference signal with power(s_est * s_ref) / power(s_ref * s_ref)
2. compute SNR between adjusted estimate and reference.
Args:
estimate (torch.Tensor): Estimated signal.
Shape: [batch, speakers (can be 1), time frame]
reference (torch.Tensor): Reference signal.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: scale-invariant source-to-distortion ratio.
Shape: [batch, speaker]
References:
- Single-channel multi-speaker separation using deep clustering
Y. Isik, J. Le Roux, Z. Chen, S. Watanabe, and J. R. Hershey,
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
Notes:
This function is tested to produce the exact same result as
https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L34-L56
"""
reference_pow = reference.pow(2).mean(axis=2, keepdim=True)
mix_pow = (estimate * reference).mean(axis=2, keepdim=True)
scale = mix_pow / (reference_pow + epsilon)
reference = scale * reference
error = estimate - reference
reference_pow = reference.pow(2)
error_pow = error.pow(2)
if mask is None:
reference_pow = reference_pow.mean(axis=2)
error_pow = error_pow.mean(axis=2)
else:
denom = mask.sum(axis=2)
reference_pow = (mask * reference_pow).sum(axis=2) / denom
error_pow = (mask * error_pow).sum(axis=2) / denom
return 10 * torch.log10(reference_pow) - 10 * torch.log10(error_pow)
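# Quick sanity check (illustrative; exact values depend on epsilon):
# x = torch.randn(3, 2, 8000)
# sdr(x, x) is very large (the error term is ~0), and sdr(x, -x) matches it, because the
# scaling step above makes the metric invariant to the scale (and sign) of the reference.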
class PIT(torch.nn.Module):
"""Applies utterance-level speaker permutation
Computes the maximum possible value of the given utility function
over the permutations of the speakers.
Args:
utility_func (function):
Function that computes the utility (opposite of loss) with signature of
(estimate: torch.Tensor, reference: torch.Tensor) -> torch.Tensor
where input Tensors are shape of [batch, speakers, frame] and
the output Tensor is shape of [batch, speakers].
References:
- Multi-talker Speech Separation with Utterance-level Permutation Invariant Training of
Deep Recurrent Neural Networks
Morten Kolbæk, Dong Yu, Zheng-Hua Tan and Jesper Jensen
https://arxiv.org/abs/1703.06284
"""
def __init__(self, utility_func):
super().__init__()
self.utility_func = utility_func
def forward(
self,
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8
) -> torch.Tensor:
"""Compute utterance-level PIT Loss
Args:
estimate (torch.Tensor): Estimated source signals.
Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: Maximum criterion over the speaker permutation.
Shape: [batch, ]
"""
assert estimate.shape == reference.shape
batch_size, num_speakers = reference.shape[:2]
num_permute = math.factorial(num_speakers)
util_mat = torch.zeros(
batch_size, num_permute, dtype=estimate.dtype, device=estimate.device
)
for i, idx in enumerate(permutations(range(num_speakers))):
util = self.utility_func(estimate, reference[:, idx, :], mask=mask, epsilon=epsilon)
util_mat[:, i] = util.mean(dim=1) # take the average over speaker dimension
return util_mat.max(dim=1).values
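# Illustrative usage (a minimal sketch):
# pit_sdr = PIT(utility_func=sdr)
# est = torch.randn(4, 2, 8000)
# ref = est[:, [1, 0], :]   # same sources with the speaker order swapped
# pit_sdr(est, ref)         # the permutation search recovers the swap, so the result is
#                           # as high as comparing `est` against itself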
_sdr_pit = PIT(utility_func=sdr)
def sdr_pit(
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8):
"""Computes scale-invariant source-to-distortion ratio.
1. adjust both estimate and reference to have 0-mean
2. scale the reference signal with power(s_est * s_ref) / power(s_ref * s_ref)
3. compute SNR between adjusted estimate and reference.
Args:
estimate (torch.Tensor): Estimated signal.
Shape: [batch, speakers (can be 1), time frame]
reference (torch.Tensor): Reference signal.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: scale-invariant source-to-distortion ratio.
Shape: [batch, speaker]
References:
- Single-channel multi-speaker separation using deep clustering
Y. Isik, J. Le Roux, Z. Chen, S. Watanabe, and J. R. Hershey,
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
Notes:
This function is tested to produce the exact same result as the reference implementation,
*when the inputs have 0-mean*
https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L107-L153
"""
return _sdr_pit(estimate, reference, mask, epsilon)
def sdri(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8,
) -> torch.Tensor:
"""Compute the improvement of SDR (SDRi).
This function computes how much the SDR improves when the estimate is changed from
the original mixture signal to the actual estimated source signals. That is,
``SDR(estimate, reference) - SDR(mix, reference)``.
For computing ``SDR(estimate, reference)``, PIT (permutation invariant training) is applied,
so that the best matching between the reference signals and the estimated signals
is picked.
Args:
estimate (torch.Tensor): Estimated source signals.
Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
mix (torch.Tensor): Mixed source signals, from which the estimated signals were generated.
Shape: [batch, speakers == 1, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: Improved SDR. Shape: [batch, ]
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
"""
sdr_ = sdr_pit(estimate, reference, mask=mask, epsilon=epsilon) # [batch, ]
base_sdr = sdr(mix, reference, mask=mask, epsilon=epsilon) # [batch, speaker]
return sdr_ - base_sdr.mean(dim=1)
|
from . import (
dataset,
dist_utils,
metrics,
)
__all__ = ['dataset', 'dist_utils', 'metrics']
|
import os
import csv
import types
import logging
import torch
import torch.distributed as dist
def _info_on_master(self, *args, **kwargs):
if dist.get_rank() == 0:
self.info(*args, **kwargs)
def getLogger(name):
"""Get logging.Logger module with additional ``info_on_master`` method."""
logger = logging.getLogger(name)
logger.info_on_master = types.MethodType(_info_on_master, logger)
return logger
_LG = getLogger(__name__)
def setup_distributed(
world_size, rank, local_rank, backend="nccl", init_method="env://"
):
"""Perform env setup and initialization for distributed training"""
if init_method == "env://":
_set_env_vars(world_size, rank, local_rank)
if world_size > 1 and "OMP_NUM_THREADS" not in os.environ:
_LG.info("Setting OMP_NUM_THREADS == 1")
os.environ["OMP_NUM_THREADS"] = "1"
params = {
"backend": backend,
"init_method": init_method,
"world_size": world_size,
"rank": rank,
}
_LG.info("Initializing distributed process group with %s", params)
dist.init_process_group(**params)
_LG.info("Initialized distributed process group.")
def _set_env_vars(world_size, rank, local_rank):
for key, default in [("MASTER_ADDR", "127.0.0.1"), ("MASTER_PORT", "29500")]:
if key not in os.environ:
os.environ[key] = default
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["RANK"] = str(rank)
os.environ["LOCAL_RANK"] = str(local_rank)
def save_on_master(path, obj):
if dist.get_rank() == 0:
_LG.info("Saving %s", path)
torch.save(obj, path)
def write_csv_on_master(path, *rows):
if dist.get_rank() == 0:
with open(path, "a", newline="") as fileobj:
writer = csv.writer(fileobj)
for row in rows:
writer.writerow(row)
def synchronize_params(path, device, *modules):
if dist.get_world_size() < 2:
return
rank = dist.get_rank()
if rank == 0:
_LG.info("[Parameter Sync]: Saving parameters to a temp file...")
torch.save({f"{i}": m.state_dict() for i, m in enumerate(modules)}, path)
dist.barrier()
if rank != 0:
_LG.info("[Parameter Sync]: Loading parameters...")
data = torch.load(path, map_location=device)
for i, m in enumerate(modules):
m.load_state_dict(data[f"{i}"])
dist.barrier()
if rank == 0:
_LG.info("[Parameter Sync]: Removing the temp file...")
os.remove(path)
_LG.info_on_master("[Parameter Sync]: Complete.")
|
from . import utils, wsj0mix
__all__ = ['utils', 'wsj0mix']
|
from typing import List
from functools import partial
from collections import namedtuple
from torchaudio.datasets import LibriMix
import torch
from . import wsj0mix
Batch = namedtuple("Batch", ["mix", "src", "mask"])
def get_dataset(dataset_type, root_dir, num_speakers, sample_rate, task=None, librimix_tr_split=None):
if dataset_type == "wsj0mix":
train = wsj0mix.WSJ0Mix(root_dir / "tr", num_speakers, sample_rate)
validation = wsj0mix.WSJ0Mix(root_dir / "cv", num_speakers, sample_rate)
evaluation = wsj0mix.WSJ0Mix(root_dir / "tt", num_speakers, sample_rate)
elif dataset_type == "librimix":
train = LibriMix(root_dir, librimix_tr_split, num_speakers, sample_rate, task)
validation = LibriMix(root_dir, "dev", num_speakers, sample_rate, task)
evaluation = LibriMix(root_dir, "test", num_speakers, sample_rate, task)
else:
raise ValueError(f"Unexpected dataset: {dataset_type}")
return train, validation, evaluation
def _fix_num_frames(sample: wsj0mix.SampleType, target_num_frames: int, sample_rate: int, random_start=False):
"""Ensure waveform has exact number of frames by slicing or padding"""
mix = sample[1] # [1, time]
src = torch.cat(sample[2], 0) # [num_sources, time]
num_channels, num_frames = src.shape
num_seconds = torch.div(num_frames, sample_rate, rounding_mode='floor')
target_seconds = torch.div(target_num_frames, sample_rate, rounding_mode='floor')
if num_frames >= target_num_frames:
if random_start and num_frames > target_num_frames:
start_frame = torch.randint(num_seconds - target_seconds + 1, [1]) * sample_rate
mix = mix[:, start_frame:]
src = src[:, start_frame:]
mix = mix[:, :target_num_frames]
src = src[:, :target_num_frames]
mask = torch.ones_like(mix)
else:
num_padding = target_num_frames - num_frames
pad = torch.zeros([1, num_padding], dtype=mix.dtype, device=mix.device)
mix = torch.cat([mix, pad], 1)
src = torch.cat([src, pad.expand(num_channels, -1)], 1)
mask = torch.ones_like(mix)
mask[..., num_frames:] = 0
return mix, src, mask
def collate_fn_wsj0mix_train(samples: List[wsj0mix.SampleType], sample_rate, duration):
target_num_frames = int(duration * sample_rate)
mixes, srcs, masks = [], [], []
for sample in samples:
mix, src, mask = _fix_num_frames(sample, target_num_frames, sample_rate, random_start=True)
mixes.append(mix)
srcs.append(src)
masks.append(mask)
return Batch(torch.stack(mixes, 0), torch.stack(srcs, 0), torch.stack(masks, 0))
def collate_fn_wsj0mix_test(samples: List[wsj0mix.SampleType], sample_rate):
max_num_frames = max(s[1].shape[-1] for s in samples)
mixes, srcs, masks = [], [], []
for sample in samples:
mix, src, mask = _fix_num_frames(sample, max_num_frames, sample_rate, random_start=False)
mixes.append(mix)
srcs.append(src)
masks.append(mask)
return Batch(torch.stack(mixes, 0), torch.stack(srcs, 0), torch.stack(masks, 0))
def get_collate_fn(dataset_type, mode, sample_rate=None, duration=4):
assert mode in ["train", "test"]
if dataset_type in ["wsj0mix", "librimix"]:
if mode == 'train':
if sample_rate is None:
raise ValueError("sample_rate is not given.")
return partial(collate_fn_wsj0mix_train, sample_rate=sample_rate, duration=duration)
return partial(collate_fn_wsj0mix_test, sample_rate=sample_rate)
raise ValueError(f"Unexpected dataset: {dataset_type}")
|
from pathlib import Path
from typing import Union, Tuple, List
import torch
from torch.utils.data import Dataset
import torchaudio
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class WSJ0Mix(Dataset):
"""Create a Dataset for wsj0-mix.
Args:
root (str or Path): Path to the directory where the dataset is found.
num_speakers (int): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios.
sample_rate (int): Expected sample rate of audio files. If any of the audio has a
different sample rate, raises ``ValueError``.
audio_ext (str, optional): The extension of audio files to find. (default: ".wav")
"""
def __init__(
self,
root: Union[str, Path],
num_speakers: int,
sample_rate: int,
audio_ext: str = ".wav",
):
self.root = Path(root)
self.sample_rate = sample_rate
self.mix_dir = (self.root / "mix").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob(f"*{audio_ext}")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(
f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}"
)
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
tuple: ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
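# A minimal usage sketch. The directory below is a hypothetical placeholder;
# point it at a real wsj0-mix split (e.g. ``.../2speakers/wav8k/min/tr``).
if __name__ == "__main__":
    dataset = WSJ0Mix("./wsj0-mix/2speakers/wav8k/min/tr", num_speakers=2, sample_rate=8000)
    print("Number of mixtures:", len(dataset))
    sample_rate, mixture, sources = dataset[0]
    print(sample_rate, mixture.shape, [s.shape for s in sources])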
|
from . import (
train,
trainer
)
__all__ = ['train', 'trainer']
|
#!/usr/bin/env python3
"""Train Conv-TasNet"""
import time
import pathlib
import argparse
import torch
import torchaudio
import torchaudio.models
import conv_tasnet
from utils import dist_utils
from utils.dataset import utils as dataset_utils
_LG = dist_utils.getLogger(__name__)
def _parse_args(args):
parser = argparse.ArgumentParser(description=__doc__,)
parser.add_argument(
"--debug",
action="store_true",
help="Enable debug behavior. Each epoch will end with just one batch.")
group = parser.add_argument_group("Model Options")
group.add_argument(
"--num-speakers", required=True, type=int, help="The number of speakers."
)
group = parser.add_argument_group("Dataset Options")
group.add_argument(
"--sample-rate",
required=True,
type=int,
help="Sample rate of audio files in the given dataset.",
)
group.add_argument(
"--dataset",
default="wsj0mix",
choices=["wsj0mix"],
help='Dataset type. (default: "wsj0mix")',
)
group.add_argument(
"--dataset-dir",
required=True,
type=pathlib.Path,
help=(
"Directory where dataset is found. "
'If the dataset type is "wsj9mix", then this is the directory where '
'"cv", "tt" and "tr" subdirectories are found.'
),
)
group = parser.add_argument_group("Save Options")
group.add_argument(
"--save-dir",
required=True,
type=pathlib.Path,
help=(
"Directory where the checkpoints and logs are saved. "
"Though, only the worker 0 saves checkpoint data, "
"all the worker processes must have access to the directory."
),
)
group = parser.add_argument_group("Dataloader Options")
group.add_argument(
"--batch-size",
type=int,
help="Batch size. (default: 16 // world_size)",
)
group = parser.add_argument_group("Training Options")
group.add_argument(
"--epochs",
metavar="NUM_EPOCHS",
default=100,
type=int,
help="The number of epochs to train. (default: 100)",
)
group.add_argument(
"--learning-rate",
default=1e-3,
type=float,
help="Initial learning rate. (default: 1e-3)",
)
group.add_argument(
"--grad-clip",
metavar="CLIP_VALUE",
default=5.0,
type=float,
help="Gradient clip value (l2 norm). (default: 5.0)",
)
group.add_argument(
"--resume",
metavar="CHECKPOINT_PATH",
help="Previous checkpoint file from which the training is resumed.",
)
args = parser.parse_args(args)
    # Delaying the default value initialization until parse_args is done, because
# if `--help` is given, distributed training is not enabled.
if args.batch_size is None:
args.batch_size = 16 // torch.distributed.get_world_size()
return args
def _get_model(
num_sources,
enc_kernel_size=16,
enc_num_feats=512,
msk_kernel_size=3,
msk_num_feats=128,
msk_num_hidden_feats=512,
msk_num_layers=8,
msk_num_stacks=3,
):
model = torchaudio.models.ConvTasNet(
num_sources=num_sources,
enc_kernel_size=enc_kernel_size,
enc_num_feats=enc_num_feats,
msk_kernel_size=msk_kernel_size,
msk_num_feats=msk_num_feats,
msk_num_hidden_feats=msk_num_hidden_feats,
msk_num_layers=msk_num_layers,
msk_num_stacks=msk_num_stacks,
)
_LG.info_on_master("Model Configuration:")
_LG.info_on_master(" - N: %d", enc_num_feats)
_LG.info_on_master(" - L: %d", enc_kernel_size)
_LG.info_on_master(" - B: %d", msk_num_feats)
_LG.info_on_master(" - H: %d", msk_num_hidden_feats)
_LG.info_on_master(" - Sc: %d", msk_num_feats)
_LG.info_on_master(" - P: %d", msk_kernel_size)
_LG.info_on_master(" - X: %d", msk_num_layers)
_LG.info_on_master(" - R: %d", msk_num_stacks)
_LG.info_on_master(
" - Receptive Field: %s [samples]", model.mask_generator.receptive_field,
)
return model
def _get_dataloader(dataset_type, dataset_dir, num_speakers, sample_rate, batch_size, task=None):
train_dataset, valid_dataset, eval_dataset = dataset_utils.get_dataset(
dataset_type, dataset_dir, num_speakers, sample_rate, task
)
train_collate_fn = dataset_utils.get_collate_fn(
dataset_type, mode='train', sample_rate=sample_rate, duration=4
)
test_collate_fn = dataset_utils.get_collate_fn(dataset_type, mode='test')
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
sampler=torch.utils.data.distributed.DistributedSampler(train_dataset),
collate_fn=train_collate_fn,
pin_memory=True,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=batch_size,
sampler=torch.utils.data.distributed.DistributedSampler(valid_dataset),
collate_fn=test_collate_fn,
pin_memory=True,
)
eval_loader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=batch_size,
sampler=torch.utils.data.distributed.DistributedSampler(eval_dataset),
collate_fn=test_collate_fn,
pin_memory=True,
)
return train_loader, valid_loader, eval_loader
def _write_header(log_path, args):
rows = [
[f"# torch: {torch.__version__}", ],
[f"# torchaudio: {torchaudio.__version__}", ]
]
rows.append(["# arguments"])
for key, item in vars(args).items():
rows.append([f"# {key}: {item}"])
dist_utils.write_csv_on_master(log_path, *rows)
def train(args):
args = _parse_args(args)
_LG.info("%s", args)
args.save_dir.mkdir(parents=True, exist_ok=True)
if "sox_io" in torchaudio.list_audio_backends():
torchaudio.set_audio_backend("sox_io")
start_epoch = 1
if args.resume:
checkpoint = torch.load(args.resume)
if args.sample_rate != checkpoint["sample_rate"]:
raise ValueError(
"The provided sample rate ({args.sample_rate}) does not match "
"the sample rate from the check point ({checkpoint['sample_rate']})."
)
if args.num_speakers != checkpoint["num_speakers"]:
raise ValueError(
"The provided #of speakers ({args.num_speakers}) does not match "
"the #of speakers from the check point ({checkpoint['num_speakers']}.)"
)
start_epoch = checkpoint["epoch"]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_LG.info("Using: %s", device)
model = _get_model(num_sources=args.num_speakers)
model.to(device)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[device] if torch.cuda.is_available() else None
)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
if args.resume:
_LG.info("Loading parameters from the checkpoint...")
model.module.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
else:
dist_utils.synchronize_params(
str(args.save_dir / "tmp.pt"), device, model, optimizer
)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode="max", factor=0.5, patience=3
)
train_loader, valid_loader, eval_loader = _get_dataloader(
args.dataset,
args.dataset_dir,
args.num_speakers,
args.sample_rate,
args.batch_size,
)
num_train_samples = len(train_loader.dataset)
num_valid_samples = len(valid_loader.dataset)
num_eval_samples = len(eval_loader.dataset)
_LG.info_on_master("Datasets:")
_LG.info_on_master(" - Train: %s", num_train_samples)
_LG.info_on_master(" - Valid: %s", num_valid_samples)
_LG.info_on_master(" - Eval: %s", num_eval_samples)
trainer = conv_tasnet.trainer.Trainer(
model,
optimizer,
train_loader,
valid_loader,
eval_loader,
args.grad_clip,
device,
debug=args.debug,
)
log_path = args.save_dir / "log.csv"
_write_header(log_path, args)
dist_utils.write_csv_on_master(
log_path,
[
"epoch",
"learning_rate",
"valid_si_snri",
"valid_sdri",
"eval_si_snri",
"eval_sdri",
],
)
_LG.info_on_master("Running %s epochs", args.epochs)
for epoch in range(start_epoch, start_epoch + args.epochs):
_LG.info_on_master("=" * 70)
_LG.info_on_master("Epoch: %s", epoch)
_LG.info_on_master("Learning rate: %s", optimizer.param_groups[0]["lr"])
_LG.info_on_master("=" * 70)
t0 = time.monotonic()
trainer.train_one_epoch()
train_sps = num_train_samples / (time.monotonic() - t0)
_LG.info_on_master("-" * 70)
t0 = time.monotonic()
valid_metric = trainer.validate()
valid_sps = num_valid_samples / (time.monotonic() - t0)
_LG.info_on_master("Valid: %s", valid_metric)
_LG.info_on_master("-" * 70)
t0 = time.monotonic()
eval_metric = trainer.evaluate()
eval_sps = num_eval_samples / (time.monotonic() - t0)
_LG.info_on_master(" Eval: %s", eval_metric)
_LG.info_on_master("-" * 70)
_LG.info_on_master("Train: Speed: %6.2f [samples/sec]", train_sps)
_LG.info_on_master("Valid: Speed: %6.2f [samples/sec]", valid_sps)
_LG.info_on_master(" Eval: Speed: %6.2f [samples/sec]", eval_sps)
_LG.info_on_master("-" * 70)
dist_utils.write_csv_on_master(
log_path,
[
epoch,
optimizer.param_groups[0]["lr"],
valid_metric.si_snri,
valid_metric.sdri,
eval_metric.si_snri,
eval_metric.sdri,
],
)
lr_scheduler.step(valid_metric.si_snri)
save_path = args.save_dir / f"epoch_{epoch}.pt"
dist_utils.save_on_master(
save_path,
{
"model": model.module.state_dict(),
"optimizer": optimizer.state_dict(),
"num_speakers": args.num_speakers,
"sample_rate": args.sample_rate,
"epoch": epoch,
},
)
|
import time
from typing import Tuple
from collections import namedtuple
import torch
import torch.distributed as dist
from utils import dist_utils, metrics
_LG = dist_utils.getLogger(__name__)
Metric = namedtuple("SNR", ["si_snri", "sdri"])
Metric.__str__ = (
lambda self: f"SI-SNRi: {self.si_snri:10.3e}, SDRi: {self.sdri:10.3e}"
)
def si_sdr_improvement(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute the improvement of scale-invariant SDR. (SI-SNRi) and bare SDR (SDRi).
Args:
estimate (torch.Tensor): Estimated source signals.
Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
        mix (torch.Tensor): Mixed source signals, from which the estimated signals were generated.
Shape: [batch, speakers == 1, time frame]
mask (torch.Tensor): Mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
Returns:
        torch.Tensor: SI-SDR improvement (SI-SNRi). Shape: [batch, ]
        torch.Tensor: SDR improvement (SDRi). Shape: [batch, ]
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
"""
with torch.no_grad():
sdri = metrics.sdri(estimate, reference, mix, mask=mask)
estimate = estimate - estimate.mean(axis=2, keepdim=True)
reference = reference - reference.mean(axis=2, keepdim=True)
mix = mix - mix.mean(axis=2, keepdim=True)
si_sdri = metrics.sdri(estimate, reference, mix, mask=mask)
return si_sdri, sdri
class OccasionalLogger:
"""Simple helper class to log once in a while or when progress is quick enough"""
def __init__(self, time_interval=180, progress_interval=0.1):
self.time_interval = time_interval
self.progress_interval = progress_interval
self.last_time = 0.0
self.last_progress = 0.0
def log(self, metric, progress, force=False):
now = time.monotonic()
if (
force
or now > self.last_time + self.time_interval
or progress > self.last_progress + self.progress_interval
):
self.last_time = now
self.last_progress = progress
_LG.info_on_master("train: %s [%3d%%]", metric, 100 * progress)
class Trainer:
def __init__(
self,
model,
optimizer,
train_loader,
valid_loader,
eval_loader,
grad_clip,
device,
*,
debug,
):
self.model = model
self.optimizer = optimizer
self.train_loader = train_loader
self.valid_loader = valid_loader
self.eval_loader = eval_loader
self.grad_clip = grad_clip
self.device = device
self.debug = debug
def train_one_epoch(self):
self.model.train()
logger = OccasionalLogger()
num_batches = len(self.train_loader)
for i, batch in enumerate(self.train_loader, start=1):
mix = batch.mix.to(self.device)
src = batch.src.to(self.device)
mask = batch.mask.to(self.device)
estimate = self.model(mix)
si_snri, sdri = si_sdr_improvement(estimate, src, mix, mask)
si_snri = si_snri.mean()
sdri = sdri.mean()
loss = -si_snri
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.grad_clip, norm_type=2.0
)
self.optimizer.step()
metric = Metric(si_snri.item(), sdri.item())
logger.log(metric, progress=i / num_batches, force=i == num_batches)
if self.debug:
break
def evaluate(self):
with torch.no_grad():
return self._test(self.eval_loader)
def validate(self):
with torch.no_grad():
return self._test(self.valid_loader)
def _test(self, loader):
self.model.eval()
total_si_snri = torch.zeros(1, dtype=torch.float32, device=self.device)
total_sdri = torch.zeros(1, dtype=torch.float32, device=self.device)
for batch in loader:
mix = batch.mix.to(self.device)
src = batch.src.to(self.device)
mask = batch.mask.to(self.device)
estimate = self.model(mix)
si_snri, sdri = si_sdr_improvement(estimate, src, mix, mask)
total_si_snri += si_snri.sum()
total_sdri += sdri.sum()
if self.debug:
break
dist.all_reduce(total_si_snri, dist.ReduceOp.SUM)
dist.all_reduce(total_sdri, dist.ReduceOp.SUM)
num_samples = len(loader.dataset)
metric = Metric(total_si_snri.item() / num_samples, total_sdri.item() / num_samples)
return metric
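# A minimal sketch of ``si_sdr_improvement`` on random tensors (demo only).
# It assumes the script is run from the training directory so that the
# ``utils`` package imported above is resolvable.
if __name__ == "__main__":
    batch, speakers, num_frames = 3, 2, 8000
    reference = torch.rand(batch, speakers, num_frames)
    estimate = reference + 0.1 * torch.rand(batch, speakers, num_frames)
    mix = reference.sum(dim=1, keepdim=True)
    mask = torch.ones(batch, 1, num_frames)
    si_snri, sdri = si_sdr_improvement(estimate, reference, mix, mask)
    print(si_snri.shape, sdri.shape)  # both have shape [batch,]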
|
import torch
class Normalize(torch.nn.Module):
def forward(self, tensor):
return (tensor - tensor.mean(-1, keepdim=True)) / tensor.std(-1, keepdim=True)
class UnsqueezeFirst(torch.nn.Module):
def forward(self, tensor):
return tensor.unsqueeze(0)
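# A minimal sketch (demo only): add a channel dimension to a mono waveform and
# normalize it to zero mean / unit variance along the time axis.
if __name__ == "__main__":
    waveform = torch.rand(16000)
    transform = torch.nn.Sequential(UnsqueezeFirst(), Normalize())
    out = transform(waveform)
    print(out.shape, out.mean(-1).item(), out.std(-1).item())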
|
import torch
from torchaudio.datasets import LIBRISPEECH
class MapMemoryCache(torch.utils.data.Dataset):
"""
Wrap a dataset so that, whenever a new item is returned, it is saved to memory.
"""
def __init__(self, dataset):
self.dataset = dataset
self._cache = [None] * len(dataset)
def __getitem__(self, n):
if self._cache[n] is not None:
return self._cache[n]
item = self.dataset[n]
self._cache[n] = item
return item
def __len__(self):
return len(self.dataset)
class Processed(torch.utils.data.Dataset):
def __init__(self, dataset, transforms, encode):
self.dataset = dataset
self.transforms = transforms
self.encode = encode
def __getitem__(self, key):
item = self.dataset[key]
return self.process_datapoint(item)
def __len__(self):
return len(self.dataset)
def process_datapoint(self, item):
transformed = item[0]
target = item[2].lower()
transformed = self.transforms(transformed)
transformed = transformed[0, ...].transpose(0, -1)
target = self.encode(target)
target = torch.tensor(target, dtype=torch.long, device=transformed.device)
return transformed, target
def split_process_librispeech(
datasets, transforms, language_model, root, folder_in_archive,
):
def create(tags, cache=True):
if isinstance(tags, str):
tags = [tags]
if isinstance(transforms, list):
transform_list = transforms
else:
transform_list = [transforms]
data = torch.utils.data.ConcatDataset(
[
Processed(
LIBRISPEECH(
root, tag, folder_in_archive=folder_in_archive, download=False,
),
transform,
language_model.encode,
)
for tag, transform in zip(tags, transform_list)
]
)
data = MapMemoryCache(data)
return data
# For performance, we cache all datasets
return tuple(create(dataset) for dataset in datasets)
def collate_factory(model_length_function, transforms=None):
if transforms is None:
transforms = torch.nn.Sequential()
def collate_fn(batch):
tensors = [transforms(b[0]) for b in batch if b]
tensors_lengths = torch.tensor(
[model_length_function(t) for t in tensors],
dtype=torch.long,
device=tensors[0].device,
)
tensors = torch.nn.utils.rnn.pad_sequence(tensors, batch_first=True)
tensors = tensors.transpose(1, -1)
targets = [b[1] for b in batch if b]
target_lengths = torch.tensor(
[target.shape[0] for target in targets],
dtype=torch.long,
device=tensors.device,
)
targets = torch.nn.utils.rnn.pad_sequence(targets, batch_first=True)
return tensors, targets, tensors_lengths, target_lengths
return collate_fn
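# A minimal sketch (demo only): collate two dummy items shaped like the output
# of ``Processed`` (time x features, plus an encoded integer target).
if __name__ == "__main__":
    dummy_batch = [
        (torch.rand(120, 13), torch.tensor([1, 2, 3], dtype=torch.long)),
        (torch.rand(80, 13), torch.tensor([4, 5], dtype=torch.long)),
    ]
    collate_fn = collate_factory(lambda t: int(t.shape[0]) // 2 + 1)
    inputs, targets, input_lengths, target_lengths = collate_fn(dummy_batch)
    # Inputs are padded to the longest item and transposed to (batch, features, time).
    print(inputs.shape, targets.shape, input_lengths, target_lengths)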
|
import json
import logging
import os
import shutil
from collections import defaultdict
import torch
class MetricLogger(defaultdict):
def __init__(self, name, print_freq=1, disable=False):
super().__init__(lambda: 0.0)
self.disable = disable
self.print_freq = print_freq
self._iter = 0
self["name"] = name
def __str__(self):
return json.dumps(self)
def __call__(self):
self._iter = (self._iter + 1) % self.print_freq
if not self.disable and not self._iter:
print(self, flush=True)
def save_checkpoint(state, is_best, filename, disable):
"""
Save the model to a temporary file first,
then copy it to filename, in case the signal interrupts
the torch.save() process.
"""
if disable:
return
if filename == "":
return
tempfile = filename + ".temp"
    # Remove tempfile in case of an interruption during the copy from tempfile to filename
if os.path.isfile(tempfile):
os.remove(tempfile)
torch.save(state, tempfile)
if os.path.isfile(tempfile):
os.rename(tempfile, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
logging.warning("Checkpoint: saved")
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
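# A minimal sketch (demo only): log a couple of values with MetricLogger and
# count the trainable parameters of a small model.
if __name__ == "__main__":
    metric = MetricLogger("demo", print_freq=1)
    metric["loss"] = 0.5
    metric["iteration"] += 1
    metric()  # prints the accumulated metrics as one JSON line
    print("parameters:", count_parameters(torch.nn.Linear(10, 2)))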
|
from torch import topk
class GreedyDecoder:
def __call__(self, outputs):
"""Greedy Decoder. Returns highest probability of class labels for each timestep
Args:
outputs (torch.Tensor): shape (input length, batch size, number of classes (including blank))
Returns:
torch.Tensor: class labels per time step.
"""
_, indices = topk(outputs, k=1, dim=-1)
return indices[..., 0]
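# A minimal sketch (demo only): decode a random emission of shape
# (input length, batch size, number of classes including blank).
if __name__ == "__main__":
    import torch
    decoder = GreedyDecoder()
    outputs = torch.rand(50, 2, 29)
    print(decoder(outputs).shape)  # (input length, batch size) == (50, 2)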
|
import collections
import itertools
class LanguageModel:
def __init__(self, labels, char_blank, char_space):
self.char_space = char_space
self.char_blank = char_blank
labels = list(labels)
self.length = len(labels)
enumerated = list(enumerate(labels))
flipped = [(sub[1], sub[0]) for sub in enumerated]
d1 = collections.OrderedDict(enumerated)
d2 = collections.OrderedDict(flipped)
self.mapping = {**d1, **d2}
def encode(self, iterable):
if isinstance(iterable, list):
return [self.encode(i) for i in iterable]
else:
return [self.mapping[i] + self.mapping[self.char_blank] for i in iterable]
def decode(self, tensor):
if len(tensor) > 0 and isinstance(tensor[0], list):
return [self.decode(t) for t in tensor]
else:
            # not idempotent, since the string is cleaned up (blanks and consecutive repeats removed)
x = (self.mapping[i] for i in tensor)
x = "".join(i for i, _ in itertools.groupby(x))
x = x.replace(self.char_blank, "")
# x = x.strip()
return x
def __len__(self):
return self.length
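# A minimal sketch (demo only) using the same label set as the training script:
# blank, space, apostrophe, then the lowercase alphabet.
if __name__ == "__main__":
    import string
    labels = "* '" + string.ascii_lowercase
    lm = LanguageModel(labels, char_blank="*", char_space=" ")
    encoded = lm.encode("hi there")
    # decode collapses consecutive repeats and strips blanks (CTC-style), so
    # "hi there" round-trips because it has no repeated adjacent characters.
    print(encoded)
    print(lm.decode(encoded))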
|
import argparse
import logging
import os
import string
from datetime import datetime
from time import time
import torch
import torchaudio
from torch.optim import SGD, Adadelta, Adam, AdamW
from torch.optim.lr_scheduler import ExponentialLR, ReduceLROnPlateau
from torch.utils.data import DataLoader
from torchaudio.datasets.utils import bg_iterator
from torchaudio.functional import edit_distance
from torchaudio.models.wav2letter import Wav2Letter
from ctc_decoders import GreedyDecoder
from datasets import collate_factory, split_process_librispeech
from languagemodels import LanguageModel
from transforms import Normalize, UnsqueezeFirst
from utils import MetricLogger, count_parameters, save_checkpoint
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--type",
metavar="T",
default="mfcc",
choices=["waveform", "mfcc"],
help="input type for model",
)
parser.add_argument(
"--freq-mask",
default=0,
type=int,
metavar="N",
help="maximal width of frequency mask",
)
parser.add_argument(
"--win-length",
default=400,
type=int,
metavar="N",
help="width of spectrogram window",
)
parser.add_argument(
"--hop-length",
default=160,
type=int,
metavar="N",
help="width of spectrogram window",
)
parser.add_argument(
"--time-mask",
default=0,
type=int,
metavar="N",
help="maximal width of time mask",
)
parser.add_argument(
"--workers",
default=0,
type=int,
metavar="N",
help="number of data loading workers",
)
parser.add_argument(
"--checkpoint",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint",
)
parser.add_argument(
"--epochs",
default=200,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch", default=0, type=int, metavar="N", help="manual epoch number"
)
parser.add_argument(
"--reduce-lr-valid",
action="store_true",
help="reduce learning rate based on validation loss",
)
parser.add_argument(
"--normalize", action="store_true", help="normalize model input"
)
parser.add_argument(
"--progress-bar", action="store_true", help="use progress bar while training"
)
parser.add_argument(
"--decoder",
metavar="D",
default="greedy",
choices=["greedy"],
help="decoder to use",
)
parser.add_argument(
"--batch-size", default=128, type=int, metavar="N", help="mini-batch size"
)
parser.add_argument(
"--n-bins",
default=13,
type=int,
metavar="N",
help="number of bins in transforms",
)
parser.add_argument(
"--optimizer",
metavar="OPT",
default="adadelta",
choices=["sgd", "adadelta", "adam", "adamw"],
help="optimizer to use",
)
parser.add_argument(
"--scheduler",
metavar="S",
default="reduceonplateau",
choices=["exponential", "reduceonplateau"],
help="optimizer to use",
)
parser.add_argument(
"--learning-rate",
default=0.6,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument(
"--gamma",
default=0.99,
type=float,
metavar="GAMMA",
help="learning rate exponential decay constant",
)
parser.add_argument(
"--momentum", default=0.8, type=float, metavar="M", help="momentum"
)
parser.add_argument(
"--weight-decay", default=1e-5, type=float, metavar="W", help="weight decay"
)
parser.add_argument("--eps", metavar="EPS", type=float, default=1e-8)
parser.add_argument("--rho", metavar="RHO", type=float, default=0.95)
parser.add_argument("--clip-grad", metavar="NORM", type=float, default=0.0)
parser.add_argument(
"--dataset-root",
type=str,
help="specify dataset root folder",
)
parser.add_argument(
"--dataset-folder-in-archive",
type=str,
help="specify dataset folder in archive",
)
parser.add_argument(
"--dataset-train",
default=["train-clean-100"],
nargs="+",
type=str,
help="select which part of librispeech to train with",
)
parser.add_argument(
"--dataset-valid",
default=["dev-clean"],
nargs="+",
type=str,
help="select which part of librispeech to validate with",
)
parser.add_argument(
"--distributed", action="store_true", help="enable DistributedDataParallel"
)
parser.add_argument("--seed", type=int, default=0, help="random seed")
parser.add_argument(
"--world-size", type=int, default=8, help="the world size to initiate DPP"
)
parser.add_argument("--jit", action="store_true", help="if used, model is jitted")
args = parser.parse_args()
logging.info(args)
return args
def setup_distributed(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
# initialize the process group
torch.distributed.init_process_group("nccl", rank=rank, world_size=world_size)
def model_length_function(tensor):
if tensor.shape[1] == 1:
# waveform mode
return int(tensor.shape[0]) // 160 // 2 + 1
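    # MFCC mode: the model emits roughly one output frame for every two feature frames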
return int(tensor.shape[0]) // 2 + 1
def compute_error_rates(outputs, targets, decoder, language_model, metric):
output = outputs.transpose(0, 1).to("cpu")
output = decoder(output)
# Compute CER
output = language_model.decode(output.tolist())
target = language_model.decode(targets.tolist())
print_length = 20
for i in range(2):
# Print a few examples
output_print = output[i].ljust(print_length)[:print_length]
target_print = target[i].ljust(print_length)[:print_length]
logging.info("Target: %s Output: %s", target_print, output_print)
cers = [edit_distance(t, o) for t, o in zip(target, output)]
cers = sum(cers)
n = sum(len(t) for t in target)
metric["batch char error"] = cers
metric["batch char total"] = n
metric["batch char error rate"] = cers / n
metric["epoch char error"] += cers
metric["epoch char total"] += n
metric["epoch char error rate"] = metric["epoch char error"] / metric["epoch char total"]
# Compute WER
output = [o.split(language_model.char_space) for o in output]
target = [t.split(language_model.char_space) for t in target]
wers = [edit_distance(t, o) for t, o in zip(target, output)]
wers = sum(wers)
n = sum(len(t) for t in target)
metric["batch word error"] = wers
metric["batch word total"] = n
metric["batch word error rate"] = wers / n
metric["epoch word error"] += wers
metric["epoch word total"] += n
metric["epoch word error rate"] = metric["epoch word error"] / metric["epoch word total"]
def train_one_epoch(
model,
criterion,
optimizer,
scheduler,
data_loader,
decoder,
language_model,
device,
epoch,
clip_grad,
disable_logger=False,
reduce_lr_on_plateau=False,
):
model.train()
metric = MetricLogger("train", disable=disable_logger)
metric["epoch"] = epoch
for inputs, targets, tensors_lengths, target_lengths in bg_iterator(
data_loader, maxsize=2
):
start = time()
inputs = inputs.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
# keep batch first for data parallel
outputs = model(inputs).transpose(-1, -2).transpose(0, 1)
# CTC
# outputs: input length, batch size, number of classes (including blank)
# targets: batch size, max target length
# input_lengths: batch size
# target_lengths: batch size
loss = criterion(outputs, targets, tensors_lengths, target_lengths)
optimizer.zero_grad()
loss.backward()
if clip_grad > 0:
metric["gradient"] = torch.nn.utils.clip_grad_norm_(
model.parameters(), clip_grad
)
optimizer.step()
compute_error_rates(outputs, targets, decoder, language_model, metric)
try:
metric["lr"] = scheduler.get_last_lr()[0]
except AttributeError:
metric["lr"] = optimizer.param_groups[0]["lr"]
metric["batch size"] = len(inputs)
metric["n_channel"] = inputs.shape[1]
metric["n_time"] = inputs.shape[-1]
metric["dataset length"] += metric["batch size"]
metric["iteration"] += 1
metric["loss"] = loss.item()
metric["cumulative loss"] += metric["loss"]
metric["average loss"] = metric["cumulative loss"] / metric["iteration"]
metric["iteration time"] = time() - start
metric["epoch time"] += metric["iteration time"]
metric()
if reduce_lr_on_plateau and isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(metric["average loss"])
elif not isinstance(scheduler, ReduceLROnPlateau):
scheduler.step()
def evaluate(
model,
criterion,
data_loader,
decoder,
language_model,
device,
epoch,
disable_logger=False,
):
with torch.no_grad():
model.eval()
start = time()
metric = MetricLogger("validation", disable=disable_logger)
metric["epoch"] = epoch
for inputs, targets, tensors_lengths, target_lengths in bg_iterator(
data_loader, maxsize=2
):
inputs = inputs.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
# keep batch first for data parallel
outputs = model(inputs).transpose(-1, -2).transpose(0, 1)
# CTC
# outputs: input length, batch size, number of classes (including blank)
# targets: batch size, max target length
# input_lengths: batch size
# target_lengths: batch size
metric["cumulative loss"] += criterion(
outputs, targets, tensors_lengths, target_lengths
).item()
metric["dataset length"] += len(inputs)
metric["iteration"] += 1
compute_error_rates(outputs, targets, decoder, language_model, metric)
metric["average loss"] = metric["cumulative loss"] / metric["iteration"]
metric["validation time"] = time() - start
metric()
return metric["average loss"]
def main(rank, args):
# Distributed setup
if args.distributed:
setup_distributed(rank, args.world_size)
not_main_rank = args.distributed and rank != 0
logging.info("Start time: %s", datetime.now())
# Explicitly set seed to make sure models created in separate processes
# start from same random weights and biases
torch.manual_seed(args.seed)
# Empty CUDA cache
torch.cuda.empty_cache()
# Change backend for flac files
torchaudio.set_audio_backend("soundfile")
# Transforms
melkwargs = {
"n_fft": args.win_length,
"n_mels": args.n_bins,
"hop_length": args.hop_length,
}
sample_rate_original = 16000
if args.type == "mfcc":
transforms = torch.nn.Sequential(
torchaudio.transforms.MFCC(
sample_rate=sample_rate_original,
n_mfcc=args.n_bins,
melkwargs=melkwargs,
),
)
num_features = args.n_bins
elif args.type == "waveform":
transforms = torch.nn.Sequential(UnsqueezeFirst())
num_features = 1
else:
raise ValueError("Model type not supported")
if args.normalize:
transforms = torch.nn.Sequential(transforms, Normalize())
augmentations = torch.nn.Sequential()
if args.freq_mask:
augmentations = torch.nn.Sequential(
augmentations,
torchaudio.transforms.FrequencyMasking(freq_mask_param=args.freq_mask),
)
if args.time_mask:
augmentations = torch.nn.Sequential(
augmentations,
torchaudio.transforms.TimeMasking(time_mask_param=args.time_mask),
)
# Text preprocessing
char_blank = "*"
char_space = " "
char_apostrophe = "'"
labels = char_blank + char_space + char_apostrophe + string.ascii_lowercase
language_model = LanguageModel(labels, char_blank, char_space)
# Dataset
training, validation = split_process_librispeech(
[args.dataset_train, args.dataset_valid],
[transforms, transforms],
language_model,
root=args.dataset_root,
folder_in_archive=args.dataset_folder_in_archive,
)
# Decoder
if args.decoder == "greedy":
decoder = GreedyDecoder()
else:
raise ValueError("Selected decoder not supported")
# Model
model = Wav2Letter(
num_classes=language_model.length,
input_type=args.type,
num_features=num_features,
)
if args.jit:
model = torch.jit.script(model)
if args.distributed:
n = torch.cuda.device_count() // args.world_size
devices = list(range(rank * n, (rank + 1) * n))
model = model.to(devices[0])
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=devices)
else:
devices = ["cuda" if torch.cuda.is_available() else "cpu"]
model = model.to(devices[0], non_blocking=True)
model = torch.nn.DataParallel(model)
n = count_parameters(model)
logging.info("Number of parameters: %s", n)
# Optimizer
if args.optimizer == "adadelta":
optimizer = Adadelta(
model.parameters(),
lr=args.learning_rate,
weight_decay=args.weight_decay,
eps=args.eps,
rho=args.rho,
)
elif args.optimizer == "sgd":
optimizer = SGD(
model.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
elif args.optimizer == "adam":
optimizer = Adam(
model.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
elif args.optimizer == "adamw":
optimizer = AdamW(
model.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
else:
raise ValueError("Selected optimizer not supported")
if args.scheduler == "exponential":
scheduler = ExponentialLR(optimizer, gamma=args.gamma)
elif args.scheduler == "reduceonplateau":
scheduler = ReduceLROnPlateau(optimizer, patience=10, threshold=1e-3)
else:
raise ValueError("Selected scheduler not supported")
criterion = torch.nn.CTCLoss(
blank=language_model.mapping[char_blank], zero_infinity=False
)
# Data Loader
collate_fn_train = collate_factory(model_length_function, augmentations)
collate_fn_valid = collate_factory(model_length_function)
loader_training_params = {
"num_workers": args.workers,
"pin_memory": True,
"shuffle": True,
"drop_last": True,
}
loader_validation_params = loader_training_params.copy()
loader_validation_params["shuffle"] = False
loader_training = DataLoader(
training,
batch_size=args.batch_size,
collate_fn=collate_fn_train,
**loader_training_params,
)
loader_validation = DataLoader(
validation,
batch_size=args.batch_size,
collate_fn=collate_fn_valid,
**loader_validation_params,
)
# Setup checkpoint
best_loss = 1.0
load_checkpoint = args.checkpoint and os.path.isfile(args.checkpoint)
if args.distributed:
torch.distributed.barrier()
if load_checkpoint:
logging.info("Checkpoint: loading %s", args.checkpoint)
checkpoint = torch.load(args.checkpoint)
args.start_epoch = checkpoint["epoch"]
best_loss = checkpoint["best_loss"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
logging.info(
"Checkpoint: loaded '%s' at epoch %s", args.checkpoint, checkpoint["epoch"]
)
else:
logging.info("Checkpoint: not found")
save_checkpoint(
{
"epoch": args.start_epoch,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
False,
args.checkpoint,
not_main_rank,
)
if args.distributed:
torch.distributed.barrier()
torch.autograd.set_detect_anomaly(False)
for epoch in range(args.start_epoch, args.epochs):
logging.info("Epoch: %s", epoch)
train_one_epoch(
model,
criterion,
optimizer,
scheduler,
loader_training,
decoder,
language_model,
devices[0],
epoch,
args.clip_grad,
not_main_rank,
not args.reduce_lr_valid,
)
loss = evaluate(
model,
criterion,
loader_validation,
decoder,
language_model,
devices[0],
epoch,
not_main_rank,
)
if args.reduce_lr_valid and isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
is_best = loss < best_loss
best_loss = min(loss, best_loss)
save_checkpoint(
{
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
is_best,
args.checkpoint,
not_main_rank,
)
logging.info("End time: %s", datetime.now())
if args.distributed:
torch.distributed.destroy_process_group()
def spawn_main(main, args):
if args.distributed:
torch.multiprocessing.spawn(
main, args=(args,), nprocs=args.world_size, join=True
)
else:
main(0, args)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
args = parse_args()
spawn_main(main, args)
|
# -*- coding: utf-8 -*-
"""
Audio Resampling
================
Here, we will walk through resampling audio waveforms using ``torchaudio``.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.functional as F
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import math
import time
import librosa
import matplotlib.pyplot as plt
from IPython.display import Audio, display
import pandas as pd
DEFAULT_OFFSET = 201
SWEEP_MAX_SAMPLE_RATE = 48000
DEFAULT_LOWPASS_FILTER_WIDTH = 6
DEFAULT_ROLLOFF = 0.99
DEFAULT_RESAMPLING_METHOD = "sinc_interpolation"
def _get_log_freq(sample_rate, max_sweep_rate, offset):
"""Get freqs evenly spaced out in log-scale, between [0, max_sweep_rate // 2]
offset is used to avoid negative infinity `log(offset + x)`.
"""
start, stop = math.log(offset), math.log(offset + max_sweep_rate // 2)
return (
torch.exp(torch.linspace(start, stop, sample_rate, dtype=torch.double)) - offset
)
def _get_inverse_log_freq(freq, sample_rate, offset):
"""Find the time where the given frequency is given by _get_log_freq"""
half = sample_rate // 2
return sample_rate * (math.log(1 + freq / offset) / math.log(1 + half / offset))
def _get_freq_ticks(sample_rate, offset, f_max):
# Given the original sample rate used for generating the sweep,
# find the x-axis value where the log-scale major frequency values fall in
time, freq = [], []
for exp in range(2, 5):
for v in range(1, 10):
f = v * 10 ** exp
if f < sample_rate // 2:
t = _get_inverse_log_freq(f, sample_rate, offset) / sample_rate
time.append(t)
freq.append(f)
t_max = _get_inverse_log_freq(f_max, sample_rate, offset) / sample_rate
time.append(t_max)
freq.append(f_max)
return time, freq
def get_sine_sweep(sample_rate, offset=DEFAULT_OFFSET):
max_sweep_rate = sample_rate
freq = _get_log_freq(sample_rate, max_sweep_rate, offset)
delta = 2 * math.pi * freq / sample_rate
    cumulative = torch.cumsum(delta, dim=0)
    signal = torch.sin(cumulative).unsqueeze(dim=0)
return signal
def plot_sweep(
waveform,
sample_rate,
title,
max_sweep_rate=SWEEP_MAX_SAMPLE_RATE,
offset=DEFAULT_OFFSET,
):
x_ticks = [100, 500, 1000, 5000, 10000, 20000, max_sweep_rate // 2]
y_ticks = [1000, 5000, 10000, 20000, sample_rate // 2]
time, freq = _get_freq_ticks(max_sweep_rate, offset, sample_rate // 2)
freq_x = [f if f in x_ticks and f <= max_sweep_rate // 2 else None for f in freq]
freq_y = [f for f in freq if f >= 1000 and f in y_ticks and f <= sample_rate // 2]
figure, axis = plt.subplots(1, 1)
axis.specgram(waveform[0].numpy(), Fs=sample_rate)
plt.xticks(time, freq_x)
plt.yticks(freq_y, freq_y)
axis.set_xlabel("Original Signal Frequency (Hz, log scale)")
axis.set_ylabel("Waveform Frequency (Hz)")
axis.xaxis.grid(True, alpha=0.67)
axis.yaxis.grid(True, alpha=0.67)
figure.suptitle(f"{title} (sample rate: {sample_rate} Hz)")
plt.show(block=True)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def benchmark_resample(
method,
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=DEFAULT_LOWPASS_FILTER_WIDTH,
rolloff=DEFAULT_ROLLOFF,
resampling_method=DEFAULT_RESAMPLING_METHOD,
beta=None,
librosa_type=None,
iters=5,
):
if method == "functional":
begin = time.time()
for _ in range(iters):
F.resample(
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=lowpass_filter_width,
rolloff=rolloff,
resampling_method=resampling_method,
)
elapsed = time.time() - begin
return elapsed / iters
elif method == "transforms":
resampler = T.Resample(
sample_rate,
resample_rate,
lowpass_filter_width=lowpass_filter_width,
rolloff=rolloff,
resampling_method=resampling_method,
dtype=waveform.dtype,
)
begin = time.time()
for _ in range(iters):
resampler(waveform)
elapsed = time.time() - begin
return elapsed / iters
elif method == "librosa":
waveform_np = waveform.squeeze().numpy()
begin = time.time()
for _ in range(iters):
librosa.resample(
waveform_np, sample_rate, resample_rate, res_type=librosa_type
)
elapsed = time.time() - begin
return elapsed / iters
######################################################################
# To resample an audio waveform from one frequency to another, you can use
# ``transforms.Resample`` or ``functional.resample``.
# ``transforms.Resample`` precomputes and caches the kernel used for
# resampling, while ``functional.resample`` computes it on the fly, so
# using ``transforms.Resample`` will result in a speedup when resampling
# multiple waveforms using the same parameters (see Benchmarking section).
#
# Both resampling methods use `bandlimited sinc
# interpolation <https://ccrma.stanford.edu/~jos/resample/>`__ to compute
# signal values at arbitrary time steps. The implementation involves
# convolution, so we can take advantage of GPU / multithreading for
# performance improvements. When using resampling in multiple
# subprocesses, such as data loading with multiple worker processes, your
# application might create more threads than your system can handle
# efficiently. Setting ``torch.set_num_threads(1)`` might help in this
# case.
#
# Because a finite number of samples can only represent a finite number of
# frequencies, resampling does not produce perfect results, and a variety
# of parameters can be used to control for its quality and computational
# speed. We demonstrate these properties through resampling a logarithmic
# sine sweep, which is a sine wave that increases exponentially in
# frequency over time.
#
# The spectrograms below show the frequency representation of the signal,
# where the x-axis corresponds to the frequency of the original
# waveform (in log scale), y-axis the frequency of the
# plotted waveform, and color intensity the amplitude.
#
sample_rate = 48000
resample_rate = 32000
waveform = get_sine_sweep(sample_rate)
plot_sweep(waveform, sample_rate, title="Original Waveform")
play_audio(waveform, sample_rate)
resampler = T.Resample(sample_rate, resample_rate, dtype=waveform.dtype)
resampled_waveform = resampler(waveform)
plot_sweep(resampled_waveform, resample_rate, title="Resampled Waveform")
play_audio(resampled_waveform, resample_rate)
######################################################################
# Controlling resampling quality with parameters
# ----------------------------------------------
#
# Lowpass filter width
# ~~~~~~~~~~~~~~~~~~~~
#
# Because the filter used for interpolation extends infinitely, the
# ``lowpass_filter_width`` parameter is used to control for the width of
# the filter to use to window the interpolation. It is also referred to as
# the number of zero crossings, since the interpolation passes through
# zero at every time unit. Using a larger ``lowpass_filter_width``
# provides a sharper, more precise filter, but is more computationally
# expensive.
#
sample_rate = 48000
resample_rate = 32000
resampled_waveform = F.resample(
waveform, sample_rate, resample_rate, lowpass_filter_width=6
)
plot_sweep(resampled_waveform, resample_rate, title="lowpass_filter_width=6")
resampled_waveform = F.resample(
waveform, sample_rate, resample_rate, lowpass_filter_width=128
)
plot_sweep(resampled_waveform, resample_rate, title="lowpass_filter_width=128")
######################################################################
# Rolloff
# ~~~~~~~
#
# The ``rolloff`` parameter is represented as a fraction of the Nyquist
# frequency, which is the maximal frequency representable by a given
# finite sample rate. ``rolloff`` determines the lowpass filter cutoff and
# controls the degree of aliasing, which takes place when frequencies
# higher than the Nyquist are mapped to lower frequencies. A lower rolloff
# will therefore reduce the amount of aliasing, but it will also reduce
# some of the higher frequencies.
#
sample_rate = 48000
resample_rate = 32000
resampled_waveform = F.resample(waveform, sample_rate, resample_rate, rolloff=0.99)
plot_sweep(resampled_waveform, resample_rate, title="rolloff=0.99")
resampled_waveform = F.resample(waveform, sample_rate, resample_rate, rolloff=0.8)
plot_sweep(resampled_waveform, resample_rate, title="rolloff=0.8")
######################################################################
# Window function
# ~~~~~~~~~~~~~~~
#
# By default, ``torchaudio``’s resample uses the Hann window filter, which is
# a weighted cosine function. It additionally supports the Kaiser window,
# which is a near optimal window function that contains an additional
# ``beta`` parameter that allows for designing the smoothness of the
# filter and the width of its impulse. This can be controlled using the
# ``resampling_method`` parameter.
#
sample_rate = 48000
resample_rate = 32000
resampled_waveform = F.resample(
waveform, sample_rate, resample_rate, resampling_method="sinc_interpolation"
)
plot_sweep(resampled_waveform, resample_rate, title="Hann Window Default")
resampled_waveform = F.resample(
waveform, sample_rate, resample_rate, resampling_method="kaiser_window"
)
plot_sweep(resampled_waveform, resample_rate, title="Kaiser Window Default")
######################################################################
# Comparison against librosa
# --------------------------
#
# ``torchaudio``’s resample function can be used to produce results similar to
# those of librosa (resampy)’s kaiser window resampling, with some noise.
#
sample_rate = 48000
resample_rate = 32000
# kaiser_best
resampled_waveform = F.resample(
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=64,
rolloff=0.9475937167399596,
resampling_method="kaiser_window",
beta=14.769656459379492,
)
plot_sweep(resampled_waveform, resample_rate, title="Kaiser Window Best (torchaudio)")
librosa_resampled_waveform = torch.from_numpy(
librosa.resample(
waveform.squeeze().numpy(), sample_rate, resample_rate, res_type="kaiser_best"
)
).unsqueeze(0)
plot_sweep(
librosa_resampled_waveform, resample_rate, title="Kaiser Window Best (librosa)"
)
mse = torch.square(resampled_waveform - librosa_resampled_waveform).mean().item()
print("torchaudio and librosa kaiser best MSE:", mse)
# kaiser_fast
resampled_waveform = F.resample(
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=16,
rolloff=0.85,
resampling_method="kaiser_window",
beta=8.555504641634386,
)
plot_sweep(
resampled_waveform, resample_rate, title="Kaiser Window Fast (torchaudio)"
)
librosa_resampled_waveform = torch.from_numpy(
librosa.resample(
waveform.squeeze().numpy(), sample_rate, resample_rate, res_type="kaiser_fast"
)
).unsqueeze(0)
plot_sweep(
librosa_resampled_waveform, resample_rate, title="Kaiser Window Fast (librosa)"
)
mse = torch.square(resampled_waveform - librosa_resampled_waveform).mean().item()
print("torchaudio and librosa kaiser fast MSE:", mse)
######################################################################
# Performance Benchmarking
# ------------------------
#
# Below are benchmarks for downsampling and upsampling waveforms between
# two pairs of sampling rates. We demonstrate the performance implications
# that the ``lowpass_filter_width``, window type, and sample rates can
# have. Additionally, we provide a comparison against ``librosa``\ ’s
# ``kaiser_best`` and ``kaiser_fast`` using their corresponding parameters
# in ``torchaudio``.
#
# To elaborate on the results:
#
# - a larger ``lowpass_filter_width`` results in a larger resampling kernel,
# and therefore increases computation time for both the kernel computation
# and convolution
# - using ``kaiser_window`` results in longer computation times than the default
#   ``sinc_interpolation`` because it is more complex to compute the intermediate
#   window values
# - a large GCD between the sample and resample rate will result
#   in a simplification that allows for a smaller kernel and faster kernel computation.
#
configs = {
"downsample (48 -> 44.1 kHz)": [48000, 44100],
"downsample (16 -> 8 kHz)": [16000, 8000],
"upsample (44.1 -> 48 kHz)": [44100, 48000],
"upsample (8 -> 16 kHz)": [8000, 16000],
}
for label in configs:
times, rows = [], []
sample_rate = configs[label][0]
resample_rate = configs[label][1]
waveform = get_sine_sweep(sample_rate)
# sinc 64 zero-crossings
f_time = benchmark_resample(
"functional", waveform, sample_rate, resample_rate, lowpass_filter_width=64
)
t_time = benchmark_resample(
"transforms", waveform, sample_rate, resample_rate, lowpass_filter_width=64
)
times.append([None, 1000 * f_time, 1000 * t_time])
rows.append("sinc (width 64)")
    # sinc 16 zero-crossings
f_time = benchmark_resample(
"functional", waveform, sample_rate, resample_rate, lowpass_filter_width=16
)
t_time = benchmark_resample(
"transforms", waveform, sample_rate, resample_rate, lowpass_filter_width=16
)
times.append([None, 1000 * f_time, 1000 * t_time])
rows.append("sinc (width 16)")
# kaiser best
lib_time = benchmark_resample(
"librosa", waveform, sample_rate, resample_rate, librosa_type="kaiser_best"
)
f_time = benchmark_resample(
"functional",
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=64,
rolloff=0.9475937167399596,
resampling_method="kaiser_window",
beta=14.769656459379492,
)
t_time = benchmark_resample(
"transforms",
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=64,
rolloff=0.9475937167399596,
resampling_method="kaiser_window",
beta=14.769656459379492,
)
times.append([1000 * lib_time, 1000 * f_time, 1000 * t_time])
rows.append("kaiser_best")
# kaiser fast
lib_time = benchmark_resample(
"librosa", waveform, sample_rate, resample_rate, librosa_type="kaiser_fast"
)
f_time = benchmark_resample(
"functional",
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=16,
rolloff=0.85,
resampling_method="kaiser_window",
beta=8.555504641634386,
)
t_time = benchmark_resample(
"transforms",
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=16,
rolloff=0.85,
resampling_method="kaiser_window",
beta=8.555504641634386,
)
times.append([1000 * lib_time, 1000 * f_time, 1000 * t_time])
rows.append("kaiser_fast")
df = pd.DataFrame(
times, columns=["librosa", "functional", "transforms"], index=rows
)
df.columns = pd.MultiIndex.from_product([[f"{label} time (ms)"], df.columns])
display(df.round(2))
|
"""
Speech Recognition with Wav2Vec2
================================
**Author**: `Moto Hira <[email protected]>`__
This tutorial shows how to perform speech recognition using
pre-trained models from wav2vec 2.0
[`paper <https://arxiv.org/abs/2006.11477>`__].
"""
######################################################################
# Overview
# --------
#
# The process of speech recognition looks like the following.
#
# 1. Extract the acoustic features from audio waveform
#
# 2. Estimate the class of the acoustic features frame-by-frame
#
# 3. Generate hypothesis from the sequence of the class probabilities
#
# Torchaudio provides easy access to the pre-trained weights and
# associated information, such as the expected sample rate and class
# labels. They are bundled together and available under the
# ``torchaudio.pipelines`` module.
#
######################################################################
# Preparation
# -----------
#
# First, we import the necessary packages and fetch the data that we work on.
#
# %matplotlib inline
import os
import torch
import torchaudio
import requests
import matplotlib
import matplotlib.pyplot as plt
import IPython
matplotlib.rcParams["figure.figsize"] = [16.0, 4.8]
torch.random.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(torch.__version__)
print(torchaudio.__version__)
print(device)
SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" # noqa: E501
SPEECH_FILE = "_assets/speech.wav"
if not os.path.exists(SPEECH_FILE):
os.makedirs("_assets", exist_ok=True)
with open(SPEECH_FILE, "wb") as file:
file.write(requests.get(SPEECH_URL).content)
######################################################################
# Creating a pipeline
# -------------------
#
# First, we will create a Wav2Vec2 model that performs the feature
# extraction and the classification.
#
# There are two types of Wav2Vec2 pre-trained weights available in
# torchaudio: the ones fine-tuned for the ASR task, and the ones not
# fine-tuned.
#
# Wav2Vec2 (and HuBERT) models are trained in a self-supervised manner. They
# are first trained with audio only for representation learning, and then
# fine-tuned for a specific task with additional labels.
#
# The pre-trained weights without fine-tuning can be fine-tuned
# for other downstream tasks as well, but this tutorial does not
# cover that.
#
# We will use :py:func:`torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H` here.
#
# There are multiple models available as
# :py:mod:`torchaudio.pipelines`. Please check the documentation for
# the detail of how they are trained.
#
# The bundle object provides the interface to instantiate the model and other
# information. The sample rate and the class labels can be retrieved as follows.
#
bundle = torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H
print("Sample Rate:", bundle.sample_rate)
print("Labels:", bundle.get_labels())
######################################################################
# The model can be constructed as follows. This process will automatically
# fetch the pre-trained weights and load them into the model.
#
model = bundle.get_model().to(device)
print(model.__class__)
######################################################################
# Loading data
# ------------
#
# We will use the speech data from `VOiCES
# dataset <https://iqtlabs.github.io/voices/>`__, which is licensed under
# Creative Commons BY 4.0.
#
IPython.display.Audio(SPEECH_FILE)
######################################################################
# To load data, we use :py:func:`torchaudio.load`.
#
# If the sampling rate is different from what the pipeline expects, then
# we can use :py:func:`torchaudio.functional.resample` for resampling.
#
# .. note::
#
# - :py:func:`torchaudio.functional.resample` works on CUDA tensors as well.
# - When performing resampling multiple times on the same set of sample rates,
#      using :py:func:`torchaudio.transforms.Resample` might improve the performance.
#
waveform, sample_rate = torchaudio.load(SPEECH_FILE)
waveform = waveform.to(device)
if sample_rate != bundle.sample_rate:
waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate)
######################################################################
# Extracting acoustic features
# ----------------------------
#
# The next step is to extract acoustic features from the audio.
#
# .. note::
#    Wav2Vec2 models fine-tuned for the ASR task can perform feature
#    extraction and classification in one step, but for the sake of the
#    tutorial, we also show how to perform feature extraction here.
#
with torch.inference_mode():
features, _ = model.extract_features(waveform)
######################################################################
# The returned ``features`` is a list of tensors, where each tensor is
# the output of a transformer layer.
#
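# As a quick sanity check (the shape names here are descriptive, not
# API names), there is one tensor per transformer layer, each with
# shape ``(batch, num_frames, feature_dim)``.
print("Number of transformer layers:", len(features))
print("Shape of each feature tensor:", features[0].shape)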
fig, ax = plt.subplots(len(features), 1, figsize=(16, 4.3 * len(features)))
for i, feats in enumerate(features):
ax[i].imshow(feats[0].cpu())
ax[i].set_title(f"Feature from transformer layer {i+1}")
ax[i].set_xlabel("Feature dimension")
ax[i].set_ylabel("Frame (time-axis)")
plt.tight_layout()
plt.show()
######################################################################
# Feature classification
# ----------------------
#
# Once the acoustic features are extracted, the next step is to classify
# them into a set of categories.
#
# The Wav2Vec2 model provides a method to perform the feature extraction
# and classification in one step.
#
with torch.inference_mode():
emission, _ = model(waveform)
######################################################################
# The output is in the form of logits, not probabilities.
#
# Let’s visualize this.
#
plt.imshow(emission[0].cpu().T)
plt.title("Classification result")
plt.xlabel("Frame (time-axis)")
plt.ylabel("Class")
plt.show()
print("Class labels:", bundle.get_labels())
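######################################################################
# If actual probabilities are needed (they are not required for the
# greedy decoding below), the logits can be normalized with a softmax.
# A minimal sketch:
#
probs = torch.softmax(emission, dim=-1)
print("Per-frame probabilities sum to:", probs.sum(dim=-1)[0, :5])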
######################################################################
# We can see that there are strong indications for certain labels across
# the timeline.
#
######################################################################
# Generating transcripts
# ----------------------
#
# From the sequence of label probabilities, now we want to generate
# transcripts. The process to generate hypotheses is often called
# “decoding”.
#
# Decoding is more elaborate than simple classification because
# decoding at a certain time step can be affected by surrounding
# observations.
#
# For example, take words like ``night`` and ``knight``. Even if their
# prior probability distributions are different (in typical conversations,
# ``night`` occurs far more often than ``knight``), to accurately
# generate transcripts containing ``knight``, such as ``a knight with a
# sword``, the decoding process has to postpone the final decision until
# it sees enough context.
#
# Many decoding techniques have been proposed, and they require external
# resources, such as a word dictionary and language models.
#
# In this tutorial, for the sake of simplicity, we will perform greedy
# decoding, which does not depend on such external components, and simply
# picks the best hypothesis at each time step. Therefore, the context
# information is not used, and only one transcript can be generated.
#
# We start by defining the greedy decoding algorithm.
#
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank=0):
super().__init__()
self.labels = labels
self.blank = blank
def forward(self, emission: torch.Tensor) -> str:
"""Given a sequence emission over labels, get the best path string
Args:
emission (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
indices = torch.argmax(emission, dim=-1) # [num_seq,]
indices = torch.unique_consecutive(indices, dim=-1)
indices = [i for i in indices if i != self.blank]
return "".join([self.labels[i] for i in indices])
######################################################################
# Now create the decoder object and decode the transcript.
#
decoder = GreedyCTCDecoder(labels=bundle.get_labels())
transcript = decoder(emission[0])
######################################################################
# Let’s check the result and listen again to the audio.
#
print(transcript)
IPython.display.Audio(SPEECH_FILE)
######################################################################
# The ASR model is fine-tuned using a loss function called Connectionist Temporal Classification (CTC).
# The detail of CTC loss is explained
# `here <https://distill.pub/2017/ctc/>`__. In CTC, a blank token (ϵ) is a
# special token which represents the absence of a label. In greedy
# decoding, blank tokens and consecutive repetitions of a symbol are
# simply collapsed, as in the decoder above.
#
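######################################################################
# As a tiny, illustrative sketch (the labels and indices below are made
# up for this example, not taken from the bundle), collapsing repeats
# and dropping the blank token turns a frame-wise label sequence into a
# transcript:
#
toy_labels = ["-", "H", "E", "L", "O"]  # index 0 is the blank token
toy_indices = torch.tensor([1, 1, 0, 2, 0, 3, 3, 0, 3, 4])
collapsed = torch.unique_consecutive(toy_indices).tolist()
print("".join(toy_labels[i] for i in collapsed if i != 0))  # HELLO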
######################################################################
# Conclusion
# ----------
#
# In this tutorial, we looked at how to use :py:mod:`torchaudio.pipelines` to
# perform acoustic feature extraction and speech recognition. Constructing
# a model and getting the emission is as short as two lines.
#
# ::
#
# model = torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H.get_model()
# emission = model(waveforms, ...)
#
|
# -*- coding: utf-8 -*-
"""
Audio I/O
=========
``torchaudio`` integrates ``libsox`` and provides a rich set of audio I/O.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio boto3
import torch
import torchaudio
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
import io
import os
import requests
import tarfile
import boto3
from botocore import UNSIGNED
from botocore.config import Config
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_assets"
SAMPLE_WAV_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.wav"
SAMPLE_WAV_PATH = os.path.join(_SAMPLE_DIR, "steam.wav")
SAMPLE_MP3_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.mp3"
SAMPLE_MP3_PATH = os.path.join(_SAMPLE_DIR, "steam.mp3")
SAMPLE_GSM_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.gsm"
SAMPLE_GSM_PATH = os.path.join(_SAMPLE_DIR, "steam.gsm")
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" # noqa: E501
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")
SAMPLE_TAR_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit.tar.gz"
SAMPLE_TAR_PATH = os.path.join(_SAMPLE_DIR, "sample.tar.gz")
SAMPLE_TAR_ITEM = "VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
S3_BUCKET = "pytorch-tutorial-assets"
S3_KEY = "VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
def _fetch_data():
os.makedirs(_SAMPLE_DIR, exist_ok=True)
uri = [
(SAMPLE_WAV_URL, SAMPLE_WAV_PATH),
(SAMPLE_MP3_URL, SAMPLE_MP3_PATH),
(SAMPLE_GSM_URL, SAMPLE_GSM_PATH),
(SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
(SAMPLE_TAR_URL, SAMPLE_TAR_PATH),
]
for url, path in uri:
with open(path, "wb") as file_:
file_.write(requests.get(url).content)
_fetch_data()
def print_stats(waveform, sample_rate=None, src=None):
if src:
print("-" * 10)
print("Source:", src)
print("-" * 10)
if sample_rate:
print("Sample Rate:", sample_rate)
print("Shape:", tuple(waveform.shape))
print("Dtype:", waveform.dtype)
print(f" - Max: {waveform.max().item():6.3f}")
print(f" - Min: {waveform.min().item():6.3f}")
print(f" - Mean: {waveform.mean().item():6.3f}")
print(f" - Std Dev: {waveform.std().item():6.3f}")
print()
print(waveform)
print()
def plot_waveform(waveform, sample_rate, title="Waveform", xlim=None, ylim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) / sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].plot(time_axis, waveform[c], linewidth=1)
axes[c].grid(True)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
if ylim:
axes[c].set_ylim(ylim)
figure.suptitle(title)
plt.show(block=False)
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
        raise ValueError("Waveforms with more than 2 channels are not supported.")
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_PATH, resample=resample)
def inspect_file(path):
print("-" * 10)
print("Source:", path)
print("-" * 10)
print(f" - File size: {os.path.getsize(path)} bytes")
print(f" - {torchaudio.info(path)}")
######################################################################
# Querying audio metadata
# -----------------------
#
# Function ``torchaudio.info`` fetches audio metadata. You can provide
# a path-like object or file-like object.
#
metadata = torchaudio.info(SAMPLE_WAV_PATH)
print(metadata)
######################################################################
# Where
#
# - ``sample_rate`` is the sampling rate of the audio
# - ``num_channels`` is the number of channels
# - ``num_frames`` is the number of frames per channel
# - ``bits_per_sample`` is bit depth
# - ``encoding`` is the sample coding format
#
# ``encoding`` can take on one of the following values:
#
# - ``"PCM_S"``: Signed integer linear PCM
# - ``"PCM_U"``: Unsigned integer linear PCM
# - ``"PCM_F"``: Floating point linear PCM
# - ``"FLAC"``: Flac, `Free Lossless Audio
# Codec <https://xiph.org/flac/>`__
# - ``"ULAW"``: Mu-law,
# [`wikipedia <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`__]
# - ``"ALAW"``: A-law
# [`wikipedia <https://en.wikipedia.org/wiki/A-law_algorithm>`__]
# - ``"MP3"`` : MP3, MPEG-1 Audio Layer III
# - ``"VORBIS"``: OGG Vorbis [`xiph.org <https://xiph.org/vorbis/>`__]
# - ``"AMR_NB"``: Adaptive Multi-Rate
# [`wikipedia <https://en.wikipedia.org/wiki/Adaptive_Multi-Rate_audio_codec>`__]
# - ``"AMR_WB"``: Adaptive Multi-Rate Wideband
# [`wikipedia <https://en.wikipedia.org/wiki/Adaptive_Multi-Rate_Wideband>`__]
# - ``"OPUS"``: Opus [`opus-codec.org <https://opus-codec.org/>`__]
# - ``"GSM"``: GSM-FR
# [`wikipedia <https://en.wikipedia.org/wiki/Full_Rate>`__]
# - ``"UNKNOWN"``: None of the above
#
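######################################################################
# The individual fields can also be read as attributes of the returned
# ``AudioMetaData`` object; a brief sketch:
#
print("sample_rate:", metadata.sample_rate)
print("num_channels:", metadata.num_channels)
print("num_frames:", metadata.num_frames)
print("bits_per_sample:", metadata.bits_per_sample)
print("encoding:", metadata.encoding)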
######################################################################
# **Note**
#
# - ``bits_per_sample`` can be ``0`` for formats with compression and/or
# variable bit rate (such as MP3).
# - ``num_frames`` can be ``0`` for GSM-FR format.
#
metadata = torchaudio.info(SAMPLE_MP3_PATH)
print(metadata)
metadata = torchaudio.info(SAMPLE_GSM_PATH)
print(metadata)
######################################################################
# Querying file-like object
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# ``info`` works on file-like objects.
#
print("Source:", SAMPLE_WAV_URL)
with requests.get(SAMPLE_WAV_URL, stream=True) as response:
metadata = torchaudio.info(response.raw)
print(metadata)
######################################################################
# **Note** When passing a file-like object, ``info`` does not read
# all of the underlying data; rather, it reads only a portion
# of the data from the beginning.
# Therefore, for a given audio format, it may not be able to retrieve the
# correct metadata, including the format itself.
# The following example illustrates this.
#
# - Use argument ``format`` to specify the audio format of the input.
# - The returned metadata has ``num_frames = 0``
#
print("Source:", SAMPLE_MP3_URL)
with requests.get(SAMPLE_MP3_URL, stream=True) as response:
metadata = torchaudio.info(response.raw, format="mp3")
print(f"Fetched {response.raw.tell()} bytes.")
print(metadata)
######################################################################
# Loading audio data into Tensor
# ------------------------------
#
# To load audio data, you can use ``torchaudio.load``.
#
# This function accepts a path-like object or file-like object as input.
#
# The returned value is a tuple of waveform (``Tensor``) and sample rate
# (``int``).
#
# By default, the resulting tensor object has ``dtype=torch.float32`` and
# its value range is normalized within ``[-1.0, 1.0]``.
#
# For the list of supported formats, please refer to `the torchaudio
# documentation <https://pytorch.org/audio>`__.
#
waveform, sample_rate = torchaudio.load(SAMPLE_WAV_SPEECH_PATH)
print_stats(waveform, sample_rate=sample_rate)
plot_waveform(waveform, sample_rate)
plot_specgram(waveform, sample_rate)
play_audio(waveform, sample_rate)
######################################################################
# Loading from file-like object
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# ``torchaudio``\ ’s I/O functions now support file-like objects. This
# allows for fetching and decoding audio data from locations
# within and beyond the local file system.
# The following examples illustrate this.
#
# Load audio data as HTTP request
with requests.get(SAMPLE_WAV_SPEECH_URL, stream=True) as response:
waveform, sample_rate = torchaudio.load(response.raw)
plot_specgram(waveform, sample_rate, title="HTTP datasource")
# Load audio from tar file
with tarfile.open(SAMPLE_TAR_PATH, mode="r") as tarfile_:
fileobj = tarfile_.extractfile(SAMPLE_TAR_ITEM)
waveform, sample_rate = torchaudio.load(fileobj)
plot_specgram(waveform, sample_rate, title="TAR file")
# Load audio from S3
client = boto3.client("s3", config=Config(signature_version=UNSIGNED))
response = client.get_object(Bucket=S3_BUCKET, Key=S3_KEY)
waveform, sample_rate = torchaudio.load(response["Body"])
plot_specgram(waveform, sample_rate, title="From S3")
######################################################################
# Tips on slicing
# ~~~~~~~~~~~~~~~
#
# Providing ``num_frames`` and ``frame_offset`` arguments restricts
# decoding to the corresponding segment of the input.
#
# The same result can be achieved using vanilla Tensor slicing,
# (i.e. ``waveform[:, frame_offset:frame_offset+num_frames]``). However,
# providing ``num_frames`` and ``frame_offset`` arguments is more
# efficient.
#
# This is because the function ends data acquisition and decoding once
# it finishes decoding the requested frames. This is advantageous when
# the audio data are transferred over a network, as the data transfer
# stops as soon as the necessary amount of data is fetched.
#
# The following example illustrates this.
#
# Illustration of two different decoding methods.
# The first one will fetch all the data and decode them, while
# the second one will stop fetching data once it completes decoding.
# The resulting waveforms are identical.
frame_offset, num_frames = 16000, 16000  # Fetch and decode frames from 1 to 2 seconds
print("Fetching all the data...")
with requests.get(SAMPLE_WAV_SPEECH_URL, stream=True) as response:
waveform1, sample_rate1 = torchaudio.load(response.raw)
waveform1 = waveform1[:, frame_offset: frame_offset + num_frames]
print(f" - Fetched {response.raw.tell()} bytes")
print("Fetching until the requested frames are available...")
with requests.get(SAMPLE_WAV_SPEECH_URL, stream=True) as response:
waveform2, sample_rate2 = torchaudio.load(
response.raw, frame_offset=frame_offset, num_frames=num_frames
)
print(f" - Fetched {response.raw.tell()} bytes")
print("Checking the resulting waveform ... ", end="")
assert (waveform1 == waveform2).all()
print("matched!")
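######################################################################
# The same ``frame_offset`` / ``num_frames`` arguments work for local
# files as well. A brief sketch using the local copy downloaded earlier
# (both slices come from the same file, so they should match):
#
waveform3, _ = torchaudio.load(
    SAMPLE_WAV_SPEECH_PATH, frame_offset=frame_offset, num_frames=num_frames
)
assert (waveform2 == waveform3).all()
print("Local slice matches the streamed slice.")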
######################################################################
# Saving audio to file
# --------------------
#
# To save audio data in formats interpretable by common applications,
# you can use ``torchaudio.save``.
#
# This function accepts a path-like object or file-like object.
#
# When passing a file-like object, you also need to provide argument ``format``
# so that the function knows which format it should use. In the
# case of a path-like object, the function will infer the format from
# the extension. If you are saving to a file without an extension, you need
# to provide argument ``format``.
#
# When saving WAV-formatted data, the default encoding for ``float32`` Tensor
# is 32-bit floating-point PCM. You can provide arguments ``encoding`` and
# ``bits_per_sample`` to change this behavior. For example, to save data
# in 16-bit signed integer PCM, you can do the following.
#
# **Note** Saving data in encodings with lower bit depth reduces the
# resulting file size but also the precision.
#
waveform, sample_rate = get_sample()
print_stats(waveform, sample_rate=sample_rate)
# Save without any encoding option.
# The function will pick up the encoding that fits the provided data.
path = f"{_SAMPLE_DIR}/save_example_default.wav"
torchaudio.save(path, waveform, sample_rate)
inspect_file(path)
# Save as 16-bit signed integer Linear PCM
# The resulting file occupies half the storage but loses precision
path = f"{_SAMPLE_DIR}/save_example_PCM_S16.wav"
torchaudio.save(path, waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)
inspect_file(path)
######################################################################
# ``torchaudio.save`` can also handle other formats. To name a few:
#
waveform, sample_rate = get_sample(resample=8000)
formats = [
"mp3",
"flac",
"vorbis",
"sph",
"amb",
"amr-nb",
"gsm",
]
for format in formats:
path = f"{_SAMPLE_DIR}/save_example.{format}"
torchaudio.save(path, waveform, sample_rate, format=format)
inspect_file(path)
######################################################################
# Saving to file-like object
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Similar to the other I/O functions, you can save audio to file-like
# objects. When saving to a file-like object, argument ``format`` is
# required.
#
waveform, sample_rate = get_sample()
# Saving to bytes buffer
buffer_ = io.BytesIO()
torchaudio.save(buffer_, waveform, sample_rate, format="wav")
buffer_.seek(0)
print(buffer_.read(16))
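######################################################################
# The buffer can be read back with ``torchaudio.load``; a quick sketch
# verifying the round trip:
#
buffer_.seek(0)
reloaded, reloaded_sample_rate = torchaudio.load(buffer_)
print(reloaded.shape, reloaded_sample_rate)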
|
"""
Forced Alignment with Wav2Vec2
==============================
**Author** `Moto Hira <[email protected]>`__
This tutorial shows how to align a transcript to speech with
``torchaudio``, using the CTC segmentation algorithm described in
`CTC-Segmentation of Large Corpora for German End-to-end Speech
Recognition <https://arxiv.org/abs/2007.09127>`__.
"""
######################################################################
# Overview
# --------
#
# The process of alignment looks like the following.
#
# 1. Estimate the frame-wise label probability from audio waveform
# 2. Generate the trellis matrix which represents the probability of
#    labels aligned at each time step.
# 3. Find the most likely path from the trellis matrix.
#
# In this example, we use ``torchaudio``\ ’s ``Wav2Vec2`` model for
# acoustic feature extraction.
#
######################################################################
# Preparation
# -----------
#
# First we import the necessary packages, and fetch data that we work on.
#
# %matplotlib inline
import os
from dataclasses import dataclass
import torch
import torchaudio
import requests
import matplotlib
import matplotlib.pyplot as plt
import IPython
matplotlib.rcParams["figure.figsize"] = [16.0, 4.8]
torch.random.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(torch.__version__)
print(torchaudio.__version__)
print(device)
SPEECH_URL = "https://download.pytorch.org/torchaudio/tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
SPEECH_FILE = "_assets/speech.wav"
if not os.path.exists(SPEECH_FILE):
os.makedirs("_assets", exist_ok=True)
with open(SPEECH_FILE, "wb") as file:
file.write(requests.get(SPEECH_URL).content)
######################################################################
# Generate frame-wise label probability
# -------------------------------------
#
# The first step is to generate the label class probability of each
# audio frame. We can use a Wav2Vec2 model that is trained for ASR. Here we use
# :py:func:`torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H`.
#
# ``torchaudio`` provides easy access to pretrained models with associated
# labels.
#
# .. note::
#
# In the subsequent sections, we will compute the probability in
# log-domain to avoid numerical instability. For this purpose, we
# normalize the ``emission`` with :py:func:`torch.log_softmax`.
#
bundle = torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H
model = bundle.get_model().to(device)
labels = bundle.get_labels()
with torch.inference_mode():
waveform, _ = torchaudio.load(SPEECH_FILE)
emissions, _ = model(waveform.to(device))
emissions = torch.log_softmax(emissions, dim=-1)
emission = emissions[0].cpu().detach()
################################################################################
# Visualization
################################################################################
print(labels)
plt.imshow(emission.T)
plt.colorbar()
plt.title("Frame-wise class probability")
plt.xlabel("Time")
plt.ylabel("Labels")
plt.show()
######################################################################
# Generate alignment probability (trellis)
# ----------------------------------------
#
# From the emission matrix, next we generate the trellis, which
# represents the probability of each transcript label occurring at each
# time frame.
#
# The trellis is a 2D matrix with a time axis and a label axis. The
# label axis represents the transcript that we are aligning. In the
# following, we use :math:`t` to denote the index in the time axis and
# :math:`j` to denote the index in the label axis. :math:`c_j`
# represents the label at label index :math:`j`.
#
# To generate the probability of time step :math:`t+1`, we look at the
# trellis from time step :math:`t` and the emission at time step
# :math:`t+1`. There are two paths to reach time step :math:`t+1` with
# label :math:`c_{j+1}`. The first one is the case where the label was
# :math:`c_{j+1}` at :math:`t` and there was no label change from
# :math:`t` to :math:`t+1`. The other case is where the label was
# :math:`c_j` at :math:`t` and it transitioned to the next label
# :math:`c_{j+1}` at :math:`t+1`.
#
# The following diagram illustrates this transition.
#
# .. image:: https://download.pytorch.org/torchaudio/tutorial-assets/ctc-forward.png
#
# Since we are looking for the most likely transitions, we take the more
# likely path for the value of :math:`k_{(t+1, j+1)}`, that is
#
# :math:`k_{(t+1, j+1)} = \max\big( k_{(t, j)} p(t+1, c_{j+1}), \; k_{(t, j+1)} p(t+1, repeat) \big)`
#
# where :math:`k` represents the trellis matrix, and :math:`p(t, c_j)`
# represents the probability of label :math:`c_j` at time step :math:`t`.
# :math:`repeat` represents the blank token from the CTC formulation.
# (For the details of the CTC algorithm, please refer to *Sequence
# Modeling with CTC* [`distill.pub <https://distill.pub/2017/ctc/>`__].)
#
transcript = "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT"
dictionary = {c: i for i, c in enumerate(labels)}
tokens = [dictionary[c] for c in transcript]
print(list(zip(transcript, tokens)))
def get_trellis(emission, tokens, blank_id=0):
num_frame = emission.size(0)
num_tokens = len(tokens)
    # Trellis has extra dimensions for both time axis and tokens.
# The extra dim for tokens represents <SoS> (start-of-sentence)
# The extra dim for time axis is for simplification of the code.
trellis = torch.full((num_frame + 1, num_tokens + 1), -float("inf"))
trellis[:, 0] = 0
for t in range(num_frame):
trellis[t + 1, 1:] = torch.maximum(
# Score for staying at the same token
trellis[t, 1:] + emission[t, blank_id],
# Score for changing to the next token
trellis[t, :-1] + emission[t, tokens],
)
return trellis
trellis = get_trellis(emission, tokens)
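######################################################################
# A quick sanity check on the trellis shape: one extra row for the time
# axis and one extra column for the start-of-sentence position, as
# described above.
#
print("Trellis shape:", tuple(trellis.shape))
print("(num_frames + 1, num_tokens + 1):", (emission.size(0) + 1, len(tokens) + 1))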
################################################################################
# Visualization
################################################################################
plt.imshow(trellis[1:, 1:].T, origin="lower")
plt.annotate("- Inf", (trellis.size(1) / 5, trellis.size(1) / 1.5))
plt.colorbar()
plt.show()
######################################################################
# In the above visualization, we can see that there is a trace of high
# probability crossing the matrix diagonally.
#
######################################################################
# Find the most likely path (backtracking)
# ----------------------------------------
#
# Once the trellis is generated, we will traverse it following the
# elements with high probability.
#
# We start from the last label index, at the time step with the highest
# probability; then, we traverse back in time, picking stay
# (:math:`c_j \rightarrow c_j`) or transition
# (:math:`c_j \rightarrow c_{j+1}`), based on the post-transition
# probability :math:`k_{t, j} p(t+1, c_{j+1})` or
# :math:`k_{t, j+1} p(t+1, repeat)`.
#
# The traversal is done once the label index reaches the beginning.
#
# The trellis matrix is used for path-finding, but for the final
# probability of each segment, we take the frame-wise probability from
# the emission matrix.
#
@dataclass
class Point:
token_index: int
time_index: int
score: float
def backtrack(trellis, emission, tokens, blank_id=0):
# Note:
# j and t are indices for trellis, which has extra dimensions
# for time and tokens at the beginning.
# When referring to time frame index `T` in trellis,
# the corresponding index in emission is `T-1`.
# Similarly, when referring to token index `J` in trellis,
# the corresponding index in transcript is `J-1`.
j = trellis.size(1) - 1
t_start = torch.argmax(trellis[:, j]).item()
path = []
for t in range(t_start, 0, -1):
# 1. Figure out if the current position was stay or change
# Note (again):
# `emission[J-1]` is the emission at time frame `J` of trellis dimension.
        # Score for staying at the same token from time frame t-1 to t.
stayed = trellis[t - 1, j] + emission[t - 1, blank_id]
        # Score for changing from token j-1 to token j at time frame t.
changed = trellis[t - 1, j - 1] + emission[t - 1, tokens[j - 1]]
# 2. Store the path with frame-wise probability.
prob = emission[t - 1, tokens[j - 1] if changed > stayed else 0].exp().item()
# Return token index and time index in non-trellis coordinate.
path.append(Point(j - 1, t - 1, prob))
# 3. Update the token
if changed > stayed:
j -= 1
if j == 0:
break
else:
raise ValueError("Failed to align")
return path[::-1]
path = backtrack(trellis, emission, tokens)
print(path)
################################################################################
# Visualization
################################################################################
def plot_trellis_with_path(trellis, path):
# To plot trellis with path, we take advantage of 'nan' value
trellis_with_path = trellis.clone()
for _, p in enumerate(path):
trellis_with_path[p.time_index, p.token_index] = float("nan")
plt.imshow(trellis_with_path[1:, 1:].T, origin="lower")
plot_trellis_with_path(trellis, path)
plt.title("The path found by backtracking")
plt.show()
######################################################################
# Looking good. Now this path contains repetitions of the same labels, so
# let’s merge them to bring it closer to the original transcript.
#
# When merging multiple path points, we simply take the average
# probability for the merged segments.
#
# Merge the labels
@dataclass
class Segment:
label: str
start: int
end: int
score: float
def __repr__(self):
return f"{self.label}\t({self.score:4.2f}): [{self.start:5d}, {self.end:5d})"
@property
def length(self):
return self.end - self.start
def merge_repeats(path):
i1, i2 = 0, 0
segments = []
while i1 < len(path):
while i2 < len(path) and path[i1].token_index == path[i2].token_index:
i2 += 1
score = sum(path[k].score for k in range(i1, i2)) / (i2 - i1)
segments.append(
Segment(
transcript[path[i1].token_index],
path[i1].time_index,
path[i2 - 1].time_index + 1,
score,
)
)
i1 = i2
return segments
segments = merge_repeats(path)
for seg in segments:
print(seg)
################################################################################
# Visualization
################################################################################
def plot_trellis_with_segments(trellis, segments, transcript):
# To plot trellis with path, we take advantage of 'nan' value
trellis_with_path = trellis.clone()
for i, seg in enumerate(segments):
if seg.label != "|":
trellis_with_path[seg.start + 1: seg.end + 1, i + 1] = float("nan")
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(16, 9.5))
ax1.set_title("Path, label and probability for each label")
ax1.imshow(trellis_with_path.T, origin="lower")
ax1.set_xticks([])
for i, seg in enumerate(segments):
if seg.label != "|":
ax1.annotate(seg.label, (seg.start + 0.7, i + 0.3), weight="bold")
ax1.annotate(f"{seg.score:.2f}", (seg.start - 0.3, i + 4.3))
    ax2.set_title("Label probability with and without repetition")
xs, hs, ws = [], [], []
for seg in segments:
if seg.label != "|":
xs.append((seg.end + seg.start) / 2 + 0.4)
hs.append(seg.score)
ws.append(seg.end - seg.start)
ax2.annotate(seg.label, (seg.start + 0.8, -0.07), weight="bold")
ax2.bar(xs, hs, width=ws, color="gray", alpha=0.5, edgecolor="black")
xs, hs = [], []
for p in path:
label = transcript[p.token_index]
if label != "|":
xs.append(p.time_index + 1)
hs.append(p.score)
ax2.bar(xs, hs, width=0.5, alpha=0.5)
ax2.axhline(0, color="black")
ax2.set_xlim(ax1.get_xlim())
ax2.set_ylim(-0.1, 1.1)
plot_trellis_with_segments(trellis, segments, transcript)
plt.tight_layout()
plt.show()
######################################################################
# Looks good. Now let’s merge the words. The Wav2Vec2 model uses ``'|'``
# as the word boundary, so we merge the segments before each occurrence
# of ``'|'``.
#
# Then, finally, we segment the original audio into segmented audio and
# listen to them to see if the segmentation is correct.
#
# Merge words
def merge_words(segments, separator="|"):
words = []
i1, i2 = 0, 0
while i1 < len(segments):
if i2 >= len(segments) or segments[i2].label == separator:
if i1 != i2:
segs = segments[i1:i2]
word = "".join([seg.label for seg in segs])
score = sum(seg.score * seg.length for seg in segs) / sum(
seg.length for seg in segs
)
words.append(
Segment(word, segments[i1].start, segments[i2 - 1].end, score)
)
i1 = i2 + 1
i2 = i1
else:
i2 += 1
return words
word_segments = merge_words(segments)
for word in word_segments:
print(word)
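######################################################################
# The segment boundaries above are expressed in trellis frames. As a
# small sketch, they can be converted to seconds using the same
# frame-to-sample ratio as the plotting code below:
#
ratio = waveform.size(1) / (trellis.size(0) - 1)
for word in word_segments:
    start = word.start * ratio / bundle.sample_rate
    end = word.end * ratio / bundle.sample_rate
    print(f"{word.label}\t{start:.3f} - {end:.3f} sec")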
################################################################################
# Visualization
################################################################################
def plot_alignments(trellis, segments, word_segments, waveform):
trellis_with_path = trellis.clone()
for i, seg in enumerate(segments):
if seg.label != "|":
trellis_with_path[seg.start + 1: seg.end + 1, i + 1] = float("nan")
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(16, 9.5))
ax1.imshow(trellis_with_path[1:, 1:].T, origin="lower")
ax1.set_xticks([])
ax1.set_yticks([])
for word in word_segments:
ax1.axvline(word.start - 0.5)
ax1.axvline(word.end - 0.5)
for i, seg in enumerate(segments):
if seg.label != "|":
ax1.annotate(seg.label, (seg.start, i + 0.3))
ax1.annotate(f"{seg.score:.2f}", (seg.start, i + 4), fontsize=8)
# The original waveform
ratio = waveform.size(0) / (trellis.size(0) - 1)
ax2.plot(waveform)
for word in word_segments:
x0 = ratio * word.start
x1 = ratio * word.end
ax2.axvspan(x0, x1, alpha=0.1, color="red")
ax2.annotate(f"{word.score:.2f}", (x0, 0.8))
for seg in segments:
if seg.label != "|":
ax2.annotate(seg.label, (seg.start * ratio, 0.9))
xticks = ax2.get_xticks()
plt.xticks(xticks, xticks / bundle.sample_rate)
ax2.set_xlabel("time [second]")
ax2.set_yticks([])
ax2.set_ylim(-1.0, 1.0)
ax2.set_xlim(0, waveform.size(-1))
plot_alignments(
trellis,
segments,
word_segments,
waveform[0],
)
plt.show()
# A trick to embed the resulting audio in the generated file.
# `IPython.display.Audio` has to be the last call in a cell,
# and there should be only one call per cell.
def display_segment(i):
ratio = waveform.size(1) / (trellis.size(0) - 1)
word = word_segments[i]
x0 = int(ratio * word.start)
x1 = int(ratio * word.end)
filename = f"_assets/{i}_{word.label}.wav"
torchaudio.save(filename, waveform[:, x0:x1], bundle.sample_rate)
print(
f"{word.label} ({word.score:.2f}): {x0 / bundle.sample_rate:.3f} - {x1 / bundle.sample_rate:.3f} sec"
)
return IPython.display.Audio(filename)
######################################################################
#
# Generate the audio for each segment
print(transcript)
IPython.display.Audio(SPEECH_FILE)
######################################################################
#
display_segment(0)
######################################################################
#
display_segment(1)
######################################################################
#
display_segment(2)
######################################################################
#
display_segment(3)
######################################################################
#
display_segment(4)
######################################################################
#
display_segment(5)
######################################################################
#
display_segment(6)
######################################################################
#
display_segment(7)
######################################################################
#
display_segment(8)
######################################################################
# Conclusion
# ----------
#
# In this tutorial, we looked at how to use torchaudio’s Wav2Vec2 model to
# perform CTC segmentation for forced alignment.
#
|
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import os
import requests
import librosa
import matplotlib.pyplot as plt
_SAMPLE_DIR = "_assets"
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" # noqa: E501
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")
os.makedirs(_SAMPLE_DIR, exist_ok=True)
def _fetch_data():
uri = [
(SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
]
for url, path in uri:
with open(path, "wb") as file_:
file_.write(requests.get(url).content)
_fetch_data()
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def get_spectrogram(
n_fft=400,
win_len=None,
hop_len=None,
power=2.0,
):
waveform, _ = get_speech_sample()
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_len,
hop_length=hop_len,
center=True,
pad_mode="reflect",
power=power,
)
return spectrogram(waveform)
def plot_spectrogram(spec, title=None, ylabel="freq_bin", aspect="auto", xmax=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or "Spectrogram (db)")
axs.set_ylabel(ylabel)
axs.set_xlabel("frame")
im = axs.imshow(librosa.power_to_db(spec), origin="lower", aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
plt.show(block=False)
######################################################################
# SpecAugment
# -----------
#
# `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__
# is a popular spectrogram augmentation technique.
#
# ``torchaudio`` implements ``TimeStretch``, ``TimeMasking`` and
# ``FrequencyMasking``.
#
# TimeStretch
# ~~~~~~~~~~~
#
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()
rate = 1.2
spec_ = stretch(spec, rate)
plot_spectrogram(
torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304
)
plot_spectrogram(torch.abs(spec[0]), title="Original", aspect="equal", xmax=304)
rate = 0.9
spec_ = stretch(spec, rate)
plot_spectrogram(
torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304
)
######################################################################
# TimeMasking
# ~~~~~~~~~~~
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.TimeMasking(time_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along time axis")
######################################################################
# FrequencyMasking
# ~~~~~~~~~~~~~~~~
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.FrequencyMasking(freq_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along frequency axis")
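######################################################################
# In practice, SpecAugment applies several masks together. As a brief
# sketch (the mask parameters below are illustrative, not taken from a
# reference recipe), time and frequency masking can be composed with
# ``torch.nn.Sequential``:
#
torch.random.manual_seed(4)
spec = get_spectrogram()
augment = torch.nn.Sequential(
    T.FrequencyMasking(freq_mask_param=30),
    T.TimeMasking(time_mask_param=50),
)
plot_spectrogram(augment(spec)[0], title="Masked along both axes")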
|
"""
Text-to-Speech with Tacotron2
=============================
**Author** `Yao-Yuan Yang <https://github.com/yangarbiter>`__,
`Moto Hira <[email protected]>`__
"""
######################################################################
# Overview
# --------
#
# This tutorial shows how to build a text-to-speech pipeline, using the
# pretrained Tacotron2 model in torchaudio.
#
# The text-to-speech pipeline goes as follows:
#
# 1. Text preprocessing
#
# First, the input text is encoded into a list of symbols. In this
# tutorial, we will use English characters and phonemes as the symbols.
#
# 2. Spectrogram generation
#
# From the encoded text, a spectrogram is generated. We use ``Tacotron2``
# model for this.
#
# 3. Time-domain conversion
#
#    The last step is converting the spectrogram into a waveform. The
#    model that generates speech from a spectrogram is called a vocoder.
#    In this tutorial, three different vocoders are used,
# `WaveRNN <https://pytorch.org/audio/stable/models/wavernn.html>`__,
# `Griffin-Lim <https://pytorch.org/audio/stable/transforms.html#griffinlim>`__,
# and
# `Nvidia's WaveGlow <https://pytorch.org/hub/nvidia_deeplearningexamples_tacotron2/>`__.
#
#
# The following figure illustrates the whole process.
#
# .. image:: https://download.pytorch.org/torchaudio/tutorial-assets/tacotron2_tts_pipeline.png
#
# All the related components are bundled in :py:func:`torchaudio.pipelines.Tacotron2TTSBundle`,
# but this tutorial will also cover the process under the hood.
######################################################################
# Preparation
# -----------
#
# First, we install the necessary dependencies. In addition to
# ``torchaudio``, ``DeepPhonemizer`` is required to perform phoneme-based
# encoding.
#
# When running this example in notebook, install DeepPhonemizer
# !pip3 install deep_phonemizer
import torch
import torchaudio
import matplotlib
import matplotlib.pyplot as plt
import IPython
matplotlib.rcParams["figure.figsize"] = [16.0, 4.8]
torch.random.manual_seed(0)
device = "cuda" if torch.cuda.is_available() else "cpu"
print(torch.__version__)
print(torchaudio.__version__)
print(device)
######################################################################
# Text Processing
# ---------------
#
######################################################################
# Character-based encoding
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# In this section, we will go through how character-based encoding
# works.
#
# Since the pre-trained Tacotron2 model expects a specific set of symbol
# tables, the same functionality is available in ``torchaudio``. This
# section is mainly an explanation of the basis of encoding.
#
# Firstly, we define the set of symbols. For example, we can use
# ``'_-!\'(),.:;? abcdefghijklmnopqrstuvwxyz'``. Then, we will map
# each character of the input text into the index of the corresponding
# symbol in the table.
#
# The following is an example of such processing. In the example, symbols
# that are not in the table are ignored.
#
symbols = "_-!'(),.:;? abcdefghijklmnopqrstuvwxyz"
look_up = {s: i for i, s in enumerate(symbols)}
symbols = set(symbols)
def text_to_sequence(text):
text = text.lower()
return [look_up[s] for s in text if s in symbols]
text = "Hello world! Text to speech!"
print(text_to_sequence(text))
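######################################################################
# As a quick sanity check (``symbols_table`` below simply restates the
# table defined above), the indices can be mapped back to symbols. Note
# that the text is lower-cased and characters outside the table are
# dropped, so the round trip is not exact for arbitrary input.
#
symbols_table = "_-!'(),.:;? abcdefghijklmnopqrstuvwxyz"
print("".join(symbols_table[i] for i in text_to_sequence(text)))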
######################################################################
# As mentioned above, the symbol table and indices must match
# what the pretrained Tacotron2 model expects. ``torchaudio`` provides the
# transform along with the pretrained model. For example, you can
# instantiate and use such a transform as follows.
#
processor = torchaudio.pipelines.TACOTRON2_WAVERNN_CHAR_LJSPEECH.get_text_processor()
text = "Hello world! Text to speech!"
processed, lengths = processor(text)
print(processed)
print(lengths)
######################################################################
# The ``processor`` object takes either a text or a list of texts as
# input. When a list of texts is provided, the returned ``lengths``
# variable represents the valid length of each processed token sequence
# in the output batch.
#
# The intermediate representation can be retrieved as follows.
#
print([processor.tokens[i] for i in processed[0, : lengths[0]]])
######################################################################
# Phoneme-based encoding
# ~~~~~~~~~~~~~~~~~~~~~~
#
# Phoneme-based encoding is similar to character-based encoding, but it
# uses a symbol table based on phonemes and a G2P (Grapheme-to-Phoneme)
# model.
#
# The details of the G2P model are out of the scope of this tutorial; we
# will just look at what the conversion looks like.
#
# Similar to the case of character-based encoding, the encoding process is
# expected to match what a pretrained Tacotron2 model is trained on.
# ``torchaudio`` has an interface to create the process.
#
# The following code illustrates how to make and use the process. Behind
# the scenes, a G2P model is created using the ``DeepPhonemizer`` package,
# and the pretrained weights published by the author of ``DeepPhonemizer``
# are fetched.
#
bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH
processor = bundle.get_text_processor()
text = "Hello world! Text to speech!"
with torch.inference_mode():
processed, lengths = processor(text)
print(processed)
print(lengths)
######################################################################
# Notice that the encoded values are different from the example of
# character-based encoding.
#
# The intermediate representation looks like the following.
#
print([processor.tokens[i] for i in processed[0, : lengths[0]]])
######################################################################
# Spectrogram Generation
# ----------------------
#
# ``Tacotron2`` is the model we use to generate a spectrogram from the
# encoded text. For the details of the model, please refer to `the
# paper <https://arxiv.org/abs/1712.05884>`__.
#
# It is easy to instantiate a Tacotron2 model with pretrained weights;
# however, note that the input to Tacotron2 models needs to be processed
# by the matching text processor.
#
# :py:func:`torchaudio.pipelines.Tacotron2TTSBundle` bundles the matching
# models and processors together so that it is easy to create the pipeline.
#
# For the available bundles and their usage, please refer to :py:mod:`torchaudio.pipelines`.
#
bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH
processor = bundle.get_text_processor()
tacotron2 = bundle.get_tacotron2().to(device)
text = "Hello world! Text to speech!"
with torch.inference_mode():
processed, lengths = processor(text)
processed = processed.to(device)
lengths = lengths.to(device)
spec, _, _ = tacotron2.infer(processed, lengths)
plt.imshow(spec[0].cpu().detach())
######################################################################
# Note that the ``Tacotron2.infer`` method performs multinomial sampling;
# therefore, the process of generating the spectrogram incurs randomness.
#
fig, ax = plt.subplots(3, 1, figsize=(16, 4.3 * 3))
for i in range(3):
with torch.inference_mode():
spec, spec_lengths, _ = tacotron2.infer(processed, lengths)
print(spec[0].shape)
ax[i].imshow(spec[0].cpu().detach())
plt.show()
######################################################################
# Waveform Generation
# -------------------
#
# Once the spectrogram is generated, the last process is to recover the
# waveform from the spectrogram.
#
# ``torchaudio`` provides vocoders based on ``GriffinLim`` and
# ``WaveRNN``.
#
######################################################################
# WaveRNN
# ~~~~~~~
#
# Continuing from the previous section, we can instantiate the matching
# WaveRNN model from the same bundle.
#
bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH
processor = bundle.get_text_processor()
tacotron2 = bundle.get_tacotron2().to(device)
vocoder = bundle.get_vocoder().to(device)
text = "Hello world! Text to speech!"
with torch.inference_mode():
processed, lengths = processor(text)
processed = processed.to(device)
lengths = lengths.to(device)
spec, spec_lengths, _ = tacotron2.infer(processed, lengths)
waveforms, lengths = vocoder(spec, spec_lengths)
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(16, 9))
ax1.imshow(spec[0].cpu().detach())
ax2.plot(waveforms[0].cpu().detach())
torchaudio.save(
"_assets/output_wavernn.wav", waveforms[0:1].cpu(), sample_rate=vocoder.sample_rate
)
IPython.display.Audio("_assets/output_wavernn.wav")
######################################################################
# Griffin-Lim
# ~~~~~~~~~~~
#
# Using the Griffin-Lim vocoder is the same as using WaveRNN. You can
# instantiate the vocoder object with the ``get_vocoder`` method and pass
# the spectrogram.
#
bundle = torchaudio.pipelines.TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH
processor = bundle.get_text_processor()
tacotron2 = bundle.get_tacotron2().to(device)
vocoder = bundle.get_vocoder().to(device)
with torch.inference_mode():
processed, lengths = processor(text)
processed = processed.to(device)
lengths = lengths.to(device)
spec, spec_lengths, _ = tacotron2.infer(processed, lengths)
waveforms, lengths = vocoder(spec, spec_lengths)
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(16, 9))
ax1.imshow(spec[0].cpu().detach())
ax2.plot(waveforms[0].cpu().detach())
torchaudio.save(
"_assets/output_griffinlim.wav",
waveforms[0:1].cpu(),
sample_rate=vocoder.sample_rate,
)
IPython.display.Audio("_assets/output_griffinlim.wav")
######################################################################
# Waveglow
# ~~~~~~~~
#
# WaveGlow is a vocoder published by Nvidia. The pretrained weights are
# published on Torch Hub. One can instantiate the model using the
# ``torch.hub`` module.
#
# Workaround to load model mapped on GPU
# https://stackoverflow.com/a/61840832
waveglow = torch.hub.load(
"NVIDIA/DeepLearningExamples:torchhub",
"nvidia_waveglow",
model_math="fp32",
pretrained=False,
)
checkpoint = torch.hub.load_state_dict_from_url(
"https://api.ngc.nvidia.com/v2/models/nvidia/waveglowpyt_fp32/versions/1/files/nvidia_waveglowpyt_fp32_20190306.pth", # noqa: E501
progress=False,
map_location=device,
)
state_dict = {
key.replace("module.", ""): value for key, value in checkpoint["state_dict"].items()
}
waveglow.load_state_dict(state_dict)
waveglow = waveglow.remove_weightnorm(waveglow)
waveglow = waveglow.to(device)
waveglow.eval()
with torch.no_grad():
waveforms = waveglow.infer(spec)
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(16, 9))
ax1.imshow(spec[0].cpu().detach())
ax2.plot(waveforms[0].cpu().detach())
torchaudio.save("_assets/output_waveglow.wav", waveforms[0:1].cpu(), sample_rate=22050)
IPython.display.Audio("_assets/output_waveglow.wav")
|
# -*- coding: utf-8 -*-
"""
Audio Feature Extractions
=========================
``torchaudio`` implements feature extractions commonly used in the audio
domain. They are available in ``torchaudio.functional`` and
``torchaudio.transforms``.
``functional`` implements features as standalone functions.
They are stateless.
``transforms`` implements features as objects,
using implementations from ``functional`` and ``torch.nn.Module``. Because all
transforms are subclasses of ``torch.nn.Module``, they can be serialized
using TorchScript.
For the complete list of available features, please refer to the
documentation. In this tutorial, we will look into converting between the
time domain and frequency domain (``Spectrogram``, ``GriffinLim``,
``MelSpectrogram``).
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.functional as F
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import os
import requests
import librosa
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_assets"
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" # noqa: E501
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")
os.makedirs(_SAMPLE_DIR, exist_ok=True)
def _fetch_data():
uri = [
(SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
]
for url, path in uri:
with open(path, "wb") as file_:
file_.write(requests.get(url).content)
_fetch_data()
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def print_stats(waveform, sample_rate=None, src=None):
if src:
print("-" * 10)
print("Source:", src)
print("-" * 10)
if sample_rate:
print("Sample Rate:", sample_rate)
print("Shape:", tuple(waveform.shape))
print("Dtype:", waveform.dtype)
print(f" - Max: {waveform.max().item():6.3f}")
print(f" - Min: {waveform.min().item():6.3f}")
print(f" - Mean: {waveform.mean().item():6.3f}")
print(f" - Std Dev: {waveform.std().item():6.3f}")
print()
print(waveform)
print()
def plot_spectrogram(spec, title=None, ylabel="freq_bin", aspect="auto", xmax=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or "Spectrogram (db)")
axs.set_ylabel(ylabel)
axs.set_xlabel("frame")
im = axs.imshow(librosa.power_to_db(spec), origin="lower", aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
plt.show(block=False)
def plot_waveform(waveform, sample_rate, title="Waveform", xlim=None, ylim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) / sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].plot(time_axis, waveform[c], linewidth=1)
axes[c].grid(True)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
if ylim:
axes[c].set_ylim(ylim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
        raise ValueError("Waveforms with more than 2 channels are not supported.")
def plot_mel_fbank(fbank, title=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or "Filter bank")
axs.imshow(fbank, aspect="auto")
axs.set_ylabel("frequency bin")
axs.set_xlabel("mel bin")
plt.show(block=False)
def plot_pitch(waveform, sample_rate, pitch):
figure, axis = plt.subplots(1, 1)
axis.set_title("Pitch Feature")
axis.grid(True)
end_time = waveform.shape[1] / sample_rate
time_axis = torch.linspace(0, end_time, waveform.shape[1])
axis.plot(time_axis, waveform[0], linewidth=1, color="gray", alpha=0.3)
axis2 = axis.twinx()
time_axis = torch.linspace(0, end_time, pitch.shape[1])
axis2.plot(time_axis, pitch[0], linewidth=2, label="Pitch", color="green")
axis2.legend(loc=0)
plt.show(block=False)
def plot_kaldi_pitch(waveform, sample_rate, pitch, nfcc):
figure, axis = plt.subplots(1, 1)
axis.set_title("Kaldi Pitch Feature")
axis.grid(True)
end_time = waveform.shape[1] / sample_rate
time_axis = torch.linspace(0, end_time, waveform.shape[1])
axis.plot(time_axis, waveform[0], linewidth=1, color="gray", alpha=0.3)
time_axis = torch.linspace(0, end_time, pitch.shape[1])
ln1 = axis.plot(time_axis, pitch[0], linewidth=2, label="Pitch", color="green")
axis.set_ylim((-1.3, 1.3))
axis2 = axis.twinx()
time_axis = torch.linspace(0, end_time, nfcc.shape[1])
ln2 = axis2.plot(
time_axis, nfcc[0], linewidth=2, label="NFCC", color="blue", linestyle="--"
)
lns = ln1 + ln2
labels = [l.get_label() for l in lns]
axis.legend(lns, labels, loc=0)
plt.show(block=False)
######################################################################
# Spectrogram
# -----------
#
# To get the frequency make-up of an audio signal as it varies with time,
# you can use ``Spectrogram``.
#
waveform, sample_rate = get_speech_sample()
n_fft = 1024
win_length = None
hop_length = 512
# define transformation
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
center=True,
pad_mode="reflect",
power=2.0,
)
# Perform transformation
spec = spectrogram(waveform)
print_stats(spec)
plot_spectrogram(spec[0], title="torchaudio")
######################################################################
# GriffinLim
# ----------
#
# To recover a waveform from a spectrogram, you can use ``GriffinLim``.
#
torch.random.manual_seed(0)
waveform, sample_rate = get_speech_sample()
plot_waveform(waveform, sample_rate, title="Original")
play_audio(waveform, sample_rate)
n_fft = 1024
win_length = None
hop_length = 512
spec = T.Spectrogram(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
)(waveform)
griffin_lim = T.GriffinLim(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
)
waveform = griffin_lim(spec)
plot_waveform(waveform, sample_rate, title="Reconstructed")
play_audio(waveform, sample_rate)
######################################################################
# Mel Filter Bank
# ---------------
#
# ``torchaudio.functional.melscale_fbanks`` generates the filter bank
# for converting frequency bins to mel-scale bins.
#
# Since this function does not require input audio/features, there is no
# equivalent transform in ``torchaudio.transforms``.
#
n_fft = 256
n_mels = 64
sample_rate = 6000
mel_filters = F.melscale_fbanks(
int(n_fft // 2 + 1),
n_mels=n_mels,
f_min=0.0,
f_max=sample_rate / 2.0,
sample_rate=sample_rate,
norm="slaney",
)
plot_mel_fbank(mel_filters, "Mel Filter Bank - torchaudio")
######################################################################
# Comparison against librosa
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# For reference, here is the equivalent way to get the mel filter bank
# with ``librosa``.
#
mel_filters_librosa = librosa.filters.mel(
sample_rate,
n_fft,
n_mels=n_mels,
fmin=0.0,
fmax=sample_rate / 2.0,
norm="slaney",
htk=True,
).T
plot_mel_fbank(mel_filters_librosa, "Mel Filter Bank - librosa")
mse = torch.square(mel_filters - mel_filters_librosa).mean().item()
print("Mean Square Difference: ", mse)
######################################################################
# MelSpectrogram
# --------------
#
# Generating a mel-scale spectrogram involves generating a spectrogram
# and performing mel-scale conversion. In ``torchaudio``, ``MelSpectrogram`` provides
# this functionality.
#
waveform, sample_rate = get_speech_sample()
n_fft = 1024
win_length = None
hop_length = 512
n_mels = 128
mel_spectrogram = T.MelSpectrogram(
sample_rate=sample_rate,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
center=True,
pad_mode="reflect",
power=2.0,
norm="slaney",
onesided=True,
n_mels=n_mels,
mel_scale="htk",
)
melspec = mel_spectrogram(waveform)
plot_spectrogram(melspec[0], title="MelSpectrogram - torchaudio", ylabel="mel freq")
######################################################################
# Comparison against librosa
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# For reference, here is the equivalent means of generating mel-scale
# spectrograms with ``librosa``.
#
melspec_librosa = librosa.feature.melspectrogram(
waveform.numpy()[0],
sr=sample_rate,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
center=True,
pad_mode="reflect",
power=2.0,
n_mels=n_mels,
norm="slaney",
htk=True,
)
plot_spectrogram(melspec_librosa, title="MelSpectrogram - librosa", ylabel="mel freq")
mse = torch.square(melspec - melspec_librosa).mean().item()
print("Mean Square Difference: ", mse)
######################################################################
# MFCC
# ----
#
waveform, sample_rate = get_speech_sample()
n_fft = 2048
win_length = None
hop_length = 512
n_mels = 256
n_mfcc = 256
mfcc_transform = T.MFCC(
sample_rate=sample_rate,
n_mfcc=n_mfcc,
melkwargs={
"n_fft": n_fft,
"n_mels": n_mels,
"hop_length": hop_length,
"mel_scale": "htk",
},
)
mfcc = mfcc_transform(waveform)
plot_spectrogram(mfcc[0])
######################################################################
# Comparing against librosa
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
melspec = librosa.feature.melspectrogram(
y=waveform.numpy()[0],
sr=sample_rate,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
n_mels=n_mels,
htk=True,
norm=None,
)
mfcc_librosa = librosa.feature.mfcc(
S=librosa.core.spectrum.power_to_db(melspec),
n_mfcc=n_mfcc,
dct_type=2,
norm="ortho",
)
plot_spectrogram(mfcc_librosa)
mse = torch.square(mfcc - mfcc_librosa).mean().item()
print("Mean Square Difference: ", mse)
######################################################################
# Pitch
# -----
#
waveform, sample_rate = get_speech_sample()
pitch = F.detect_pitch_frequency(waveform, sample_rate)
plot_pitch(waveform, sample_rate, pitch)
play_audio(waveform, sample_rate)
######################################################################
# Kaldi Pitch (beta)
# ------------------
#
# Kaldi Pitch feature [1] is a pitch detection mechanism tuned for automatic
# speech recognition (ASR) applications. This is a beta feature in ``torchaudio``,
# and it is available only in ``functional``.
#
# 1. A pitch extraction algorithm tuned for automatic speech recognition
#
# Ghahremani, B. BabaAli, D. Povey, K. Riedhammer, J. Trmal and S.
# Khudanpur
#
# 2014 IEEE International Conference on Acoustics, Speech and Signal
# Processing (ICASSP), Florence, 2014, pp. 2494-2498, doi:
# 10.1109/ICASSP.2014.6854049.
# [`abstract <https://ieeexplore.ieee.org/document/6854049>`__],
# [`paper <https://danielpovey.com/files/2014_icassp_pitch.pdf>`__]
#
waveform, sample_rate = get_speech_sample(resample=16000)
pitch_feature = F.compute_kaldi_pitch(waveform, sample_rate)
pitch, nccf = pitch_feature[..., 0], pitch_feature[..., 1]
plot_kaldi_pitch(waveform, sample_rate, pitch, nccf)
play_audio(waveform, sample_rate)
|
"""
MVDR with torchaudio
====================
**Author** `Zhaoheng Ni <[email protected]>`__
"""
######################################################################
# Overview
# --------
#
# This is a tutorial on how to apply MVDR beamforming by using `torchaudio <https://github.com/pytorch/audio>`__.
#
# Steps
#
# - Ideal Ratio Mask (IRM) is generated by dividing the clean/noise
# magnitude by the mixture magnitude.
# - We test all three solutions (``ref_channel``, ``stv_evd``, ``stv_power``)
# of torchaudio's MVDR module.
# - We test the single-channel and multi-channel masks for MVDR beamforming.
# The multi-channel mask is averaged along channel dimension when computing
# the covariance matrices of speech and noise, respectively.
######################################################################
# Preparation
# -----------
#
# First, we import the necessary packages and retrieve the data.
#
# The multi-channel audio example is selected from
# `ConferencingSpeech <https://github.com/ConferencingSpeech/ConferencingSpeech2021>`__
# dataset.
#
# The original filename is
#
# ``SSB07200001\#noise-sound-bible-0038\#7.86_6.16_3.00_3.14_4.84_134.5285_191.7899_0.4735\#15217\#25.16333303751458\#0.2101221178590021.wav``
#
# which was generated with:
#
# - ``SSB07200001.wav`` from `AISHELL-3 <https://www.openslr.org/93/>`__ (Apache License v.2.0)
# - ``noise-sound-bible-0038.wav`` from `MUSAN <http://www.openslr.org/17/>`__ (Attribution 4.0 International — CC BY 4.0) # noqa: E501
#
import os
import requests
import torch
import torchaudio
import IPython.display as ipd
torch.random.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(torch.__version__)
print(torchaudio.__version__)
print(device)
filenames = [
"mix.wav",
"reverb_clean.wav",
"clean.wav",
]
base_url = "https://download.pytorch.org/torchaudio/tutorial-assets/mvdr"
os.makedirs("_assets", exist_ok=True)
for filename in filenames:
    path = os.path.join("_assets", filename)
    if not os.path.exists(path):
        with open(path, "wb") as file:
            file.write(requests.get(f"{base_url}/{filename}").content)
######################################################################
# Generate the Ideal Ratio Mask (IRM)
# -----------------------------------
#
######################################################################
# Loading audio data
# ~~~~~~~~~~~~~~~~~~
#
mix, sr = torchaudio.load("_assets/mix.wav")
reverb_clean, sr2 = torchaudio.load("_assets/reverb_clean.wav")
clean, sr3 = torchaudio.load("_assets/clean.wav")
assert sr == sr2 == sr3
noise = mix - reverb_clean
######################################################################
#
# .. note::
#    The MVDR module requires ``torch.cdouble`` dtype for the noisy STFT.
#    We need to convert the dtype of the waveforms to ``torch.double`` first.
#
mix = mix.to(torch.double)
noise = noise.to(torch.double)
clean = clean.to(torch.double)
reverb_clean = reverb_clean.to(torch.double)
######################################################################
# Compute STFT
# ~~~~~~~~~~~~
#
stft = torchaudio.transforms.Spectrogram(
n_fft=1024,
hop_length=256,
power=None,
)
istft = torchaudio.transforms.InverseSpectrogram(n_fft=1024, hop_length=256)
spec_mix = stft(mix)
spec_clean = stft(clean)
spec_reverb_clean = stft(reverb_clean)
spec_noise = stft(noise)
######################################################################
# Generate the Ideal Ratio Mask (IRM)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. note::
#    We found that using the mask directly performs better than using its
#    square root. This is slightly different from the usual definition of the IRM.
#
def get_irms(spec_clean, spec_noise):
mag_clean = spec_clean.abs() ** 2
mag_noise = spec_noise.abs() ** 2
irm_speech = mag_clean / (mag_clean + mag_noise)
irm_noise = mag_noise / (mag_clean + mag_noise)
return irm_speech, irm_noise
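# For reference, here is a minimal sketch of the conventional IRM, which takes the
# square root of the ratios above. It is not used in the rest of this tutorial; it is
# included only to illustrate the difference mentioned in the note.
def get_irms_sqrt(spec_clean, spec_noise):
    mag_clean = spec_clean.abs() ** 2
    mag_noise = spec_noise.abs() ** 2
    irm_speech = (mag_clean / (mag_clean + mag_noise)) ** 0.5
    irm_noise = (mag_noise / (mag_clean + mag_noise)) ** 0.5
    return irm_speech, irm_noise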
######################################################################
# .. note::
#    We use the reverberant clean speech as the target here;
#    you can also set it to the dry clean speech.
irm_speech, irm_noise = get_irms(spec_reverb_clean, spec_noise)
######################################################################
# Apply MVDR
# ----------
#
######################################################################
# Apply MVDR beamforming by using multi-channel masks
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
results_multi = {}
for solution in ["ref_channel", "stv_evd", "stv_power"]:
mvdr = torchaudio.transforms.MVDR(ref_channel=0, solution=solution, multi_mask=True)
stft_est = mvdr(spec_mix, irm_speech, irm_noise)
est = istft(stft_est, length=mix.shape[-1])
results_multi[solution] = est
######################################################################
# Apply MVDR beamforming by using single-channel masks
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We use the 1st channel as an example.
# The channel selection may depend on the design of the microphone array.
results_single = {}
for solution in ["ref_channel", "stv_evd", "stv_power"]:
mvdr = torchaudio.transforms.MVDR(
ref_channel=0, solution=solution, multi_mask=False
)
stft_est = mvdr(spec_mix, irm_speech[0], irm_noise[0])
est = istft(stft_est, length=mix.shape[-1])
results_single[solution] = est
######################################################################
# Compute Si-SDR scores
# ~~~~~~~~~~~~~~~~~~~~~
#
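# Si-SDR (scale-invariant signal-to-distortion ratio) rescales the reference to best
# match the estimate before measuring the distortion, so the score is insensitive to
# the overall gain of the estimate. Higher values indicate better enhancement.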
def si_sdr(estimate, reference, epsilon=1e-8):
estimate = estimate - estimate.mean()
reference = reference - reference.mean()
reference_pow = reference.pow(2).mean(axis=1, keepdim=True)
mix_pow = (estimate * reference).mean(axis=1, keepdim=True)
scale = mix_pow / (reference_pow + epsilon)
reference = scale * reference
error = estimate - reference
reference_pow = reference.pow(2)
error_pow = error.pow(2)
reference_pow = reference_pow.mean(axis=1)
error_pow = error_pow.mean(axis=1)
sisdr = 10 * torch.log10(reference_pow) - 10 * torch.log10(error_pow)
return sisdr.item()
######################################################################
# Results
# -------
#
######################################################################
# Single-channel mask results
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
for solution in results_single:
print(
solution + ": ", si_sdr(results_single[solution][None, ...], reverb_clean[0:1])
)
######################################################################
# Multi-channel mask results
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
for solution in results_multi:
print(
solution + ": ", si_sdr(results_multi[solution][None, ...], reverb_clean[0:1])
)
######################################################################
# Original audio
# --------------
#
######################################################################
# Mixture speech
# ~~~~~~~~~~~~~~
#
ipd.Audio(mix[0], rate=16000)
######################################################################
# Noise
# ~~~~~
#
ipd.Audio(noise[0], rate=16000)
######################################################################
# Clean speech
# ~~~~~~~~~~~~
#
ipd.Audio(clean[0], rate=16000)
######################################################################
# Enhanced audio
# --------------
#
######################################################################
# Multi-channel mask, ref_channel solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_multi["ref_channel"], rate=16000)
######################################################################
# Multi-channel mask, stv_evd solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_multi["stv_evd"], rate=16000)
######################################################################
# Multi-channel mask, stv_power solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_multi["stv_power"], rate=16000)
######################################################################
# Single-channel mask, ref_channel solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_single["ref_channel"], rate=16000)
######################################################################
# Single-channel mask, stv_evd solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_single["stv_evd"], rate=16000)
######################################################################
# Single-channel mask, stv_power solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_single["stv_power"], rate=16000)
|
# -*- coding: utf-8 -*-
"""
Audio Datasets
==============
``torchaudio`` provides easy access to common, publicly accessible
datasets. Please refer to the official documentation for the list of
available datasets.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio
import torch
import torchaudio
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import multiprocessing
import os
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_assets"
YESNO_DATASET_PATH = os.path.join(_SAMPLE_DIR, "yes_no")
os.makedirs(YESNO_DATASET_PATH, exist_ok=True)
def _download_yesno():
if os.path.exists(os.path.join(YESNO_DATASET_PATH, "waves_yesno.tar.gz")):
return
torchaudio.datasets.YESNO(root=YESNO_DATASET_PATH, download=True)
YESNO_DOWNLOAD_PROCESS = multiprocessing.Process(target=_download_yesno)
YESNO_DOWNLOAD_PROCESS.start()
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
######################################################################
# Here, we show how to use the ``YESNO`` dataset.
#
YESNO_DOWNLOAD_PROCESS.join()
dataset = torchaudio.datasets.YESNO(YESNO_DATASET_PATH, download=True)
for i in [1, 3, 5]:
waveform, sample_rate, label = dataset[i]
plot_specgram(waveform, sample_rate, title=f"Sample {i}: {label}")
play_audio(waveform, sample_rate)
|
# -*- coding: utf-8 -*-
"""
Audio Data Augmentation
=======================
``torchaudio`` provides a variety of ways to augment audio data.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio
import torch
import torchaudio
import torchaudio.functional as F
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use a speech data from [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import math
import os
import requests
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_assets"
SAMPLE_WAV_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.wav"
SAMPLE_WAV_PATH = os.path.join(_SAMPLE_DIR, "steam.wav")
SAMPLE_RIR_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/distant-16k/room-response/rm1/impulse/Lab41-SRI-VOiCES-rm1-impulse-mc01-stu-clo.wav" # noqa: E501
SAMPLE_RIR_PATH = os.path.join(_SAMPLE_DIR, "rir.wav")
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" # noqa: E501
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")
SAMPLE_NOISE_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/distant-16k/distractors/rm1/babb/Lab41-SRI-VOiCES-rm1-babb-mc01-stu-clo.wav" # noqa: E501
SAMPLE_NOISE_PATH = os.path.join(_SAMPLE_DIR, "bg.wav")
os.makedirs(_SAMPLE_DIR, exist_ok=True)
def _fetch_data():
uri = [
(SAMPLE_WAV_URL, SAMPLE_WAV_PATH),
(SAMPLE_RIR_URL, SAMPLE_RIR_PATH),
(SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
(SAMPLE_NOISE_URL, SAMPLE_NOISE_PATH),
]
for url, path in uri:
with open(path, "wb") as file_:
file_.write(requests.get(url).content)
_fetch_data()
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_PATH, resample=resample)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def plot_waveform(waveform, sample_rate, title="Waveform", xlim=None, ylim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) / sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].plot(time_axis, waveform[c], linewidth=1)
axes[c].grid(True)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
if ylim:
axes[c].set_ylim(ylim)
figure.suptitle(title)
plt.show(block=False)
def print_stats(waveform, sample_rate=None, src=None):
if src:
print("-" * 10)
print("Source:", src)
print("-" * 10)
if sample_rate:
print("Sample Rate:", sample_rate)
print("Shape:", tuple(waveform.shape))
print("Dtype:", waveform.dtype)
print(f" - Max: {waveform.max().item():6.3f}")
print(f" - Min: {waveform.min().item():6.3f}")
print(f" - Mean: {waveform.mean().item():6.3f}")
print(f" - Std Dev: {waveform.std().item():6.3f}")
print()
print(waveform)
print()
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
def get_rir_sample(*, resample=None, processed=False):
rir_raw, sample_rate = _get_sample(SAMPLE_RIR_PATH, resample=resample)
if not processed:
return rir_raw, sample_rate
rir = rir_raw[:, int(sample_rate * 1.01): int(sample_rate * 1.3)]
rir = rir / torch.norm(rir, p=2)
rir = torch.flip(rir, [1])
return rir, sample_rate
def get_noise_sample(*, resample=None):
return _get_sample(SAMPLE_NOISE_PATH, resample=resample)
######################################################################
# Applying effects and filtering
# ------------------------------
#
# ``torchaudio.sox_effects`` allows for directly applying filters similar to
# those available in ``sox`` to Tensor objects and file object audio sources.
#
# There are two functions for this:
#
# - ``torchaudio.sox_effects.apply_effects_tensor`` for applying effects
# to Tensor.
# - ``torchaudio.sox_effects.apply_effects_file`` for applying effects to
# other audio sources.
#
# Both functions accept effect definitions in the form
# ``List[List[str]]``.
# This is mostly consistent with how the ``sox`` command works, but one caveat is
# that ``sox`` adds some effects automatically, whereas ``torchaudio``’s
# implementation does not.
#
# For the list of available effects, please refer to `the sox
# documentation <http://sox.sourceforge.net/sox.html>`__.
#
# **Tip** If you need to load and resample your audio data on the fly,
# then you can use ``torchaudio.sox_effects.apply_effects_file`` with
# effect ``"rate"``.
#
# **Note** ``apply_effects_file`` accepts a file-like object or path-like
# object. Similar to ``torchaudio.load``, when the audio format cannot be
# inferred from either the file extension or header, you can provide
# argument ``format`` to specify the format of the audio source.
#
# **Note** This process is not differentiable.
#
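# For example, here is a minimal sketch of the tip above, reusing the speech sample
# downloaded in the preparation cell (the variable names are only for illustration):
waveform_tip, sample_rate_tip = torchaudio.sox_effects.apply_effects_file(
    SAMPLE_WAV_SPEECH_PATH, effects=[["rate", "8000"]]
)
print("Loaded and resampled on the fly:", waveform_tip.shape, sample_rate_tip)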
# Load the data
waveform1, sample_rate1 = get_sample(resample=16000)
# Define effects
effects = [
["lowpass", "-1", "300"], # apply single-pole lowpass filter
["speed", "0.8"], # reduce the speed
    # This only changes the sample rate, so it is necessary to
    # add the `rate` effect with the original sample rate afterwards.
["rate", f"{sample_rate1}"],
["reverb", "-w"], # Reverbration gives some dramatic feeling
]
# Apply effects
waveform2, sample_rate2 = torchaudio.sox_effects.apply_effects_tensor(
waveform1, sample_rate1, effects
)
plot_waveform(waveform1, sample_rate1, title="Original", xlim=(-0.1, 3.2))
plot_waveform(waveform2, sample_rate2, title="Effects Applied", xlim=(-0.1, 3.2))
print_stats(waveform1, sample_rate=sample_rate1, src="Original")
print_stats(waveform2, sample_rate=sample_rate2, src="Effects Applied")
######################################################################
# Note that the number of frames and number of channels are different from
# those of the original after the effects are applied. Let’s listen to the
# audio. Doesn’t it sound more dramatic?
#
plot_specgram(waveform1, sample_rate1, title="Original", xlim=(0, 3.04))
play_audio(waveform1, sample_rate1)
plot_specgram(waveform2, sample_rate2, title="Effects Applied", xlim=(0, 3.04))
play_audio(waveform2, sample_rate2)
######################################################################
# Simulating room reverberation
# -----------------------------
#
# `Convolution
# reverb <https://en.wikipedia.org/wiki/Convolution_reverb>`__ is a
# technique that's used to make clean audio sound as though it has been
# produced in a different environment.
#
# Using Room Impulse Response (RIR), for instance, we can make clean speech
# sound as though it has been uttered in a conference room.
#
# For this process, we need RIR data. The following data are from the VOiCES
# dataset, but you can record your own — just turn on your microphone
# and clap your hands.
#
sample_rate = 8000
rir_raw, _ = get_rir_sample(resample=sample_rate)
plot_waveform(rir_raw, sample_rate, title="Room Impulse Response (raw)", ylim=None)
plot_specgram(rir_raw, sample_rate, title="Room Impulse Response (raw)")
play_audio(rir_raw, sample_rate)
######################################################################
# First, we need to clean up the RIR. We extract the main impulse, normalize
# the signal power, then flip along the time axis.
#
rir = rir_raw[:, int(sample_rate * 1.01): int(sample_rate * 1.3)]
rir = rir / torch.norm(rir, p=2)
rir = torch.flip(rir, [1])
print_stats(rir)
plot_waveform(rir, sample_rate, title="Room Impulse Response", ylim=None)
######################################################################
# Then, we convolve the speech signal with the RIR filter.
#
speech, _ = get_speech_sample(resample=sample_rate)
speech_ = torch.nn.functional.pad(speech, (rir.shape[1] - 1, 0))
augmented = torch.nn.functional.conv1d(speech_[None, ...], rir[None, ...])[0]
plot_waveform(speech, sample_rate, title="Original", ylim=None)
plot_waveform(augmented, sample_rate, title="RIR Applied", ylim=None)
plot_specgram(speech, sample_rate, title="Original")
play_audio(speech, sample_rate)
plot_specgram(augmented, sample_rate, title="RIR Applied")
play_audio(augmented, sample_rate)
######################################################################
# Adding background noise
# -----------------------
#
# To add background noise to audio data, you can simply add a noise Tensor to
# the Tensor representing the audio data. A common method to adjust the
# intensity of noise is changing the Signal-to-Noise Ratio (SNR).
# [`wikipedia <https://en.wikipedia.org/wiki/Signal-to-noise_ratio>`__]
#
# \begin{align}\mathrm{SNR} = \frac{P_{\mathrm{signal}}}{P_{\mathrm{noise}}}\end{align}
#
# \begin{align}{\mathrm {SNR_{{dB}}}}=10\log _{{10}}\left({\mathrm {SNR}}\right)\end{align}
#
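# The loop below converts the target SNR from dB to an amplitude scale with
# ``10 ** (snr_db / 20)``, because the L2 norms used here measure amplitude.
# As a quick sanity check (illustration only): 20 dB corresponds to a factor of 10.
print("Amplitude ratio for 20 dB SNR:", 10 ** (20 / 20))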
sample_rate = 8000
speech, _ = get_speech_sample(resample=sample_rate)
noise, _ = get_noise_sample(resample=sample_rate)
noise = noise[:, : speech.shape[1]]
plot_waveform(noise, sample_rate, title="Background noise")
plot_specgram(noise, sample_rate, title="Background noise")
play_audio(noise, sample_rate)
speech_power = speech.norm(p=2)
noise_power = noise.norm(p=2)
for snr_db in [20, 10, 3]:
    # Convert the target SNR from dB to an amplitude ratio (the norms above are amplitudes)
    snr = 10 ** (snr_db / 20)
scale = snr * noise_power / speech_power
noisy_speech = (scale * speech + noise) / 2
plot_waveform(noisy_speech, sample_rate, title=f"SNR: {snr_db} [dB]")
plot_specgram(noisy_speech, sample_rate, title=f"SNR: {snr_db} [dB]")
play_audio(noisy_speech, sample_rate)
######################################################################
# Applying codec to Tensor object
# -------------------------------
#
# ``torchaudio.functional.apply_codec`` can apply codecs to a Tensor object.
#
# **Note** This process is not differentiable.
#
waveform, sample_rate = get_speech_sample(resample=8000)
plot_specgram(waveform, sample_rate, title="Original")
play_audio(waveform, sample_rate)
configs = [
({"format": "wav", "encoding": "ULAW", "bits_per_sample": 8}, "8 bit mu-law"),
({"format": "gsm"}, "GSM-FR"),
({"format": "mp3", "compression": -9}, "MP3"),
({"format": "vorbis", "compression": -1}, "Vorbis"),
]
for param, title in configs:
augmented = F.apply_codec(waveform, sample_rate, **param)
plot_specgram(augmented, sample_rate, title=title)
play_audio(augmented, sample_rate)
######################################################################
# Simulating a phone recording
# ----------------------------
#
# Combining the previous techniques, we can simulate audio that sounds
# like a person talking over a phone in an echoey room with people talking
# in the background.
#
sample_rate = 16000
speech, _ = get_speech_sample(resample=sample_rate)
plot_specgram(speech, sample_rate, title="Original")
play_audio(speech, sample_rate)
# Apply RIR
rir, _ = get_rir_sample(resample=sample_rate, processed=True)
speech_ = torch.nn.functional.pad(speech, (rir.shape[1] - 1, 0))
speech = torch.nn.functional.conv1d(speech_[None, ...], rir[None, ...])[0]
plot_specgram(speech, sample_rate, title="RIR Applied")
play_audio(speech, sample_rate)
# Add background noise
# Because the noise was recorded in a real environment, we assume that it already
# contains the acoustic characteristics of that environment. Therefore, we add
# the noise after applying the RIR.
noise, _ = get_noise_sample(resample=sample_rate)
noise = noise[:, : speech.shape[1]]
snr_db = 8
scale = 10 ** (snr_db / 20) * noise.norm(p=2) / speech.norm(p=2)
speech = (scale * speech + noise) / 2
plot_specgram(speech, sample_rate, title="BG noise added")
play_audio(speech, sample_rate)
# Apply filtering and change sample rate
speech, sample_rate = torchaudio.sox_effects.apply_effects_tensor(
speech,
sample_rate,
effects=[
["lowpass", "4000"],
[
"compand",
"0.02,0.05",
"-60,-60,-30,-10,-20,-8,-5,-8,-2,-8",
"-8",
"-7",
"0.05",
],
["rate", "8000"],
],
)
plot_specgram(speech, sample_rate, title="Filtered")
play_audio(speech, sample_rate)
# Apply telephony codec
speech = F.apply_codec(speech, sample_rate, format="gsm")
plot_specgram(speech, sample_rate, title="GSM Codec Applied")
play_audio(speech, sample_rate)
|
import random
import torch
from torch.utils.data.dataset import random_split
from torchaudio.datasets import LJSPEECH, LIBRITTS
from torchaudio.transforms import MuLawEncoding
from processing import bits_to_normalized_waveform, normalized_waveform_to_bits
class MapMemoryCache(torch.utils.data.Dataset):
r"""Wrap a dataset so that, whenever a new item is returned, it is saved to memory.
"""
def __init__(self, dataset):
self.dataset = dataset
self._cache = [None] * len(dataset)
def __getitem__(self, n):
if self._cache[n] is not None:
return self._cache[n]
item = self.dataset[n]
self._cache[n] = item
return item
def __len__(self):
return len(self.dataset)
class Processed(torch.utils.data.Dataset):
def __init__(self, dataset, transforms):
self.dataset = dataset
self.transforms = transforms
def __getitem__(self, key):
item = self.dataset[key]
return self.process_datapoint(item)
def __len__(self):
return len(self.dataset)
def process_datapoint(self, item):
specgram = self.transforms(item[0])
return item[0].squeeze(0), specgram
def split_process_dataset(args, transforms):
if args.dataset == 'ljspeech':
data = LJSPEECH(root=args.file_path, download=False)
val_length = int(len(data) * args.val_ratio)
lengths = [len(data) - val_length, val_length]
train_dataset, val_dataset = random_split(data, lengths)
elif args.dataset == 'libritts':
train_dataset = LIBRITTS(root=args.file_path, url='train-clean-100', download=False)
val_dataset = LIBRITTS(root=args.file_path, url='dev-clean', download=False)
else:
raise ValueError(f"Expected dataset: `ljspeech` or `libritts`, but found {args.dataset}")
train_dataset = Processed(train_dataset, transforms)
val_dataset = Processed(val_dataset, transforms)
train_dataset = MapMemoryCache(train_dataset)
val_dataset = MapMemoryCache(val_dataset)
return train_dataset, val_dataset
def collate_factory(args):
def raw_collate(batch):
pad = (args.kernel_size - 1) // 2
# input waveform length
wave_length = args.hop_length * args.seq_len_factor
# input spectrogram length
spec_length = args.seq_len_factor + pad * 2
        # max start position in the spectrogram
max_offsets = [x[1].shape[-1] - (spec_length + pad * 2) for x in batch]
        # random start position in the spectrogram
spec_offsets = [random.randint(0, offset) for offset in max_offsets]
        # random start position in the waveform
wave_offsets = [(offset + pad) * args.hop_length for offset in spec_offsets]
waveform_combine = [
x[0][wave_offsets[i]: wave_offsets[i] + wave_length + 1]
for i, x in enumerate(batch)
]
specgram = [
x[1][:, spec_offsets[i]: spec_offsets[i] + spec_length]
for i, x in enumerate(batch)
]
specgram = torch.stack(specgram)
waveform_combine = torch.stack(waveform_combine)
waveform = waveform_combine[:, :wave_length]
target = waveform_combine[:, 1:]
# waveform: [-1, 1], target: [0, 2**bits-1] if loss = 'crossentropy'
if args.loss == "crossentropy":
if args.mulaw:
mulaw_encode = MuLawEncoding(2 ** args.n_bits)
waveform = mulaw_encode(waveform)
target = mulaw_encode(target)
waveform = bits_to_normalized_waveform(waveform, args.n_bits)
else:
target = normalized_waveform_to_bits(target, args.n_bits)
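        # Returned shapes: waveform (batch, 1, wave_length),
        # specgram (batch, 1, n_freq, spec_length), target (batch, 1, wave_length)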
return waveform.unsqueeze(1), specgram.unsqueeze(1), target.unsqueeze(1)
return raw_collate
|
# *****************************************************************************
# Copyright (c) 2019 fatchord (https://github.com/fatchord)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# *****************************************************************************
from torchaudio.models.wavernn import WaveRNN
import torch
import torchaudio
from torch import Tensor
from processing import normalized_waveform_to_bits
def _fold_with_overlap(x: Tensor, timesteps: int, overlap: int) -> Tensor:
r'''Fold the tensor with overlap for quick batched inference.
Overlap will be used for crossfading in xfade_and_unfold().
x = [[h1, h2, ... hn]]
Where each h is a vector of conditioning channels
Eg: timesteps=2, overlap=1 with x.size(1)=10
folded = [[h1, h2, h3, h4],
[h4, h5, h6, h7],
[h7, h8, h9, h10]]
Args:
        x (tensor): Upsampled conditioning channels of size (1, channel, total_len).
timesteps (int): Timesteps for each index of batch.
overlap (int): Timesteps for both xfade and rnn warmup.
Return:
        folded (tensor): folded tensor of size (n_folds, channel, timesteps + 2 * overlap).
'''
_, channels, total_len = x.size()
# Calculate variables needed
n_folds = (total_len - overlap) // (timesteps + overlap)
extended_len = n_folds * (overlap + timesteps) + overlap
remaining = total_len - extended_len
    # Pad if some time steps are poking out
if remaining != 0:
n_folds += 1
padding = timesteps + 2 * overlap - remaining
x = torch.nn.functional.pad(x, (0, padding))
folded = torch.zeros((n_folds, channels, timesteps + 2 * overlap), device=x.device)
# Get the values for the folded tensor
for i in range(n_folds):
start = i * (timesteps + overlap)
end = start + timesteps + 2 * overlap
folded[i] = x[0, :, start:end]
return folded
def _xfade_and_unfold(y: Tensor, overlap: int) -> Tensor:
r'''Applies a crossfade and unfolds into a 1d array.
y = [[seq1],
[seq2],
[seq3]]
Apply a gain envelope at both ends of the sequences
y = [[seq1_in, seq1_timesteps, seq1_out],
[seq2_in, seq2_timesteps, seq2_out],
[seq3_in, seq3_timesteps, seq3_out]]
Stagger and add up the groups of samples:
[seq1_in, seq1_timesteps, (seq1_out + seq2_in), seq2_timesteps, ...]
Args:
y (Tensor): Batched sequences of audio samples of size
(num_folds, channels, timesteps + 2 * overlap).
overlap (int): Timesteps for both xfade and rnn warmup.
Returns:
unfolded waveform (Tensor) : waveform in a 1d tensor of size (channels, total_len).
'''
num_folds, channels, length = y.shape
timesteps = length - 2 * overlap
total_len = num_folds * (timesteps + overlap) + overlap
# Need some silence for the rnn warmup
silence_len = overlap // 2
fade_len = overlap - silence_len
silence = torch.zeros((silence_len), dtype=y.dtype, device=y.device)
linear = torch.ones((silence_len), dtype=y.dtype, device=y.device)
# Equal power crossfade
t = torch.linspace(-1, 1, fade_len, dtype=y.dtype, device=y.device)
fade_in = torch.sqrt(0.5 * (1 + t))
fade_out = torch.sqrt(0.5 * (1 - t))
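    # Note: fade_in ** 2 + fade_out ** 2 == 1 at every sample, so the overlapping
    # segments sum to roughly constant power across each seam.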
# Concat the silence to the fades
fade_in = torch.cat([silence, fade_in])
fade_out = torch.cat([linear, fade_out])
# Apply the gain to the overlap samples
y[:, :, :overlap] *= fade_in
y[:, :, -overlap:] *= fade_out
unfolded = torch.zeros((channels, total_len), dtype=y.dtype, device=y.device)
# Loop to add up all the samples
for i in range(num_folds):
start = i * (timesteps + overlap)
end = start + timesteps + 2 * overlap
unfolded[:, start:end] += y[i]
return unfolded
class WaveRNNInferenceWrapper(torch.nn.Module):
def __init__(self, wavernn: WaveRNN):
super().__init__()
self.wavernn_model = wavernn
def forward(self,
specgram: Tensor,
mulaw: bool = True,
batched: bool = True,
timesteps: int = 100,
overlap: int = 5) -> Tensor:
r"""Inference function for WaveRNN.
Based on the implementation from
https://github.com/fatchord/WaveRNN/blob/master/models/fatchord_version.py.
Currently only supports multinomial sampling.
Args:
specgram (Tensor): spectrogram of size (n_mels, n_time)
mulaw (bool, optional): Whether to perform mulaw decoding (Default: ``True``).
batched (bool, optional): Whether to perform batch prediction. Using batch prediction
will significantly increase the inference speed (Default: ``True``).
timesteps (int, optional): The time steps for each batch. Only used when `batched`
is set to True (Default: ``100``).
overlap (int, optional): The overlapping time steps between batches. Only used when
`batched` is set to True (Default: ``5``).
Returns:
waveform (Tensor): Reconstructed waveform of size (1, n_time, ).
1 represents single channel.
"""
specgram = specgram.unsqueeze(0)
if batched:
specgram = _fold_with_overlap(specgram, timesteps, overlap)
output = self.wavernn_model.infer(specgram).cpu()
if mulaw:
output = normalized_waveform_to_bits(output, self.wavernn_model.n_bits)
output = torchaudio.functional.mu_law_decoding(output, self.wavernn_model.n_classes)
if batched:
output = _xfade_and_unfold(output, overlap)
else:
output = output[0]
return output
|
import logging
import os
import shutil
from collections import defaultdict, deque
import torch
class MetricLogger:
r"""Logger for model metrics
"""
def __init__(self, group, print_freq=1):
self.print_freq = print_freq
self._iter = 0
self.data = defaultdict(lambda: deque(maxlen=self.print_freq))
self.data["group"].append(group)
def __setitem__(self, key, value):
self.data[key].append(value)
def _get_last(self):
return {k: v[-1] for k, v in self.data.items()}
def __str__(self):
return str(self._get_last())
def __call__(self):
self._iter = (self._iter + 1) % self.print_freq
if not self._iter:
print(self, flush=True)
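# Illustrative usage: ``metric = MetricLogger("train"); metric["loss"] = 0.1; metric()``
# prints the most recent value of every key once per ``print_freq`` calls.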
def save_checkpoint(state, is_best, filename):
r"""Save the model to a temporary file first,
then copy it to filename, in case the signal interrupts
the torch.save() process.
"""
if filename == "":
return
tempfile = filename + ".temp"
    # Remove tempfile in case a previous copy from tempfile to filename was interrupted
if os.path.isfile(tempfile):
os.remove(tempfile)
torch.save(state, tempfile)
if os.path.isfile(tempfile):
os.rename(tempfile, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
logging.info("Checkpoint: saved")
def count_parameters(model):
r"""Count the total number of parameters in the model
"""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
import argparse
import torch
import torchaudio
from torchaudio.transforms import MelSpectrogram
from torchaudio.models import wavernn
from torchaudio.models.wavernn import _MODEL_CONFIG_AND_URLS
from torchaudio.datasets import LJSPEECH
from wavernn_inference_wrapper import WaveRNNInferenceWrapper
from processing import NormalizeDB
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--output-wav-path", default="./output.wav", type=str, metavar="PATH",
help="The path to output the reconstructed wav file.",
)
parser.add_argument(
"--jit", default=False, action="store_true",
help="If used, the model and inference function is jitted."
)
parser.add_argument(
"--no-batch-inference", default=False, action="store_true",
help="Don't use batch inference."
)
parser.add_argument(
"--no-mulaw", default=False, action="store_true",
help="Don't use mulaw decoder to decoder the signal."
)
parser.add_argument(
"--checkpoint-name", default="wavernn_10k_epochs_8bits_ljspeech",
choices=list(_MODEL_CONFIG_AND_URLS.keys()),
help="Select the WaveRNN checkpoint."
)
parser.add_argument(
"--batch-timesteps", default=100, type=int,
help="The time steps for each batch. Only used when batch inference is used",
)
parser.add_argument(
"--batch-overlap", default=5, type=int,
help="The overlapping time steps between batches. Only used when batch inference is used",
)
args = parser.parse_args()
return args
def main(args):
device = "cuda" if torch.cuda.is_available() else "cpu"
waveform, sample_rate, _, _ = LJSPEECH("./", download=True)[0]
mel_kwargs = {
'sample_rate': sample_rate,
'n_fft': 2048,
'f_min': 40.,
'n_mels': 80,
'win_length': 1100,
'hop_length': 275,
'mel_scale': 'slaney',
'norm': 'slaney',
'power': 1,
}
transforms = torch.nn.Sequential(
MelSpectrogram(**mel_kwargs),
NormalizeDB(min_level_db=-100, normalization=True),
)
mel_specgram = transforms(waveform)
wavernn_model = wavernn(args.checkpoint_name).eval().to(device)
wavernn_inference_model = WaveRNNInferenceWrapper(wavernn_model)
if args.jit:
wavernn_inference_model = torch.jit.script(wavernn_inference_model)
with torch.no_grad():
output = wavernn_inference_model(mel_specgram.to(device),
mulaw=(not args.no_mulaw),
batched=(not args.no_batch_inference),
timesteps=args.batch_timesteps,
overlap=args.batch_overlap,)
torchaudio.save(args.output_wav_path, output, sample_rate=sample_rate)
if __name__ == "__main__":
args = parse_args()
main(args)
|
import math
import torch
from torch import nn as nn
from torch.nn import functional as F
class LongCrossEntropyLoss(nn.Module):
r""" CrossEntropy loss
"""
def __init__(self):
super(LongCrossEntropyLoss, self).__init__()
def forward(self, output, target):
output = output.transpose(1, 2)
target = target.long()
criterion = nn.CrossEntropyLoss()
return criterion(output, target)
class MoLLoss(nn.Module):
r""" Discretized mixture of logistic distributions loss
Adapted from wavenet vocoder
(https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py)
Explanation of loss (https://github.com/Rayhane-mamah/Tacotron-2/issues/155)
Args:
y_hat (Tensor): Predicted output (n_batch x n_time x n_channel)
y (Tensor): Target (n_batch x n_time x 1)
num_classes (int): Number of classes
log_scale_min (float): Log scale minimum value
reduce (bool): If True, the losses are averaged or summed for each minibatch
    Returns:
Tensor: loss
"""
def __init__(self, num_classes=65536, log_scale_min=None, reduce=True):
super(MoLLoss, self).__init__()
self.num_classes = num_classes
self.log_scale_min = log_scale_min
self.reduce = reduce
def forward(self, y_hat, y):
y = y.unsqueeze(-1)
if self.log_scale_min is None:
self.log_scale_min = math.log(1e-14)
assert y_hat.dim() == 3
assert y_hat.size(-1) % 3 == 0
nr_mix = y_hat.size(-1) // 3
# unpack parameters (n_batch, n_time, num_mixtures) x 3
logit_probs = y_hat[:, :, :nr_mix]
means = y_hat[:, :, nr_mix: 2 * nr_mix]
log_scales = torch.clamp(
y_hat[:, :, 2 * nr_mix: 3 * nr_mix], min=self.log_scale_min
)
# (n_batch x n_time x 1) to (n_batch x n_time x num_mixtures)
y = y.expand_as(means)
centered_y = y - means
inv_stdv = torch.exp(-log_scales)
plus_in = inv_stdv * (centered_y + 1.0 / (self.num_classes - 1))
cdf_plus = torch.sigmoid(plus_in)
min_in = inv_stdv * (centered_y - 1.0 / (self.num_classes - 1))
cdf_min = torch.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
# equivalent: torch.log(F.sigmoid(plus_in))
log_cdf_plus = plus_in - F.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
# equivalent: (1 - F.sigmoid(min_in)).log()
log_one_minus_cdf_min = -F.softplus(min_in)
# probability for all other cases
cdf_delta = cdf_plus - cdf_min
mid_in = inv_stdv * centered_y
# log probability in the center of the bin, to be used in extreme cases
log_pdf_mid = mid_in - log_scales - 2.0 * F.softplus(mid_in)
inner_inner_cond = (cdf_delta > 1e-5).float()
inner_inner_out = inner_inner_cond * torch.log(
torch.clamp(cdf_delta, min=1e-12)
) + (1.0 - inner_inner_cond) * (
log_pdf_mid - math.log((self.num_classes - 1) / 2)
)
inner_cond = (y > 0.999).float()
inner_out = (
inner_cond * log_one_minus_cdf_min + (1.0 - inner_cond) * inner_inner_out
)
cond = (y < -0.999).float()
log_probs = cond * log_cdf_plus + (1.0 - cond) * inner_out
log_probs = log_probs + F.log_softmax(logit_probs, -1)
if self.reduce:
return -torch.mean(_log_sum_exp(log_probs))
else:
return -_log_sum_exp(log_probs).unsqueeze(-1)
def _log_sum_exp(x):
r""" Numerically stable log_sum_exp implementation that prevents overflow
"""
axis = len(x.size()) - 1
m, _ = torch.max(x, dim=axis)
m2, _ = torch.max(x, dim=axis, keepdim=True)
return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis))
|
import torch
import torch.nn as nn
class NormalizeDB(nn.Module):
r"""Normalize the spectrogram with a minimum db value
"""
def __init__(self, min_level_db, normalization):
super().__init__()
self.min_level_db = min_level_db
self.normalization = normalization
def forward(self, specgram):
specgram = torch.log10(torch.clamp(specgram.squeeze(0), min=1e-5))
if self.normalization:
return torch.clamp(
(self.min_level_db - 20 * specgram) / self.min_level_db, min=0, max=1
)
return specgram
def normalized_waveform_to_bits(waveform: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]
"""
assert abs(waveform).max() <= 1.0
waveform = (waveform + 1.0) * (2 ** bits - 1) / 2
return torch.clamp(waveform, 0, 2 ** bits - 1).int()
def bits_to_normalized_waveform(label: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform label [0, 2 ** bits - 1] to waveform [-1, 1]
"""
return 2 * label / (2 ** bits - 1.0) - 1.0
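# Note: the two conversions above are approximate inverses; a round trip through
# ``normalized_waveform_to_bits`` and ``bits_to_normalized_waveform`` changes a value
# by at most one quantization step, about 2 / (2 ** bits - 1) in the [-1, 1] range.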
|
import argparse
import logging
import os
from collections import defaultdict
from datetime import datetime
from time import time
from typing import List
import torch
import torchaudio
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchaudio.datasets.utils import bg_iterator
from torchaudio.models.wavernn import WaveRNN
from datasets import collate_factory, split_process_dataset
from losses import LongCrossEntropyLoss, MoLLoss
from processing import NormalizeDB
from utils import MetricLogger, count_parameters, save_checkpoint
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--workers",
default=4,
type=int,
metavar="N",
help="number of data loading workers",
)
parser.add_argument(
"--checkpoint",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint",
)
parser.add_argument(
"--epochs",
default=8000,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch", default=0, type=int, metavar="N", help="manual epoch number"
)
parser.add_argument(
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency in epochs",
)
parser.add_argument(
"--dataset",
default="ljspeech",
choices=["ljspeech", "libritts"],
type=str,
help="select dataset to train with",
)
parser.add_argument(
"--batch-size", default=256, type=int, metavar="N", help="mini-batch size"
)
parser.add_argument(
"--learning-rate", default=1e-4, type=float, metavar="LR", help="learning rate",
)
parser.add_argument("--clip-grad", metavar="NORM", type=float, default=4.0)
parser.add_argument(
"--mulaw",
default=True,
action="store_true",
help="if used, waveform is mulaw encoded",
)
parser.add_argument(
"--jit", default=False, action="store_true", help="if used, model is jitted"
)
parser.add_argument(
"--upsample-scales",
default=[5, 5, 11],
type=List[int],
help="the list of upsample scales",
)
parser.add_argument(
"--n-bits", default=8, type=int, help="the bits of output waveform",
)
parser.add_argument(
"--sample-rate",
default=22050,
type=int,
help="the rate of audio dimensions (samples per second)",
)
parser.add_argument(
"--hop-length",
default=275,
type=int,
help="the number of samples between the starts of consecutive frames",
)
parser.add_argument(
"--win-length", default=1100, type=int, help="the length of the STFT window",
)
parser.add_argument(
"--f-min", default=40.0, type=float, help="the minimum frequency",
)
parser.add_argument(
"--min-level-db",
default=-100,
type=float,
help="the minimum db value for spectrogam normalization",
)
parser.add_argument(
"--n-res-block", default=10, type=int, help="the number of ResBlock in stack",
)
parser.add_argument(
"--n-rnn", default=512, type=int, help="the dimension of RNN layer",
)
parser.add_argument(
"--n-fc", default=512, type=int, help="the dimension of fully connected layer",
)
parser.add_argument(
"--kernel-size",
default=5,
type=int,
help="the number of kernel size in the first Conv1d layer",
)
parser.add_argument(
"--n-freq", default=80, type=int, help="the number of spectrogram bins to use",
)
parser.add_argument(
"--n-hidden-melresnet",
default=128,
type=int,
help="the number of hidden dimensions of resblock in melresnet",
)
parser.add_argument(
"--n-output-melresnet", default=128, type=int, help="the output dimension of melresnet",
)
parser.add_argument(
"--n-fft", default=2048, type=int, help="the number of Fourier bins",
)
parser.add_argument(
"--loss",
default="crossentropy",
choices=["crossentropy", "mol"],
type=str,
help="the type of loss",
)
parser.add_argument(
"--seq-len-factor",
default=5,
type=int,
help="the length of each waveform to process per batch = hop_length * seq_len_factor",
)
parser.add_argument(
"--val-ratio",
default=0.1,
type=float,
help="the ratio of waveforms for validation",
)
parser.add_argument(
"--file-path", default="", type=str, help="the path of audio files",
)
parser.add_argument(
"--normalization", default=True, action="store_true", help="if True, spectrogram is normalized",
)
args = parser.parse_args()
return args
def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch):
model.train()
sums = defaultdict(lambda: 0.0)
start1 = time()
metric = MetricLogger("train_iteration")
metric["epoch"] = epoch
for waveform, specgram, target in bg_iterator(data_loader, maxsize=2):
start2 = time()
waveform = waveform.to(device)
specgram = specgram.to(device)
target = target.to(device)
output = model(waveform, specgram)
output, target = output.squeeze(1), target.squeeze(1)
loss = criterion(output, target)
loss_item = loss.item()
sums["loss"] += loss_item
metric["loss"] = loss_item
optimizer.zero_grad()
loss.backward()
if args.clip_grad > 0:
gradient = torch.nn.utils.clip_grad_norm_(
model.parameters(), args.clip_grad
)
sums["gradient"] += gradient.item()
metric["gradient"] = gradient.item()
optimizer.step()
metric["iteration"] = sums["iteration"]
metric["time"] = time() - start2
metric()
sums["iteration"] += 1
avg_loss = sums["loss"] / len(data_loader)
metric = MetricLogger("train_epoch")
metric["epoch"] = epoch
metric["loss"] = sums["loss"] / len(data_loader)
metric["gradient"] = avg_loss
metric["time"] = time() - start1
metric()
def validate(model, criterion, data_loader, device, epoch):
with torch.no_grad():
model.eval()
sums = defaultdict(lambda: 0.0)
start = time()
for waveform, specgram, target in bg_iterator(data_loader, maxsize=2):
waveform = waveform.to(device)
specgram = specgram.to(device)
target = target.to(device)
output = model(waveform, specgram)
output, target = output.squeeze(1), target.squeeze(1)
loss = criterion(output, target)
sums["loss"] += loss.item()
avg_loss = sums["loss"] / len(data_loader)
metric = MetricLogger("validation")
metric["epoch"] = epoch
metric["loss"] = avg_loss
metric["time"] = time() - start
metric()
return avg_loss
def main(args):
devices = ["cuda" if torch.cuda.is_available() else "cpu"]
logging.info("Start time: {}".format(str(datetime.now())))
melkwargs = {
"n_fft": args.n_fft,
"power": 1,
"hop_length": args.hop_length,
"win_length": args.win_length,
}
transforms = torch.nn.Sequential(
torchaudio.transforms.MelSpectrogram(
sample_rate=args.sample_rate,
n_mels=args.n_freq,
f_min=args.f_min,
mel_scale='slaney',
norm='slaney',
**melkwargs,
),
NormalizeDB(min_level_db=args.min_level_db, normalization=args.normalization),
)
train_dataset, val_dataset = split_process_dataset(args, transforms)
loader_training_params = {
"num_workers": args.workers,
"pin_memory": False,
"shuffle": True,
"drop_last": False,
}
loader_validation_params = loader_training_params.copy()
loader_validation_params["shuffle"] = False
collate_fn = collate_factory(args)
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
collate_fn=collate_fn,
**loader_training_params,
)
val_loader = DataLoader(
val_dataset,
batch_size=args.batch_size,
collate_fn=collate_fn,
**loader_validation_params,
)
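    # For the cross-entropy loss the model predicts one class per quantization level.
    # For the mixture-of-logistics loss, 30 outputs correspond to 10 mixtures times
    # 3 parameters (weight, mean, log-scale), matching what MoLLoss unpacks.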
n_classes = 2 ** args.n_bits if args.loss == "crossentropy" else 30
model = WaveRNN(
upsample_scales=args.upsample_scales,
n_classes=n_classes,
hop_length=args.hop_length,
n_res_block=args.n_res_block,
n_rnn=args.n_rnn,
n_fc=args.n_fc,
kernel_size=args.kernel_size,
n_freq=args.n_freq,
n_hidden=args.n_hidden_melresnet,
n_output=args.n_output_melresnet,
)
if args.jit:
model = torch.jit.script(model)
model = torch.nn.DataParallel(model)
model = model.to(devices[0], non_blocking=True)
n = count_parameters(model)
logging.info(f"Number of parameters: {n}")
# Optimizer
optimizer_params = {
"lr": args.learning_rate,
}
optimizer = Adam(model.parameters(), **optimizer_params)
criterion = LongCrossEntropyLoss() if args.loss == "crossentropy" else MoLLoss()
best_loss = 10.0
if args.checkpoint and os.path.isfile(args.checkpoint):
logging.info(f"Checkpoint: loading '{args.checkpoint}'")
checkpoint = torch.load(args.checkpoint)
args.start_epoch = checkpoint["epoch"]
best_loss = checkpoint["best_loss"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
logging.info(
f"Checkpoint: loaded '{args.checkpoint}' at epoch {checkpoint['epoch']}"
)
else:
logging.info("Checkpoint: not found")
save_checkpoint(
{
"epoch": args.start_epoch,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
},
False,
args.checkpoint,
)
for epoch in range(args.start_epoch, args.epochs):
train_one_epoch(
model, criterion, optimizer, train_loader, devices[0], epoch,
)
if not (epoch + 1) % args.print_freq or epoch == args.epochs - 1:
sum_loss = validate(model, criterion, val_loader, devices[0], epoch)
is_best = sum_loss < best_loss
best_loss = min(sum_loss, best_loss)
save_checkpoint(
{
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
},
is_best,
args.checkpoint,
)
logging.info(f"End time: {datetime.now()}")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
args = parse_args()
main(args)
|
"""
This script finds the merger responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'. If there exists no PR associated with the commit or the PR is properly labeled,
this script is a no-op.
Note: we ping the merger only, not the reviewers, as the reviewers can sometimes be external to torchaudio
with no labeling responsibility, so we don't want to bother them.
"""
import sys
from typing import Any, Optional, Set, Tuple
import requests
# For a PR to be properly labeled it should have one primary label and one secondary label
# For a PR with primary label "other", it does not require an additional secondary label
PRIMARY_LABELS = {
"BC-breaking",
"deprecation",
"bug fix",
"new feature",
"improvement",
"example",
"prototype",
"other",
}
SECONDARY_LABELS = {
"module: I/O",
"module: ops",
"module: models",
"module: pipelines",
"module: datasets",
"module: docs",
"module: tests",
"build",
"style",
"perf",
"other",
}
def query_torchaudio(cmd: str, *, accept) -> Any:
response = requests.get(f"https://api.github.com/repos/pytorch/audio/{cmd}", headers=dict(Accept=accept))
return response.json()
def get_pr_number(commit_hash: str) -> Optional[int]:
# See https://docs.github.com/en/rest/reference/repos#list-pull-requests-associated-with-a-commit
data = query_torchaudio(f"commits/{commit_hash}/pulls", accept="application/vnd.github.groot-preview+json")
if not data:
return None
return data[0]["number"]
def get_pr_merger_and_labels(pr_number: int) -> Tuple[str, Set[str]]:
# See https://docs.github.com/en/rest/reference/pulls#get-a-pull-request
data = query_torchaudio(f"pulls/{pr_number}", accept="application/vnd.github.v3+json")
merger = data["merged_by"]["login"]
labels = {label["name"] for label in data["labels"]}
return merger, labels
if __name__ == "__main__":
commit_hash = sys.argv[1]
pr_number = get_pr_number(commit_hash)
if not pr_number:
sys.exit(0)
merger, labels = get_pr_merger_and_labels(pr_number)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
print(f"@{merger}")
|
# -*- coding: utf-8 -*-
import math
import warnings
from typing import Callable, Optional
import torch
from torch import Tensor
from torchaudio import functional as F
from .functional.functional import (
_get_sinc_resample_kernel,
_apply_sinc_resample_kernel,
)
__all__ = [
'Spectrogram',
'InverseSpectrogram',
'GriffinLim',
'AmplitudeToDB',
'MelScale',
'InverseMelScale',
'MelSpectrogram',
'MFCC',
'LFCC',
'MuLawEncoding',
'MuLawDecoding',
'Resample',
'TimeStretch',
'Fade',
'FrequencyMasking',
'TimeMasking',
'SlidingWindowCmn',
'Vad',
'SpectralCentroid',
'Vol',
'ComputeDeltas',
'PitchShift',
'RNNTLoss',
'PSD',
'MVDR',
]
class Spectrogram(torch.nn.Module):
r"""Create a spectrogram from a audio signal.
Args:
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
pad (int, optional): Two sided padding of signal. (Default: ``0``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
power (float or None, optional): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead. (Default: ``2``)
normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``)
wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
(Default: ``True``)
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. (Default: ``"reflect"``)
onesided (bool, optional): controls whether to return half of results to
avoid redundancy (Default: ``True``)
return_complex (bool, optional):
Deprecated and not used.
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.Spectrogram(n_fft=800)
>>> spectrogram = transform(waveform)
"""
__constants__ = ['n_fft', 'win_length', 'hop_length', 'pad', 'power', 'normalized']
def __init__(self,
n_fft: int = 400,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
pad: int = 0,
window_fn: Callable[..., Tensor] = torch.hann_window,
power: Optional[float] = 2.,
normalized: bool = False,
wkwargs: Optional[dict] = None,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True,
return_complex: Optional[bool] = None) -> None:
super(Spectrogram, self).__init__()
self.n_fft = n_fft
# number of FFT bins. the returned STFT result will have n_fft // 2 + 1
# number of frequencies due to onesided=True in torch.stft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
self.pad = pad
self.power = power
self.normalized = normalized
self.center = center
self.pad_mode = pad_mode
self.onesided = onesided
if return_complex is not None:
warnings.warn(
"`return_complex` argument is now deprecated and is not effective."
"`torchaudio.transforms.Spectrogram(power=None)` always returns a tensor with "
"complex dtype. Please remove the argument in the function call."
)
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Dimension (..., freq, time), where freq is
``n_fft // 2 + 1`` where ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
"""
return F.spectrogram(
waveform,
self.pad,
self.window,
self.n_fft,
self.hop_length,
self.win_length,
self.power,
self.normalized,
self.center,
self.pad_mode,
self.onesided,
)
class InverseSpectrogram(torch.nn.Module):
r"""Create an inverse spectrogram to recover an audio signal from a spectrogram.
Args:
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
pad (int, optional): Two sided padding of signal. (Default: ``0``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
normalized (bool, optional): Whether the spectrogram was normalized by magnitude after stft.
(Default: ``False``)
wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
center (bool, optional): whether the signal in spectrogram was padded on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
(Default: ``True``)
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. (Default: ``"reflect"``)
onesided (bool, optional): controls whether spectrogram was used to return half of results to
avoid redundancy (Default: ``True``)
Example
>>> batch, freq, time = 2, 257, 100
>>> length = 25344
>>> spectrogram = torch.randn(batch, freq, time, dtype=torch.cdouble)
>>> transform = transforms.InverseSpectrogram(n_fft=512)
>>> waveform = transform(spectrogram, length)
"""
__constants__ = ['n_fft', 'win_length', 'hop_length', 'pad', 'power', 'normalized']
def __init__(self,
n_fft: int = 400,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
pad: int = 0,
window_fn: Callable[..., Tensor] = torch.hann_window,
normalized: bool = False,
wkwargs: Optional[dict] = None,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True) -> None:
super(InverseSpectrogram, self).__init__()
self.n_fft = n_fft
# number of FFT bins. the returned STFT result will have n_fft // 2 + 1
# number of frequencies due to onesided=True in torch.stft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
self.pad = pad
self.normalized = normalized
self.center = center
self.pad_mode = pad_mode
self.onesided = onesided
def forward(self, spectrogram: Tensor, length: Optional[int] = None) -> Tensor:
r"""
Args:
spectrogram (Tensor): Complex tensor of audio of dimension (..., freq, time).
length (int or None, optional): The output length of the waveform.
Returns:
Tensor: Dimension (..., time), Least squares estimation of the original signal.
"""
return F.inverse_spectrogram(
spectrogram,
length,
self.pad,
self.window,
self.n_fft,
self.hop_length,
self.win_length,
self.normalized,
self.center,
self.pad_mode,
self.onesided,
)
class GriffinLim(torch.nn.Module):
r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.
Implementation ported from
*librosa* [:footcite:`brian_mcfee-proc-scipy-2015`], *A fast Griffin-Lim algorithm* [:footcite:`6701851`]
and *Signal estimation from modified short-time Fourier transform* [:footcite:`1172092`].
Args:
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
n_iter (int, optional): Number of iteration for phase recovery process. (Default: ``32``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
power (float, optional): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``)
wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
momentum (float, optional): The momentum parameter for fast Griffin-Lim.
Setting this to 0 recovers the original Griffin-Lim method.
Values near 1 can lead to faster convergence, but above 1 may not converge. (Default: ``0.99``)
length (int, optional): Array length of the expected output. (Default: ``None``)
rand_init (bool, optional): Initializes phase randomly if True and to zero otherwise. (Default: ``True``)
Example
>>> batch, freq, time = 2, 257, 100
>>> spectrogram = torch.randn(batch, freq, time)
>>> transform = transforms.GriffinLim(n_fft=512)
>>> waveform = transform(spectrogram)
"""
__constants__ = ['n_fft', 'n_iter', 'win_length', 'hop_length', 'power',
'length', 'momentum', 'rand_init']
def __init__(self,
n_fft: int = 400,
n_iter: int = 32,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
window_fn: Callable[..., Tensor] = torch.hann_window,
power: float = 2.,
wkwargs: Optional[dict] = None,
momentum: float = 0.99,
length: Optional[int] = None,
rand_init: bool = True) -> None:
super(GriffinLim, self).__init__()
assert momentum < 1, 'momentum={} >= 1 can be unstable'.format(momentum)
assert momentum >= 0, 'momentum={} < 0'.format(momentum)
self.n_fft = n_fft
self.n_iter = n_iter
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
self.length = length
self.power = power
self.momentum = momentum / (1 + momentum)
self.rand_init = rand_init
def forward(self, specgram: Tensor) -> Tensor:
r"""
Args:
specgram (Tensor):
A magnitude-only STFT spectrogram of dimension (..., freq, frames)
where freq is ``n_fft // 2 + 1``.
Returns:
Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.
"""
return F.griffinlim(specgram, self.window, self.n_fft, self.hop_length, self.win_length, self.power,
self.n_iter, self.momentum, self.length, self.rand_init)
class AmplitudeToDB(torch.nn.Module):
r"""Turn a tensor from the power/amplitude scale to the decibel scale.
This output depends on the maximum value in the input tensor, and so
may return different values for an audio clip split into snippets vs. a
full clip.
Args:
stype (str, optional): scale of input tensor (``'power'`` or ``'magnitude'``). The
power being the elementwise square of the magnitude. (Default: ``'power'``)
top_db (float or None, optional): minimum negative cut-off in decibels. A reasonable
number is 80. (Default: ``None``)
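Example
A minimal usage sketch; ``'test.wav'`` is a placeholder path, as in the other examples in this module.
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> specgram = torchaudio.transforms.Spectrogram()(waveform)  # power spectrogram by default
>>> transform = torchaudio.transforms.AmplitudeToDB(stype='power', top_db=80.)
>>> specgram_db = transform(specgram)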
"""
__constants__ = ['multiplier', 'amin', 'ref_value', 'db_multiplier']
def __init__(self, stype: str = 'power', top_db: Optional[float] = None) -> None:
super(AmplitudeToDB, self).__init__()
self.stype = stype
if top_db is not None and top_db < 0:
raise ValueError('top_db must be a positive value')
self.top_db = top_db
self.multiplier = 10.0 if stype == 'power' else 20.0
self.amin = 1e-10
self.ref_value = 1.0
self.db_multiplier = math.log10(max(self.amin, self.ref_value))
def forward(self, x: Tensor) -> Tensor:
r"""Numerically stable implementation from Librosa.
https://librosa.org/doc/latest/generated/librosa.amplitude_to_db.html
Args:
x (Tensor): Input tensor before being converted to decibel scale.
Returns:
Tensor: Output tensor in decibel scale.
"""
return F.amplitude_to_DB(x, self.multiplier, self.amin, self.db_multiplier, self.top_db)
class MelScale(torch.nn.Module):
r"""Turn a normal STFT into a mel frequency STFT, using a conversion
matrix. This uses triangular filter banks.
Args:
n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``)
n_stft (int, optional): Number of bins in STFT. See ``n_fft`` in :class:`Spectrogram`. (Default: ``201``)
norm (str or None, optional): If ``'slaney'``, divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
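Example
A minimal sketch; ``'test.wav'`` is a placeholder path. The default ``n_fft=400`` of
:class:`Spectrogram` yields 201 frequency bins, matching the default ``n_stft=201`` here.
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> specgram = torchaudio.transforms.Spectrogram(n_fft=400)(waveform)  # (channel, 201, time)
>>> transform = torchaudio.transforms.MelScale(n_mels=128, sample_rate=sample_rate, n_stft=201)
>>> mel_specgram = transform(specgram)  # (channel, 128, time)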
See also:
:py:func:`torchaudio.functional.melscale_fbanks` - The function used to
generate the filter banks.
"""
__constants__ = ['n_mels', 'sample_rate', 'f_min', 'f_max']
def __init__(self,
n_mels: int = 128,
sample_rate: int = 16000,
f_min: float = 0.,
f_max: Optional[float] = None,
n_stft: int = 201,
norm: Optional[str] = None,
mel_scale: str = "htk") -> None:
super(MelScale, self).__init__()
self.n_mels = n_mels
self.sample_rate = sample_rate
self.f_max = f_max if f_max is not None else float(sample_rate // 2)
self.f_min = f_min
self.norm = norm
self.mel_scale = mel_scale
assert f_min <= self.f_max, 'Require f_min: {} <= f_max: {}'.format(f_min, self.f_max)
fb = F.melscale_fbanks(
n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate, self.norm,
self.mel_scale)
self.register_buffer('fb', fb)
def forward(self, specgram: Tensor) -> Tensor:
r"""
Args:
specgram (Tensor): A spectrogram STFT of dimension (..., freq, time).
Returns:
Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time).
"""
# (..., time, freq) dot (freq, n_mels) -> (..., n_mels, time)
mel_specgram = torch.matmul(specgram.transpose(-1, -2), self.fb).transpose(-1, -2)
return mel_specgram
class InverseMelScale(torch.nn.Module):
r"""Solve for a normal STFT from a mel frequency STFT, using a conversion
matrix. This uses triangular filter banks.
It minimizes the euclidean norm between the input mel-spectrogram and the product of
the estimated spectrogram and the filter banks using SGD.
Args:
n_stft (int): Number of bins in STFT. See ``n_fft`` in :class:`Spectrogram`.
n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``)
max_iter (int, optional): Maximum number of optimization iterations. (Default: ``100000``)
tolerance_loss (float, optional): Value of loss to stop optimization at. (Default: ``1e-5``)
tolerance_change (float, optional): Difference in losses to stop optimization at. (Default: ``1e-8``)
sgdargs (dict or None, optional): Arguments for the SGD optimizer. (Default: ``None``)
norm (str or None, optional): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
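Example
A minimal round-trip sketch; ``'test.wav'`` is a placeholder path, and note that the
SGD-based optimization in ``forward`` may take a while to converge.
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> mel_specgram = torchaudio.transforms.MelSpectrogram(sample_rate)(waveform)  # (channel, 128, time)
>>> transform = torchaudio.transforms.InverseMelScale(n_stft=201, n_mels=128, sample_rate=sample_rate)
>>> specgram = transform(mel_specgram)  # (channel, 201, time)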
"""
__constants__ = ['n_stft', 'n_mels', 'sample_rate', 'f_min', 'f_max', 'max_iter', 'tolerance_loss',
'tolerance_change', 'sgdargs']
def __init__(self,
n_stft: int,
n_mels: int = 128,
sample_rate: int = 16000,
f_min: float = 0.,
f_max: Optional[float] = None,
max_iter: int = 100000,
tolerance_loss: float = 1e-5,
tolerance_change: float = 1e-8,
sgdargs: Optional[dict] = None,
norm: Optional[str] = None,
mel_scale: str = "htk") -> None:
super(InverseMelScale, self).__init__()
self.n_mels = n_mels
self.sample_rate = sample_rate
self.f_max = f_max or float(sample_rate // 2)
self.f_min = f_min
self.max_iter = max_iter
self.tolerance_loss = tolerance_loss
self.tolerance_change = tolerance_change
self.sgdargs = sgdargs or {'lr': 0.1, 'momentum': 0.9}
assert f_min <= self.f_max, 'Require f_min: {} <= f_max: {}'.format(f_min, self.f_max)
fb = F.melscale_fbanks(n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate,
norm, mel_scale)
self.register_buffer('fb', fb)
def forward(self, melspec: Tensor) -> Tensor:
r"""
Args:
melspec (Tensor): A Mel frequency spectrogram of dimension (..., ``n_mels``, time)
Returns:
Tensor: Linear scale spectrogram of size (..., freq, time)
"""
# pack batch
shape = melspec.size()
melspec = melspec.view(-1, shape[-2], shape[-1])
n_mels, time = shape[-2], shape[-1]
freq, _ = self.fb.size() # (freq, n_mels)
melspec = melspec.transpose(-1, -2)
assert self.n_mels == n_mels
specgram = torch.rand(melspec.size()[0], time, freq, requires_grad=True,
dtype=melspec.dtype, device=melspec.device)
optim = torch.optim.SGD([specgram], **self.sgdargs)
loss = float('inf')
for _ in range(self.max_iter):
optim.zero_grad()
diff = melspec - specgram.matmul(self.fb)
new_loss = diff.pow(2).sum(axis=-1).mean()
# take sum over mel-frequency then average over other dimensions
# so that loss threshold is applied per unit time frame
new_loss.backward()
optim.step()
specgram.data = specgram.data.clamp(min=0)
new_loss = new_loss.item()
if new_loss < self.tolerance_loss or abs(loss - new_loss) < self.tolerance_change:
break
loss = new_loss
specgram.requires_grad_(False)
specgram = specgram.clamp(min=0).transpose(-1, -2)
# unpack batch
specgram = specgram.view(shape[:-2] + (freq, time))
return specgram
class MelSpectrogram(torch.nn.Module):
r"""Create MelSpectrogram for a raw audio signal.
This is a composition of :py:func:`torchaudio.transforms.Spectrogram` and
:py:func:`torchaudio.transforms.MelScale`.
Sources
* https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
* https://timsainb.github.io/spectrograms-mfccs-and-inversion-in-python.html
* http://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
Args:
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``None``)
pad (int, optional): Two sided padding of signal. (Default: ``0``)
n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
power (float, optional): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``)
normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``)
wkwargs (Dict[..., ...] or None, optional): Arguments for window function. (Default: ``None``)
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
(Default: ``True``)
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. (Default: ``"reflect"``)
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. (Default: ``True``)
norm (str or None, optional): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.MelSpectrogram(sample_rate)
>>> mel_specgram = transform(waveform) # (channel, n_mels, time)
See also:
:py:func:`torchaudio.functional.melscale_fbanks` - The function used to
generate the filter banks.
"""
__constants__ = ['sample_rate', 'n_fft', 'win_length', 'hop_length', 'pad', 'n_mels', 'f_min']
def __init__(self,
sample_rate: int = 16000,
n_fft: int = 400,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
f_min: float = 0.,
f_max: Optional[float] = None,
pad: int = 0,
n_mels: int = 128,
window_fn: Callable[..., Tensor] = torch.hann_window,
power: float = 2.,
normalized: bool = False,
wkwargs: Optional[dict] = None,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True,
norm: Optional[str] = None,
mel_scale: str = "htk") -> None:
super(MelSpectrogram, self).__init__()
self.sample_rate = sample_rate
self.n_fft = n_fft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
self.pad = pad
self.power = power
self.normalized = normalized
self.n_mels = n_mels # number of mel frequency bins
self.f_max = f_max
self.f_min = f_min
self.spectrogram = Spectrogram(n_fft=self.n_fft, win_length=self.win_length,
hop_length=self.hop_length,
pad=self.pad, window_fn=window_fn, power=self.power,
normalized=self.normalized, wkwargs=wkwargs,
center=center, pad_mode=pad_mode, onesided=onesided)
self.mel_scale = MelScale(
self.n_mels,
self.sample_rate,
self.f_min,
self.f_max,
self.n_fft // 2 + 1,
norm,
mel_scale
)
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time).
"""
specgram = self.spectrogram(waveform)
mel_specgram = self.mel_scale(specgram)
return mel_specgram
class MFCC(torch.nn.Module):
r"""Create the Mel-frequency cepstrum coefficients from an audio signal.
By default, this calculates the MFCC on the DB-scaled Mel spectrogram.
This is not the textbook implementation, but is implemented here to
give consistency with librosa.
This output depends on the maximum value in the input spectrogram, and so
may return different values for an audio clip split into snippets vs. a
full clip.
Args:
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
n_mfcc (int, optional): Number of mfc coefficients to retain. (Default: ``40``)
dct_type (int, optional): type of DCT (discrete cosine transform) to use. (Default: ``2``)
norm (str, optional): norm to use. (Default: ``'ortho'``)
log_mels (bool, optional): whether to use log-mel spectrograms instead of db-scaled. (Default: ``False``)
melkwargs (dict or None, optional): arguments for MelSpectrogram. (Default: ``None``)
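Example
A minimal sketch; ``'test.wav'`` is a placeholder path and the ``melkwargs`` values are illustrative.
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.MFCC(sample_rate=sample_rate, n_mfcc=13, melkwargs={'n_fft': 400, 'n_mels': 64})
>>> mfcc = transform(waveform)  # (channel, 13, time)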
See also:
:py:func:`torchaudio.functional.melscale_fbanks` - The function used to
generate the filter banks.
"""
__constants__ = ['sample_rate', 'n_mfcc', 'dct_type', 'top_db', 'log_mels']
def __init__(self,
sample_rate: int = 16000,
n_mfcc: int = 40,
dct_type: int = 2,
norm: str = 'ortho',
log_mels: bool = False,
melkwargs: Optional[dict] = None) -> None:
super(MFCC, self).__init__()
supported_dct_types = [2]
if dct_type not in supported_dct_types:
raise ValueError('DCT type not supported: {}'.format(dct_type))
self.sample_rate = sample_rate
self.n_mfcc = n_mfcc
self.dct_type = dct_type
self.norm = norm
self.top_db = 80.0
self.amplitude_to_DB = AmplitudeToDB('power', self.top_db)
melkwargs = melkwargs or {}
self.MelSpectrogram = MelSpectrogram(sample_rate=self.sample_rate, **melkwargs)
if self.n_mfcc > self.MelSpectrogram.n_mels:
raise ValueError('Cannot select more MFCC coefficients than # mel bins')
dct_mat = F.create_dct(self.n_mfcc, self.MelSpectrogram.n_mels, self.norm)
self.register_buffer('dct_mat', dct_mat)
self.log_mels = log_mels
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: specgram_mel_db of size (..., ``n_mfcc``, time).
"""
mel_specgram = self.MelSpectrogram(waveform)
if self.log_mels:
log_offset = 1e-6
mel_specgram = torch.log(mel_specgram + log_offset)
else:
mel_specgram = self.amplitude_to_DB(mel_specgram)
# (..., time, n_mels) dot (n_mels, n_mfcc) -> (..., n_mfcc, time)
mfcc = torch.matmul(mel_specgram.transpose(-1, -2), self.dct_mat).transpose(-1, -2)
return mfcc
class LFCC(torch.nn.Module):
r"""Create the linear-frequency cepstrum coefficients from an audio signal.
By default, this calculates the LFCC on the DB-scaled linear filtered spectrogram.
This is not the textbook implementation, but is implemented here to
give consistency with librosa.
This output depends on the maximum value in the input spectrogram, and so
may return different values for an audio clip split into snippets vs. a
full clip.
Args:
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
n_filter (int, optional): Number of linear filters to apply. (Default: ``128``)
n_lfcc (int, optional): Number of lfc coefficients to retain. (Default: ``40``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``None``)
dct_type (int, optional): type of DCT (discrete cosine transform) to use. (Default: ``2``)
norm (str, optional): norm to use. (Default: ``'ortho'``)
log_lf (bool, optional): whether to use log-lf spectrograms instead of db-scaled. (Default: ``False``)
speckwargs (dict or None, optional): arguments for Spectrogram. (Default: ``None``)
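Example
A minimal sketch; ``'test.wav'`` is a placeholder path and the ``speckwargs`` value is illustrative.
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.LFCC(sample_rate=sample_rate, n_lfcc=20, speckwargs={'n_fft': 400})
>>> lfcc = transform(waveform)  # (channel, 20, time)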
See also:
:py:func:`torchaudio.functional.linear_fbanks` - The function used to
generate the filter banks.
"""
__constants__ = ['sample_rate', 'n_filter', 'n_lfcc', 'dct_type', 'top_db', 'log_lf']
def __init__(self,
sample_rate: int = 16000,
n_filter: int = 128,
f_min: float = 0.,
f_max: Optional[float] = None,
n_lfcc: int = 40,
dct_type: int = 2,
norm: str = 'ortho',
log_lf: bool = False,
speckwargs: Optional[dict] = None) -> None:
super(LFCC, self).__init__()
supported_dct_types = [2]
if dct_type not in supported_dct_types:
raise ValueError('DCT type not supported: {}'.format(dct_type))
self.sample_rate = sample_rate
self.f_min = f_min
self.f_max = f_max if f_max is not None else float(sample_rate // 2)
self.n_filter = n_filter
self.n_lfcc = n_lfcc
self.dct_type = dct_type
self.norm = norm
self.top_db = 80.0
self.amplitude_to_DB = AmplitudeToDB('power', self.top_db)
speckwargs = speckwargs or {}
self.Spectrogram = Spectrogram(**speckwargs)
if self.n_lfcc > self.Spectrogram.n_fft:
raise ValueError('Cannot select more LFCC coefficients than # fft bins')
filter_mat = F.linear_fbanks(
n_freqs=self.Spectrogram.n_fft // 2 + 1,
f_min=self.f_min,
f_max=self.f_max,
n_filter=self.n_filter,
sample_rate=self.sample_rate,
)
self.register_buffer("filter_mat", filter_mat)
dct_mat = F.create_dct(self.n_lfcc, self.n_filter, self.norm)
self.register_buffer('dct_mat', dct_mat)
self.log_lf = log_lf
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Linear Frequency Cepstral Coefficients of size (..., ``n_lfcc``, time).
"""
specgram = self.Spectrogram(waveform)
# (..., time, freq) dot (freq, n_filter) -> (..., n_filter, time)
specgram = torch.matmul(specgram.transpose(-1, -2), self.filter_mat).transpose(-1, -2)
if self.log_lf:
log_offset = 1e-6
specgram = torch.log(specgram + log_offset)
else:
specgram = self.amplitude_to_DB(specgram)
# (..., time, n_filter) dot (n_filter, n_lfcc) -> (..., n_lfcc, time)
lfcc = torch.matmul(specgram.transpose(-1, -2), self.dct_mat).transpose(-1, -2)
return lfcc
class MuLawEncoding(torch.nn.Module):
r"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1
Args:
quantization_channels (int, optional): Number of channels. (Default: ``256``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.MuLawEncoding(quantization_channels=512)
>>> mulawtrans = transform(waveform)
"""
__constants__ = ['quantization_channels']
def __init__(self, quantization_channels: int = 256) -> None:
super(MuLawEncoding, self).__init__()
self.quantization_channels = quantization_channels
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (Tensor): A signal to be encoded.
Returns:
Tensor: An encoded signal.
"""
return F.mu_law_encoding(x, self.quantization_channels)
class MuLawDecoding(torch.nn.Module):
r"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and ``quantization_channels - 1``
and returns a signal scaled between -1 and 1.
Args:
quantization_channels (int, optional): Number of channels. (Default: ``256``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.MuLawDecoding(quantization_channels=512)
>>> mulawtrans = transform(waveform)
"""
__constants__ = ['quantization_channels']
def __init__(self, quantization_channels: int = 256) -> None:
super(MuLawDecoding, self).__init__()
self.quantization_channels = quantization_channels
def forward(self, x_mu: Tensor) -> Tensor:
r"""
Args:
x_mu (Tensor): A mu-law encoded signal which needs to be decoded.
Returns:
Tensor: The signal decoded.
"""
return F.mu_law_decoding(x_mu, self.quantization_channels)
class Resample(torch.nn.Module):
r"""Resample a signal from one frequency to another. A resampling method can be given.
Note:
If resampling on waveforms of higher precision than float32, there may be a small loss of precision
because the kernel is cached once as float32. If high precision resampling is important for your application,
the functional form will retain higher precision, but run slower because it does not cache the kernel.
Alternatively, you could rewrite a transform that caches a higher precision kernel.
Args:
orig_freq (int, optional): The original frequency of the signal. (Default: ``16000``)
new_freq (int, optional): The desired frequency. (Default: ``16000``)
resampling_method (str, optional): The resampling method to use.
Options: [``sinc_interpolation``, ``kaiser_window``] (Default: ``'sinc_interpolation'``)
lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper
but less efficient. (Default: ``6``)
rolloff (float, optional): The roll-off frequency of the filter, as a fraction of the Nyquist.
Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: ``0.99``)
beta (float or None, optional): The shape parameter used for kaiser window.
dtype (torch.dtype, optional):
Determines the precision with which the resampling kernel is pre-computed and cached. If not provided,
the kernel is computed with ``torch.float64`` then cached as ``torch.float32``.
If you need higher precision, provide ``torch.float64``, and the pre-computed kernel is computed and
cached as ``torch.float64``. If you use resample with lower precision, then instead of providing this
argument, please use ``Resample.to(dtype)``, so that the kernel generation is still
carried out on ``torch.float64``.
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.Resample(sample_rate, sample_rate/10)
>>> waveform = transform(waveform)
"""
def __init__(
self,
orig_freq: int = 16000,
new_freq: int = 16000,
resampling_method: str = 'sinc_interpolation',
lowpass_filter_width: int = 6,
rolloff: float = 0.99,
beta: Optional[float] = None,
*,
dtype: Optional[torch.dtype] = None,
) -> None:
super().__init__()
self.orig_freq = orig_freq
self.new_freq = new_freq
self.gcd = math.gcd(int(self.orig_freq), int(self.new_freq))
self.resampling_method = resampling_method
self.lowpass_filter_width = lowpass_filter_width
self.rolloff = rolloff
self.beta = beta
if self.orig_freq != self.new_freq:
kernel, self.width = _get_sinc_resample_kernel(
self.orig_freq, self.new_freq, self.gcd,
self.lowpass_filter_width, self.rolloff,
self.resampling_method, beta, dtype=dtype)
self.register_buffer('kernel', kernel)
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Output signal of dimension (..., time).
"""
if self.orig_freq == self.new_freq:
return waveform
return _apply_sinc_resample_kernel(
waveform, self.orig_freq, self.new_freq, self.gcd,
self.kernel, self.width)
class ComputeDeltas(torch.nn.Module):
r"""Compute delta coefficients of a tensor, usually a spectrogram.
See `torchaudio.functional.compute_deltas` for more details.
Args:
win_length (int, optional): The window length used for computing delta. (Default: ``5``)
mode (str, optional): Mode parameter passed to padding. (Default: ``'replicate'``)
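Example
A minimal sketch on a randomly generated spectrogram; the shape is illustrative.
>>> specgram = torch.randn(1, 40, 100)  # (channel, freq, time)
>>> transform = torchaudio.transforms.ComputeDeltas(win_length=5)
>>> deltas = transform(specgram)  # (channel, 40, 100)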
"""
__constants__ = ['win_length']
def __init__(self, win_length: int = 5, mode: str = "replicate") -> None:
super(ComputeDeltas, self).__init__()
self.win_length = win_length
self.mode = mode
def forward(self, specgram: Tensor) -> Tensor:
r"""
Args:
specgram (Tensor): Tensor of audio of dimension (..., freq, time).
Returns:
Tensor: Tensor of deltas of dimension (..., freq, time).
"""
return F.compute_deltas(specgram, win_length=self.win_length, mode=self.mode)
class TimeStretch(torch.nn.Module):
r"""Stretch stft in time without modifying pitch for a given rate.
Proposed in *SpecAugment* [:footcite:`specaugment`].
Args:
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
n_freq (int, optional): number of filter banks from stft. (Default: ``201``)
fixed_rate (float or None, optional): rate to speed up or slow down by.
If None is provided, rate must be passed to the forward method. (Default: ``None``)
Example
>>> spectrogram = torchaudio.transforms.Spectrogram()
>>> stretch = torchaudio.transforms.TimeStretch()
>>>
>>> original = spectrogram(waveform)
>>> stretched_1_2 = stretch(original, 1.2)
>>> stretched_0_9 = stretch(original, 0.9)
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_time_stretch_1.png
:width: 600
:alt: Spectrogram stretched by 1.2
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_time_stretch_2.png
:width: 600
:alt: The original spectrogram
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_time_stretch_3.png
:width: 600
:alt: Spectrogram stretched by 0.9
"""
__constants__ = ['fixed_rate']
def __init__(self,
hop_length: Optional[int] = None,
n_freq: int = 201,
fixed_rate: Optional[float] = None) -> None:
super(TimeStretch, self).__init__()
self.fixed_rate = fixed_rate
n_fft = (n_freq - 1) * 2
hop_length = hop_length if hop_length is not None else n_fft // 2
self.register_buffer('phase_advance', torch.linspace(0, math.pi * hop_length, n_freq)[..., None])
def forward(self, complex_specgrams: Tensor, overriding_rate: Optional[float] = None) -> Tensor:
r"""
Args:
complex_specgrams (Tensor):
A tensor of dimension `(..., freq, num_frame)` with complex dtype.
overriding_rate (float or None, optional): speed up to apply to this batch.
If no rate is passed, use ``self.fixed_rate``. (Default: ``None``)
Returns:
Tensor:
Stretched spectrogram. The resulting tensor is of the same dtype as the input
spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``.
"""
if overriding_rate is None:
if self.fixed_rate is None:
raise ValueError(
"If no fixed_rate is specified, must pass a valid rate to the forward method.")
rate = self.fixed_rate
else:
rate = overriding_rate
return F.phase_vocoder(complex_specgrams, rate, self.phase_advance)
class Fade(torch.nn.Module):
r"""Add a fade in and/or fade out to an waveform.
Args:
fade_in_len (int, optional): Length of fade-in (time frames). (Default: ``0``)
fade_out_len (int, optional): Length of fade-out (time frames). (Default: ``0``)
fade_shape (str, optional): Shape of fade. Must be one of: ``"quarter_sine"``,
``"half_sine"``, ``"linear"``, ``"logarithmic"``, ``"exponential"``.
(Default: ``"linear"``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.Fade(fade_in_len=sample_rate, fade_out_len=2 * sample_rate, fade_shape='linear')
>>> faded_waveform = transform(waveform)
"""
def __init__(self,
fade_in_len: int = 0,
fade_out_len: int = 0,
fade_shape: str = "linear") -> None:
super(Fade, self).__init__()
self.fade_in_len = fade_in_len
self.fade_out_len = fade_out_len
self.fade_shape = fade_shape
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`.
Returns:
Tensor: Tensor of audio of dimension `(..., time)`.
"""
waveform_length = waveform.size()[-1]
device = waveform.device
return (
self._fade_in(waveform_length, device)
* self._fade_out(waveform_length, device)
* waveform
)
def _fade_in(self, waveform_length: int, device: torch.device) -> Tensor:
fade = torch.linspace(0, 1, self.fade_in_len, device=device)
ones = torch.ones(waveform_length - self.fade_in_len, device=device)
if self.fade_shape == "linear":
fade = fade
if self.fade_shape == "exponential":
fade = torch.pow(2, (fade - 1)) * fade
if self.fade_shape == "logarithmic":
fade = torch.log10(.1 + fade) + 1
if self.fade_shape == "quarter_sine":
fade = torch.sin(fade * math.pi / 2)
if self.fade_shape == "half_sine":
fade = torch.sin(fade * math.pi - math.pi / 2) / 2 + 0.5
return torch.cat((fade, ones)).clamp_(0, 1)
def _fade_out(self, waveform_length: int, device: torch.device) -> Tensor:
fade = torch.linspace(0, 1, self.fade_out_len, device=device)
ones = torch.ones(waveform_length - self.fade_out_len, device=device)
if self.fade_shape == "linear":
fade = - fade + 1
if self.fade_shape == "exponential":
fade = torch.pow(2, - fade) * (1 - fade)
if self.fade_shape == "logarithmic":
fade = torch.log10(1.1 - fade) + 1
if self.fade_shape == "quarter_sine":
fade = torch.sin(fade * math.pi / 2 + math.pi / 2)
if self.fade_shape == "half_sine":
fade = torch.sin(fade * math.pi + math.pi / 2) / 2 + 0.5
return torch.cat((ones, fade)).clamp_(0, 1)
class _AxisMasking(torch.nn.Module):
r"""Apply masking to a spectrogram.
Args:
mask_param (int): Maximum possible length of the mask.
axis (int): What dimension the mask is applied on.
iid_masks (bool): Applies iid masks to each of the examples in the batch dimension.
This option is applicable only when the input tensor is 4D.
"""
__constants__ = ['mask_param', 'axis', 'iid_masks']
def __init__(self, mask_param: int, axis: int, iid_masks: bool) -> None:
super(_AxisMasking, self).__init__()
self.mask_param = mask_param
self.axis = axis
self.iid_masks = iid_masks
def forward(self, specgram: Tensor, mask_value: float = 0.) -> Tensor:
r"""
Args:
specgram (Tensor): Tensor of dimension `(..., freq, time)`.
mask_value (float): Value to assign to the masked columns.
Returns:
Tensor: Masked spectrogram of dimensions `(..., freq, time)`.
"""
# if iid_masks flag marked and specgram has a batch dimension
if self.iid_masks and specgram.dim() == 4:
return F.mask_along_axis_iid(specgram, self.mask_param, mask_value, self.axis + 1)
else:
return F.mask_along_axis(specgram, self.mask_param, mask_value, self.axis)
class FrequencyMasking(_AxisMasking):
r"""Apply masking to a spectrogram in the frequency domain.
Proposed in *SpecAugment* [:footcite:`specaugment`].
Args:
freq_mask_param (int): maximum possible length of the mask.
Indices uniformly sampled from [0, freq_mask_param).
iid_masks (bool, optional): whether to apply different masks to each
example/channel in the batch. (Default: ``False``)
This option is applicable only when the input tensor is 4D.
Example
>>> spectrogram = torchaudio.transforms.Spectrogram()
>>> masking = torchaudio.transforms.FrequencyMasking(freq_mask_param=80)
>>>
>>> original = spectrogram(waveform)
>>> masked = masking(original)
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_freq_masking1.png
:alt: The original spectrogram
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_freq_masking2.png
:alt: The spectrogram masked along frequency axis
"""
def __init__(self, freq_mask_param: int, iid_masks: bool = False) -> None:
super(FrequencyMasking, self).__init__(freq_mask_param, 1, iid_masks)
class TimeMasking(_AxisMasking):
r"""Apply masking to a spectrogram in the time domain.
Proposed in *SpecAugment* [:footcite:`specaugment`].
Args:
time_mask_param (int): maximum possible length of the mask.
Indices uniformly sampled from [0, time_mask_param).
iid_masks (bool, optional): whether to apply different masks to each
example/channel in the batch. (Default: ``False``)
This option is applicable only when the input tensor is 4D.
Example
>>> spectrogram = torchaudio.transforms.Spectrogram()
>>> masking = torchaudio.transforms.TimeMasking(time_mask_param=80)
>>>
>>> original = spectrogram(waveform)
>>> masked = masking(original)
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_time_masking1.png
:alt: The original spectrogram
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_time_masking2.png
:alt: The spectrogram masked along time axis
"""
def __init__(self, time_mask_param: int, iid_masks: bool = False) -> None:
super(TimeMasking, self).__init__(time_mask_param, 2, iid_masks)
class Vol(torch.nn.Module):
r"""Add a volume to an waveform.
Args:
gain (float): Interpreted according to the given gain_type:
If ``gain_type`` = ``amplitude``, ``gain`` is a positive amplitude ratio.
If ``gain_type`` = ``power``, ``gain`` is a power (voltage squared).
If ``gain_type`` = ``db``, ``gain`` is in decibels.
gain_type (str, optional): Type of gain. One of: ``amplitude``, ``power``, ``db`` (Default: ``amplitude``)
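Example
A minimal sketch; ``'test.wav'`` is a placeholder path and the gain value is illustrative.
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.Vol(gain=0.5, gain_type='amplitude')
>>> quieter_waveform = transform(waveform)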
"""
def __init__(self, gain: float, gain_type: str = 'amplitude'):
super(Vol, self).__init__()
self.gain = gain
self.gain_type = gain_type
if gain_type in ['amplitude', 'power'] and gain < 0:
raise ValueError("If gain_type = amplitude or power, gain must be positive.")
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`.
Returns:
Tensor: Tensor of audio of dimension `(..., time)`.
"""
if self.gain_type == "amplitude":
waveform = waveform * self.gain
if self.gain_type == "db":
waveform = F.gain(waveform, self.gain)
if self.gain_type == "power":
waveform = F.gain(waveform, 10 * math.log10(self.gain))
return torch.clamp(waveform, -1, 1)
class SlidingWindowCmn(torch.nn.Module):
r"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
Only applicable if center == false, ignored if center==true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
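Example
A minimal sketch on a randomly generated feature tensor; the shape `(channel, time, freq)` is illustrative.
>>> specgram = torch.randn(1, 1000, 40)  # (channel, time, freq)
>>> transform = torchaudio.transforms.SlidingWindowCmn(cmn_window=600, norm_vars=False)
>>> cmn_specgram = transform(specgram)  # (channel, 1000, 40)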
"""
def __init__(self,
cmn_window: int = 600,
min_cmn_window: int = 100,
center: bool = False,
norm_vars: bool = False) -> None:
super().__init__()
self.cmn_window = cmn_window
self.min_cmn_window = min_cmn_window
self.center = center
self.norm_vars = norm_vars
def forward(self, specgram: Tensor) -> Tensor:
r"""
Args:
specgram (Tensor): Tensor of spectrogram of dimension `(..., time, freq)`.
Returns:
Tensor: Tensor of spectrogram of dimension `(..., time, freq)`.
"""
cmn_specgram = F.sliding_window_cmn(
specgram, self.cmn_window, self.min_cmn_window, self.center, self.norm_vars)
return cmn_specgram
class Vad(torch.nn.Module):
r"""Voice Activity Detector. Similar to SoX implementation.
Attempts to trim silence and quiet background sounds from the ends of recordings of speech.
The algorithm currently uses a simple cepstral power measurement to detect voice,
so may be fooled by other things, especially music.
The effect can trim only from the front of the audio,
so in order to trim from the back, the reverse effect must also be used.
Args:
sample_rate (int): Sample rate of audio signal.
trigger_level (float, optional): The measurement level used to trigger activity detection.
This may need to be changed depending on the noise level, signal level,
and other characteristics of the input audio. (Default: 7.0)
trigger_time (float, optional): The time constant (in seconds)
used to help ignore short bursts of sound. (Default: 0.25)
search_time (float, optional): The amount of audio (in seconds)
to search for quieter/shorter bursts of audio to include prior
to the detected trigger point. (Default: 1.0)
allowed_gap (float, optional): The allowed gap (in seconds) between
quieter/shorter bursts of audio to include prior
to the detected trigger point. (Default: 0.25)
pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve
before the trigger point and any found quieter/shorter bursts. (Default: 0.0)
boot_time (float, optional): The algorithm (internally) uses adaptive noise
estimation/reduction in order to detect the start of the wanted audio.
This option sets the time for the initial noise estimate. (Default: 0.35)
noise_up_time (float, optional): Time constant used by the adaptive noise estimator
for when the noise level is increasing. (Default: 0.1)
noise_down_time (float, optional): Time constant used by the adaptive noise estimator
for when the noise level is decreasing. (Default: 0.01)
noise_reduction_amount (float, optional): Amount of noise reduction to use in
the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35)
measure_freq (float, optional): Frequency of the algorithm’s
processing/measurements. (Default: 20.0)
measure_duration (float or None, optional): Measurement duration.
(Default: Twice the measurement period; i.e. with overlap.)
measure_smooth_time (float, optional): Time constant used to smooth
spectral measurements. (Default: 0.4)
hp_filter_freq (float, optional): "Brick-wall" frequency of high-pass filter applied
at the input to the detector algorithm. (Default: 50.0)
lp_filter_freq (float, optional): "Brick-wall" frequency of low-pass filter applied
at the input to the detector algorithm. (Default: 6000.0)
hp_lifter_freq (float, optional): "Brick-wall" frequency of high-pass lifter used
in the detector algorithm. (Default: 150.0)
lp_lifter_freq (float, optional): "Brick-wall" frequency of low-pass lifter used
in the detector algorithm. (Default: 2000.0)
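Example
A minimal sketch; ``'test.wav'`` is a placeholder path. To also trim trailing silence, flip the
waveform along the time axis, apply the transform again, and flip the result back.
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.Vad(sample_rate=sample_rate, trigger_level=7.0)
>>> trimmed_waveform = transform(waveform)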
Reference:
- http://sox.sourceforge.net/sox.html
"""
def __init__(self,
sample_rate: int,
trigger_level: float = 7.0,
trigger_time: float = 0.25,
search_time: float = 1.0,
allowed_gap: float = 0.25,
pre_trigger_time: float = 0.0,
boot_time: float = .35,
noise_up_time: float = .1,
noise_down_time: float = .01,
noise_reduction_amount: float = 1.35,
measure_freq: float = 20.0,
measure_duration: Optional[float] = None,
measure_smooth_time: float = .4,
hp_filter_freq: float = 50.,
lp_filter_freq: float = 6000.,
hp_lifter_freq: float = 150.,
lp_lifter_freq: float = 2000.) -> None:
super().__init__()
self.sample_rate = sample_rate
self.trigger_level = trigger_level
self.trigger_time = trigger_time
self.search_time = search_time
self.allowed_gap = allowed_gap
self.pre_trigger_time = pre_trigger_time
self.boot_time = boot_time
self.noise_up_time = noise_up_time
self.noise_down_time = noise_down_time
self.noise_reduction_amount = noise_reduction_amount
self.measure_freq = measure_freq
self.measure_duration = measure_duration
self.measure_smooth_time = measure_smooth_time
self.hp_filter_freq = hp_filter_freq
self.lp_filter_freq = lp_filter_freq
self.hp_lifter_freq = hp_lifter_freq
self.lp_lifter_freq = lp_lifter_freq
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension `(channels, time)` or `(time)`
Tensor of shape `(channels, time)` is treated as a multi-channel recording
of the same event and the resulting output will be trimmed to the earliest
voice activity in any channel.
"""
return F.vad(
waveform=waveform,
sample_rate=self.sample_rate,
trigger_level=self.trigger_level,
trigger_time=self.trigger_time,
search_time=self.search_time,
allowed_gap=self.allowed_gap,
pre_trigger_time=self.pre_trigger_time,
boot_time=self.boot_time,
noise_up_time=self.noise_up_time,
noise_down_time=self.noise_down_time,
noise_reduction_amount=self.noise_reduction_amount,
measure_freq=self.measure_freq,
measure_duration=self.measure_duration,
measure_smooth_time=self.measure_smooth_time,
hp_filter_freq=self.hp_filter_freq,
lp_filter_freq=self.lp_filter_freq,
hp_lifter_freq=self.hp_lifter_freq,
lp_lifter_freq=self.lp_lifter_freq,
)
class SpectralCentroid(torch.nn.Module):
r"""Compute the spectral centroid for each channel along the time axis.
The spectral centroid is defined as the weighted average of the
frequency values, weighted by their magnitude.
Args:
sample_rate (int): Sample rate of audio signal.
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
pad (int, optional): Two sided padding of signal. (Default: ``0``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.SpectralCentroid(sample_rate)
>>> spectral_centroid = transform(waveform) # (channel, time)
"""
__constants__ = ['sample_rate', 'n_fft', 'win_length', 'hop_length', 'pad']
def __init__(self,
sample_rate: int,
n_fft: int = 400,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
pad: int = 0,
window_fn: Callable[..., Tensor] = torch.hann_window,
wkwargs: Optional[dict] = None) -> None:
super(SpectralCentroid, self).__init__()
self.sample_rate = sample_rate
self.n_fft = n_fft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
self.pad = pad
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`.
Returns:
Tensor: Spectral Centroid of size `(..., time)`.
"""
return F.spectral_centroid(waveform, self.sample_rate, self.pad, self.window, self.n_fft, self.hop_length,
self.win_length)
class PitchShift(torch.nn.Module):
r"""Shift the pitch of a waveform by ``n_steps`` steps.
Args:
waveform (Tensor): The input waveform of shape `(..., time)`.
sample_rate (int): Sample rate of `waveform`.
n_steps (int): The (fractional) steps to shift `waveform`.
bins_per_octave (int, optional): The number of steps per octave (Default : ``12``).
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins (Default: ``512``).
win_length (int or None, optional): Window size. If None, then ``n_fft`` is used. (Default: ``None``).
hop_length (int or None, optional): Length of hop between STFT windows. If None, then ``win_length // 4``
is used (Default: ``None``).
window (Tensor or None, optional): Window tensor that is applied/multiplied to each frame/window.
If None, then ``torch.hann_window(win_length)`` is used (Default: ``None``).
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.PitchShift(sample_rate, 4)
>>> waveform_shift = transform(waveform) # (channel, time)
"""
__constants__ = ['sample_rate', 'n_steps', 'bins_per_octave', 'n_fft', 'win_length', 'hop_length']
def __init__(self,
sample_rate: int,
n_steps: int,
bins_per_octave: int = 12,
n_fft: int = 512,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
window_fn: Callable[..., Tensor] = torch.hann_window,
wkwargs: Optional[dict] = None) -> None:
super(PitchShift, self).__init__()
self.n_steps = n_steps
self.bins_per_octave = bins_per_octave
self.sample_rate = sample_rate
self.n_fft = n_fft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 4
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`.
Returns:
Tensor: The pitch-shifted audio of shape `(..., time)`.
"""
return F.pitch_shift(waveform, self.sample_rate, self.n_steps, self.bins_per_octave, self.n_fft,
self.win_length, self.hop_length, self.window)
class RNNTLoss(torch.nn.Module):
"""Compute the RNN Transducer loss from *Sequence Transduction with Recurrent Neural Networks*
[:footcite:`graves2012sequence`].
The RNN Transducer loss extends the CTC loss by defining a distribution over output
sequences of all lengths, and by jointly modelling both input-output and output-output
dependencies.
Args:
blank (int, optional): blank label (Default: ``-1``)
clamp (float, optional): clamp for gradients (Default: ``-1``)
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. (Default: ``'mean'``)
Example
>>> # Hypothetical values
>>> logits = torch.tensor([[[[0.1, 0.6, 0.1, 0.1, 0.1],
>>> [0.1, 0.1, 0.6, 0.1, 0.1],
>>> [0.1, 0.1, 0.2, 0.8, 0.1]],
>>> [[0.1, 0.6, 0.1, 0.1, 0.1],
>>> [0.1, 0.1, 0.2, 0.1, 0.1],
>>> [0.7, 0.1, 0.2, 0.1, 0.1]]]],
>>> dtype=torch.float32,
>>> requires_grad=True)
>>> targets = torch.tensor([[1, 2]], dtype=torch.int)
>>> logit_lengths = torch.tensor([2], dtype=torch.int)
>>> target_lengths = torch.tensor([2], dtype=torch.int)
>>> transform = transforms.RNNTLoss(blank=0)
>>> loss = transform(logits, targets, logit_lengths, target_lengths)
>>> loss.backward()
"""
def __init__(
self,
blank: int = -1,
clamp: float = -1.,
reduction: str = "mean",
):
super().__init__()
self.blank = blank
self.clamp = clamp
self.reduction = reduction
def forward(
self,
logits: Tensor,
targets: Tensor,
logit_lengths: Tensor,
target_lengths: Tensor,
):
"""
Args:
logits (Tensor): Tensor of dimension `(batch, max seq length, max target length + 1, class)`
containing output from joiner
targets (Tensor): Tensor of dimension `(batch, max target length)` containing targets with zero padded
logit_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of each sequence from encoder
target_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of targets for each sequence
Returns:
Tensor: Loss with the reduction option applied. If ``reduction`` is ``'none'``, then size (batch),
otherwise scalar.
"""
return F.rnnt_loss(
logits,
targets,
logit_lengths,
target_lengths,
self.blank,
self.clamp,
self.reduction
)
def _get_mat_trace(input: torch.Tensor, dim1: int = -1, dim2: int = -2) -> torch.Tensor:
r"""Compute the trace of a Tensor along ``dim1`` and ``dim2`` dimensions.
Args:
input (torch.Tensor): Tensor of dimension `(..., channel, channel)`
dim1 (int, optional): the first dimension of the diagonal matrix
(Default: -1)
dim2 (int, optional): the second dimension of the diagonal matrix
(Default: -2)
Returns:
torch.Tensor: trace of the input Tensor
"""
assert input.ndim >= 2, "The dimension of the tensor must be at least 2."
assert input.shape[dim1] == input.shape[dim2],\
"The size of ``dim1`` and ``dim2`` must be the same."
input = torch.diagonal(input, 0, dim1=dim1, dim2=dim2)
return input.sum(dim=-1)
class PSD(torch.nn.Module):
r"""Compute cross-channel power spectral density (PSD) matrix.
Args:
multi_mask (bool, optional): whether to use multi-channel Time-Frequency masks. (Default: ``False``)
normalize (bool, optional): whether to normalize the mask along the time dimension. (Default: ``True``)
eps (float, optional): a value added to the denominator in mask normalization. (Default: 1e-15)
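Example
A minimal sketch on randomly generated inputs; the shapes and dtype are illustrative.
>>> specgram = torch.randn(4, 201, 100, dtype=torch.cdouble)  # (channel, freq, time)
>>> mask = torch.rand(201, 100)  # (freq, time)
>>> transform = torchaudio.transforms.PSD(multi_mask=False)
>>> psd = transform(specgram, mask)  # (freq, channel, channel) -> (201, 4, 4)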
"""
def __init__(self, multi_mask: bool = False, normalize: bool = True, eps: float = 1e-15):
super().__init__()
self.multi_mask = multi_mask
self.normalize = normalize
self.eps = eps
def forward(self, specgram: torch.Tensor, mask: Optional[torch.Tensor] = None):
"""
Args:
specgram (torch.Tensor): multi-channel complex-valued STFT matrix.
Tensor of dimension `(..., channel, freq, time)`
mask (torch.Tensor or None, optional): Time-Frequency mask for normalization.
Tensor of dimension `(..., freq, time)` if multi_mask is ``False`` or
of dimension `(..., channel, freq, time)` if multi_mask is ``True``
Returns:
Tensor: PSD matrix of the input STFT matrix.
Tensor of dimension `(..., freq, channel, channel)`
"""
# outer product:
# (..., ch_1, freq, time) x (..., ch_2, freq, time) -> (..., time, ch_1, ch_2)
psd = torch.einsum("...cft,...eft->...ftce", [specgram, specgram.conj()])
if mask is not None:
if self.multi_mask:
# Averaging mask along channel dimension
mask = mask.mean(dim=-3) # (..., freq, time)
# Normalized mask along time dimension:
if self.normalize:
mask = mask / (mask.sum(dim=-1, keepdim=True) + self.eps)
psd = psd * mask.unsqueeze(-1).unsqueeze(-1)
psd = psd.sum(dim=-3)
return psd
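# A minimal usage sketch for ``PSD`` (the shapes below are illustrative assumptions):
# a multi-channel complex spectrogram and a Time-Frequency mask are reduced to a
# per-frequency channel covariance matrix.
#
#     >>> psd_transform = PSD(multi_mask=False)
#     >>> real, imag = torch.rand(6, 201, 100), torch.rand(6, 201, 100)
#     >>> specgram = torch.complex(real, imag).to(torch.cdouble)  # (channel, freq, time)
#     >>> mask = torch.rand(201, 100)                             # (freq, time)
#     >>> psd_transform(specgram, mask).shape
#     torch.Size([201, 6, 6])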
class MVDR(torch.nn.Module):
"""Minimum Variance Distortionless Response (MVDR) module that performs MVDR beamforming with Time-Frequency masks.
Based on https://github.com/espnet/espnet/blob/master/espnet2/enh/layers/beamformer.py
    We provide three solutions for MVDR beamforming. One is based on *reference channel selection*
[:footcite:`souden2009optimal`] (``solution=ref_channel``).
.. math::
\\textbf{w}_{\\text{MVDR}}(f) =\
\\frac{{{\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f){\\bf{\\Phi}_{\\textbf{SS}}}}(f)}\
{\\text{Trace}({{{\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f) \\bf{\\Phi}_{\\textbf{SS}}}(f))}}\\bm{u}
where :math:`\\bf{\\Phi}_{\\textbf{SS}}` and :math:`\\bf{\\Phi}_{\\textbf{NN}}` are the covariance\
    matrices of speech and noise, respectively. :math:`\\bf{u}` is a one-hot vector to determine the\
reference channel.
The other two solutions are based on the steering vector (``solution=stv_evd`` or ``solution=stv_power``).
.. math::
\\textbf{w}_{\\text{MVDR}}(f) =\
\\frac{{{\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f){\\bm{v}}(f)}}\
{{\\bm{v}^{\\mathsf{H}}}(f){\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f){\\bm{v}}(f)}
where :math:`\\bm{v}` is the acoustic transfer function or the steering vector.\
:math:`.^{\\mathsf{H}}` denotes the Hermitian Conjugate operation.
We apply either *eigenvalue decomposition*
[:footcite:`higuchi2016robust`] or the *power method* [:footcite:`mises1929praktische`] to get the
steering vector from the PSD matrix of speech.
After estimating the beamforming weight, the enhanced Short-time Fourier Transform (STFT) is obtained by
.. math::
\\hat{\\bf{S}} = {\\bf{w}^\\mathsf{H}}{\\bf{Y}}, {\\bf{w}} \\in \\mathbb{C}^{M \\times F}
where :math:`\\bf{Y}` and :math:`\\hat{\\bf{S}}` are the STFT of the multi-channel noisy speech and\
the single-channel enhanced speech, respectively.
For online streaming audio, we provide a *recursive method* [:footcite:`higuchi2017online`] to update the
PSD matrices of speech and noise, respectively.
Args:
ref_channel (int, optional): the reference channel for beamforming. (Default: ``0``)
solution (str, optional): the solution to get MVDR weight.
Options: [``ref_channel``, ``stv_evd``, ``stv_power``]. (Default: ``ref_channel``)
multi_mask (bool, optional): whether to use multi-channel Time-Frequency masks. (Default: ``False``)
        diag_loading (bool, optional): whether to apply diagonal loading to the PSD matrix of noise.
            (Default: ``True``)
        diag_eps (float, optional): the coefficient multiplied to the identity matrix for diagonal loading.
            (Default: 1e-7)
        online (bool, optional): whether to update the MVDR vector based on the previous PSD matrices.
(Default: ``False``)
Note:
The MVDR Module requires the input STFT to be double precision (``torch.complex128`` or ``torch.cdouble``),
to improve the numerical stability. You can downgrade the precision to ``torch.float`` after generating the
enhanced waveform for ASR joint training.
Note:
If you use ``stv_evd`` solution, the gradient of the same input may not be identical if the
eigenvalues of the PSD matrix are not distinct (i.e. some eigenvalues are close or identical).
"""
def __init__(
self,
ref_channel: int = 0,
solution: str = "ref_channel",
multi_mask: bool = False,
diag_loading: bool = True,
diag_eps: float = 1e-7,
online: bool = False,
):
super().__init__()
assert solution in ["ref_channel", "stv_evd", "stv_power"],\
"Unknown solution provided. Must be one of [``ref_channel``, ``stv_evd``, ``stv_power``]."
self.ref_channel = ref_channel
self.solution = solution
self.multi_mask = multi_mask
self.diag_loading = diag_loading
self.diag_eps = diag_eps
self.online = online
self.psd = PSD(multi_mask)
psd_s: torch.Tensor = torch.zeros(1)
psd_n: torch.Tensor = torch.zeros(1)
mask_sum_s: torch.Tensor = torch.zeros(1)
mask_sum_n: torch.Tensor = torch.zeros(1)
self.register_buffer('psd_s', psd_s)
self.register_buffer('psd_n', psd_n)
self.register_buffer('mask_sum_s', mask_sum_s)
self.register_buffer('mask_sum_n', mask_sum_n)
def _get_updated_mvdr_vector(
self,
psd_s: torch.Tensor,
psd_n: torch.Tensor,
mask_s: torch.Tensor,
mask_n: torch.Tensor,
reference_vector: torch.Tensor,
solution: str = 'ref_channel',
diagonal_loading: bool = True,
diag_eps: float = 1e-7,
eps: float = 1e-8,
) -> torch.Tensor:
r"""Recursively update the MVDR beamforming vector.
Args:
psd_s (torch.Tensor): psd matrix of target speech
psd_n (torch.Tensor): psd matrix of noise
mask_s (torch.Tensor): T-F mask of target speech
mask_n (torch.Tensor): T-F mask of noise
reference_vector (torch.Tensor): one-hot reference channel matrix
solution (str, optional): the solution to estimate the beamforming weight
(Default: ``ref_channel``)
diagonal_loading (bool, optional): whether to apply diagonal loading to psd_n
(Default: ``True``)
            diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading
(Default: 1e-7)
eps (float, optional): a value added to the denominator in mask normalization. (Default: 1e-8)
Returns:
Tensor: the mvdr beamforming weight matrix
"""
if self.multi_mask:
# Averaging mask along channel dimension
mask_s = mask_s.mean(dim=-3) # (..., freq, time)
mask_n = mask_n.mean(dim=-3) # (..., freq, time)
if self.psd_s.ndim == 1:
self.psd_s = psd_s
self.psd_n = psd_n
self.mask_sum_s = mask_s.sum(dim=-1)
self.mask_sum_n = mask_n.sum(dim=-1)
return self._get_mvdr_vector(psd_s, psd_n, reference_vector, solution, diagonal_loading, diag_eps, eps)
else:
psd_s = self._get_updated_psd_speech(psd_s, mask_s)
psd_n = self._get_updated_psd_noise(psd_n, mask_n)
self.psd_s = psd_s
self.psd_n = psd_n
self.mask_sum_s = self.mask_sum_s + mask_s.sum(dim=-1)
self.mask_sum_n = self.mask_sum_n + mask_n.sum(dim=-1)
return self._get_mvdr_vector(psd_s, psd_n, reference_vector, solution, diagonal_loading, diag_eps, eps)
def _get_updated_psd_speech(self, psd_s: torch.Tensor, mask_s: torch.Tensor) -> torch.Tensor:
r"""Update psd of speech recursively.
Args:
psd_s (torch.Tensor): psd matrix of target speech
mask_s (torch.Tensor): T-F mask of target speech
Returns:
torch.Tensor: the updated psd of speech
"""
numerator = self.mask_sum_s / (self.mask_sum_s + mask_s.sum(dim=-1))
denominator = 1 / (self.mask_sum_s + mask_s.sum(dim=-1))
psd_s = self.psd_s * numerator[..., None, None] + psd_s * denominator[..., None, None]
return psd_s
def _get_updated_psd_noise(self, psd_n: torch.Tensor, mask_n: torch.Tensor) -> torch.Tensor:
r"""Update psd of noise recursively.
Args:
psd_n (torch.Tensor): psd matrix of target noise
mask_n (torch.Tensor): T-F mask of target noise
Returns:
torch.Tensor: the updated psd of noise
"""
numerator = self.mask_sum_n / (self.mask_sum_n + mask_n.sum(dim=-1))
denominator = 1 / (self.mask_sum_n + mask_n.sum(dim=-1))
psd_n = self.psd_n * numerator[..., None, None] + psd_n * denominator[..., None, None]
return psd_n
def _get_mvdr_vector(
self,
psd_s: torch.Tensor,
psd_n: torch.Tensor,
reference_vector: torch.Tensor,
solution: str = 'ref_channel',
diagonal_loading: bool = True,
diag_eps: float = 1e-7,
eps: float = 1e-8,
) -> torch.Tensor:
r"""Compute beamforming vector by the reference channel selection method.
Args:
psd_s (torch.Tensor): psd matrix of target speech
psd_n (torch.Tensor): psd matrix of noise
reference_vector (torch.Tensor): one-hot reference channel matrix
solution (str, optional): the solution to estimate the beamforming weight
(Default: ``ref_channel``)
diagonal_loading (bool, optional): whether to apply diagonal loading to psd_n
(Default: ``True``)
            diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading
(Default: 1e-7)
            eps (float, optional): a value added to the denominator to avoid division by zero. (Default: 1e-8)
Returns:
torch.Tensor: the mvdr beamforming weight matrix
"""
if diagonal_loading:
psd_n = self._tik_reg(psd_n, reg=diag_eps, eps=eps)
if solution == "ref_channel":
numerator = torch.linalg.solve(psd_n, psd_s) # psd_n.inv() @ psd_s
# ws: (..., C, C) / (...,) -> (..., C, C)
ws = numerator / (_get_mat_trace(numerator)[..., None, None] + eps)
# h: (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1)
beamform_vector = torch.einsum("...fec,...c->...fe", [ws, reference_vector])
else:
if solution == "stv_evd":
stv = self._get_steering_vector_evd(psd_s)
else:
stv = self._get_steering_vector_power(psd_s, psd_n, reference_vector)
# numerator = psd_n.inv() @ stv
numerator = torch.linalg.solve(psd_n, stv).squeeze(-1) # (..., freq, channel)
# denominator = stv^H @ psd_n.inv() @ stv
denominator = torch.einsum("...d,...d->...", [stv.conj().squeeze(-1), numerator])
            # normalize the numerator
scale = stv.squeeze(-1)[..., self.ref_channel, None].conj()
beamform_vector = numerator * scale / (denominator.real.unsqueeze(-1) + eps)
return beamform_vector
def _get_steering_vector_evd(self, psd_s: torch.Tensor) -> torch.Tensor:
r"""Estimate the steering vector by eigenvalue decomposition.
Args:
psd_s (torch.tensor): covariance matrix of speech
Tensor of dimension `(..., freq, channel, channel)`
Returns:
            torch.Tensor: the estimated steering vector
                Tensor of dimension `(..., freq, channel, 1)`
"""
w, v = torch.linalg.eig(psd_s) # (..., freq, channel, channel)
_, indices = torch.max(w.abs(), dim=-1, keepdim=True)
indices = indices.unsqueeze(-1)
stv = v.gather(-1, indices.expand(psd_s.shape[:-1] + (1,))) # (..., freq, channel, 1)
return stv
def _get_steering_vector_power(
self,
psd_s: torch.Tensor,
psd_n: torch.Tensor,
reference_vector: torch.Tensor
) -> torch.Tensor:
r"""Estimate the steering vector by the power method.
Args:
psd_s (torch.tensor): covariance matrix of speech
Tensor of dimension `(..., freq, channel, channel)`
psd_n (torch.Tensor): covariance matrix of noise
Tensor of dimension `(..., freq, channel, channel)`
reference_vector (torch.Tensor): one-hot reference channel matrix
Returns:
            torch.Tensor: the estimated steering vector
                Tensor of dimension `(..., freq, channel, 1)`
"""
phi = torch.linalg.solve(psd_n, psd_s) # psd_n.inv() @ psd_s
stv = torch.einsum("...fec,...c->...fe", [phi, reference_vector])
stv = stv.unsqueeze(-1)
stv = torch.matmul(phi, stv)
stv = torch.matmul(psd_s, stv)
return stv
def _apply_beamforming_vector(
self,
specgram: torch.Tensor,
beamform_vector: torch.Tensor
) -> torch.Tensor:
r"""Apply the beamforming weight to the noisy STFT
Args:
specgram (torch.tensor): multi-channel noisy STFT
Tensor of dimension `(..., channel, freq, time)`
beamform_vector (torch.Tensor): beamforming weight matrix
Tensor of dimension `(..., freq, channel)`
Returns:
torch.Tensor: the enhanced STFT
Tensor of dimension `(..., freq, time)`
"""
# (..., channel) x (..., channel, freq, time) -> (..., freq, time)
specgram_enhanced = torch.einsum("...fc,...cft->...ft", [beamform_vector.conj(), specgram])
return specgram_enhanced
def _tik_reg(
self,
mat: torch.Tensor,
reg: float = 1e-7,
eps: float = 1e-8
) -> torch.Tensor:
"""Perform Tikhonov regularization (only modifying real part).
Args:
mat (torch.Tensor): input matrix (..., channel, channel)
            reg (float, optional): regularization factor (Default: 1e-7)
            eps (float, optional): a value added in case the correlation matrix is all-zero (Default: 1e-8)
Returns:
torch.Tensor: regularized matrix (..., channel, channel)
"""
# Add eps
C = mat.size(-1)
eye = torch.eye(C, dtype=mat.dtype, device=mat.device)
with torch.no_grad():
epsilon = _get_mat_trace(mat).real[..., None, None] * reg
# in case that correlation_matrix is all-zero
epsilon = epsilon + eps
mat = mat + epsilon * eye[..., :, :]
return mat
def forward(
self,
specgram: torch.Tensor,
mask_s: torch.Tensor,
mask_n: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Perform MVDR beamforming.
Args:
            specgram (torch.Tensor): the multi-channel STFT of the noisy speech.
Tensor of dimension `(..., channel, freq, time)`
mask_s (torch.Tensor): Time-Frequency mask of target speech.
Tensor of dimension `(..., freq, time)` if multi_mask is ``False``
                or of dimension `(..., channel, freq, time)` if multi_mask is ``True``
mask_n (torch.Tensor or None, optional): Time-Frequency mask of noise.
Tensor of dimension `(..., freq, time)` if multi_mask is ``False``
                or of dimension `(..., channel, freq, time)` if multi_mask is ``True``
(Default: None)
Returns:
torch.Tensor: The single-channel STFT of the enhanced speech.
Tensor of dimension `(..., freq, time)`
"""
if specgram.ndim < 3:
raise ValueError(
f"Expected at least 3D tensor (..., channel, freq, time). Found: {specgram.shape}"
)
if specgram.dtype != torch.cdouble:
raise ValueError(
f"The type of ``specgram`` tensor must be ``torch.cdouble``. Found: {specgram.dtype}"
)
if mask_n is None:
warnings.warn(
"``mask_n`` is not provided, use ``1 - mask_s`` as ``mask_n``."
)
mask_n = 1 - mask_s
shape = specgram.size()
# pack batch
specgram = specgram.reshape(-1, shape[-3], shape[-2], shape[-1])
if self.multi_mask:
mask_s = mask_s.reshape(-1, shape[-3], shape[-2], shape[-1])
mask_n = mask_n.reshape(-1, shape[-3], shape[-2], shape[-1])
else:
mask_s = mask_s.reshape(-1, shape[-2], shape[-1])
mask_n = mask_n.reshape(-1, shape[-2], shape[-1])
        psd_s = self.psd(specgram, mask_s)  # (..., freq, channel, channel)
        psd_n = self.psd(specgram, mask_n)  # (..., freq, channel, channel)
u = torch.zeros(
specgram.size()[:-2],
device=specgram.device,
dtype=torch.cdouble
) # (..., channel)
u[..., self.ref_channel].fill_(1)
if self.online:
w_mvdr = self._get_updated_mvdr_vector(
psd_s,
psd_n,
mask_s,
mask_n,
u,
self.solution,
self.diag_loading,
self.diag_eps
)
else:
w_mvdr = self._get_mvdr_vector(
psd_s,
psd_n,
u,
self.solution,
self.diag_loading,
self.diag_eps
)
specgram_enhanced = self._apply_beamforming_vector(specgram, w_mvdr)
# unpack batch
specgram_enhanced = specgram_enhanced.reshape(shape[:-3] + shape[-2:])
return specgram_enhanced
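# A minimal end-to-end sketch for ``MVDR`` (the shapes and the random mask below are
# illustrative assumptions, not the output of a trained mask estimator): beamform a
# 6-channel complex spectrogram with a speech mask, letting ``mask_n`` default to
# ``1 - mask_s``.
#
#     >>> mvdr = MVDR(ref_channel=0, solution="ref_channel")
#     >>> real, imag = torch.rand(6, 201, 100), torch.rand(6, 201, 100)
#     >>> specgram = torch.complex(real, imag).to(torch.cdouble)  # (channel, freq, time)
#     >>> mask_s = torch.rand(201, 100)                           # (freq, time)
#     >>> enhanced = mvdr(specgram, mask_s)
#     >>> enhanced.shape
#     torch.Size([201, 100])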
|
# To use this file, the dependency (https://github.com/vesis84/kaldi-io-for-python)
# needs to be installed. This is a light wrapper around kaldi_io that returns
# torch.Tensors.
from typing import Any, Callable, Iterable, Tuple
import torch
from torch import Tensor
from torchaudio._internal import module_utils as _mod_utils
if _mod_utils.is_module_available('kaldi_io', 'numpy'):
import numpy as np
import kaldi_io
__all__ = [
'read_vec_int_ark',
'read_vec_flt_scp',
'read_vec_flt_ark',
'read_mat_scp',
'read_mat_ark',
]
def _convert_method_output_to_tensor(file_or_fd: Any,
fn: Callable,
convert_contiguous: bool = False) -> Iterable[Tuple[str, Tensor]]:
r"""Takes a method invokes it. The output is converted to a tensor.
Args:
file_or_fd (str/FileDescriptor): File name or file descriptor
        fn (Callable): Function that takes the file name/descriptor and returns an
            iterable of ``(key, numpy array)`` pairs.
convert_contiguous (bool, optional): Determines whether the array should be converted into a
contiguous layout. (Default: ``False``)
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is vec/mat
"""
for key, np_arr in fn(file_or_fd):
if convert_contiguous:
np_arr = np.ascontiguousarray(np_arr)
yield key, torch.from_numpy(np_arr)
@_mod_utils.requires_module('kaldi_io', 'numpy')
def read_vec_int_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,vector<int>) tuples, which reads from the ark file/stream.
Args:
file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
Example
>>> # read ark to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_int_ark(file) }
"""
# Requires convert_contiguous to be True because elements from int32 vector are
    # stored in tuples: (sizeof(int32), value) so strides are (5,) instead of (4,) which will throw an error
# in from_numpy as it expects strides to be a multiple of 4 (int32).
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_int_ark, convert_contiguous=True)
@_mod_utils.requires_module('kaldi_io', 'numpy')
def read_vec_flt_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,vector<float32/float64>) tuples, read according to Kaldi scp.
Args:
file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
Example
>>> # read scp to a 'dictionary'
        >>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_scp(file) }
"""
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_scp)
@_mod_utils.requires_module('kaldi_io', 'numpy')
def read_vec_flt_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,vector<float32/float64>) tuples, which reads from the ark file/stream.
Args:
file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
Example
>>> # read ark to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_ark(file) }
"""
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_ark)
@_mod_utils.requires_module('kaldi_io', 'numpy')
def read_mat_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,matrix<float32/float64>) tuples, read according to Kaldi scp.
Args:
file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file
Example
>>> # read scp to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_scp(file) }
"""
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_mat_scp)
@_mod_utils.requires_module('kaldi_io', 'numpy')
def read_mat_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,matrix<float32/float64>) tuples, which reads from the ark file/stream.
Args:
file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file
Example
>>> # read ark to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_ark(file) }
"""
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_mat_ark)
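# A hedged usage sketch: the functions above yield (key, Tensor) pairs lazily, so an
# ark file can be streamed without loading everything into memory. The optional
# ``kaldi_io`` and ``numpy`` packages must be installed, and "feats.ark" is a
# hypothetical path.
#
#     >>> for key, mat in read_mat_ark("feats.ark"):
#     ...     print(key, mat.shape)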
|
from torchaudio import _extension # noqa: F401
from torchaudio import (
compliance,
datasets,
functional,
models,
pipelines,
kaldi_io,
utils,
sox_effects,
transforms,
)
from torchaudio.backend import (
list_audio_backends,
get_audio_backend,
set_audio_backend,
)
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
'compliance',
'datasets',
'functional',
'models',
'pipelines',
'kaldi_io',
'utils',
'sox_effects',
'transforms',
'list_audio_backends',
'get_audio_backend',
'set_audio_backend',
]
|
import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
def _init_extension():
if not _mod_utils.is_module_available('torchaudio._torchaudio'):
warnings.warn('torchaudio C++ extension is not available.')
return
suffix = 'pyd' if os.name == 'nt' else 'so'
path = Path(__file__).parent / 'lib' / f'libtorchaudio.{suffix}'
# In case `torchaudio` is deployed with `pex` format, this file does not exist.
# In this case, we expect that `libtorchaudio` is available somewhere
# in the search path of dynamic loading mechanism, and importing `_torchaudio`,
# which depends on `libtorchaudio` and dynamic loader will handle it for us.
if path.exists():
torch.ops.load_library(path)
torch.classes.load_library(path)
# This import is for initializing the methods registered via PyBind11
from torchaudio import _torchaudio # noqa
_init_extension()
|
from torch.hub import load_state_dict_from_url, download_url_to_file
__all__ = [
"load_state_dict_from_url",
"download_url_to_file",
]
|
import warnings
import importlib.util
from typing import Optional
from functools import wraps
import torch
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
    This decorator gives a better error message to users, rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f'module: {missing[0]}' if len(missing) == 1 else f'modules: {missing}'
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f'{func.__module__}.{func.__name__} requires {req}')
return wrapped
return decorator
def deprecated(direction: str, version: Optional[str] = None):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
        version (str or None, optional): The version in which the object will be removed.
            If ``None``, the deprecation message refers to a future release.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = (
f'{func.__module__}.{func.__name__} has been deprecated '
f'and will be removed from {"future" if version is None else version} release. '
f'{direction}')
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
return wrapped
return decorator
def is_kaldi_available():
return is_module_available('torchaudio._torchaudio') and torch.ops.torchaudio.is_kaldi_available()
def requires_kaldi():
if is_kaldi_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f'{func.__module__}.{func.__name__} requires kaldi')
return wrapped
return decorator
def _check_soundfile_importable():
if not is_module_available('soundfile'):
return False
try:
import soundfile # noqa: F401
return True
except Exception:
warnings.warn("Failed to import soundfile. 'soundfile' backend is not available.")
return False
_is_soundfile_importable = _check_soundfile_importable()
def is_soundfile_available():
return _is_soundfile_importable
def requires_soundfile():
if is_soundfile_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f'{func.__module__}.{func.__name__} requires soundfile')
return wrapped
return decorator
def is_sox_available():
return is_module_available('torchaudio._torchaudio') and torch.ops.torchaudio.is_sox_available()
def requires_sox():
if is_sox_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f'{func.__module__}.{func.__name__} requires sox')
return wrapped
return decorator
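# A minimal sketch of how these guards are meant to be used; ``fancy_resample`` and
# the ``librosa`` requirement are hypothetical examples, not part of this module.
#
#     >>> @requires_module("librosa")
#     ... def fancy_resample(waveform):
#     ...     import librosa  # only reached when the module is actually available
#     ...     ...
#     >>> # If ``librosa`` is missing, calling ``fancy_resample`` raises a RuntimeError
#     >>> # naming the missing module, instead of an obscure error at import time.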
|
import os
from typing import Tuple, Optional, Union
from pathlib import Path
import torchaudio
from torch.utils.data import Dataset
from torch import Tensor
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
FOLDER_IN_ARCHIVE = "SpeechCommands"
URL = "speech_commands_v0.02"
HASH_DIVIDER = "_nohash_"
EXCEPT_FOLDER = "_background_noise_"
_CHECKSUMS = {
"https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.01.tar.gz":
"3cd23799cb2bbdec517f1cc028f8d43c",
"https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz":
"6b74f3901214cb2c2934e98196829835",
}
def _load_list(root, *filenames):
output = []
for filename in filenames:
filepath = os.path.join(root, filename)
with open(filepath) as fileobj:
output += [os.path.normpath(os.path.join(root, line.strip())) for line in fileobj]
return output
def load_speechcommands_item(filepath: str, path: str) -> Tuple[Tensor, int, str, str, int]:
relpath = os.path.relpath(filepath, path)
label, filename = os.path.split(relpath)
# Besides the officially supported split method for datasets defined by "validation_list.txt"
# and "testing_list.txt" over "speech_commands_v0.0x.tar.gz" archives, an alternative split
# method referred to in paragraph 2-3 of Section 7.1, references 13 and 14 of the original
# paper, and the checksums file from the tensorflow_datasets package [1] is also supported.
# Some filenames in those "speech_commands_test_set_v0.0x.tar.gz" archives have the form
# "xxx.wav.wav", so file extensions twice needs to be stripped twice.
# [1] https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/url_checksums/speech_commands.txt
speaker, _ = os.path.splitext(filename)
speaker, _ = os.path.splitext(speaker)
speaker_id, utterance_number = speaker.split(HASH_DIVIDER)
utterance_number = int(utterance_number)
# Load audio
waveform, sample_rate = torchaudio.load(filepath)
return waveform, sample_rate, label, speaker_id, utterance_number
class SPEECHCOMMANDS(Dataset):
"""Create a Dataset for Speech Commands.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from,
            or the type of the dataset to download.
Allowed type values are ``"speech_commands_v0.01"`` and ``"speech_commands_v0.02"``
(default: ``"speech_commands_v0.02"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"SpeechCommands"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
subset (str or None, optional):
Select a subset of the dataset [None, "training", "validation", "testing"]. None means
the whole dataset. "validation" and "testing" are defined in "validation_list.txt" and
"testing_list.txt", respectively, and "training" is the rest. Details for the files
"validation_list.txt" and "testing_list.txt" are explained in the README of the dataset
and in the introduction of Section 7 of the original paper and its reference 12. The
original paper can be found `here <https://arxiv.org/pdf/1804.03209.pdf>`_. (Default: ``None``)
"""
def __init__(self,
root: Union[str, Path],
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False,
subset: Optional[str] = None,
) -> None:
assert subset is None or subset in ["training", "validation", "testing"], (
"When `subset` not None, it must take a value from "
+ "{'training', 'validation', 'testing'}."
)
if url in [
"speech_commands_v0.01",
"speech_commands_v0.02",
]:
base_url = "https://storage.googleapis.com/download.tensorflow.org/data/"
ext_archive = ".tar.gz"
url = os.path.join(base_url, url + ext_archive)
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
basename = os.path.basename(url)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
folder_in_archive = os.path.join(folder_in_archive, basename)
self._path = os.path.join(root, folder_in_archive)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum, hash_type="md5")
extract_archive(archive, self._path)
if subset == "validation":
self._walker = _load_list(self._path, "validation_list.txt")
elif subset == "testing":
self._walker = _load_list(self._path, "testing_list.txt")
elif subset == "training":
excludes = set(_load_list(self._path, "validation_list.txt", "testing_list.txt"))
walker = sorted(str(p) for p in Path(self._path).glob('*/*.wav'))
self._walker = [
w for w in walker
if HASH_DIVIDER in w
and EXCEPT_FOLDER not in w
and os.path.normpath(w) not in excludes
]
else:
walker = sorted(str(p) for p in Path(self._path).glob('*/*.wav'))
self._walker = [w for w in walker if HASH_DIVIDER in w and EXCEPT_FOLDER not in w]
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str, int):
``(waveform, sample_rate, label, speaker_id, utterance_number)``
"""
fileid = self._walker[n]
return load_speechcommands_item(fileid, self._path)
def __len__(self) -> int:
return len(self._walker)
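# A short usage sketch (the root path "./data" is a hypothetical location and
# ``download=True`` fetches the archive over the network on first use):
#
#     >>> dataset = SPEECHCOMMANDS("./data", download=True, subset="testing")
#     >>> waveform, sample_rate, label, speaker_id, utterance_number = dataset[0]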
|
from pathlib import Path
from typing import Dict, Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
import torchaudio
from torchaudio.datasets.utils import (
download_url,
extract_archive,
validate_file,
)
_URL = "https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"
_CHECKSUM = "29e93debeb0e779986542229a81ff29b"
_SUPPORTED_SUBSETS = {"train", "test"}
class DR_VCTK(Dataset):
"""Create a dataset for Device Recorded VCTK (Small subset version).
Args:
root (str or Path): Root directory where the dataset's top level directory is found.
subset (str): The subset to use. Can be one of ``"train"`` and ``"test"``. (default: ``"train"``).
download (bool):
Whether to download the dataset if it is not found at root path. (default: ``False``).
url (str): The URL to download the dataset from.
(default: ``"https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train",
*,
download: bool = False,
url: str = _URL,
) -> None:
if subset not in _SUPPORTED_SUBSETS:
raise RuntimeError(
f"The subset '{subset}' does not match any of the supported subsets: {_SUPPORTED_SUBSETS}"
)
root = Path(root).expanduser()
archive = root / "DR-VCTK.zip"
self._subset = subset
self._path = root / "DR-VCTK" / "DR-VCTK"
self._clean_audio_dir = self._path / f"clean_{self._subset}set_wav_16k"
self._noisy_audio_dir = self._path / f"device-recorded_{self._subset}set_wav_16k"
self._config_filepath = self._path / "configurations" / f"{self._subset}_ch_log.txt"
if not self._path.is_dir():
if not archive.is_file():
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
download_url(url, root)
self._validate_checksum(archive)
extract_archive(archive, root)
self._config = self._load_config(self._config_filepath)
self._filename_list = sorted(self._config)
def _validate_checksum(self, archive):
with open(archive, "rb") as file_obj:
if not validate_file(file_obj, _CHECKSUM, "md5"):
raise RuntimeError(
f"The hash of {str(archive)} does not match. Delete the file manually and retry."
)
def _load_config(self, filepath: str) -> Dict[str, Tuple[str, int]]:
# Skip header
skip_rows = 2 if self._subset == "train" else 1
config = {}
with open(filepath) as f:
for i, line in enumerate(f):
if i < skip_rows or not line:
continue
filename, source, channel_id = line.strip().split("\t")
config[filename] = (source, int(channel_id))
return config
def _load_dr_vctk_item(self, filename: str) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
speaker_id, utterance_id = filename.split(".")[0].split("_")
source, channel_id = self._config[filename]
file_clean_audio = self._clean_audio_dir / filename
file_noisy_audio = self._noisy_audio_dir / filename
waveform_clean, sample_rate_clean = torchaudio.load(file_clean_audio)
waveform_noisy, sample_rate_noisy = torchaudio.load(file_noisy_audio)
return (
waveform_clean,
sample_rate_clean,
waveform_noisy,
sample_rate_noisy,
speaker_id,
utterance_id,
source,
channel_id,
)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Tensor, int, str, str, str, int):
``(waveform_clean, sample_rate_clean, waveform_noisy, sample_rate_noisy, speaker_id,\
utterance_id, source, channel_id)``
"""
filename = self._filename_list[n]
return self._load_dr_vctk_item(filename)
def __len__(self) -> int:
return len(self._filename_list)
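# A short usage sketch (the root path "./data" is a hypothetical location and
# ``download=True`` fetches and verifies the archive on first use):
#
#     >>> dataset = DR_VCTK("./data", subset="test", download=True)
#     >>> (waveform_clean, sample_rate_clean, waveform_noisy, sample_rate_noisy,
#     ...  speaker_id, utterance_id, source, channel_id) = dataset[0]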
|
import os
import re
from pathlib import Path
from typing import Iterable, Tuple, Union, List
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url
_CHECKSUMS = {
"http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b":
"825f4ebd9183f2417df9f067a9cabe86",
"http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b.symbols":
"385e490aabc71b48e772118e3d02923e",
}
_PUNCTUATIONS = set([
"!EXCLAMATION-POINT",
"\"CLOSE-QUOTE",
"\"DOUBLE-QUOTE",
"\"END-OF-QUOTE",
"\"END-QUOTE",
"\"IN-QUOTES",
"\"QUOTE",
"\"UNQUOTE",
"#HASH-MARK",
"#POUND-SIGN",
"#SHARP-SIGN",
"%PERCENT",
"&ERSAND",
"'END-INNER-QUOTE",
"'END-QUOTE",
"'INNER-QUOTE",
"'QUOTE",
"'SINGLE-QUOTE",
"(BEGIN-PARENS",
"(IN-PARENTHESES",
"(LEFT-PAREN",
"(OPEN-PARENTHESES",
"(PAREN",
"(PARENS",
"(PARENTHESES",
")CLOSE-PAREN",
")CLOSE-PARENTHESES",
")END-PAREN",
")END-PARENS",
")END-PARENTHESES",
")END-THE-PAREN",
")PAREN",
")PARENS",
")RIGHT-PAREN",
")UN-PARENTHESES",
"+PLUS",
",COMMA",
"--DASH",
"-DASH",
"-HYPHEN",
"...ELLIPSIS",
".DECIMAL",
".DOT",
".FULL-STOP",
".PERIOD",
".POINT",
"/SLASH",
":COLON",
";SEMI-COLON",
";SEMI-COLON(1)",
"?QUESTION-MARK",
"{BRACE",
"{LEFT-BRACE",
"{OPEN-BRACE",
"}CLOSE-BRACE",
"}RIGHT-BRACE",
])
def _parse_dictionary(lines: Iterable[str], exclude_punctuations: bool) -> List[Tuple[str, List[str]]]:
_alt_re = re.compile(r'\([0-9]+\)')
cmudict: List[Tuple[str, List[str]]] = list()
for line in lines:
if not line or line.startswith(';;;'): # ignore comments
continue
        word, phones = line.strip().split('  ')  # the word and its phones are separated by two spaces
if word in _PUNCTUATIONS:
if exclude_punctuations:
continue
# !EXCLAMATION-POINT -> !
# --DASH -> --
# ...ELLIPSIS -> ...
if word.startswith("..."):
word = "..."
elif word.startswith("--"):
word = "--"
else:
word = word[0]
        # if a word has multiple pronunciations, a (number) suffix is appended to it,
        # for example, DATAPOINTS and DATAPOINTS(1);
        # the regular expression `_alt_re` removes the '(1)' and changes DATAPOINTS(1) back to DATAPOINTS
word = re.sub(_alt_re, '', word)
phones = phones.split(" ")
cmudict.append((word, phones))
return cmudict
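# A minimal doctest-style sketch of the parser (the two entries below are made-up
# lines in CMUDict's format, with two spaces separating the word from its phones):
#
#     >>> lines = ["HELLO  HH AH0 L OW1", "HELLO(1)  HH EH0 L OW1"]
#     >>> _parse_dictionary(lines, exclude_punctuations=True)
#     [('HELLO', ['HH', 'AH0', 'L', 'OW1']), ('HELLO', ['HH', 'EH0', 'L', 'OW1'])]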
class CMUDict(Dataset):
"""Create a Dataset for CMU Pronouncing Dictionary (CMUDict).
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
exclude_punctuations (bool, optional):
            When enabled, exclude the pronunciation of punctuation marks, such as
            `!EXCLAMATION-POINT` and `#HASH-MARK`. (default: ``True``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
url (str, optional):
The URL to download the dictionary from.
(default: ``"http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b"``)
url_symbols (str, optional):
The URL to download the list of symbols from.
(default: ``"http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b.symbols"``)
"""
def __init__(self,
root: Union[str, Path],
exclude_punctuations: bool = True,
*,
download: bool = False,
url: str = "http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b",
url_symbols: str = "http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b.symbols",
) -> None:
self.exclude_punctuations = exclude_punctuations
self._root_path = Path(root)
if not os.path.isdir(self._root_path):
raise RuntimeError(f'The root directory does not exist; {root}')
dict_file = self._root_path / os.path.basename(url)
symbol_file = self._root_path / os.path.basename(url_symbols)
if not os.path.exists(dict_file):
if not download:
raise RuntimeError(
'The dictionary file is not found in the following location. '
f'Set `download=True` to download it. {dict_file}')
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum, hash_type="md5")
if not os.path.exists(symbol_file):
if not download:
raise RuntimeError(
'The symbol file is not found in the following location. '
f'Set `download=True` to download it. {symbol_file}')
checksum = _CHECKSUMS.get(url_symbols, None)
download_url(url_symbols, root, hash_value=checksum, hash_type="md5")
with open(symbol_file, "r") as text:
self._symbols = [line.strip() for line in text.readlines()]
with open(dict_file, "r", encoding='latin-1') as text:
self._dictionary = _parse_dictionary(
text.readlines(), exclude_punctuations=self.exclude_punctuations)
def __getitem__(self, n: int) -> Tuple[str, List[str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded.
Returns:
(str, List[str]): The corresponding word and phonemes ``(word, [phonemes])``.
"""
return self._dictionary[n]
def __len__(self) -> int:
return len(self._dictionary)
@property
def symbols(self) -> List[str]:
"""list[str]: A list of phonemes symbols, such as `AA`, `AE`, `AH`.
"""
return self._symbols.copy()
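# A short usage sketch (the root directory "./data" is hypothetical and must already
# exist; ``download=True`` fetches the dictionary and symbol files into it):
#
#     >>> cmudict = CMUDict("./data", download=True)
#     >>> word, phonemes = cmudict[0]
#     >>> symbols = cmudict.symbols  # a copy of the phoneme symbol list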
|
import os
from pathlib import Path
from typing import Tuple, Optional, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
# The following lists prefixed with `filtered_` provide a filtered split
# that:
#
# a. Mitigates a known issue with GTZAN (duplication)
#
# b. Provides a standard split for testing against other
# methods (e.g. the one in jordipons/sklearn-audio-transfer-learning).
#
# Those are used when GTZAN is initialised with the `filtered` keyword.
# The split was taken from (github) jordipons/sklearn-audio-transfer-learning.
gtzan_genres = [
"blues",
"classical",
"country",
"disco",
"hiphop",
"jazz",
"metal",
"pop",
"reggae",
"rock",
]
filtered_test = [
"blues.00012",
"blues.00013",
"blues.00014",
"blues.00015",
"blues.00016",
"blues.00017",
"blues.00018",
"blues.00019",
"blues.00020",
"blues.00021",
"blues.00022",
"blues.00023",
"blues.00024",
"blues.00025",
"blues.00026",
"blues.00027",
"blues.00028",
"blues.00061",
"blues.00062",
"blues.00063",
"blues.00064",
"blues.00065",
"blues.00066",
"blues.00067",
"blues.00068",
"blues.00069",
"blues.00070",
"blues.00071",
"blues.00072",
"blues.00098",
"blues.00099",
"classical.00011",
"classical.00012",
"classical.00013",
"classical.00014",
"classical.00015",
"classical.00016",
"classical.00017",
"classical.00018",
"classical.00019",
"classical.00020",
"classical.00021",
"classical.00022",
"classical.00023",
"classical.00024",
"classical.00025",
"classical.00026",
"classical.00027",
"classical.00028",
"classical.00029",
"classical.00034",
"classical.00035",
"classical.00036",
"classical.00037",
"classical.00038",
"classical.00039",
"classical.00040",
"classical.00041",
"classical.00049",
"classical.00077",
"classical.00078",
"classical.00079",
"country.00030",
"country.00031",
"country.00032",
"country.00033",
"country.00034",
"country.00035",
"country.00036",
"country.00037",
"country.00038",
"country.00039",
"country.00040",
"country.00043",
"country.00044",
"country.00046",
"country.00047",
"country.00048",
"country.00050",
"country.00051",
"country.00053",
"country.00054",
"country.00055",
"country.00056",
"country.00057",
"country.00058",
"country.00059",
"country.00060",
"country.00061",
"country.00062",
"country.00063",
"country.00064",
"disco.00001",
"disco.00021",
"disco.00058",
"disco.00062",
"disco.00063",
"disco.00064",
"disco.00065",
"disco.00066",
"disco.00069",
"disco.00076",
"disco.00077",
"disco.00078",
"disco.00079",
"disco.00080",
"disco.00081",
"disco.00082",
"disco.00083",
"disco.00084",
"disco.00085",
"disco.00086",
"disco.00087",
"disco.00088",
"disco.00091",
"disco.00092",
"disco.00093",
"disco.00094",
"disco.00096",
"disco.00097",
"disco.00099",
"hiphop.00000",
"hiphop.00026",
"hiphop.00027",
"hiphop.00030",
"hiphop.00040",
"hiphop.00043",
"hiphop.00044",
"hiphop.00045",
"hiphop.00051",
"hiphop.00052",
"hiphop.00053",
"hiphop.00054",
"hiphop.00062",
"hiphop.00063",
"hiphop.00064",
"hiphop.00065",
"hiphop.00066",
"hiphop.00067",
"hiphop.00068",
"hiphop.00069",
"hiphop.00070",
"hiphop.00071",
"hiphop.00072",
"hiphop.00073",
"hiphop.00074",
"hiphop.00075",
"hiphop.00099",
"jazz.00073",
"jazz.00074",
"jazz.00075",
"jazz.00076",
"jazz.00077",
"jazz.00078",
"jazz.00079",
"jazz.00080",
"jazz.00081",
"jazz.00082",
"jazz.00083",
"jazz.00084",
"jazz.00085",
"jazz.00086",
"jazz.00087",
"jazz.00088",
"jazz.00089",
"jazz.00090",
"jazz.00091",
"jazz.00092",
"jazz.00093",
"jazz.00094",
"jazz.00095",
"jazz.00096",
"jazz.00097",
"jazz.00098",
"jazz.00099",
"metal.00012",
"metal.00013",
"metal.00014",
"metal.00015",
"metal.00022",
"metal.00023",
"metal.00025",
"metal.00026",
"metal.00027",
"metal.00028",
"metal.00029",
"metal.00030",
"metal.00031",
"metal.00032",
"metal.00033",
"metal.00038",
"metal.00039",
"metal.00067",
"metal.00070",
"metal.00073",
"metal.00074",
"metal.00075",
"metal.00078",
"metal.00083",
"metal.00085",
"metal.00087",
"metal.00088",
"pop.00000",
"pop.00001",
"pop.00013",
"pop.00014",
"pop.00043",
"pop.00063",
"pop.00064",
"pop.00065",
"pop.00066",
"pop.00069",
"pop.00070",
"pop.00071",
"pop.00072",
"pop.00073",
"pop.00074",
"pop.00075",
"pop.00076",
"pop.00077",
"pop.00078",
"pop.00079",
"pop.00082",
"pop.00088",
"pop.00089",
"pop.00090",
"pop.00091",
"pop.00092",
"pop.00093",
"pop.00094",
"pop.00095",
"pop.00096",
"reggae.00034",
"reggae.00035",
"reggae.00036",
"reggae.00037",
"reggae.00038",
"reggae.00039",
"reggae.00040",
"reggae.00046",
"reggae.00047",
"reggae.00048",
"reggae.00052",
"reggae.00053",
"reggae.00064",
"reggae.00065",
"reggae.00066",
"reggae.00067",
"reggae.00068",
"reggae.00071",
"reggae.00079",
"reggae.00082",
"reggae.00083",
"reggae.00084",
"reggae.00087",
"reggae.00088",
"reggae.00089",
"reggae.00090",
"rock.00010",
"rock.00011",
"rock.00012",
"rock.00013",
"rock.00014",
"rock.00015",
"rock.00027",
"rock.00028",
"rock.00029",
"rock.00030",
"rock.00031",
"rock.00032",
"rock.00033",
"rock.00034",
"rock.00035",
"rock.00036",
"rock.00037",
"rock.00039",
"rock.00040",
"rock.00041",
"rock.00042",
"rock.00043",
"rock.00044",
"rock.00045",
"rock.00046",
"rock.00047",
"rock.00048",
"rock.00086",
"rock.00087",
"rock.00088",
"rock.00089",
"rock.00090",
]
filtered_train = [
"blues.00029",
"blues.00030",
"blues.00031",
"blues.00032",
"blues.00033",
"blues.00034",
"blues.00035",
"blues.00036",
"blues.00037",
"blues.00038",
"blues.00039",
"blues.00040",
"blues.00041",
"blues.00042",
"blues.00043",
"blues.00044",
"blues.00045",
"blues.00046",
"blues.00047",
"blues.00048",
"blues.00049",
"blues.00073",
"blues.00074",
"blues.00075",
"blues.00076",
"blues.00077",
"blues.00078",
"blues.00079",
"blues.00080",
"blues.00081",
"blues.00082",
"blues.00083",
"blues.00084",
"blues.00085",
"blues.00086",
"blues.00087",
"blues.00088",
"blues.00089",
"blues.00090",
"blues.00091",
"blues.00092",
"blues.00093",
"blues.00094",
"blues.00095",
"blues.00096",
"blues.00097",
"classical.00030",
"classical.00031",
"classical.00032",
"classical.00033",
"classical.00043",
"classical.00044",
"classical.00045",
"classical.00046",
"classical.00047",
"classical.00048",
"classical.00050",
"classical.00051",
"classical.00052",
"classical.00053",
"classical.00054",
"classical.00055",
"classical.00056",
"classical.00057",
"classical.00058",
"classical.00059",
"classical.00060",
"classical.00061",
"classical.00062",
"classical.00063",
"classical.00064",
"classical.00065",
"classical.00066",
"classical.00067",
"classical.00080",
"classical.00081",
"classical.00082",
"classical.00083",
"classical.00084",
"classical.00085",
"classical.00086",
"classical.00087",
"classical.00088",
"classical.00089",
"classical.00090",
"classical.00091",
"classical.00092",
"classical.00093",
"classical.00094",
"classical.00095",
"classical.00096",
"classical.00097",
"classical.00098",
"classical.00099",
"country.00019",
"country.00020",
"country.00021",
"country.00022",
"country.00023",
"country.00024",
"country.00025",
"country.00026",
"country.00028",
"country.00029",
"country.00065",
"country.00066",
"country.00067",
"country.00068",
"country.00069",
"country.00070",
"country.00071",
"country.00072",
"country.00073",
"country.00074",
"country.00075",
"country.00076",
"country.00077",
"country.00078",
"country.00079",
"country.00080",
"country.00081",
"country.00082",
"country.00083",
"country.00084",
"country.00085",
"country.00086",
"country.00087",
"country.00088",
"country.00089",
"country.00090",
"country.00091",
"country.00092",
"country.00093",
"country.00094",
"country.00095",
"country.00096",
"country.00097",
"country.00098",
"country.00099",
"disco.00005",
"disco.00015",
"disco.00016",
"disco.00017",
"disco.00018",
"disco.00019",
"disco.00020",
"disco.00022",
"disco.00023",
"disco.00024",
"disco.00025",
"disco.00026",
"disco.00027",
"disco.00028",
"disco.00029",
"disco.00030",
"disco.00031",
"disco.00032",
"disco.00033",
"disco.00034",
"disco.00035",
"disco.00036",
"disco.00037",
"disco.00039",
"disco.00040",
"disco.00041",
"disco.00042",
"disco.00043",
"disco.00044",
"disco.00045",
"disco.00047",
"disco.00049",
"disco.00053",
"disco.00054",
"disco.00056",
"disco.00057",
"disco.00059",
"disco.00061",
"disco.00070",
"disco.00073",
"disco.00074",
"disco.00089",
"hiphop.00002",
"hiphop.00003",
"hiphop.00004",
"hiphop.00005",
"hiphop.00006",
"hiphop.00007",
"hiphop.00008",
"hiphop.00009",
"hiphop.00010",
"hiphop.00011",
"hiphop.00012",
"hiphop.00013",
"hiphop.00014",
"hiphop.00015",
"hiphop.00016",
"hiphop.00017",
"hiphop.00018",
"hiphop.00019",
"hiphop.00020",
"hiphop.00021",
"hiphop.00022",
"hiphop.00023",
"hiphop.00024",
"hiphop.00025",
"hiphop.00028",
"hiphop.00029",
"hiphop.00031",
"hiphop.00032",
"hiphop.00033",
"hiphop.00034",
"hiphop.00035",
"hiphop.00036",
"hiphop.00037",
"hiphop.00038",
"hiphop.00041",
"hiphop.00042",
"hiphop.00055",
"hiphop.00056",
"hiphop.00057",
"hiphop.00058",
"hiphop.00059",
"hiphop.00060",
"hiphop.00061",
"hiphop.00077",
"hiphop.00078",
"hiphop.00079",
"hiphop.00080",
"jazz.00000",
"jazz.00001",
"jazz.00011",
"jazz.00012",
"jazz.00013",
"jazz.00014",
"jazz.00015",
"jazz.00016",
"jazz.00017",
"jazz.00018",
"jazz.00019",
"jazz.00020",
"jazz.00021",
"jazz.00022",
"jazz.00023",
"jazz.00024",
"jazz.00041",
"jazz.00047",
"jazz.00048",
"jazz.00049",
"jazz.00050",
"jazz.00051",
"jazz.00052",
"jazz.00053",
"jazz.00054",
"jazz.00055",
"jazz.00056",
"jazz.00057",
"jazz.00058",
"jazz.00059",
"jazz.00060",
"jazz.00061",
"jazz.00062",
"jazz.00063",
"jazz.00064",
"jazz.00065",
"jazz.00066",
"jazz.00067",
"jazz.00068",
"jazz.00069",
"jazz.00070",
"jazz.00071",
"jazz.00072",
"metal.00002",
"metal.00003",
"metal.00005",
"metal.00021",
"metal.00024",
"metal.00035",
"metal.00046",
"metal.00047",
"metal.00048",
"metal.00049",
"metal.00050",
"metal.00051",
"metal.00052",
"metal.00053",
"metal.00054",
"metal.00055",
"metal.00056",
"metal.00057",
"metal.00059",
"metal.00060",
"metal.00061",
"metal.00062",
"metal.00063",
"metal.00064",
"metal.00065",
"metal.00066",
"metal.00069",
"metal.00071",
"metal.00072",
"metal.00079",
"metal.00080",
"metal.00084",
"metal.00086",
"metal.00089",
"metal.00090",
"metal.00091",
"metal.00092",
"metal.00093",
"metal.00094",
"metal.00095",
"metal.00096",
"metal.00097",
"metal.00098",
"metal.00099",
"pop.00002",
"pop.00003",
"pop.00004",
"pop.00005",
"pop.00006",
"pop.00007",
"pop.00008",
"pop.00009",
"pop.00011",
"pop.00012",
"pop.00016",
"pop.00017",
"pop.00018",
"pop.00019",
"pop.00020",
"pop.00023",
"pop.00024",
"pop.00025",
"pop.00026",
"pop.00027",
"pop.00028",
"pop.00029",
"pop.00031",
"pop.00032",
"pop.00033",
"pop.00034",
"pop.00035",
"pop.00036",
"pop.00038",
"pop.00039",
"pop.00040",
"pop.00041",
"pop.00042",
"pop.00044",
"pop.00046",
"pop.00049",
"pop.00050",
"pop.00080",
"pop.00097",
"pop.00098",
"pop.00099",
"reggae.00000",
"reggae.00001",
"reggae.00002",
"reggae.00004",
"reggae.00006",
"reggae.00009",
"reggae.00011",
"reggae.00012",
"reggae.00014",
"reggae.00015",
"reggae.00016",
"reggae.00017",
"reggae.00018",
"reggae.00019",
"reggae.00020",
"reggae.00021",
"reggae.00022",
"reggae.00023",
"reggae.00024",
"reggae.00025",
"reggae.00026",
"reggae.00027",
"reggae.00028",
"reggae.00029",
"reggae.00030",
"reggae.00031",
"reggae.00032",
"reggae.00042",
"reggae.00043",
"reggae.00044",
"reggae.00045",
"reggae.00049",
"reggae.00050",
"reggae.00051",
"reggae.00054",
"reggae.00055",
"reggae.00056",
"reggae.00057",
"reggae.00058",
"reggae.00059",
"reggae.00060",
"reggae.00063",
"reggae.00069",
"rock.00000",
"rock.00001",
"rock.00002",
"rock.00003",
"rock.00004",
"rock.00005",
"rock.00006",
"rock.00007",
"rock.00008",
"rock.00009",
"rock.00016",
"rock.00017",
"rock.00018",
"rock.00019",
"rock.00020",
"rock.00021",
"rock.00022",
"rock.00023",
"rock.00024",
"rock.00025",
"rock.00026",
"rock.00057",
"rock.00058",
"rock.00059",
"rock.00060",
"rock.00061",
"rock.00062",
"rock.00063",
"rock.00064",
"rock.00065",
"rock.00066",
"rock.00067",
"rock.00068",
"rock.00069",
"rock.00070",
"rock.00091",
"rock.00092",
"rock.00093",
"rock.00094",
"rock.00095",
"rock.00096",
"rock.00097",
"rock.00098",
"rock.00099",
]
filtered_valid = [
"blues.00000",
"blues.00001",
"blues.00002",
"blues.00003",
"blues.00004",
"blues.00005",
"blues.00006",
"blues.00007",
"blues.00008",
"blues.00009",
"blues.00010",
"blues.00011",
"blues.00050",
"blues.00051",
"blues.00052",
"blues.00053",
"blues.00054",
"blues.00055",
"blues.00056",
"blues.00057",
"blues.00058",
"blues.00059",
"blues.00060",
"classical.00000",
"classical.00001",
"classical.00002",
"classical.00003",
"classical.00004",
"classical.00005",
"classical.00006",
"classical.00007",
"classical.00008",
"classical.00009",
"classical.00010",
"classical.00068",
"classical.00069",
"classical.00070",
"classical.00071",
"classical.00072",
"classical.00073",
"classical.00074",
"classical.00075",
"classical.00076",
"country.00000",
"country.00001",
"country.00002",
"country.00003",
"country.00004",
"country.00005",
"country.00006",
"country.00007",
"country.00009",
"country.00010",
"country.00011",
"country.00012",
"country.00013",
"country.00014",
"country.00015",
"country.00016",
"country.00017",
"country.00018",
"country.00027",
"country.00041",
"country.00042",
"country.00045",
"country.00049",
"disco.00000",
"disco.00002",
"disco.00003",
"disco.00004",
"disco.00006",
"disco.00007",
"disco.00008",
"disco.00009",
"disco.00010",
"disco.00011",
"disco.00012",
"disco.00013",
"disco.00014",
"disco.00046",
"disco.00048",
"disco.00052",
"disco.00067",
"disco.00068",
"disco.00072",
"disco.00075",
"disco.00090",
"disco.00095",
"hiphop.00081",
"hiphop.00082",
"hiphop.00083",
"hiphop.00084",
"hiphop.00085",
"hiphop.00086",
"hiphop.00087",
"hiphop.00088",
"hiphop.00089",
"hiphop.00090",
"hiphop.00091",
"hiphop.00092",
"hiphop.00093",
"hiphop.00094",
"hiphop.00095",
"hiphop.00096",
"hiphop.00097",
"hiphop.00098",
"jazz.00002",
"jazz.00003",
"jazz.00004",
"jazz.00005",
"jazz.00006",
"jazz.00007",
"jazz.00008",
"jazz.00009",
"jazz.00010",
"jazz.00025",
"jazz.00026",
"jazz.00027",
"jazz.00028",
"jazz.00029",
"jazz.00030",
"jazz.00031",
"jazz.00032",
"metal.00000",
"metal.00001",
"metal.00006",
"metal.00007",
"metal.00008",
"metal.00009",
"metal.00010",
"metal.00011",
"metal.00016",
"metal.00017",
"metal.00018",
"metal.00019",
"metal.00020",
"metal.00036",
"metal.00037",
"metal.00068",
"metal.00076",
"metal.00077",
"metal.00081",
"metal.00082",
"pop.00010",
"pop.00053",
"pop.00055",
"pop.00058",
"pop.00059",
"pop.00060",
"pop.00061",
"pop.00062",
"pop.00081",
"pop.00083",
"pop.00084",
"pop.00085",
"pop.00086",
"reggae.00061",
"reggae.00062",
"reggae.00070",
"reggae.00072",
"reggae.00074",
"reggae.00076",
"reggae.00077",
"reggae.00078",
"reggae.00085",
"reggae.00092",
"reggae.00093",
"reggae.00094",
"reggae.00095",
"reggae.00096",
"reggae.00097",
"reggae.00098",
"reggae.00099",
"rock.00038",
"rock.00049",
"rock.00050",
"rock.00051",
"rock.00052",
"rock.00053",
"rock.00054",
"rock.00055",
"rock.00056",
"rock.00071",
"rock.00072",
"rock.00073",
"rock.00074",
"rock.00075",
"rock.00076",
"rock.00077",
"rock.00078",
"rock.00079",
"rock.00080",
"rock.00081",
"rock.00082",
"rock.00083",
"rock.00084",
"rock.00085",
]
URL = "http://opihi.cs.uvic.ca/sound/genres.tar.gz"
FOLDER_IN_ARCHIVE = "genres"
_CHECKSUMS = {
"http://opihi.cs.uvic.ca/sound/genres.tar.gz": "5b3d6dddb579ab49814ab86dba69e7c7"
}
def load_gtzan_item(fileid: str, path: str, ext_audio: str) -> Tuple[Tensor, int, str]:
"""
Loads a file from the dataset and returns the raw waveform
as a Torch Tensor, its sample rate as an integer, and its
genre as a string.
"""
# Filenames are of the form label.id, e.g. blues.00078
label, _ = fileid.split(".")
# Read wav
file_audio = os.path.join(path, label, fileid + ext_audio)
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, label
class GTZAN(Dataset):
"""Create a Dataset for GTZAN.
Note:
Please see http://marsyas.info/downloads/datasets.html if you are planning to use
this dataset to publish results.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://opihi.cs.uvic.ca/sound/genres.tar.gz"``)
        folder_in_archive (str, optional): The top-level directory of the dataset. (default: ``"genres"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
subset (str or None, optional): Which subset of the dataset to use.
One of ``"training"``, ``"validation"``, ``"testing"`` or ``None``.
If ``None``, the entire dataset is used. (default: ``None``).
"""
_ext_audio = ".wav"
def __init__(
self,
root: Union[str, Path],
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False,
subset: Optional[str] = None,
) -> None:
# super(GTZAN, self).__init__()
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
self.root = root
self.url = url
self.folder_in_archive = folder_in_archive
self.download = download
self.subset = subset
assert subset is None or subset in ["training", "validation", "testing"], (
"When `subset` not None, it must take a value from "
+ "{'training', 'validation', 'testing'}."
)
archive = os.path.basename(url)
archive = os.path.join(root, archive)
self._path = os.path.join(root, folder_in_archive)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum, hash_type="md5")
extract_archive(archive)
if not os.path.isdir(self._path):
raise RuntimeError(
"Dataset not found. Please use `download=True` to download it."
)
if self.subset is None:
# Check every subdirectory under dataset root
# which has the same name as the genres in
# GTZAN (e.g. `root_dir'/blues/, `root_dir'/rock, etc.)
# This lets users remove or move around song files,
# useful when e.g. they want to use only some of the files
# in a genre or want to label other files with a different
# genre.
self._walker = []
root = os.path.expanduser(self._path)
for directory in gtzan_genres:
fulldir = os.path.join(root, directory)
if not os.path.exists(fulldir):
continue
songs_in_genre = os.listdir(fulldir)
songs_in_genre.sort()
for fname in songs_in_genre:
name, ext = os.path.splitext(fname)
if ext.lower() == ".wav" and "." in name:
# Check whether the file is of the form
# `gtzan_genre`.`5 digit number`.wav
genre, num = name.split(".")
if genre in gtzan_genres and len(num) == 5 and num.isdigit():
self._walker.append(name)
else:
if self.subset == "training":
self._walker = filtered_train
elif self.subset == "validation":
self._walker = filtered_valid
elif self.subset == "testing":
self._walker = filtered_test
def __getitem__(self, n: int) -> Tuple[Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str): ``(waveform, sample_rate, label)``
"""
fileid = self._walker[n]
item = load_gtzan_item(fileid, self._path, self._ext_audio)
waveform, sample_rate, label = item
return waveform, sample_rate, label
def __len__(self) -> int:
return len(self._walker)
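# A short usage sketch (the root path "./data" is a hypothetical location; see the
# note in the class docstring about citing the dataset before publishing results):
#
#     >>> dataset = GTZAN("./data", download=True, subset="training")
#     >>> waveform, sample_rate, label = dataset[0]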
|
import os
import csv
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
URL = "aew"
FOLDER_IN_ARCHIVE = "ARCTIC"
_CHECKSUMS = {
"http://festvox.org/cmu_arctic/packed/cmu_us_aew_arctic.tar.bz2":
"4382b116efcc8339c37e01253cb56295",
"http://festvox.org/cmu_arctic/packed/cmu_us_ahw_arctic.tar.bz2":
"b072d6e961e3f36a2473042d097d6da9",
"http://festvox.org/cmu_arctic/packed/cmu_us_aup_arctic.tar.bz2":
"5301c7aee8919d2abd632e2667adfa7f",
"http://festvox.org/cmu_arctic/packed/cmu_us_awb_arctic.tar.bz2":
"280fdff1e9857119d9a2c57b50e12db7",
"http://festvox.org/cmu_arctic/packed/cmu_us_axb_arctic.tar.bz2":
"5e21cb26c6529c533df1d02ccde5a186",
"http://festvox.org/cmu_arctic/packed/cmu_us_bdl_arctic.tar.bz2":
"b2c3e558f656af2e0a65da0ac0c3377a",
"http://festvox.org/cmu_arctic/packed/cmu_us_clb_arctic.tar.bz2":
"3957c503748e3ce17a3b73c1b9861fb0",
"http://festvox.org/cmu_arctic/packed/cmu_us_eey_arctic.tar.bz2":
"59708e932d27664f9eda3e8e6859969b",
"http://festvox.org/cmu_arctic/packed/cmu_us_fem_arctic.tar.bz2":
"dba4f992ff023347c07c304bf72f4c73",
"http://festvox.org/cmu_arctic/packed/cmu_us_gka_arctic.tar.bz2":
"24a876ea7335c1b0ff21460e1241340f",
"http://festvox.org/cmu_arctic/packed/cmu_us_jmk_arctic.tar.bz2":
"afb69d95f02350537e8a28df5ab6004b",
"http://festvox.org/cmu_arctic/packed/cmu_us_ksp_arctic.tar.bz2":
"4ce5b3b91a0a54b6b685b1b05aa0b3be",
"http://festvox.org/cmu_arctic/packed/cmu_us_ljm_arctic.tar.bz2":
"6f45a3b2c86a4ed0465b353be291f77d",
"http://festvox.org/cmu_arctic/packed/cmu_us_lnh_arctic.tar.bz2":
"c6a15abad5c14d27f4ee856502f0232f",
"http://festvox.org/cmu_arctic/packed/cmu_us_rms_arctic.tar.bz2":
"71072c983df1e590d9e9519e2a621f6e",
"http://festvox.org/cmu_arctic/packed/cmu_us_rxr_arctic.tar.bz2":
"3771ff03a2f5b5c3b53aa0a68b9ad0d5",
"http://festvox.org/cmu_arctic/packed/cmu_us_slp_arctic.tar.bz2":
"9cbf984a832ea01b5058ba9a96862850",
"http://festvox.org/cmu_arctic/packed/cmu_us_slt_arctic.tar.bz2":
"959eecb2cbbc4ac304c6b92269380c81",
}
def load_cmuarctic_item(line: str,
path: str,
folder_audio: str,
ext_audio: str) -> Tuple[Tensor, int, str, str]:
utterance_id, transcript = line[0].strip().split(" ", 2)[1:]
    # Strip the surrounding double quotes and the trailing ' )' from the transcript
transcript = transcript[1:-3]
file_audio = os.path.join(path, folder_audio, utterance_id + ext_audio)
# Load audio
waveform, sample_rate = torchaudio.load(file_audio)
return (
waveform,
sample_rate,
transcript,
utterance_id.split("_")[1]
)
class CMUARCTIC(Dataset):
"""Create a Dataset for CMU_ARCTIC.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional):
            The URL to download the dataset from, or the type of the dataset to download.
(default: ``"aew"``)
Allowed type values are ``"aew"``, ``"ahw"``, ``"aup"``, ``"awb"``, ``"axb"``, ``"bdl"``,
``"clb"``, ``"eey"``, ``"fem"``, ``"gka"``, ``"jmk"``, ``"ksp"``, ``"ljm"``, ``"lnh"``,
``"rms"``, ``"rxr"``, ``"slp"`` or ``"slt"``.
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"ARCTIC"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_file_text = "txt.done.data"
_folder_text = "etc"
_ext_audio = ".wav"
_folder_audio = "wav"
def __init__(self,
root: Union[str, Path],
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False) -> None:
if url in [
"aew",
"ahw",
"aup",
"awb",
"axb",
"bdl",
"clb",
"eey",
"fem",
"gka",
"jmk",
"ksp",
"ljm",
"lnh",
"rms",
"rxr",
"slp",
"slt"
]:
url = "cmu_us_" + url + "_arctic"
ext_archive = ".tar.bz2"
base_url = "http://www.festvox.org/cmu_arctic/packed/"
url = os.path.join(base_url, url + ext_archive)
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
basename = os.path.basename(url)
root = os.path.join(root, folder_in_archive)
if not os.path.isdir(root):
os.mkdir(root)
archive = os.path.join(root, basename)
basename = basename.split(".")[0]
self._path = os.path.join(root, basename)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum, hash_type="md5")
extract_archive(archive)
self._text = os.path.join(self._path, self._folder_text, self._file_text)
with open(self._text, "r") as text:
walker = csv.reader(text, delimiter="\n")
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str): ``(waveform, sample_rate, transcript, utterance_id)``
"""
line = self._walker[n]
return load_cmuarctic_item(line, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
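# Usage sketch (not part of the original module): load one CMU_ARCTIC utterance.
# The "./data" root is a placeholder; url="slt" selects one of the supported speakers.
if __name__ == "__main__":
    dataset = CMUARCTIC("./data", url="slt", download=True)
    waveform, sample_rate, transcript, utterance_id = dataset[0]
    print(sample_rate, utterance_id, transcript)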
|
import csv
import os
from pathlib import Path
from typing import List, Dict, Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
import torchaudio
def load_commonvoice_item(line: List[str],
header: List[str],
path: str,
folder_audio: str,
ext_audio: str) -> Tuple[Tensor, int, Dict[str, str]]:
    # Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
assert header[1] == "path"
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""Create a Dataset for CommonVoice.
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self,
root: Union[str, Path],
tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary
is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``,
``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``.
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
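# Usage sketch (not part of the original module). CommonVoice must be downloaded
# manually; "./CommonVoice/cv-corpus/en" is a placeholder for the directory that
# contains the tsv files and the "clips" folder. Loading the .mp3 clips assumes a
# torchaudio backend with mp3 support.
if __name__ == "__main__":
    dataset = COMMONVOICE("./CommonVoice/cv-corpus/en", tsv="dev.tsv")
    waveform, sample_rate, metadata = dataset[0]
    print(sample_rate, metadata["sentence"])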
|
from .commonvoice import COMMONVOICE
from .librispeech import LIBRISPEECH
from .speechcommands import SPEECHCOMMANDS
from .vctk import VCTK_092
from .dr_vctk import DR_VCTK
from .gtzan import GTZAN
from .yesno import YESNO
from .ljspeech import LJSPEECH
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .librimix import LibriMix
from .libritts import LIBRITTS
from .tedlium import TEDLIUM
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
]
|
import os
from typing import Tuple, Union
from pathlib import Path
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriTTS"
_CHECKSUMS = {
"http://www.openslr.org/60/dev-clean.tar.gz": "0c3076c1e5245bb3f0af7d82087ee207",
"http://www.openslr.org/60/dev-other.tar.gz": "815555d8d75995782ac3ccd7f047213d",
"http://www.openslr.org/60/test-clean.tar.gz": "7bed3bdb047c4c197f1ad3bc412db59f",
"http://www.openslr.org/60/test-other.tar.gz": "ae3258249472a13b5abef2a816f733e4",
"http://www.openslr.org/60/train-clean-100.tar.gz": "4a8c202b78fe1bc0c47916a98f3a2ea8",
"http://www.openslr.org/60/train-clean-360.tar.gz": "a84ef10ddade5fd25df69596a2767b2d",
"http://www.openslr.org/60/train-other-500.tar.gz": "7b181dd5ace343a5f38427999684aa6f",
}
def load_libritts_item(
fileid: str,
path: str,
ext_audio: str,
ext_original_txt: str,
ext_normalized_txt: str,
) -> Tuple[Tensor, int, str, str, int, int, str]:
speaker_id, chapter_id, segment_id, utterance_id = fileid.split("_")
utterance_id = fileid
normalized_text = utterance_id + ext_normalized_txt
normalized_text = os.path.join(path, speaker_id, chapter_id, normalized_text)
original_text = utterance_id + ext_original_txt
original_text = os.path.join(path, speaker_id, chapter_id, original_text)
file_audio = utterance_id + ext_audio
file_audio = os.path.join(path, speaker_id, chapter_id, file_audio)
# Load audio
waveform, sample_rate = torchaudio.load(file_audio)
# Load original text
with open(original_text) as ft:
original_text = ft.readline()
# Load normalized text
with open(normalized_text, "r") as ft:
normalized_text = ft.readline()
return (
waveform,
sample_rate,
original_text,
normalized_text,
int(speaker_id),
int(chapter_id),
utterance_id,
)
class LIBRITTS(Dataset):
"""Create a Dataset for LibriTTS.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from,
            or the type of the dataset to download.
Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``,
``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and
``"train-other-500"``. (default: ``"train-clean-100"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"LibriTTS"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_original_txt = ".original.txt"
_ext_normalized_txt = ".normalized.txt"
_ext_audio = ".wav"
def __init__(
self,
root: Union[str, Path],
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False,
) -> None:
if url in [
"dev-clean",
"dev-other",
"test-clean",
"test-other",
"train-clean-100",
"train-clean-360",
"train-other-500",
]:
ext_archive = ".tar.gz"
base_url = "http://www.openslr.org/resources/60/"
url = os.path.join(base_url, url + ext_archive)
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
basename = os.path.basename(url)
archive = os.path.join(root, basename)
basename = basename.split(".")[0]
folder_in_archive = os.path.join(folder_in_archive, basename)
self._path = os.path.join(root, folder_in_archive)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum)
extract_archive(archive)
self._walker = sorted(str(p.stem) for p in Path(self._path).glob('*/*/*' + self._ext_audio))
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            (Tensor, int, str, str, int, int, str):
``(waveform, sample_rate, original_text, normalized_text, speaker_id, chapter_id, utterance_id)``
"""
fileid = self._walker[n]
return load_libritts_item(
fileid,
self._path,
self._ext_audio,
self._ext_original_txt,
self._ext_normalized_txt,
)
def __len__(self) -> int:
return len(self._walker)
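# Usage sketch (not part of the original module): "./data" and the "dev-clean"
# subset are illustrative; __getitem__ returns the 7-tuple documented above.
if __name__ == "__main__":
    dataset = LIBRITTS("./data", url="dev-clean", download=True)
    waveform, sample_rate, original_text, normalized_text, speaker_id, chapter_id, utterance_id = dataset[0]
    print(sample_rate, speaker_id, chapter_id, utterance_id)
    print(normalized_text)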
|
import os
from typing import Tuple, Union
from pathlib import Path
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "TEDLIUM_release1",
"url": "http://www.openslr.org/resources/7/TEDLIUM_release1.tar.gz",
"checksum": "30301975fd8c5cac4040c261c0852f57cfa8adbbad2ce78e77e4986957445f27",
"data_path": "",
"subset": "train",
"supported_subsets": ["train", "test", "dev"],
"dict": "TEDLIUM.150K.dic",
},
"release2": {
"folder_in_archive": "TEDLIUM_release2",
"url": "http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz",
"checksum": "93281b5fcaaae5c88671c9d000b443cb3c7ea3499ad12010b3934ca41a7b9c58",
"data_path": "",
"subset": "train",
"supported_subsets": ["train", "test", "dev"],
"dict": "TEDLIUM.152k.dic",
},
"release3": {
"folder_in_archive": "TEDLIUM_release-3",
"url": "http://www.openslr.org/resources/51/TEDLIUM_release-3.tgz",
"checksum": "ad1e454d14d1ad550bc2564c462d87c7a7ec83d4dc2b9210f22ab4973b9eccdb",
"data_path": "data/",
"subset": None,
"supported_subsets": [None],
"dict": "TEDLIUM.152k.dic",
},
}
class TEDLIUM(Dataset):
"""
    Create a Dataset for Tedlium. It supports releases 1, 2, and 3.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
release (str, optional): Release version.
Allowed values are ``"release1"``, ``"release2"`` or ``"release3"``.
(default: ``"release1"``).
        subset (str, optional): The subset of the dataset to use. Valid options are ``"train"``, ``"dev"``,
            and ``"test"`` for releases 1 and 2, and ``None`` for release 3.
            (default: ``"train"`` for releases 1 and 2, ``None`` for release 3).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
        audio_ext (str, optional): extension for audio file (default: ``".sph"``)
"""
def __init__(
self,
root: Union[str, Path],
release: str = "release1",
subset: str = None,
download: bool = False,
audio_ext: str = ".sph"
) -> None:
self._ext_audio = audio_ext
if release in _RELEASE_CONFIGS.keys():
folder_in_archive = _RELEASE_CONFIGS[release]["folder_in_archive"]
url = _RELEASE_CONFIGS[release]["url"]
subset = subset if subset else _RELEASE_CONFIGS[release]["subset"]
else:
            # Unsupported release: raise an error listing the valid options
            raise RuntimeError(
                "The release {} does not match any of the supported tedlium releases: {}".format(
release, _RELEASE_CONFIGS.keys(),
)
)
if subset not in _RELEASE_CONFIGS[release]["supported_subsets"]:
            # Unsupported subset: raise an error listing the valid options
            raise RuntimeError(
                "The subset {} does not match any of the supported tedlium subsets: {}".format(
subset, _RELEASE_CONFIGS[release]["supported_subsets"],
)
)
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
basename = os.path.basename(url)
archive = os.path.join(root, basename)
basename = basename.split(".")[0]
self._path = os.path.join(root, folder_in_archive, _RELEASE_CONFIGS[release]["data_path"])
if subset in ["train", "dev", "test"]:
self._path = os.path.join(self._path, subset)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS[release]["checksum"]
download_url(url, root, hash_value=checksum)
extract_archive(archive)
# Create list for all samples
self._filelist = []
stm_path = os.path.join(self._path, "stm")
for file in sorted(os.listdir(stm_path)):
if file.endswith(".stm"):
stm_path = os.path.join(self._path, "stm", file)
with open(stm_path) as f:
l = len(f.readlines())
file = file.replace(".stm", "")
self._filelist.extend((file, line) for line in range(l))
# Create dict path for later read
self._dict_path = os.path.join(root, folder_in_archive, _RELEASE_CONFIGS[release]["dict"])
self._phoneme_dict = None
def _load_tedlium_item(self, fileid: str, line: int, path: str) -> Tuple[Tensor, int, str, int, int, int]:
"""Loads a TEDLIUM dataset sample given a file name and corresponding sentence name.
Args:
fileid (str): File id to identify both text and audio files corresponding to the sample
line (int): Line identifier for the sample inside the text file
path (str): Dataset root path
Returns:
(Tensor, int, str, int, int, int):
``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier)``
"""
transcript_path = os.path.join(path, "stm", fileid)
with open(transcript_path + ".stm") as f:
transcript = f.readlines()[line]
talk_id, _, speaker_id, start_time, end_time, identifier, transcript = transcript.split(" ", 6)
wave_path = os.path.join(path, "sph", fileid)
waveform, sample_rate = self._load_audio(wave_path + self._ext_audio, start_time=start_time, end_time=end_time)
return (waveform, sample_rate, transcript, talk_id, speaker_id, identifier)
    def _load_audio(self, path: str, start_time: float, end_time: float, sample_rate: int = 16000) -> Tuple[Tensor, int]:
"""Default load function used in TEDLIUM dataset, you can overwrite this function to customize functionality
and load individual sentences from a full ted audio talk file.
Args:
path (str): Path to audio file
            start_time (float): Time in seconds where the sample sentence starts
            end_time (float): Time in seconds where the sample sentence ends
            sample_rate (int, optional): Sampling rate (default: ``16000``)
        Returns:
            (Tensor, int): Audio tensor representation and sample rate
"""
start_time = int(float(start_time) * sample_rate)
end_time = int(float(end_time) * sample_rate)
kwargs = {"frame_offset": start_time, "num_frames": end_time - start_time}
return torchaudio.load(path, **kwargs)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier)``
"""
fileid, line = self._filelist[n]
return self._load_tedlium_item(fileid, line, self._path)
def __len__(self) -> int:
"""TEDLIUM dataset custom function overwritting len default behaviour.
Returns:
int: TEDLIUM dataset length
"""
return len(self._filelist)
@property
def phoneme_dict(self):
"""dict[str, tuple[str]]: Phonemes. Mapping from word to tuple of phonemes.
Note that some words have empty phonemes.
"""
# Read phoneme dictionary
if not self._phoneme_dict:
self._phoneme_dict = {}
with open(self._dict_path, "r", encoding="utf-8") as f:
for line in f.readlines():
content = line.strip().split()
self._phoneme_dict[content[0]] = tuple(content[1:]) # content[1:] can be empty list
return self._phoneme_dict.copy()
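# Usage sketch (not part of the original module): "./data" and the release/subset values
# are illustrative. Decoding the .sph audio assumes a torchaudio backend with SPHERE support.
if __name__ == "__main__":
    dataset = TEDLIUM("./data", release="release1", subset="dev", download=True)
    waveform, sample_rate, transcript, talk_id, speaker_id, identifier = dataset[0]
    print(sample_rate, talk_id, speaker_id)
    print(transcript)
    print(len(dataset.phoneme_dict), "entries in the phoneme dictionary")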
|
import hashlib
import logging
import os
import tarfile
import urllib
import urllib.request
import zipfile
from typing import Any, Iterable, List, Optional
from torch.utils.model_zoo import tqdm
def stream_url(url: str,
start_byte: Optional[int] = None,
block_size: int = 32 * 1024,
progress_bar: bool = True) -> Iterable:
"""Stream url by chunk
Args:
url (str): Url.
start_byte (int or None, optional): Start streaming at that point (Default: ``None``).
block_size (int, optional): Size of chunks to stream (Default: ``32 * 1024``).
progress_bar (bool, optional): Display a progress bar (Default: ``True``).
"""
# If we already have the whole file, there is no need to download it again
req = urllib.request.Request(url, method="HEAD")
with urllib.request.urlopen(req) as response:
url_size = int(response.info().get("Content-Length", -1))
if url_size == start_byte:
return
req = urllib.request.Request(url)
if start_byte:
req.headers["Range"] = "bytes={}-".format(start_byte)
with urllib.request.urlopen(req) as upointer, tqdm(
unit="B",
unit_scale=True,
unit_divisor=1024,
total=url_size,
disable=not progress_bar,
) as pbar:
num_bytes = 0
while True:
chunk = upointer.read(block_size)
if not chunk:
break
yield chunk
num_bytes += len(chunk)
pbar.update(len(chunk))
def download_url(url: str,
download_folder: str,
filename: Optional[str] = None,
hash_value: Optional[str] = None,
hash_type: str = "sha256",
progress_bar: bool = True,
resume: bool = False) -> None:
"""Download file to disk.
Args:
url (str): Url.
download_folder (str): Folder to download file.
filename (str or None, optional): Name of downloaded file. If None, it is inferred from the url
(Default: ``None``).
hash_value (str or None, optional): Hash for url (Default: ``None``).
hash_type (str, optional): Hash type, among "sha256" and "md5" (Default: ``"sha256"``).
progress_bar (bool, optional): Display a progress bar (Default: ``True``).
resume (bool, optional): Enable resuming download (Default: ``False``).
"""
req = urllib.request.Request(url, method="HEAD")
req_info = urllib.request.urlopen(req).info()
# Detect filename
filename = filename or req_info.get_filename() or os.path.basename(url)
filepath = os.path.join(download_folder, filename)
if resume and os.path.exists(filepath):
mode = "ab"
local_size: Optional[int] = os.path.getsize(filepath)
elif not resume and os.path.exists(filepath):
raise RuntimeError(
"{} already exists. Delete the file manually and retry.".format(filepath)
)
else:
mode = "wb"
local_size = None
if hash_value and local_size == int(req_info.get("Content-Length", -1)):
with open(filepath, "rb") as file_obj:
if validate_file(file_obj, hash_value, hash_type):
return
raise RuntimeError(
"The hash of {} does not match. Delete the file manually and retry.".format(
filepath
)
)
with open(filepath, mode) as fpointer:
for chunk in stream_url(url, start_byte=local_size, progress_bar=progress_bar):
fpointer.write(chunk)
with open(filepath, "rb") as file_obj:
if hash_value and not validate_file(file_obj, hash_value, hash_type):
raise RuntimeError(
"The hash of {} does not match. Delete the file manually and retry.".format(
filepath
)
)
def validate_file(file_obj: Any, hash_value: str, hash_type: str = "sha256") -> bool:
"""Validate a given file object with its hash.
Args:
file_obj: File object to read from.
hash_value (str): Hash for url.
hash_type (str, optional): Hash type, among "sha256" and "md5" (Default: ``"sha256"``).
Returns:
        bool: True if the file matches the given hash, else False.
"""
if hash_type == "sha256":
hash_func = hashlib.sha256()
elif hash_type == "md5":
hash_func = hashlib.md5()
else:
raise ValueError
while True:
# Read by chunk to avoid filling memory
chunk = file_obj.read(1024 ** 2)
if not chunk:
break
hash_func.update(chunk)
return hash_func.hexdigest() == hash_value
def extract_archive(from_path: str, to_path: Optional[str] = None, overwrite: bool = False) -> List[str]:
"""Extract archive.
Args:
from_path (str): the path of the archive.
        to_path (str or None, optional): the root path of the extracted files (directory of from_path)
(Default: ``None``)
overwrite (bool, optional): overwrite existing files (Default: ``False``)
Returns:
List[str]: List of paths to extracted files even if not overwritten.
Examples:
>>> url = 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz'
>>> from_path = './validation.tar.gz'
>>> to_path = './'
        >>> torchaudio.datasets.utils.download_url(url, to_path)
>>> torchaudio.datasets.utils.extract_archive(from_path, to_path)
"""
if to_path is None:
to_path = os.path.dirname(from_path)
try:
with tarfile.open(from_path, "r") as tar:
logging.info("Opened tar file {}.".format(from_path))
files = []
for file_ in tar: # type: Any
file_path = os.path.join(to_path, file_.name)
if file_.isfile():
files.append(file_path)
if os.path.exists(file_path):
logging.info("{} already extracted.".format(file_path))
if not overwrite:
continue
tar.extract(file_, to_path)
return files
except tarfile.ReadError:
pass
try:
with zipfile.ZipFile(from_path, "r") as zfile:
logging.info("Opened zip file {}.".format(from_path))
files = zfile.namelist()
for file_ in files:
file_path = os.path.join(to_path, file_)
if os.path.exists(file_path):
logging.info("{} already extracted.".format(file_path))
if not overwrite:
continue
zfile.extract(file_, to_path)
return files
except zipfile.BadZipFile:
pass
raise NotImplementedError("We currently only support tar.gz, tgz, and zip achives.")
|
import os
import csv
from typing import Tuple, Union
from pathlib import Path
import torchaudio
from torchaudio.datasets.utils import download_url, extract_archive
from torch import Tensor
from torch.utils.data import Dataset
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""Create a Dataset for LJSpeech-1.1.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / 'metadata.csv'
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url(url, root, hash_value=checksum)
extract_archive(archive)
with open(self._metadata_path, "r", newline='') as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str):
``(waveform, sample_rate, transcript, normalized_transcript)``
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
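# Usage sketch (not part of the original module): "./data" is a placeholder root directory.
if __name__ == "__main__":
    dataset = LJSPEECH("./data", download=True)
    waveform, sample_rate, transcript, normalized_transcript = dataset[0]
    print(sample_rate, transcript)
    print(normalized_transcript)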
|
import os
from pathlib import Path
from typing import List, Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
import torchaudio
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",
"url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
"checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
}
}
class YESNO(Dataset):
"""Create a Dataset for YesNo.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"waves_yesno"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
archive = os.path.basename(url)
archive = root / archive
self._path = root / folder_in_archive
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url(url, root, hash_value=checksum)
extract_archive(archive)
if not os.path.isdir(self._path):
raise RuntimeError(
"Dataset not found. Please use `download=True` to download it."
)
self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))
def _load_item(self, fileid: str, path: str):
labels = [int(c) for c in fileid.split("_")]
file_audio = os.path.join(path, fileid + ".wav")
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, labels
def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, List[int]): ``(waveform, sample_rate, labels)``
"""
fileid = self._walker[n]
item = self._load_item(fileid, self._path)
return item
def __len__(self) -> int:
return len(self._walker)
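# Usage sketch (not part of the original module): "./data" is a placeholder root directory.
# Each label list encodes the eight yes/no words of the utterance as 1/0, taken from the file name.
if __name__ == "__main__":
    dataset = YESNO("./data", download=True)
    waveform, sample_rate, labels = dataset[0]
    print(sample_rate, labels)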
|
import os
from typing import Tuple, Union
from pathlib import Path
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriSpeech"
_CHECKSUMS = {
"http://www.openslr.org/resources/12/dev-clean.tar.gz":
"76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3",
"http://www.openslr.org/resources/12/dev-other.tar.gz":
"12661c48e8c3fe1de2c1caa4c3e135193bfb1811584f11f569dd12645aa84365",
"http://www.openslr.org/resources/12/test-clean.tar.gz":
"39fde525e59672dc6d1551919b1478f724438a95aa55f874b576be21967e6c23",
"http://www.openslr.org/resources/12/test-other.tar.gz":
"d09c181bba5cf717b3dee7d4d592af11a3ee3a09e08ae025c5506f6ebe961c29",
"http://www.openslr.org/resources/12/train-clean-100.tar.gz":
"d4ddd1d5a6ab303066f14971d768ee43278a5f2a0aa43dc716b0e64ecbbbf6e2",
"http://www.openslr.org/resources/12/train-clean-360.tar.gz":
"146a56496217e96c14334a160df97fffedd6e0a04e66b9c5af0d40be3c792ecf",
"http://www.openslr.org/resources/12/train-other-500.tar.gz":
"ddb22f27f96ec163645d53215559df6aa36515f26e01dd70798188350adcb6d2"
}
def load_librispeech_item(fileid: str,
path: str,
ext_audio: str,
ext_txt: str) -> Tuple[Tensor, int, str, int, int, int]:
speaker_id, chapter_id, utterance_id = fileid.split("-")
file_text = speaker_id + "-" + chapter_id + ext_txt
file_text = os.path.join(path, speaker_id, chapter_id, file_text)
fileid_audio = speaker_id + "-" + chapter_id + "-" + utterance_id
file_audio = fileid_audio + ext_audio
file_audio = os.path.join(path, speaker_id, chapter_id, file_audio)
# Load audio
waveform, sample_rate = torchaudio.load(file_audio)
# Load text
with open(file_text) as ft:
for line in ft:
fileid_text, transcript = line.strip().split(" ", 1)
if fileid_audio == fileid_text:
break
else:
            # Transcript not found
            raise FileNotFoundError("Transcript not found for " + fileid_audio)
return (
waveform,
sample_rate,
transcript,
int(speaker_id),
int(chapter_id),
int(utterance_id),
)
class LIBRISPEECH(Dataset):
"""Create a Dataset for LibriSpeech.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from,
or the type of the dataset to dowload.
Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``,
``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and
``"train-other-500"``. (default: ``"train-clean-100"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"LibriSpeech"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(self,
root: Union[str, Path],
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False) -> None:
if url in [
"dev-clean",
"dev-other",
"test-clean",
"test-other",
"train-clean-100",
"train-clean-360",
"train-other-500",
]:
ext_archive = ".tar.gz"
base_url = "http://www.openslr.org/resources/12/"
url = os.path.join(base_url, url + ext_archive)
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
basename = os.path.basename(url)
archive = os.path.join(root, basename)
basename = basename.split(".")[0]
folder_in_archive = os.path.join(folder_in_archive, basename)
self._path = os.path.join(root, folder_in_archive)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum)
extract_archive(archive)
self._walker = sorted(str(p.stem) for p in Path(self._path).glob('*/*/*' + self._ext_audio))
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, int, int):
``(waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id)``
"""
fileid = self._walker[n]
return load_librispeech_item(fileid, self._path, self._ext_audio, self._ext_txt)
def __len__(self) -> int:
return len(self._walker)
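# Usage sketch (not part of the original module): "./data" and "dev-clean" are illustrative.
if __name__ == "__main__":
    dataset = LIBRISPEECH("./data", url="dev-clean", download=True)
    waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id = dataset[0]
    print(len(dataset), sample_rate, speaker_id, chapter_id, utterance_id)
    print(transcript)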
|
from pathlib import Path
from typing import Union, Tuple, List
import torch
from torch.utils.data import Dataset
import torchaudio
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the LibriMix dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
        subset (str, optional): The subset to use. Options: [``train-360``, ``train-100``,
            ``dev``, and ``test``] (Default: ``train-360``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
        sample_rate (int, optional): Sample rate of the audio files. The ``sample_rate`` determines
            which subdirectory the audio files are fetched from. If any of the audio files has a different
            sample rate, a ``ValueError`` is raised. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``enh_single``, ``enh_both``, ``sep_clean``, ``sep_noisy``]
(Default: ``sep_clean``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(
f"Unsupported sample rate. Found {sample_rate}."
)
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(
f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}"
)
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
(int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
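# Usage sketch (not part of the original module): assumes a Libri2Mix directory has already
# been generated under "./data" following https://github.com/JorisCos/LibriMix.
if __name__ == "__main__":
    dataset = LibriMix("./data", subset="dev", num_speakers=2, sample_rate=8000, task="sep_clean")
    sample_rate, mixture, sources = dataset[0]
    print(sample_rate, mixture.shape, [s.shape for s in sources])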
|
import os
from typing import Tuple
from torch import Tensor
from torch.utils.data import Dataset
import torchaudio
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
URL = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip"
_CHECKSUMS = {
"https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip": "8a6ba2946b36fcbef0212cad601f4bfa"
}
SampleType = Tuple[Tensor, int, str, str, str]
class VCTK_092(Dataset):
"""Create VCTK 0.92 Dataset
Args:
root (str): Root directory where the dataset's top level directory is found.
mic_id (str, optional): Microphone ID. Either ``"mic1"`` or ``"mic2"``. (default: ``"mic2"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
url (str, optional): The URL to download the dataset from.
(default: ``"https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip"``)
audio_ext (str, optional): Custom audio extension if dataset is converted to non-default audio format.
Note:
* All the speeches from speaker ``p315`` will be skipped due to the lack of the corresponding text files.
* All the speeches from ``p280`` will be skipped for ``mic_id="mic2"`` due to the lack of the audio files.
* Some of the speeches from speaker ``p362`` will be skipped due to the lack of the audio files.
* See Also: https://datashare.is.ed.ac.uk/handle/10283/3443
"""
def __init__(
self,
root: str,
mic_id: str = "mic2",
download: bool = False,
url: str = URL,
audio_ext=".flac",
):
if mic_id not in ["mic1", "mic2"]:
raise RuntimeError(
f'`mic_id` has to be either "mic1" or "mic2". Found: {mic_id}'
)
archive = os.path.join(root, "VCTK-Corpus-0.92.zip")
self._path = os.path.join(root, "VCTK-Corpus-0.92")
self._txt_dir = os.path.join(self._path, "txt")
self._audio_dir = os.path.join(self._path, "wav48_silence_trimmed")
self._mic_id = mic_id
self._audio_ext = audio_ext
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum, hash_type="md5")
extract_archive(archive, self._path)
if not os.path.isdir(self._path):
raise RuntimeError(
"Dataset not found. Please use `download=True` to download it."
)
# Extracting speaker IDs from the folder structure
self._speaker_ids = sorted(os.listdir(self._txt_dir))
self._sample_ids = []
"""
Due to some insufficient data complexity in the 0.92 version of this dataset,
we start traversing the audio folder structure in accordance with the text folder.
As some of the audio files are missing of either ``mic_1`` or ``mic_2`` but the
text is present for the same, we first check for the existence of the audio file
before adding it to the ``sample_ids`` list.
Once the ``audio_ids`` are loaded into memory we can quickly access the list for
different parameters required by the user.
"""
for speaker_id in self._speaker_ids:
if speaker_id == "p280" and mic_id == "mic2":
continue
utterance_dir = os.path.join(self._txt_dir, speaker_id)
for utterance_file in sorted(
f for f in os.listdir(utterance_dir) if f.endswith(".txt")
):
utterance_id = os.path.splitext(utterance_file)[0]
audio_path_mic = os.path.join(
self._audio_dir,
speaker_id,
f"{utterance_id}_{mic_id}{self._audio_ext}",
)
if speaker_id == "p362" and not os.path.isfile(audio_path_mic):
continue
self._sample_ids.append(utterance_id.split("_"))
def _load_text(self, file_path) -> str:
with open(file_path) as file_path:
return file_path.readlines()[0]
def _load_audio(self, file_path) -> Tuple[Tensor, int]:
return torchaudio.load(file_path)
def _load_sample(self, speaker_id: str, utterance_id: str, mic_id: str) -> SampleType:
transcript_path = os.path.join(
self._txt_dir, speaker_id, f"{speaker_id}_{utterance_id}.txt"
)
audio_path = os.path.join(
self._audio_dir,
speaker_id,
f"{speaker_id}_{utterance_id}_{mic_id}{self._audio_ext}",
)
# Reading text
transcript = self._load_text(transcript_path)
# Reading FLAC
waveform, sample_rate = self._load_audio(audio_path)
return (waveform, sample_rate, transcript, speaker_id, utterance_id)
def __getitem__(self, n: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str, str):
``(waveform, sample_rate, transcript, speaker_id, utterance_id)``
"""
speaker_id, utterance_id = self._sample_ids[n]
return self._load_sample(speaker_id, utterance_id, self._mic_id)
def __len__(self) -> int:
return len(self._sample_ids)
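# Usage sketch (not part of the original module): "./data" is a placeholder root directory.
if __name__ == "__main__":
    dataset = VCTK_092("./data", mic_id="mic2", download=True)
    waveform, sample_rate, transcript, speaker_id, utterance_id = dataset[0]
    print(sample_rate, speaker_id, utterance_id)
    print(transcript)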
|
from ._wav2vec2.impl import (
Wav2Vec2Bundle,
Wav2Vec2ASRBundle,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_XLSR53,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
)
from ._tts import (
Tacotron2TTSBundle,
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
)
__all__ = [
'Wav2Vec2Bundle',
'Wav2Vec2ASRBundle',
'WAV2VEC2_BASE',
'WAV2VEC2_LARGE',
'WAV2VEC2_LARGE_LV60K',
'WAV2VEC2_ASR_BASE_10M',
'WAV2VEC2_ASR_BASE_100H',
'WAV2VEC2_ASR_BASE_960H',
'WAV2VEC2_ASR_LARGE_10M',
'WAV2VEC2_ASR_LARGE_100H',
'WAV2VEC2_ASR_LARGE_960H',
'WAV2VEC2_ASR_LARGE_LV60K_10M',
'WAV2VEC2_ASR_LARGE_LV60K_100H',
'WAV2VEC2_ASR_LARGE_LV60K_960H',
'WAV2VEC2_XLSR53',
'VOXPOPULI_ASR_BASE_10K_EN',
'VOXPOPULI_ASR_BASE_10K_ES',
'VOXPOPULI_ASR_BASE_10K_DE',
'VOXPOPULI_ASR_BASE_10K_FR',
'VOXPOPULI_ASR_BASE_10K_IT',
'HUBERT_BASE',
'HUBERT_LARGE',
'HUBERT_XLARGE',
'HUBERT_ASR_LARGE',
'HUBERT_ASR_XLARGE',
'Tacotron2TTSBundle',
'TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH',
'TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH',
'TACOTRON2_WAVERNN_CHAR_LJSPEECH',
'TACOTRON2_WAVERNN_PHONE_LJSPEECH',
]
|
def _get_en_labels():
return (
'|',
'E',
'T',
'A',
'O',
'N',
'I',
'H',
'S',
'R',
'D',
'L',
'U',
'M',
'W',
'C',
'F',
'G',
'Y',
'P',
'B',
'V',
'K',
"'",
'X',
'J',
'Q',
'Z',
)
def _get_de_labels():
return (
"|",
"e",
"n",
"i",
"r",
"s",
"t",
"a",
"d",
"h",
"u",
"l",
"g",
"c",
"m",
"o",
"b",
"w",
"f",
"k",
"z",
"p",
"v",
"ü",
"ä",
"ö",
"j",
"ß",
"y",
"x",
"q",
)
def _get_vp_en_labels():
return (
"|",
"e",
"t",
"o",
"i",
"a",
"n",
"s",
"r",
"h",
"l",
"d",
"c",
"u",
"m",
"p",
"f",
"g",
"w",
"y",
"b",
"v",
"k",
"x",
"j",
"q",
"z",
)
def _get_es_labels():
return (
"|",
"e",
"a",
"o",
"s",
"n",
"r",
"i",
"l",
"d",
"c",
"t",
"u",
"p",
"m",
"b",
"q",
"y",
"g",
"v",
"h",
"ó",
"f",
"í",
"á",
"j",
"z",
"ñ",
"é",
"x",
"ú",
"k",
"w",
"ü",
)
def _get_fr_labels():
return (
"|",
"e",
"s",
"n",
"i",
"t",
"r",
"a",
"o",
"u",
"l",
"d",
"c",
"p",
"m",
"é",
"v",
"q",
"f",
"g",
"b",
"h",
"x",
"à",
"j",
"è",
"y",
"ê",
"z",
"ô",
"k",
"ç",
"œ",
"û",
"ù",
"î",
"â",
"w",
"ï",
"ë",
"ü",
"æ",
)
def _get_it_labels():
return (
"|",
"e",
"i",
"a",
"o",
"n",
"t",
"r",
"l",
"s",
"c",
"d",
"u",
"p",
"m",
"g",
"v",
"h",
"z",
"f",
"b",
"q",
"à",
"è",
"ù",
"é",
"ò",
"ì",
"k",
"y",
"x",
"w",
"j",
"ó",
"í",
"ï",
)
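# Usage sketch (not part of the original module): build a label-to-index table the way a CTC
# decoder would, prepending a blank token at index 0. This mirrors what
# Wav2Vec2ASRBundle.get_labels() returns for the English bundles.
if __name__ == "__main__":
    labels = ("-",) + _get_en_labels()
    label_to_index = {label: index for index, label in enumerate(labels)}
    print(label_to_index["|"], label_to_index["E"])  # '|' is the word delimiter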
|
from dataclasses import dataclass
from typing import Dict, Tuple, Any
import torch
from torchaudio._internal import load_state_dict_from_url
from torchaudio.models import wav2vec2_model, Wav2Vec2Model
from . import utils
__all__ = []
@dataclass
class Wav2Vec2Bundle:
"""torchaudio.pipelines.Wav2Vec2Bundle()
Data class that bundles associated information to use pretrained Wav2Vec2Model.
This class provides interfaces for instantiating the pretrained model along with
the information necessary to retrieve pretrained weights and additional data
to be used with the model.
Torchaudio library instantiates objects of this class, each of which represents
a different pretrained model. Client code should access pretrained models via these
instances.
Please see below for the usage and the available values.
Example - Feature Extraction
>>> import torchaudio
>>>
>>> bundle = torchaudio.pipelines.HUBERT_BASE
>>>
>>> # Build the model and load pretrained weight.
>>> model = bundle.get_model()
Downloading:
100%|███████████████████████████████| 360M/360M [00:06<00:00, 60.6MB/s]
>>>
>>> # Resample audio to the expected sampling rate
>>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate)
>>>
>>> # Extract acoustic features
>>> features, _ = model.extract_features(waveform)
""" # noqa: E501
_path: str
_params: Dict[str, Any]
_sample_rate: float
@property
def sample_rate(self) -> float:
"""Sample rate of the audio that the model is trained on.
:type: float
"""
return self._sample_rate
def _get_state_dict(self, dl_kwargs):
url = f'https://download.pytorch.org/torchaudio/models/{self._path}'
dl_kwargs = {} if dl_kwargs is None else dl_kwargs
state_dict = load_state_dict_from_url(url, **dl_kwargs)
return state_dict
def get_model(self, *, dl_kwargs=None) -> Wav2Vec2Model:
# Overriding the signature so that the return type is correct on Sphinx
"""get_model(self, *, dl_kwargs=None) -> torchaudio.models.Wav2Vec2Model
Construct the model and load the pretrained weight.
The weight file is downloaded from the internet and cached with
:func:`torch.hub.load_state_dict_from_url`
Args:
dl_kwargs (dictionary of keyword arguments): Passed to :func:`torch.hub.load_state_dict_from_url`.
"""
model = wav2vec2_model(**self._params)
model.load_state_dict(self._get_state_dict(dl_kwargs))
model.eval()
return model
@dataclass
class Wav2Vec2ASRBundle(Wav2Vec2Bundle):
"""torchaudio.pipelines.Wav2Vec2ASRBundle()
Data class that bundles associated information to use pretrained Wav2Vec2Model.
This class provides interfaces for instantiating the pretrained model along with
the information necessary to retrieve pretrained weights and additional data
to be used with the model.
Torchaudio library instantiates objects of this class, each of which represents
a different pretrained model. Client code should access pretrained models via these
instances.
Please see below for the usage and the available values.
Example - ASR
>>> import torchaudio
>>>
>>> bundle = torchaudio.pipelines.HUBERT_ASR_LARGE
>>>
>>> # Build the model and load pretrained weight.
>>> model = bundle.get_model()
Downloading:
100%|███████████████████████████████| 1.18G/1.18G [00:17<00:00, 73.8MB/s]
>>>
>>> # Check the corresponding labels of the output.
>>> labels = bundle.get_labels()
>>> print(labels)
('-', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z')
>>>
>>> # Resample audio to the expected sampling rate
>>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate)
>>>
>>> # Infer the label probability distribution
>>> emissions, _ = model(waveform)
>>>
>>> # Pass emission to decoder
>>> # `ctc_decode` is for illustration purpose only
>>> transcripts = ctc_decode(emissions, labels)
""" # noqa: E501
_labels: Tuple[str]
_remove_aux_axis: Tuple[int] = (1, 2, 3)
def get_labels(
self,
*,
blank: str = '-',
) -> Tuple[str]:
"""The output class labels (only applicable to fine-tuned bundles)
        The first one is the blank token, and it is customizable.
Args:
blank (str, optional): Blank token. (default: ``'-'``)
Returns:
Tuple[str]:
For models fine-tuned on ASR, returns the tuple of strings representing
the output class labels.
Example
>>> import torchaudio
            >>> torchaudio.pipelines.HUBERT_ASR_LARGE.get_labels()
('-', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z')
""" # noqa: E501
return (blank, *self._labels)
def _get_state_dict(self, dl_kwargs):
state_dict = super()._get_state_dict(dl_kwargs)
if self._remove_aux_axis:
            # Remove the seemingly unnecessary axes.
            # For the ASR task, the pretrained weights originating from fairseq have unrelated
            # dimensions at indices 1, 2, 3. They come from the Dictionary implementation of fairseq,
            # which was intended for NLP tasks and is not used during ASR training.
            # https://github.com/pytorch/fairseq/blob/c5ff181125c7e6126b49a85e5ebdd5f5b6a07914/fairseq/data/dictionary.py#L21-L37
            # https://github.com/pytorch/fairseq/blob/c5ff181125c7e6126b49a85e5ebdd5f5b6a07914/fairseq/criterions/ctc.py#L126-L129
            #
            # Also, some pretrained weights originating from voxpopuli have extra dimensions that are
            # almost never used and appear to be a mistake.
            # The label `1` shows up in the training dataset of German (1 out of 16M),
            # English (1 / 28M), Spanish (1 / 9.4M), Romanian (1 / 4.7M) and Polish (6 / 5.8M)
for key in ['aux.weight', 'aux.bias']:
t = state_dict[key]
state_dict[key] = torch.stack([t[i] for i in range(t.size(0)) if i not in self._remove_aux_axis])
return state_dict
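# Sketch of the `ctc_decode` helper referenced in the Wav2Vec2ASRBundle docstring example.
# This is not part of the torchaudio API; it is a minimal greedy (best-path) decoder that
# assumes `emission` is the output for a single utterance with shape (num_frames, num_labels)
# and that `labels` comes from Wav2Vec2ASRBundle.get_labels() (blank at index 0, '|' as the
# word delimiter).
def _greedy_ctc_decode(emission: torch.Tensor, labels: Tuple[str, ...]) -> str:
    indices = torch.argmax(emission, dim=-1)  # best label per frame
    indices = torch.unique_consecutive(indices)  # collapse repeated labels
    indices = [i for i in indices.tolist() if i != 0]  # drop the blank token
    return "".join(labels[i] for i in indices).replace("|", " ").strip()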
WAV2VEC2_BASE = Wav2Vec2Bundle(
_path='wav2vec2_fairseq_base_ls960.pth',
_params={
'extractor_mode': 'group_norm',
'extractor_conv_layer_config': [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
'extractor_conv_bias': False,
'encoder_embed_dim': 768,
'encoder_projection_dropout': 0.1,
'encoder_pos_conv_kernel': 128,
'encoder_pos_conv_groups': 16,
'encoder_num_layers': 12,
'encoder_num_heads': 12,
'encoder_attention_dropout': 0.1,
'encoder_ff_interm_features': 3072,
'encoder_ff_interm_dropout': 0.0,
'encoder_dropout': 0.1,
'encoder_layer_norm_first': False,
'encoder_layer_drop': 0.05,
"aux_num_out": None,
},
_sample_rate=16000,
)
WAV2VEC2_BASE.__doc__ = """wav2vec 2.0 model with "Base" configuration.
Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").
Not fine-tuned.
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
""" # noqa: E501
WAV2VEC2_ASR_BASE_10M = Wav2Vec2ASRBundle(
_path='wav2vec2_fairseq_base_ls960_asr_ll10m.pth',
_params={
'extractor_mode': 'group_norm',
'extractor_conv_layer_config': [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
'extractor_conv_bias': False,
'encoder_embed_dim': 768,
'encoder_projection_dropout': 0.1,
'encoder_pos_conv_kernel': 128,
'encoder_pos_conv_groups': 16,
'encoder_num_layers': 12,
'encoder_num_heads': 12,
'encoder_attention_dropout': 0.1,
'encoder_ff_interm_features': 3072,
'encoder_ff_interm_dropout': 0.0,
'encoder_dropout': 0.1,
'encoder_layer_norm_first': False,
'encoder_layer_drop': 0.05,
"aux_num_out": 29,
},
_labels=utils._get_en_labels(),
_sample_rate=16000,
)
WAV2VEC2_ASR_BASE_10M.__doc__ = """Build "base" wav2vec2 model with an extra linear module
Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on 10 minutes of transcribed audio from *Libri-Light* dataset
[:footcite:`librilight`] ("train-10min" subset).
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
WAV2VEC2_ASR_BASE_100H = Wav2Vec2ASRBundle(
'wav2vec2_fairseq_base_ls960_asr_ls100.pth',
{
'extractor_mode': 'group_norm',
'extractor_conv_layer_config': [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
'extractor_conv_bias': False,
'encoder_embed_dim': 768,
'encoder_projection_dropout': 0.1,
'encoder_pos_conv_kernel': 128,
'encoder_pos_conv_groups': 16,
'encoder_num_layers': 12,
'encoder_num_heads': 12,
'encoder_attention_dropout': 0.1,
'encoder_ff_interm_features': 3072,
'encoder_ff_interm_dropout': 0.0,
'encoder_dropout': 0.1,
'encoder_layer_norm_first': False,
'encoder_layer_drop': 0.05,
"aux_num_out": 29,
},
_labels=utils._get_en_labels(),
_sample_rate=16000,
)
WAV2VEC2_ASR_BASE_100H.__doc__ = """Build "base" wav2vec2 model with an extra linear module
Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on 100 hours of transcribed audio from "train-clean-100" subset.
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
WAV2VEC2_ASR_BASE_960H = Wav2Vec2ASRBundle(
'wav2vec2_fairseq_base_ls960_asr_ls960.pth',
{
"extractor_mode": "group_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": False,
"encoder_embed_dim": 768,
"encoder_projection_dropout": 0.1,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 12,
"encoder_num_heads": 12,
"encoder_attention_dropout": 0.1,
"encoder_ff_interm_features": 3072,
"encoder_ff_interm_dropout": 0.0,
"encoder_dropout": 0.1,
"encoder_layer_norm_first": False,
"encoder_layer_drop": 0.05,
"aux_num_out": 29,
},
_labels=utils._get_en_labels(),
_sample_rate=16000,
)
WAV2VEC2_ASR_BASE_960H.__doc__ = """Build "base" wav2vec2 model with an extra linear module
Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on the same audio with the corresponding transcripts.
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
WAV2VEC2_LARGE = Wav2Vec2Bundle(
'wav2vec2_fairseq_large_ls960.pth',
{
"extractor_mode": "group_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": False,
"encoder_embed_dim": 1024,
"encoder_projection_dropout": 0.1,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 24,
"encoder_num_heads": 16,
"encoder_attention_dropout": 0.1,
"encoder_ff_interm_features": 4096,
"encoder_ff_interm_dropout": 0.0,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": False,
"encoder_layer_drop": 0.2,
"aux_num_out": None,
},
_sample_rate=16000,
)
WAV2VEC2_LARGE.__doc__ = """Build "large" wav2vec2 model.
Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").
Not fine-tuned.
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
""" # noqa: E501
WAV2VEC2_ASR_LARGE_10M = Wav2Vec2ASRBundle(
'wav2vec2_fairseq_large_ls960_asr_ll10m.pth',
{
"extractor_mode": "group_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": False,
"encoder_embed_dim": 1024,
"encoder_projection_dropout": 0.1,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 24,
"encoder_num_heads": 16,
"encoder_attention_dropout": 0.1,
"encoder_ff_interm_features": 4096,
"encoder_ff_interm_dropout": 0.0,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": False,
"encoder_layer_drop": 0.2,
"aux_num_out": 29,
},
_labels=utils._get_en_labels(),
_sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_10M.__doc__ = """Build "large" wav2vec2 model with an extra linear module
Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on 10 minutes of transcribed audio from *Libri-Light* dataset
[:footcite:`librilight`] ("train-10min" subset).
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
WAV2VEC2_ASR_LARGE_100H = Wav2Vec2ASRBundle(
'wav2vec2_fairseq_large_ls960_asr_ls100.pth',
{
"extractor_mode": "group_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": False,
"encoder_embed_dim": 1024,
"encoder_projection_dropout": 0.1,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 24,
"encoder_num_heads": 16,
"encoder_attention_dropout": 0.1,
"encoder_ff_interm_features": 4096,
"encoder_ff_interm_dropout": 0.0,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": False,
"encoder_layer_drop": 0.2,
"aux_num_out": 29,
},
_labels=utils._get_en_labels(),
_sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_100H.__doc__ = """Build "large" wav2vec2 model with an extra linear module
Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on 100 hours of transcribed audio from
the same dataset ("train-clean-100" subset).
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
WAV2VEC2_ASR_LARGE_960H = Wav2Vec2ASRBundle(
'wav2vec2_fairseq_large_ls960_asr_ls960.pth',
{
"extractor_mode": "group_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": False,
"encoder_embed_dim": 1024,
"encoder_projection_dropout": 0.1,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 24,
"encoder_num_heads": 16,
"encoder_attention_dropout": 0.1,
"encoder_ff_interm_features": 4096,
"encoder_ff_interm_dropout": 0.0,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": False,
"encoder_layer_drop": 0.2,
"aux_num_out": 29,
},
_labels=utils._get_en_labels(),
_sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_960H.__doc__ = """Build "large" wav2vec2 model with an extra linear module
Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and
fine-tuned for ASR on the same audio with the corresponding transcripts.
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
WAV2VEC2_LARGE_LV60K = Wav2Vec2Bundle(
'wav2vec2_fairseq_large_lv60k.pth',
{
"extractor_mode": "layer_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": True,
"encoder_embed_dim": 1024,
"encoder_projection_dropout": 0.1,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 24,
"encoder_num_heads": 16,
"encoder_attention_dropout": 0.1,
"encoder_ff_interm_features": 4096,
"encoder_ff_interm_dropout": 0.0,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": True,
"encoder_layer_drop": 0.0,
"aux_num_out": None,
},
_sample_rate=16000,
)
WAV2VEC2_LARGE_LV60K.__doc__ = """Build "large-lv60k" wav2vec2 model.
Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`].
Not fine-tuned.
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
""" # noqa: E501
WAV2VEC2_ASR_LARGE_LV60K_10M = Wav2Vec2ASRBundle(
'wav2vec2_fairseq_large_lv60k_asr_ll10m.pth',
{
"extractor_mode": "layer_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": True,
"encoder_embed_dim": 1024,
"encoder_projection_dropout": 0.1,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 24,
"encoder_num_heads": 16,
"encoder_attention_dropout": 0.1,
"encoder_ff_interm_features": 4096,
"encoder_ff_interm_dropout": 0.0,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": True,
"encoder_layer_drop": 0.0,
"aux_num_out": 29,
},
_labels=utils._get_en_labels(),
_sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_LV60K_10M.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module.
Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`], and
fine-tuned for ASR on 10 minutes of transcribed audio from
the same dataset ("train-10min" subset).
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
WAV2VEC2_ASR_LARGE_LV60K_100H = Wav2Vec2ASRBundle(
'wav2vec2_fairseq_large_lv60k_asr_ls100.pth',
{
"extractor_mode": "layer_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": True,
"encoder_embed_dim": 1024,
"encoder_projection_dropout": 0.1,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 24,
"encoder_num_heads": 16,
"encoder_attention_dropout": 0.1,
"encoder_ff_interm_features": 4096,
"encoder_ff_interm_dropout": 0.0,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": True,
"encoder_layer_drop": 0.0,
"aux_num_out": 29,
},
_labels=utils._get_en_labels(),
_sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_LV60K_100H.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module.
Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`], and
fine-tuned for ASR on 100 hours of transcribed audio from
*LibriSpeech* dataset [:footcite:`7178964`] ("train-clean-100" subset).
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
WAV2VEC2_ASR_LARGE_LV60K_960H = Wav2Vec2ASRBundle(
'wav2vec2_fairseq_large_lv60k_asr_ls960.pth',
{
"extractor_mode": "layer_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": True,
"encoder_embed_dim": 1024,
"encoder_projection_dropout": 0.1,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 24,
"encoder_num_heads": 16,
"encoder_attention_dropout": 0.1,
"encoder_ff_interm_features": 4096,
"encoder_ff_interm_dropout": 0.0,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": True,
"encoder_layer_drop": 0.0,
"aux_num_out": 29,
},
_labels=utils._get_en_labels(),
_sample_rate=16000,
)
WAV2VEC2_ASR_LARGE_LV60K_960H.__doc__ = """Build "large-lv60k" wav2vec2 model with an extra linear module.
Pre-trained on 60,000 hours of unlabeled audio from *Libri-Light*
[:footcite:`librilight`] dataset, and
fine-tuned for ASR on 960 hours of transcribed audio from
*LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").
Originally published by the authors of *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
WAV2VEC2_XLSR53 = Wav2Vec2Bundle(
'wav2vec2_fairseq_large_xlsr53.pth',
{
"extractor_mode": "layer_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": True,
"encoder_embed_dim": 1024,
"encoder_projection_dropout": 0.0,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 24,
"encoder_num_heads": 16,
"encoder_attention_dropout": 0.0,
"encoder_ff_interm_features": 4096,
"encoder_ff_interm_dropout": 0.0,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": True,
"encoder_layer_drop": 0.0,
"aux_num_out": None,
},
_sample_rate=16000,
)
WAV2VEC2_XLSR53.__doc__ = """wav2vec 2.0 model with "Base" configuration.
Trained on 56,000 hours of unlabeled audio from multiple datasets (
*Multilingual LibriSpeech* [:footcite:`Pratap_2020`],
*CommonVoice* [:footcite:`ardila2020common`] and
*BABEL* [:footcite:`Gales2014SpeechRA`]).
Not fine-tuned.
Originally published by the authors of
*Unsupervised Cross-lingual Representation Learning for Speech Recognition*
[:footcite:`conneau2020unsupervised`] under MIT License and redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/wav2vec#pre-trained-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
""" # noqa: E501
HUBERT_BASE = Wav2Vec2Bundle(
'hubert_fairseq_base_ls960.pth',
{
'extractor_mode': 'group_norm',
'extractor_conv_layer_config': [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
'extractor_conv_bias': False,
'encoder_embed_dim': 768,
'encoder_projection_dropout': 0.1,
'encoder_pos_conv_kernel': 128,
'encoder_pos_conv_groups': 16,
'encoder_num_layers': 12,
'encoder_num_heads': 12,
'encoder_attention_dropout': 0.1,
'encoder_ff_interm_features': 3072,
'encoder_ff_interm_dropout': 0.0,
'encoder_dropout': 0.1,
'encoder_layer_norm_first': False,
'encoder_layer_drop': 0.05,
'aux_num_out': None,
},
_sample_rate=16000,
)
HUBERT_BASE.__doc__ = """HuBERT model with "Base" configuration.
Pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").
Not fine-tuned.
Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
""" # noqa: E501
HUBERT_LARGE = Wav2Vec2Bundle(
'hubert_fairseq_large_ll60k.pth',
{
'extractor_mode': 'layer_norm',
'extractor_conv_layer_config': [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
'extractor_conv_bias': False,
'encoder_embed_dim': 1024,
'encoder_projection_dropout': 0.0,
'encoder_pos_conv_kernel': 128,
'encoder_pos_conv_groups': 16,
'encoder_num_layers': 24,
'encoder_num_heads': 16,
'encoder_attention_dropout': 0.0,
'encoder_ff_interm_features': 4096,
'encoder_ff_interm_dropout': 0.0,
'encoder_dropout': 0.0,
'encoder_layer_norm_first': True,
'encoder_layer_drop': 0.0,
'aux_num_out': None,
},
_sample_rate=16000,
)
HUBERT_LARGE.__doc__ = """HuBERT model with "Large" configuration.
Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`].
Not fine-tuned.
Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
""" # noqa: E501
HUBERT_XLARGE = Wav2Vec2Bundle(
'hubert_fairseq_xlarge_ll60k.pth',
{
'extractor_mode': 'layer_norm',
'extractor_conv_layer_config': [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
'extractor_conv_bias': False,
'encoder_embed_dim': 1280,
'encoder_projection_dropout': 0.0,
'encoder_pos_conv_kernel': 128,
'encoder_pos_conv_groups': 16,
'encoder_num_layers': 48,
'encoder_num_heads': 16,
'encoder_attention_dropout': 0.0,
'encoder_ff_interm_features': 5120,
'encoder_ff_interm_dropout': 0.0,
'encoder_dropout': 0.0,
'encoder_layer_norm_first': True,
'encoder_layer_drop': 0.0,
'aux_num_out': None,
},
_sample_rate=16000,
)
HUBERT_XLARGE.__doc__ = """HuBERT model with "Extra Large" configuration.
Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`].
Not fine-tuned.
Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
""" # noqa: E501
HUBERT_ASR_LARGE = Wav2Vec2ASRBundle(
'hubert_fairseq_large_ll60k_asr_ls960.pth',
{
'extractor_mode': 'layer_norm',
'extractor_conv_layer_config': [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
'extractor_conv_bias': False,
'encoder_embed_dim': 1024,
'encoder_projection_dropout': 0.0,
'encoder_pos_conv_kernel': 128,
'encoder_pos_conv_groups': 16,
'encoder_num_layers': 24,
'encoder_num_heads': 16,
'encoder_attention_dropout': 0.0,
'encoder_ff_interm_features': 4096,
'encoder_ff_interm_dropout': 0.1,
'encoder_dropout': 0.0,
'encoder_layer_norm_first': True,
'encoder_layer_drop': 0.1,
'aux_num_out': 29,
},
_labels=utils._get_en_labels(),
_sample_rate=16000,
)
HUBERT_ASR_LARGE.__doc__ = """HuBERT model with "Large" configuration.
Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`], and
fine-tuned for ASR on 960 hours of transcribed audio from
*LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").
Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
HUBERT_ASR_XLARGE = Wav2Vec2ASRBundle(
'hubert_fairseq_xlarge_ll60k_asr_ls960.pth',
{
'extractor_mode': 'layer_norm',
'extractor_conv_layer_config': [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
'extractor_conv_bias': False,
'encoder_embed_dim': 1280,
'encoder_projection_dropout': 0.0,
'encoder_pos_conv_kernel': 128,
'encoder_pos_conv_groups': 16,
'encoder_num_layers': 48,
'encoder_num_heads': 16,
'encoder_attention_dropout': 0.0,
'encoder_ff_interm_features': 5120,
'encoder_ff_interm_dropout': 0.1,
'encoder_dropout': 0.0,
'encoder_layer_norm_first': True,
'encoder_layer_drop': 0.1,
'aux_num_out': 29,
},
_labels=utils._get_en_labels(),
_sample_rate=16000,
)
HUBERT_ASR_XLARGE.__doc__ = """HuBERT model with "Extra Large" configuration.
Pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset [:footcite:`librilight`], and
fine-tuned for ASR on 960 hours of transcribed audio from
*LibriSpeech* dataset [:footcite:`7178964`]
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").
Originally published by the authors of *HuBERT* [:footcite:`hsu2021hubert`] under MIT License and
redistributed with the same license.
[`License <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/LICENSE>`__,
`Source <https://github.com/pytorch/fairseq/blob/ce6c9eeae163ac04b79539c78e74f292f29eaa18/examples/hubert#pre-trained-and-fine-tuned-asr-models>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
VOXPOPULI_ASR_BASE_10K_DE = Wav2Vec2ASRBundle(
'wav2vec2_voxpopuli_base_10k_asr_de.pt',
{
"extractor_mode": "group_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": False,
"encoder_embed_dim": 768,
"encoder_projection_dropout": 0.0,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 12,
"encoder_num_heads": 12,
"encoder_attention_dropout": 0.0,
"encoder_ff_interm_features": 3072,
"encoder_ff_interm_dropout": 0.1,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": False,
"encoder_layer_drop": 0.1,
"aux_num_out": 32,
},
_labels=utils._get_de_labels(),
_sample_rate=16000,
_remove_aux_axis=(1, 2, 3, 35),
)
VOXPOPULI_ASR_BASE_10K_DE.__doc__ = """wav2vec 2.0 model with "Base" configuration.
Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`]
("10k" subset, consisting of 23 languages).
Fine-tuned for ASR on 282 hours of transcribed audio from the "de" subset.
Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and
redistributed with the same license.
[`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__,
`Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
VOXPOPULI_ASR_BASE_10K_EN = Wav2Vec2ASRBundle(
'wav2vec2_voxpopuli_base_10k_asr_en.pt',
{
"extractor_mode": "group_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": False,
"encoder_embed_dim": 768,
"encoder_projection_dropout": 0.0,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 12,
"encoder_num_heads": 12,
"encoder_attention_dropout": 0.0,
"encoder_ff_interm_features": 3072,
"encoder_ff_interm_dropout": 0.1,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": False,
"encoder_layer_drop": 0.1,
"aux_num_out": 28
},
_labels=utils._get_vp_en_labels(),
_sample_rate=16000,
_remove_aux_axis=(1, 2, 3, 31),
)
VOXPOPULI_ASR_BASE_10K_EN.__doc__ = """wav2vec 2.0 model with "Base" configuration.
Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`]
("10k" subset, consisting of 23 languages).
Fine-tuned for ASR on 543 hours of transcribed audio from the "en" subset.
Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and
redistributed with the same license.
[`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__,
`Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
VOXPOPULI_ASR_BASE_10K_ES = Wav2Vec2ASRBundle(
'wav2vec2_voxpopuli_base_10k_asr_es.pt',
{
"extractor_mode": "group_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": False,
"encoder_embed_dim": 768,
"encoder_projection_dropout": 0.0,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 12,
"encoder_num_heads": 12,
"encoder_attention_dropout": 0.0,
"encoder_ff_interm_features": 3072,
"encoder_ff_interm_dropout": 0.1,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": False,
"encoder_layer_drop": 0.1,
"aux_num_out": 35
},
_labels=utils._get_es_labels(),
_sample_rate=16000,
_remove_aux_axis=(1, 2, 3, 35),
)
VOXPOPULI_ASR_BASE_10K_ES.__doc__ = """wav2vec 2.0 model with "Base" configuration.
Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`]
("10k" subset, consisting of 23 languages).
Fine-tuned for ASR on 166 hours of transcribed audio from the "es" subset.
Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and
redistributed with the same license.
[`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__,
`Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
VOXPOPULI_ASR_BASE_10K_FR = Wav2Vec2ASRBundle(
'wav2vec2_voxpopuli_base_10k_asr_fr.pt',
{
"extractor_mode": "group_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": False,
"encoder_embed_dim": 768,
"encoder_projection_dropout": 0.0,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 12,
"encoder_num_heads": 12,
"encoder_attention_dropout": 0.0,
"encoder_ff_interm_features": 3072,
"encoder_ff_interm_dropout": 0.1,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": False,
"encoder_layer_drop": 0.1,
"aux_num_out": 43
},
_labels=utils._get_fr_labels(),
_sample_rate=16000,
)
VOXPOPULI_ASR_BASE_10K_FR.__doc__ = """wav2vec 2.0 model with "Base" configuration.
Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`]
("10k" subset, consisting of 23 languages).
Fine-tuned for ASR on 211 hours of transcribed audio from the "fr" subset.
Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and
redistributed with the same license.
[`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__,
`Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
VOXPOPULI_ASR_BASE_10K_IT = Wav2Vec2ASRBundle(
'wav2vec2_voxpopuli_base_10k_asr_it.pt',
{
"extractor_mode": "group_norm",
"extractor_conv_layer_config": [
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
],
"extractor_conv_bias": False,
"encoder_embed_dim": 768,
"encoder_projection_dropout": 0.0,
"encoder_pos_conv_kernel": 128,
"encoder_pos_conv_groups": 16,
"encoder_num_layers": 12,
"encoder_num_heads": 12,
"encoder_attention_dropout": 0.0,
"encoder_ff_interm_features": 3072,
"encoder_ff_interm_dropout": 0.1,
"encoder_dropout": 0.0,
"encoder_layer_norm_first": False,
"encoder_layer_drop": 0.1,
"aux_num_out": 37,
},
_labels=utils._get_it_labels(),
_sample_rate=16000,
_remove_aux_axis=(1, 2, 3),
)
VOXPOPULI_ASR_BASE_10K_IT.__doc__ = """wav2vec 2.0 model with "Base" configuration.
Pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset [:footcite:`voxpopuli`]
("10k" subset, consisting of 23 languages).
Fine-tuned for ASR on 91 hours of transcribed audio from the "it" subset.
Originally published by the authors of *VoxPopuli* [:footcite:`voxpopuli`] under CC BY-NC 4.0 and
redistributed with the same license.
[`License <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#license>`__,
`Source <https://github.com/facebookresearch/voxpopuli/tree/160e4d7915bad9f99b2c35b1d3833e51fd30abf2#asr-and-lm>`__]
Please refer to :func:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
""" # noqa: E501
|
from abc import ABC, abstractmethod
from typing import Union, List, Tuple, Optional
from torch import Tensor
from torchaudio.models import Tacotron2
class _TextProcessor(ABC):
@property
@abstractmethod
def tokens(self):
"""The tokens that the each value in the processed tensor represent.
See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_text_processor` for the usage.
:type: List[str]
"""
@abstractmethod
def __call__(self, texts: Union[str, List[str]]) -> Tuple[Tensor, Tensor]:
"""Encode the given (batch of) texts into numerical tensors
See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_text_processor` for the usage.
Args:
text (str or list of str): The input texts.
Returns:
(Tensor, Tensor):
Tensor:
The encoded texts. Shape: `(batch, max length)`
Tensor:
The valid length of each sample in the batch. Shape: `(batch, )`.
"""
class _Vocoder(ABC):
@property
@abstractmethod
def sample_rate(self):
"""The sample rate of the resulting waveform
See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_vocoder` for the usage.
:type: float
"""
@abstractmethod
def __call__(self, specgrams: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]:
"""Generate waveform from the given input, such as spectrogram
See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_vocoder` for the usage.
Args:
specgrams (Tensor):
The input spectrogram. Shape: `(batch, frequency bins, time)`.
The expected shape depends on the implementation.
lengths (Tensor, or None, optional):
The valid length of each sample in the batch. Shape: `(batch, )`.
(Default: `None`)
Returns:
(Tensor, Optional[Tensor]):
Tensor:
The generated waveform. Shape: `(batch, max length)`
Tensor or None:
The valid length of each sample in the batch. Shape: `(batch, )`.
"""
class Tacotron2TTSBundle(ABC):
"""Data class that bundles associated information to use pretrained Tacotron2 and vocoder.
This class provides interfaces for instantiating the pretrained model along with
the information necessary to retrieve pretrained weights and additional data
to be used with the model.
Torchaudio library instantiates objects of this class, each of which represents
a different pretrained model. Client code should access pretrained models via these
instances.
Please see below for the usage and the available values.
Example - Character-based TTS pipeline with Tacotron2 and WaveRNN
>>> import torchaudio
>>>
>>> text = "Hello, T T S !"
>>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_CHAR_LJSPEECH
>>>
>>> # Build processor, Tacotron2 and WaveRNN model
>>> processor = bundle.get_text_processor()
>>> tacotron2 = bundle.get_tacotron2()
Downloading:
100%|███████████████████████████████| 107M/107M [00:01<00:00, 87.9MB/s]
>>> vocoder = bundle.get_vocoder()
Downloading:
100%|███████████████████████████████| 16.7M/16.7M [00:00<00:00, 78.1MB/s]
>>>
>>> # Encode text
>>> input, lengths = processor(text)
>>>
>>> # Generate (mel-scale) spectrogram
>>> specgram, lengths, _ = tacotron2.infer(input, lengths)
>>>
>>> # Convert spectrogram to waveform
>>> waveforms, lengths = vocoder(specgram, lengths)
>>>
>>> torchaudio.save('hello-tts.wav', waveforms, vocoder.sample_rate)
Example - Phoneme-based TTS pipeline with Tacotron2 and WaveRNN
>>>
>>> # Note:
>>> # This bundle uses pre-trained DeepPhonemizer as
>>> # the text pre-processor.
>>> # Please install deep-phonemizer.
>>> # See https://github.com/as-ideas/DeepPhonemizer
>>> # The pretrained weight is automatically downloaded.
>>>
>>> import torchaudio
>>>
>>> text = "Hello, TTS!"
>>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH
>>>
>>> # Build processor, Tacotron2 and WaveRNN model
>>> processor = bundle.get_text_processor()
Downloading:
100%|███████████████████████████████| 63.6M/63.6M [00:04<00:00, 15.3MB/s]
>>> tacotron2 = bundle.get_tacotron2()
Downloading:
100%|███████████████████████████████| 107M/107M [00:01<00:00, 87.9MB/s]
>>> vocoder = bundle.get_vocoder()
Downloading:
100%|███████████████████████████████| 16.7M/16.7M [00:00<00:00, 78.1MB/s]
>>>
>>> # Encode text
>>> input, lengths = processor(text)
>>>
>>> # Generate (mel-scale) spectrogram
>>> specgram, lengths, _ = tacotron2.infer(input, lengths)
>>>
>>> # Convert spectrogram to waveform
>>> waveforms, lengths = vocoder(specgram, lengths)
>>>
>>> torchaudio.save('hello-tts.wav', waveforms, vocoder.sample_rate)
"""
# Using the inner class so that these interfaces are not directly exposed on
# `torchaudio.pipelines`, but still listed in documentation.
# Text processing and vocoding are generic concepts, and we do not know what kinds of
# text processors and vocoders will be added in the future, so we keep these
# interfaces specific to this Tacotron2TTS pipeline.
class TextProcessor(_TextProcessor):
"""Interface of the text processing part of Tacotron2TTS pipeline
See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_text_processor` for the usage.
"""
class Vocoder(_Vocoder):
"""Interface of the vocoder part of Tacotron2TTS pipeline
See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_vocoder` for the usage.
"""
@abstractmethod
def get_text_processor(self, *, dl_kwargs=None) -> TextProcessor:
# Overriding the signature so that the return type is correct on Sphinx
"""get_text_processor(self, *, dl_kwargs=None) -> torchaudio.pipelines.Tacotron2TTSBundle.TextProcessor
Create a text processor
For character-based pipeline, this processor splits the input text by character.
For phoneme-based pipeline, this processor converts the input text (grapheme) to
phonemes.
If a pre-trained weight file is necessary,
:func:`torch.hub.download_url_to_file` is used to download it.
Args:
dl_kwargs (dictionary of keyword arguments):
Passed to :func:`torch.hub.download_url_to_file`.
Returns:
TTSTextProcessor:
A callable which takes a string or a list of strings as input and
returns a Tensor of encoded texts and a Tensor of valid lengths.
The object also has a ``tokens`` property, which allows you to recover the
tokenized form.
Example - Character-based
>>> text = [
>>> "Hello World!",
>>> "Text-to-speech!",
>>> ]
>>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_CHAR_LJSPEECH
>>> processor = bundle.get_text_processor()
>>> input, lengths = processor(text)
>>>
>>> print(input)
tensor([[19, 16, 23, 23, 26, 11, 34, 26, 29, 23, 15, 2, 0, 0, 0],
[31, 16, 35, 31, 1, 31, 26, 1, 30, 27, 16, 16, 14, 19, 2]],
dtype=torch.int32)
>>>
>>> print(lengths)
tensor([12, 15], dtype=torch.int32)
>>>
>>> print([processor.tokens[i] for i in input[0, :lengths[0]]])
['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!']
>>> print([processor.tokens[i] for i in input[1, :lengths[1]]])
['t', 'e', 'x', 't', '-', 't', 'o', '-', 's', 'p', 'e', 'e', 'c', 'h', '!']
Example - Phoneme-based
>>> text = [
>>> "Hello, T T S !",
>>> "Text-to-speech!",
>>> ]
>>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH
>>> processor = bundle.get_text_processor()
Downloading:
100%|███████████████████████████████| 63.6M/63.6M [00:04<00:00, 15.3MB/s]
>>> input, lengths = processor(text)
>>>
>>> print(input)
tensor([[54, 20, 65, 69, 11, 92, 44, 65, 38, 2, 0, 0, 0, 0],
[81, 40, 64, 79, 81, 1, 81, 20, 1, 79, 77, 59, 37, 2]],
dtype=torch.int32)
>>>
>>> print(lengths)
tensor([10, 14], dtype=torch.int32)
>>>
>>> print([processor.tokens[i] for i in input[0]])
['HH', 'AH', 'L', 'OW', ' ', 'W', 'ER', 'L', 'D', '!', '_', '_', '_', '_']
>>> print([processor.tokens[i] for i in input[1]])
['T', 'EH', 'K', 'S', 'T', '-', 'T', 'AH', '-', 'S', 'P', 'IY', 'CH', '!']
"""
@abstractmethod
def get_vocoder(self, *, dl_kwargs=None) -> Vocoder:
# Overriding the signature so that the return type is correct on Sphinx
"""get_vocoder(self, *, dl_kwargs=None) -> torchaudio.pipelines.Tacotron2TTSBundle.Vocoder
Create a vocoder module, based on either WaveRNN or GriffinLim.
If a pre-trained weight file is necessary,
:func:`torch.hub.load_state_dict_from_url` is used to download it.
Args:
dl_kwargs (dictionary of keyword arguments):
Passed to :func:`torch.hub.load_state_dict_from_url`.
Returns:
Callable[[Tensor, Optional[Tensor]], Tuple[Tensor, Optional[Tensor]]]:
A vocoder module, which takes spectrogram Tensor and an optional
length Tensor, then returns resulting waveform Tensor and an optional
length Tensor.
"""
@abstractmethod
def get_tacotron2(self, *, dl_kwargs=None) -> Tacotron2:
# Overriding the signature so that the return type is correct on Sphinx
"""get_tacotron2(self, *, dl_kwargs=None) -> torchaudio.models.Tacotron2
Create a Tacotron2 model with pre-trained weight.
Args:
dl_kwargs (dictionary of keyword arguments):
Passed to :func:`torch.hub.load_state_dict_from_url`.
Returns:
Tacotron2:
The resulting model.
"""
|
from .interface import Tacotron2TTSBundle
from .impl import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
)
__all__ = [
'Tacotron2TTSBundle',
'TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH',
'TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH',
'TACOTRON2_WAVERNN_CHAR_LJSPEECH',
'TACOTRON2_WAVERNN_PHONE_LJSPEECH',
]
|
import os
import logging
import torch
from torchaudio._internal import (
download_url_to_file,
module_utils as _mod_utils,
)
def _get_chars():
return (
'_',
'-',
'!',
"'",
'(',
')',
',',
'.',
':',
';',
'?',
' ',
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
)
def _get_phones():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"AA",
"AA0",
"AA1",
"AA2",
"AE",
"AE0",
"AE1",
"AE2",
"AH",
"AH0",
"AH1",
"AH2",
"AO",
"AO0",
"AO1",
"AO2",
"AW",
"AW0",
"AW1",
"AW2",
"AY",
"AY0",
"AY1",
"AY2",
"B",
"CH",
"D",
"DH",
"EH",
"EH0",
"EH1",
"EH2",
"ER",
"ER0",
"ER1",
"ER2",
"EY",
"EY0",
"EY1",
"EY2",
"F",
"G",
"HH",
"IH",
"IH0",
"IH1",
"IH2",
"IY",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"NG",
"OW",
"OW0",
"OW1",
"OW2",
"OY",
"OY0",
"OY1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH",
"UH0",
"UH1",
"UH2",
"UW",
"UW0",
"UW1",
"UW2",
"V",
"W",
"Y",
"Z",
"ZH"
)
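# Note: the sizes of these token tables are what determine the ``n_symbols``
# values passed to ``_get_taco_params`` when the bundles are instantiated
# (38 for the character-based pipelines, 96 for the phoneme-based ones).
# A quick sanity-check sketch:
def _example_check_symbol_counts():
    assert len(_get_chars()) == 38   # matches _get_taco_params(n_symbols=38)
    assert len(_get_phones()) == 96  # matches _get_taco_params(n_symbols=96)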
def _to_tensor(indices):
lengths = torch.tensor([len(i) for i in indices], dtype=torch.int32)
values = [torch.tensor(i) for i in indices]
values = torch.nn.utils.rnn.pad_sequence(values, batch_first=True)
return values, lengths
def _load_phonemizer(file, dl_kwargs):
if not _mod_utils.is_module_available('dp'):
raise RuntimeError('DeepPhonemizer is not installed. Please install it.')
from dp.phonemizer import Phonemizer
# By default, dp issues DEBUG level log.
logger = logging.getLogger('dp')
orig_level = logger.level
logger.setLevel(logging.INFO)
try:
url = f'https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/DeepPhonemizer/{file}'
directory = os.path.join(torch.hub.get_dir(), 'checkpoints')
os.makedirs(directory, exist_ok=True)
path = os.path.join(directory, file)
if not os.path.exists(path):
dl_kwargs = {} if dl_kwargs is None else dl_kwargs
download_url_to_file(url, path, **dl_kwargs)
return Phonemizer.from_checkpoint(path)
finally:
logger.setLevel(orig_level)
def _unnormalize_waveform(waveform: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]"""
waveform = torch.clamp(waveform, -1, 1)
waveform = (waveform + 1.0) * (2 ** bits - 1) / 2
return torch.clamp(waveform, 0, 2 ** bits - 1).int()
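# Illustrative sketch of the [-1, 1] -> [0, 2 ** bits - 1] mapping used before
# mu-law decoding in the WaveRNN vocoder: for 8-bit labels, -1.0 maps to 0,
# 0.0 to 127 (the .5 fraction is truncated by ``.int()``), and 1.0 to 255.
def _example_unnormalize_waveform():
    labels = _unnormalize_waveform(torch.tensor([-1.0, 0.0, 1.0]), bits=8)
    assert labels.tolist() == [0, 127, 255]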
def _get_taco_params(n_symbols):
return {
'mask_padding': False,
'n_mels': 80,
'n_frames_per_step': 1,
'symbol_embedding_dim': 512,
'encoder_embedding_dim': 512,
'encoder_n_convolution': 3,
'encoder_kernel_size': 5,
'decoder_rnn_dim': 1024,
'decoder_max_step': 2000,
'decoder_dropout': 0.1,
'decoder_early_stopping': True,
'attention_rnn_dim': 1024,
'attention_hidden_dim': 128,
'attention_location_n_filter': 32,
'attention_location_kernel_size': 31,
'attention_dropout': 0.1,
'prenet_dim': 256,
'postnet_n_convolution': 5,
'postnet_kernel_size': 5,
'postnet_embedding_dim': 512,
'gate_threshold': 0.5,
'n_symbol': n_symbols,
}
def _get_wrnn_params():
return {
'upsample_scales': [5, 5, 11],
'n_classes': 2 ** 8, # n_bits = 8
'hop_length': 275,
'n_res_block': 10,
'n_rnn': 512,
'n_fc': 512,
'kernel_size': 5,
'n_freq': 80,
'n_hidden': 128,
'n_output': 128
}
|
from dataclasses import dataclass
import re
from typing import Union, Optional, Dict, Any, Tuple, List
import torch
from torch import Tensor
from torchaudio._internal import load_state_dict_from_url
from torchaudio.models import Tacotron2, WaveRNN
from torchaudio.functional import mu_law_decoding
from torchaudio.transforms import InverseMelScale, GriffinLim
from . import utils
from .interface import Tacotron2TTSBundle
__all__ = []
_BASE_URL = 'https://download.pytorch.org/torchaudio/models'
################################################################################
# Pipeline implementation - Text Processor
################################################################################
class _EnglishCharProcessor(Tacotron2TTSBundle.TextProcessor):
def __init__(self):
super().__init__()
self._tokens = utils._get_chars()
self._mapping = {s: i for i, s in enumerate(self._tokens)}
@property
def tokens(self):
return self._tokens
def __call__(self, texts: Union[str, List[str]]) -> Tuple[Tensor, Tensor]:
if isinstance(texts, str):
texts = [texts]
indices = [[self._mapping[c] for c in t.lower() if c in self._mapping] for t in texts]
return utils._to_tensor(indices)
class _EnglishPhoneProcessor(Tacotron2TTSBundle.TextProcessor):
def __init__(self, *, dl_kwargs=None):
super().__init__()
self._tokens = utils._get_phones()
self._mapping = {p: i for i, p in enumerate(self._tokens)}
self._phonemizer = utils._load_phonemizer(
'en_us_cmudict_forward.pt', dl_kwargs=dl_kwargs)
self._pattern = r"(\[[A-Z]+?\]|[_!'(),.:;? -])"
@property
def tokens(self):
return self._tokens
def __call__(self, texts: Union[str, List[str]]) -> Tuple[Tensor, Tensor]:
if isinstance(texts, str):
texts = [texts]
indices = []
for phones in self._phonemizer(texts, lang='en_us'):
# '[F][UW][B][AA][R]!' -> ['F', 'UW', 'B', 'AA', 'R', '!']
ret = [re.sub(r'[\[\]]', '', r) for r in re.findall(self._pattern, phones)]
indices.append([self._mapping[p] for p in ret])
return utils._to_tensor(indices)
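# Illustrative sketch of the splitting step above: the regex breaks a
# DeepPhonemizer output string into phoneme and punctuation tokens before the
# surrounding brackets are stripped. Runs standalone, without DeepPhonemizer.
def _example_split_phoneme_string():
    pattern = r"(\[[A-Z]+?\]|[_!'(),.:;? -])"
    phones = '[F][UW][B][AA][R]!'
    tokens = [re.sub(r'[\[\]]', '', m) for m in re.findall(pattern, phones)]
    assert tokens == ['F', 'UW', 'B', 'AA', 'R', '!']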
################################################################################
# Pipeline implementation - Vocoder
################################################################################
class _WaveRNNVocoder(torch.nn.Module, Tacotron2TTSBundle.Vocoder):
def __init__(
self,
model: WaveRNN,
min_level_db: Optional[float] = -100
):
super().__init__()
self._sample_rate = 22050
self._model = model
self._min_level_db = min_level_db
@property
def sample_rate(self):
return self._sample_rate
def forward(self, mel_spec, lengths=None):
mel_spec = torch.exp(mel_spec)
mel_spec = 20 * torch.log10(torch.clamp(mel_spec, min=1e-5))
if self._min_level_db is not None:
mel_spec = (self._min_level_db - mel_spec) / self._min_level_db
mel_spec = torch.clamp(mel_spec, min=0, max=1)
waveform, lengths = self._model.infer(mel_spec, lengths)
waveform = utils._unnormalize_waveform(waveform, self._model.n_bits)
waveform = mu_law_decoding(waveform, self._model.n_classes)
waveform = waveform.squeeze(1)
return waveform, lengths
class _GriffinLimVocoder(torch.nn.Module, Tacotron2TTSBundle.Vocoder):
def __init__(self):
super().__init__()
self._sample_rate = 22050
self._inv_mel = InverseMelScale(
n_stft=(1024 // 2 + 1),
n_mels=80,
sample_rate=self.sample_rate,
f_min=0.,
f_max=8000.,
mel_scale="slaney",
norm='slaney',
)
self._griffin_lim = GriffinLim(
n_fft=1024,
power=1,
hop_length=256,
win_length=1024,
)
@property
def sample_rate(self):
return self._sample_rate
def forward(self, mel_spec, lengths=None):
mel_spec = torch.exp(mel_spec)
mel_spec = mel_spec.clone().detach().requires_grad_(True)
spec = self._inv_mel(mel_spec)
spec = spec.detach().requires_grad_(False)
waveforms = self._griffin_lim(spec)
return waveforms, lengths
################################################################################
# Bundle classes mixins
################################################################################
class _CharMixin:
def get_text_processor(self) -> Tacotron2TTSBundle.TextProcessor:
return _EnglishCharProcessor()
class _PhoneMixin:
def get_text_processor(self, *, dl_kwargs=None) -> Tacotron2TTSBundle.TextProcessor:
return _EnglishPhoneProcessor(dl_kwargs=dl_kwargs)
@dataclass
class _Tacotron2Mixin:
_tacotron2_path: str
_tacotron2_params: Dict[str, Any]
def get_tacotron2(self, *, dl_kwargs=None) -> Tacotron2:
model = Tacotron2(**self._tacotron2_params)
url = f'{_BASE_URL}/{self._tacotron2_path}'
dl_kwargs = {} if dl_kwargs is None else dl_kwargs
state_dict = load_state_dict_from_url(url, **dl_kwargs)
model.load_state_dict(state_dict)
model.eval()
return model
@dataclass
class _WaveRNNMixin:
_wavernn_path: Optional[str]
_wavernn_params: Optional[Dict[str, Any]]
def get_vocoder(self, *, dl_kwargs=None):
wavernn = self._get_wavernn(dl_kwargs=dl_kwargs)
return _WaveRNNVocoder(wavernn)
def _get_wavernn(self, *, dl_kwargs=None):
model = WaveRNN(**self._wavernn_params)
url = f'{_BASE_URL}/{self._wavernn_path}'
dl_kwargs = {} if dl_kwargs is None else dl_kwargs
state_dict = load_state_dict_from_url(url, **dl_kwargs)
model.load_state_dict(state_dict)
model.eval()
return model
class _GriffinLimMixin:
def get_vocoder(self, **_):
return _GriffinLimVocoder()
################################################################################
# Bundle classes
################################################################################
@dataclass
class _Tacotron2WaveRNNCharBundle(_WaveRNNMixin, _Tacotron2Mixin, _CharMixin, Tacotron2TTSBundle):
pass
@dataclass
class _Tacotron2WaveRNNPhoneBundle(_WaveRNNMixin, _Tacotron2Mixin, _PhoneMixin, Tacotron2TTSBundle):
pass
@dataclass
class _Tacotron2GriffinLimCharBundle(_GriffinLimMixin, _Tacotron2Mixin, _CharMixin, Tacotron2TTSBundle):
pass
@dataclass
class _Tacotron2GriffinLimPhoneBundle(_GriffinLimMixin, _Tacotron2Mixin, _PhoneMixin, Tacotron2TTSBundle):
pass
################################################################################
# Instantiate bundle objects
################################################################################
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH = _Tacotron2GriffinLimCharBundle(
_tacotron2_path='tacotron2_english_characters_1500_epochs_ljspeech.pth',
_tacotron2_params=utils._get_taco_params(n_symbols=38),
)
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH.__doc__ = (
'''Character-based TTS pipeline with :py:class:`torchaudio.models.Tacotron2` and
:py:class:`torchaudio.transforms.GriffinLim`.
The text processor encodes the input texts character-by-character.
Tacotron2 was trained on *LJSpeech* [:footcite:`ljspeech17`] for 1,500 epochs.
You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_tacotron2>`__.
The default parameters were used.
The vocoder is based on :py:class:`torchaudio.transforms.GriffinLim`.
Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage.
Example - "Hello world! T T S stands for Text to Speech!"
.. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH.png
:alt: Spectrogram generated by Tacotron2
.. raw:: html
<audio controls="controls">
<source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH.wav" type="audio/wav">
Your browser does not support the <code>audio</code> element.
</audio>
Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired,"
.. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH_v2.png
:alt: Spectrogram generated by Tacotron2
.. raw:: html
<audio controls="controls">
<source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH_v2.wav" type="audio/wav">
Your browser does not support the <code>audio</code> element.
</audio>
''') # noqa: E501
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH = _Tacotron2GriffinLimPhoneBundle(
_tacotron2_path='tacotron2_english_phonemes_1500_epochs_ljspeech.pth',
_tacotron2_params=utils._get_taco_params(n_symbols=96),
)
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH.__doc__ = (
'''Phoneme-based TTS pipeline with :py:class:`torchaudio.models.Tacotron2` and
:py:class:`torchaudio.transforms.GriffinLim`.
The text processor encodes the input texts based on phonemes.
It uses `DeepPhonemizer <https://github.com/as-ideas/DeepPhonemizer>`__ to convert
graphemes to phonemes.
The model (*en_us_cmudict_forward*) was trained on
`CMUDict <http://www.speech.cs.cmu.edu/cgi-bin/cmudict>`__.
Tacotron2 was trained on *LJSpeech* [:footcite:`ljspeech17`] for 1,500 epochs.
You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_tacotron2>`__.
The text processor is set to the *"english_phonemes"*.
The vocoder is based on :py:class:`torchaudio.transforms.GriffinLim`.
Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage.
Example - "Hello world! T T S stands for Text to Speech!"
.. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH.png
:alt: Spectrogram generated by Tacotron2
.. raw:: html
<audio controls="controls">
<source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH.wav" type="audio/wav">
Your browser does not support the <code>audio</code> element.
</audio>
Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired,"
.. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH_v2.png
:alt: Spectrogram generated by Tacotron2
.. raw:: html
<audio controls="controls">
<source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH_v2.wav" type="audio/wav">
Your browser does not support the <code>audio</code> element.
</audio>
''') # noqa: E501
TACOTRON2_WAVERNN_CHAR_LJSPEECH = _Tacotron2WaveRNNCharBundle(
_tacotron2_path='tacotron2_english_characters_1500_epochs_wavernn_ljspeech.pth',
_tacotron2_params=utils._get_taco_params(n_symbols=38),
_wavernn_path='wavernn_10k_epochs_8bits_ljspeech.pth',
_wavernn_params=utils._get_wrnn_params(),
)
TACOTRON2_WAVERNN_CHAR_LJSPEECH.__doc__ = (
'''Character-based TTS pipeline with :py:class:`torchaudio.models.Tacotron2` and
:py:class:`torchaudio.models.WaveRNN`.
The text processor encodes the input texts character-by-character.
Tacotron2 was trained on *LJSpeech* [:footcite:`ljspeech17`] for 1,500 epochs.
You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_tacotron2>`__.
The following parameters were used: ``win_length=1100``, ``hop_length=275``, ``n_fft=2048``,
``mel_fmin=40``, and ``mel_fmax=11025``.
The vocoder is based on :py:class:`torchaudio.models.WaveRNN`.
It was trained on 8-bit depth waveforms of *LJSpeech* [:footcite:`ljspeech17`] for 10,000 epochs.
You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_wavernn>`__.
Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage.
Example - "Hello world! T T S stands for Text to Speech!"
.. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_CHAR_LJSPEECH.png
:alt: Spectrogram generated by Tacotron2
.. raw:: html
<audio controls="controls">
<source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_CHAR_LJSPEECH.wav" type="audio/wav">
Your browser does not support the <code>audio</code> element.
</audio>
Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired,"
.. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_CHAR_LJSPEECH_v2.png
:alt: Spectrogram generated by Tacotron2
.. raw:: html
<audio controls="controls">
<source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_CHAR_LJSPEECH_v2.wav" type="audio/wav">
Your browser does not support the <code>audio</code> element.
</audio>
''') # noqa: E501
TACOTRON2_WAVERNN_PHONE_LJSPEECH = _Tacotron2WaveRNNPhoneBundle(
_tacotron2_path='tacotron2_english_phonemes_1500_epochs_wavernn_ljspeech.pth',
_tacotron2_params=utils._get_taco_params(n_symbols=96),
_wavernn_path='wavernn_10k_epochs_8bits_ljspeech.pth',
_wavernn_params=utils._get_wrnn_params(),
)
TACOTRON2_WAVERNN_PHONE_LJSPEECH.__doc__ = (
'''Phoneme-based TTS pipeline with :py:class:`torchaudio.models.Tacotron2` and
:py:class:`torchaudio.models.WaveRNN`.
The text processor encodes the input texts based on phonemes.
It uses `DeepPhonemizer <https://github.com/as-ideas/DeepPhonemizer>`__ to convert
graphemes to phonemes.
The model (*en_us_cmudict_forward*) was trained on
`CMUDict <http://www.speech.cs.cmu.edu/cgi-bin/cmudict>`__.
Tacotron2 was trained on *LJSpeech* [:footcite:`ljspeech17`] for 1,500 epochs.
You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_tacotron2>`__.
The following parameters were used: ``win_length=1100``, ``hop_length=275``, ``n_fft=2048``,
``mel_fmin=40``, and ``mel_fmax=11025``.
The vocoder is based on :py:class:`torchaudio.models.WaveRNN`.
It was trained on 8-bit depth waveforms of *LJSpeech* [:footcite:`ljspeech17`] for 10,000 epochs.
You can find the training script `here <https://github.com/pytorch/audio/tree/main/examples/pipeline_wavernn>`__.
Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage.
Example - "Hello world! T T S stands for Text to Speech!"
.. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_PHONE_LJSPEECH.png
:alt: Spectrogram generated by Tacotron2
.. raw:: html
<audio controls="controls">
<source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_PHONE_LJSPEECH.wav" type="audio/wav">
Your browser does not support the <code>audio</code> element.
</audio>
Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired,"
.. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_PHONE_LJSPEECH_v2.png
:alt: Spectrogram generated by Tacotron2
.. raw:: html
<audio controls="controls">
<source src="https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_PHONE_LJSPEECH_v2.wav" type="audio/wav">
Your browser does not support the <code>audio</code> element.
</audio>
''') # noqa: E501
|
from . import (
sox_utils,
)
from torchaudio._internal import module_utils as _mod_utils
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(1)
|
from typing import List, Dict
import torch
from torchaudio._internal import module_utils as _mod_utils
@_mod_utils.requires_sox()
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): Seed value. Valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_seed(seed)
@_mod_utils.requires_sox()
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_verbosity(verbosity)
@_mod_utils.requires_sox()
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_buffer_size(buffer_size)
@_mod_utils.requires_sox()
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_use_threads(use_threads)
@_mod_utils.requires_sox()
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(torch.ops.torchaudio.sox_utils_list_effects())
@_mod_utils.requires_sox()
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_read_formats()
@_mod_utils.requires_sox()
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_write_formats()
@_mod_utils.requires_sox()
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return torch.ops.torchaudio.sox_utils_get_buffer_size()
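# Illustrative usage sketch: configuring the libsox runtime shared by the
# sox-based I/O and effect chains. Assumes torchaudio was built with sox
# support; otherwise the decorated functions raise at call time.
def _example_configure_sox():
    set_verbosity(2)          # show warnings in addition to failure messages
    set_buffer_size(16384)    # buffer size in bytes for effect-chain processing
    effects = list_effects()  # e.g. maps an effect name to its usage string
    return sorted(effects)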
|
# flake8: noqa
from . import utils
from .utils import (
list_audio_backends,
get_audio_backend,
set_audio_backend,
)
utils._init_audio_backend()
|
import os
from typing import Tuple, Optional
import torch
from torchaudio._internal import (
module_utils as _mod_utils,
)
import torchaudio
from .common import AudioMetaData
@_mod_utils.requires_sox()
def info(
filepath: str,
format: Optional[str] = None,
) -> AudioMetaData:
"""Get signal information of an audio file.
Args:
filepath (path-like object or file-like object):
Source of audio data. When the function is not compiled by TorchScript
(e.g. via ``torch.jit.script``), the following types are accepted:
* ``path-like``: file path
* ``file-like``: Object with ``read(size: int) -> bytes`` method,
which returns byte string of at most ``size`` length.
When the function is compiled by TorchScript, only ``str`` type is allowed.
Note:
* When the input type is file-like object, this function cannot
get the correct length (``num_samples``) for certain formats,
such as ``mp3`` and ``vorbis``.
In this case, the value of ``num_samples`` is ``0``.
* This argument is intentionally annotated as ``str`` only due to
TorchScript compiler compatibility.
format (str or None, optional):
Override the format detection with the given format.
Providing the argument might help when libsox cannot infer the format
from the header or the file extension.
Returns:
AudioMetaData: Metadata of the given audio.
"""
if not torch.jit.is_scripting():
if hasattr(filepath, 'read'):
sinfo = torchaudio._torchaudio.get_info_fileobj(filepath, format)
return AudioMetaData(*sinfo)
filepath = os.fspath(filepath)
sinfo = torch.ops.torchaudio.sox_io_get_info(filepath, format)
return AudioMetaData(*sinfo)
@_mod_utils.requires_sox()
def load(
filepath: str,
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
"""Load audio data from file.
Note:
This function can handle all the codecs that the underlying libsox can handle;
however, it is tested on the following formats:
* WAV, AMB
* 32-bit floating-point
* 32-bit signed integer
* 24-bit signed integer
* 16-bit signed integer
* 8-bit unsigned integer (WAV only)
* MP3
* FLAC
* OGG/VORBIS
* OPUS
* SPHERE
* AMR-NB
To load ``MP3``, ``FLAC``, ``OGG/VORBIS``, ``OPUS`` and other codecs that ``libsox`` does not
handle natively, your installation of ``torchaudio`` has to be linked to ``libsox``
and the corresponding codec libraries, such as ``libmad`` or ``libmp3lame``.
By default (``normalize=True``, ``channels_first=True``), this function returns Tensor with
``float32`` dtype and the shape of `[channel, time]`.
The samples are normalized to fit in the range of ``[-1.0, 1.0]``.
When the input format is WAV with integer type, such as 32-bit signed integer, 16-bit
signed integer, 24-bit signed integer, and 8-bit unsigned integer, by providing ``normalize=False``,
this function can return integer Tensor, where the samples are expressed within the whole range
of the corresponding dtype, that is, ``int32`` tensor for 32-bit signed PCM,
``int16`` for 16-bit signed PCM and ``uint8`` for 8-bit unsigned PCM. Since torch does not
support ``int24`` dtype, 24-bit signed PCM are converted to ``int32`` tensors.
``normalize`` parameter has no effect on 32-bit floating-point WAV and other formats, such as
``flac`` and ``mp3``.
For these formats, this function always returns ``float32`` Tensor with values normalized to
``[-1.0, 1.0]``.
Args:
filepath (path-like object or file-like object):
            Source of audio data. When the function is not compiled by TorchScript
            (e.g. ``torch.jit.script``), the following types are accepted;
* ``path-like``: file path
* ``file-like``: Object with ``read(size: int) -> bytes`` method,
which returns byte string of at most ``size`` length.
When the function is compiled by TorchScript, only ``str`` type is allowed.
Note: This argument is intentionally annotated as ``str`` only due to
TorchScript compiler compatibility.
        frame_offset (int, optional):
            Number of frames to skip before starting to read data.
num_frames (int, optional):
Maximum number of frames to read. ``-1`` reads all the remaining samples,
starting from ``frame_offset``.
            This function may return fewer frames than requested if there are not enough
            frames in the given file.
normalize (bool, optional):
            When ``True``, this function always returns ``float32``, and sample values are
normalized to ``[-1.0, 1.0]``.
If input file is integer WAV, giving ``False`` will change the resulting Tensor type to
integer type.
This argument has no effect for formats other than integer WAV type.
channels_first (bool, optional):
When True, the returned Tensor has dimension `[channel, time]`.
Otherwise, the returned Tensor's dimension is `[time, channel]`.
format (str or None, optional):
Override the format detection with the given format.
            Providing the argument might help when libsox cannot infer the format
            from header or extension.
Returns:
(torch.Tensor, int): Resulting Tensor and sample rate.
If the input file has integer wav format and normalization is off, then it has
integer type, else ``float32`` type. If ``channels_first=True``, it has
`[channel, time]` else `[time, channel]`.
"""
if not torch.jit.is_scripting():
if hasattr(filepath, 'read'):
return torchaudio._torchaudio.load_audio_fileobj(
filepath, frame_offset, num_frames, normalize, channels_first, format)
filepath = os.fspath(filepath)
return torch.ops.torchaudio.sox_io_load_audio_file(
filepath, frame_offset, num_frames, normalize, channels_first, format)
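# A minimal usage sketch; "sample.wav" and the one-second offsets below are
# hypothetical values chosen for illustration.
def _example_load_usage():
    """Hypothetical helper showing normalized and raw-integer loading."""
    # Default call: float32 Tensor in [-1.0, 1.0] with shape [channel, time].
    waveform, sample_rate = load("sample.wav")
    # Skip the first second, read the next second, and keep the native integer
    # dtype of the file (only meaningful for integer WAV/AMB input).
    chunk, _ = load(
        "sample.wav",
        frame_offset=sample_rate,
        num_frames=sample_rate,
        normalize=False,
    )
    return waveform, chunk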
@_mod_utils.requires_sox()
def save(
filepath: str,
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
compression: Optional[float] = None,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
):
"""Save audio data to file.
Args:
filepath (str or pathlib.Path): Path to save file.
This function also handles ``pathlib.Path`` objects, but is annotated
as ``str`` for TorchScript compiler compatibility.
        src (torch.Tensor): Audio data to save. Must be a 2D tensor.
sample_rate (int): sampling rate
channels_first (bool, optional): If ``True``, the given tensor is interpreted as `[channel, time]`,
otherwise `[time, channel]`.
compression (float or None, optional): Used for formats other than WAV.
This corresponds to ``-C`` option of ``sox`` command.
``"mp3"``
Either bitrate (in ``kbps``) with quality factor, such as ``128.2``, or
VBR encoding with quality factor such as ``-4.2``. Default: ``-4.5``.
``"flac"``
Whole number from ``0`` to ``8``. ``8`` is default and highest compression.
``"ogg"``, ``"vorbis"``
Number from ``-1`` to ``10``; ``-1`` is the highest compression
and lowest quality. Default: ``3``.
See the detail at http://sox.sourceforge.net/soxformat.html.
format (str or None, optional): Override the audio format.
            When ``filepath`` argument is path-like object, audio format is inferred from
            file extension. If the file extension is missing or different, you can specify the
correct format with this argument.
When ``filepath`` argument is file-like object, this argument is required.
Valid values are ``"wav"``, ``"mp3"``, ``"ogg"``, ``"vorbis"``, ``"amr-nb"``,
``"amb"``, ``"flac"``, ``"sph"``, ``"gsm"``, and ``"htk"``.
encoding (str or None, optional): Changes the encoding for the supported formats.
            This argument is effective only for supported formats, such as ``"wav"``, ``"amb"``
and ``"sph"``. Valid values are;
- ``"PCM_S"`` (signed integer Linear PCM)
- ``"PCM_U"`` (unsigned integer Linear PCM)
- ``"PCM_F"`` (floating point PCM)
- ``"ULAW"`` (mu-law)
- ``"ALAW"`` (a-law)
Default values
If not provided, the default value is picked based on ``format`` and ``bits_per_sample``.
``"wav"``, ``"amb"``
- | If both ``encoding`` and ``bits_per_sample`` are not provided, the ``dtype`` of the
| Tensor is used to determine the default value.
- ``"PCM_U"`` if dtype is ``uint8``
- ``"PCM_S"`` if dtype is ``int16`` or ``int32``
- ``"PCM_F"`` if dtype is ``float32``
- ``"PCM_U"`` if ``bits_per_sample=8``
- ``"PCM_S"`` otherwise
``"sph"`` format;
- the default value is ``"PCM_S"``
bits_per_sample (int or None, optional): Changes the bit depth for the supported formats.
When ``format`` is one of ``"wav"``, ``"flac"``, ``"sph"``, or ``"amb"``, you can change the
bit depth. Valid values are ``8``, ``16``, ``32`` and ``64``.
Default Value;
If not provided, the default values are picked based on ``format`` and ``"encoding"``;
``"wav"``, ``"amb"``;
- | If both ``encoding`` and ``bits_per_sample`` are not provided, the ``dtype`` of the
| Tensor is used.
- ``8`` if dtype is ``uint8``
- ``16`` if dtype is ``int16``
- ``32`` if dtype is ``int32`` or ``float32``
- ``8`` if ``encoding`` is ``"PCM_U"``, ``"ULAW"`` or ``"ALAW"``
- ``16`` if ``encoding`` is ``"PCM_S"``
- ``32`` if ``encoding`` is ``"PCM_F"``
``"flac"`` format;
- the default value is ``24``
``"sph"`` format;
- ``16`` if ``encoding`` is ``"PCM_U"``, ``"PCM_S"``, ``"PCM_F"`` or not provided.
- ``8`` if ``encoding`` is ``"ULAW"`` or ``"ALAW"``
``"amb"`` format;
- ``8`` if ``encoding`` is ``"PCM_U"``, ``"ULAW"`` or ``"ALAW"``
- ``16`` if ``encoding`` is ``"PCM_S"`` or not provided.
- ``32`` if ``encoding`` is ``"PCM_F"``
Supported formats/encodings/bit depth/compression are;
``"wav"``, ``"amb"``
- 32-bit floating-point PCM
- 32-bit signed integer PCM
- 24-bit signed integer PCM
- 16-bit signed integer PCM
- 8-bit unsigned integer PCM
- 8-bit mu-law
- 8-bit a-law
Note: Default encoding/bit depth is determined by the dtype of the input Tensor.
``"mp3"``
            Fixed bit rate (such as 128 kbps) and variable bit rate compression.
Default: VBR with high quality.
``"flac"``
- 8-bit
- 16-bit
- 24-bit (default)
``"ogg"``, ``"vorbis"``
            - Different quality levels. Default: approx. 112 kbps
``"sph"``
- 8-bit signed integer PCM
- 16-bit signed integer PCM
- 24-bit signed integer PCM
- 32-bit signed integer PCM (default)
- 8-bit mu-law
- 8-bit a-law
- 16-bit a-law
- 24-bit a-law
- 32-bit a-law
``"amr-nb"``
Bitrate ranging from 4.75 kbit/s to 12.2 kbit/s. Default: 4.75 kbit/s
``"gsm"``
Lossy Speech Compression, CPU intensive.
``"htk"``
Uses a default single-channel 16-bit PCM format.
Note:
To save into formats that ``libsox`` does not handle natively, (such as ``"mp3"``,
``"flac"``, ``"ogg"`` and ``"vorbis"``), your installation of ``torchaudio`` has
to be linked to ``libsox`` and corresponding codec libraries such as ``libmad``
or ``libmp3lame`` etc.
"""
if not torch.jit.is_scripting():
if hasattr(filepath, 'write'):
torchaudio._torchaudio.save_audio_fileobj(
filepath, src, sample_rate, channels_first, compression,
format, encoding, bits_per_sample)
return
filepath = os.fspath(filepath)
torch.ops.torchaudio.sox_io_save_audio_file(
filepath, src, sample_rate, channels_first, compression, format, encoding, bits_per_sample)
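# A minimal usage sketch; the output paths and the 16 kHz sine tone are
# hypothetical values chosen for illustration.
def _example_save_usage():
    """Hypothetical helper saving a 440 Hz tone as 16-bit PCM WAV and as FLAC."""
    import math
    sample_rate = 16000
    t = torch.arange(sample_rate, dtype=torch.float32) / sample_rate
    waveform = torch.sin(2 * math.pi * 440.0 * t).unsqueeze(0)  # shape [1, time]
    save("tone.wav", waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)
    save("tone.flac", waveform, sample_rate, bits_per_sample=24)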
|
class AudioMetaData:
"""Return type of ``torchaudio.info`` function.
This class is used by :ref:`"sox_io" backend<sox_io_backend>` and
:ref:`"soundfile" backend with the new interface<soundfile_backend>`.
:ivar int sample_rate: Sample rate
:ivar int num_frames: The number of frames
:ivar int num_channels: The number of channels
:ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,
or when it cannot be accurately inferred.
:ivar str encoding: Audio encoding
The values encoding can take are one of the following:
* ``PCM_S``: Signed integer linear PCM
* ``PCM_U``: Unsigned integer linear PCM
* ``PCM_F``: Floating point linear PCM
* ``FLAC``: Flac, Free Lossless Audio Codec
* ``ULAW``: Mu-law
* ``ALAW``: A-law
* ``MP3`` : MP3, MPEG-1 Audio Layer III
* ``VORBIS``: OGG Vorbis
        * ``AMR_WB``: Adaptive Multi-Rate Wideband
        * ``AMR_NB``: Adaptive Multi-Rate Narrowband
* ``OPUS``: Opus
* ``HTK``: Single channel 16-bit PCM
        * ``UNKNOWN``: None of the above
"""
def __init__(
self,
sample_rate: int,
num_frames: int,
num_channels: int,
bits_per_sample: int,
encoding: str,
):
self.sample_rate = sample_rate
self.num_frames = num_frames
self.num_channels = num_channels
self.bits_per_sample = bits_per_sample
self.encoding = encoding
def __str__(self):
return (
f"AudioMetaData("
f"sample_rate={self.sample_rate}, "
f"num_frames={self.num_frames}, "
f"num_channels={self.num_channels}, "
f"bits_per_sample={self.bits_per_sample}, "
f"encoding={self.encoding}"
f")"
)
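# A minimal sketch: the backends normally construct this container, but it can
# be built directly; the values below are made up for illustration.
def _example_audiometadata():
    meta = AudioMetaData(
        sample_rate=16000,
        num_frames=32000,
        num_channels=1,
        bits_per_sample=16,
        encoding="PCM_S",
    )
    print(meta)  # AudioMetaData(sample_rate=16000, num_frames=32000, ...)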
|
"""Defines utilities for switching audio backends"""
import warnings
from typing import Optional, List
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import (
no_backend,
sox_io_backend,
soundfile_backend,
)
__all__ = [
'list_audio_backends',
'get_audio_backend',
'set_audio_backend',
]
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
List[str]: The list of available backends.
"""
backends = []
if _mod_utils.is_module_available('soundfile'):
backends.append('soundfile')
if _mod_utils.is_sox_available():
backends.append('sox_io')
return backends
def set_audio_backend(backend: Optional[str]):
"""Set the backend for I/O operation
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(
f'Backend "{backend}" is not one of '
f'available backends: {list_audio_backends()}.')
if backend is None:
module = no_backend
elif backend == 'sox_io':
module = sox_io_backend
elif backend == 'soundfile':
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ['save', 'load', 'info']:
setattr(torchaudio, func, getattr(module, func))
def _init_audio_backend():
backends = list_audio_backends()
if 'sox_io' in backends:
set_audio_backend('sox_io')
elif 'soundfile' in backends:
set_audio_backend('soundfile')
else:
warnings.warn('No audio backend is available.')
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend
Returns:
Optional[str]: The name of the current backend or ``None`` if no backend is assigned.
"""
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_io_backend.load:
return 'sox_io'
if torchaudio.load == soundfile_backend.load:
return 'soundfile'
raise ValueError('Unknown backend.')
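# A minimal usage sketch: inspects the available backends and switches to
# "sox_io" when that backend is present.
def _example_backend_switch():
    available = list_audio_backends()
    print("available:", available, "current:", get_audio_backend())
    if "sox_io" in available:
        # After this call, torchaudio.load/save/info dispatch to the sox_io backend.
        set_audio_backend("sox_io")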
|
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
from torch import Tensor
def load(filepath: Union[str, Path],
out: Optional[Tensor] = None,
normalization: Union[bool, float, Callable] = True,
channels_first: bool = True,
num_frames: int = 0,
offset: int = 0,
filetype: Optional[str] = None) -> Tuple[Tensor, int]:
raise RuntimeError('No audio I/O backend is available.')
def save(filepath: str, src: Tensor, sample_rate: int, precision: int = 16, channels_first: bool = True) -> None:
raise RuntimeError('No audio I/O backend is available.')
def info(filepath: str) -> None:
raise RuntimeError('No audio I/O backend is available.')
|
"""The new soundfile backend which will become default in 0.8.0 onward"""
from typing import Tuple, Optional
import warnings
import torch
from torchaudio._internal import module_utils as _mod_utils
from .common import AudioMetaData
if _mod_utils.is_soundfile_available():
import soundfile
# Mapping from soundfile subtype to number of bits per sample.
# This is mostly heuristic, and the value is set to 0 when it is irrelevant
# (lossy formats) or when it can't be inferred.
# For ADPCM (and G72X) subtypes, it's hard to infer the bit depth because it's not part of the standard:
# According to https://en.wikipedia.org/wiki/Adaptive_differential_pulse-code_modulation#In_telephony,
# the default seems to be 8 bits but it can be compressed further to 4 bits.
# The dict is inspired from
# https://github.com/bastibe/python-soundfile/blob/744efb4b01abc72498a96b09115b42a4cabd85e4/soundfile.py#L66-L94
_SUBTYPE_TO_BITS_PER_SAMPLE = {
'PCM_S8': 8, # Signed 8 bit data
'PCM_16': 16, # Signed 16 bit data
'PCM_24': 24, # Signed 24 bit data
'PCM_32': 32, # Signed 32 bit data
'PCM_U8': 8, # Unsigned 8 bit data (WAV and RAW only)
'FLOAT': 32, # 32 bit float data
'DOUBLE': 64, # 64 bit float data
'ULAW': 8, # U-Law encoded. See https://en.wikipedia.org/wiki/G.711#Types
'ALAW': 8, # A-Law encoded. See https://en.wikipedia.org/wiki/G.711#Types
'IMA_ADPCM': 0, # IMA ADPCM.
'MS_ADPCM': 0, # Microsoft ADPCM.
'GSM610': 0, # GSM 6.10 encoding. (Wikipedia says 1.625 bit depth?? https://en.wikipedia.org/wiki/Full_Rate)
'VOX_ADPCM': 0, # OKI / Dialogix ADPCM
'G721_32': 0, # 32kbs G721 ADPCM encoding.
'G723_24': 0, # 24kbs G723 ADPCM encoding.
'G723_40': 0, # 40kbs G723 ADPCM encoding.
'DWVW_12': 12, # 12 bit Delta Width Variable Word encoding.
'DWVW_16': 16, # 16 bit Delta Width Variable Word encoding.
'DWVW_24': 24, # 24 bit Delta Width Variable Word encoding.
'DWVW_N': 0, # N bit Delta Width Variable Word encoding.
'DPCM_8': 8, # 8 bit differential PCM (XI only)
'DPCM_16': 16, # 16 bit differential PCM (XI only)
'VORBIS': 0, # Xiph Vorbis encoding. (lossy)
'ALAC_16': 16, # Apple Lossless Audio Codec (16 bit).
'ALAC_20': 20, # Apple Lossless Audio Codec (20 bit).
'ALAC_24': 24, # Apple Lossless Audio Codec (24 bit).
'ALAC_32': 32, # Apple Lossless Audio Codec (32 bit).
}
def _get_bit_depth(subtype):
if subtype not in _SUBTYPE_TO_BITS_PER_SAMPLE:
warnings.warn(
f"The {subtype} subtype is unknown to TorchAudio. As a result, the bits_per_sample "
"attribute will be set to 0. If you are seeing this warning, please "
"report by opening an issue on github (after checking for existing/closed ones). "
"You may otherwise ignore this warning."
)
return _SUBTYPE_TO_BITS_PER_SAMPLE.get(subtype, 0)
_SUBTYPE_TO_ENCODING = {
'PCM_S8': 'PCM_S',
'PCM_16': 'PCM_S',
'PCM_24': 'PCM_S',
'PCM_32': 'PCM_S',
'PCM_U8': 'PCM_U',
'FLOAT': 'PCM_F',
'DOUBLE': 'PCM_F',
'ULAW': 'ULAW',
'ALAW': 'ALAW',
'VORBIS': 'VORBIS',
}
def _get_encoding(format: str, subtype: str):
if format == 'FLAC':
return 'FLAC'
return _SUBTYPE_TO_ENCODING.get(subtype, 'UNKNOWN')
@_mod_utils.requires_soundfile()
def info(filepath: str, format: Optional[str] = None) -> AudioMetaData:
"""Get signal information of an audio file.
Note:
``filepath`` argument is intentionally annotated as ``str`` only, even though it accepts
        ``pathlib.Path`` object as well. This is for consistency with the ``"sox_io"`` backend,
        which has a restriction on type annotation due to TorchScript compiler compatibility.
Args:
filepath (path-like object or file-like object):
Source of audio data.
format (str or None, optional):
Not used. PySoundFile does not accept format hint.
Returns:
AudioMetaData: meta data of the given audio.
"""
sinfo = soundfile.info(filepath)
return AudioMetaData(
sinfo.samplerate,
sinfo.frames,
sinfo.channels,
bits_per_sample=_get_bit_depth(sinfo.subtype),
encoding=_get_encoding(sinfo.format, sinfo.subtype),
)
_SUBTYPE2DTYPE = {
"PCM_S8": "int8",
"PCM_U8": "uint8",
"PCM_16": "int16",
"PCM_32": "int32",
"FLOAT": "float32",
"DOUBLE": "float64",
}
@_mod_utils.requires_soundfile()
def load(
filepath: str,
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
"""Load audio data from file.
Note:
The formats this function can handle depend on the soundfile installation.
This function is tested on the following formats;
* WAV
* 32-bit floating-point
* 32-bit signed integer
* 16-bit signed integer
* 8-bit unsigned integer
* FLAC
* OGG/VORBIS
* SPHERE
By default (``normalize=True``, ``channels_first=True``), this function returns Tensor with
``float32`` dtype and the shape of `[channel, time]`.
The samples are normalized to fit in the range of ``[-1.0, 1.0]``.
When the input format is WAV with integer type, such as 32-bit signed integer, 16-bit
signed integer and 8-bit unsigned integer (24-bit signed integer is not supported),
by providing ``normalize=False``, this function can return integer Tensor, where the samples
are expressed within the whole range of the corresponding dtype, that is, ``int32`` tensor
for 32-bit signed PCM, ``int16`` for 16-bit signed PCM and ``uint8`` for 8-bit unsigned PCM.
``normalize`` parameter has no effect on 32-bit floating-point WAV and other formats, such as
``flac`` and ``mp3``.
For these formats, this function always returns ``float32`` Tensor with values normalized to
``[-1.0, 1.0]``.
Note:
``filepath`` argument is intentionally annotated as ``str`` only, even though it accepts
        ``pathlib.Path`` object as well. This is for consistency with the ``"sox_io"`` backend,
        which has a restriction on type annotation due to TorchScript compiler compatibility.
Args:
filepath (path-like object or file-like object):
Source of audio data.
        frame_offset (int, optional):
            Number of frames to skip before starting to read data.
num_frames (int, optional):
Maximum number of frames to read. ``-1`` reads all the remaining samples,
starting from ``frame_offset``.
            This function may return fewer frames than requested if there are not enough
            frames in the given file.
normalize (bool, optional):
            When ``True``, this function always returns ``float32``, and sample values are
normalized to ``[-1.0, 1.0]``.
If input file is integer WAV, giving ``False`` will change the resulting Tensor type to
integer type.
This argument has no effect for formats other than integer WAV type.
channels_first (bool, optional):
When True, the returned Tensor has dimension `[channel, time]`.
Otherwise, the returned Tensor's dimension is `[time, channel]`.
format (str or None, optional):
Not used. PySoundFile does not accept format hint.
Returns:
(torch.Tensor, int): Resulting Tensor and sample rate.
If the input file has integer wav format and normalization is off, then it has
integer type, else ``float32`` type. If ``channels_first=True``, it has
`[channel, time]` else `[time, channel]`.
"""
with soundfile.SoundFile(filepath, "r") as file_:
if file_.format != "WAV" or normalize:
dtype = "float32"
elif file_.subtype not in _SUBTYPE2DTYPE:
raise ValueError(f"Unsupported subtype: {file_.subtype}")
else:
dtype = _SUBTYPE2DTYPE[file_.subtype]
frames = file_._prepare_read(frame_offset, None, num_frames)
waveform = file_.read(frames, dtype, always_2d=True)
sample_rate = file_.samplerate
waveform = torch.from_numpy(waveform)
if channels_first:
waveform = waveform.t()
return waveform, sample_rate
def _get_subtype_for_wav(
dtype: torch.dtype,
encoding: str,
bits_per_sample: int):
if not encoding:
if not bits_per_sample:
subtype = {
torch.uint8: "PCM_U8",
torch.int16: "PCM_16",
torch.int32: "PCM_32",
torch.float32: "FLOAT",
torch.float64: "DOUBLE",
}.get(dtype)
if not subtype:
raise ValueError(f"Unsupported dtype for wav: {dtype}")
return subtype
if bits_per_sample == 8:
return "PCM_U8"
return f"PCM_{bits_per_sample}"
if encoding == "PCM_S":
if not bits_per_sample:
return "PCM_32"
if bits_per_sample == 8:
raise ValueError("wav does not support 8-bit signed PCM encoding.")
return f"PCM_{bits_per_sample}"
if encoding == "PCM_U":
if bits_per_sample in (None, 8):
return "PCM_U8"
raise ValueError("wav only supports 8-bit unsigned PCM encoding.")
if encoding == "PCM_F":
if bits_per_sample in (None, 32):
return "FLOAT"
if bits_per_sample == 64:
return "DOUBLE"
raise ValueError("wav only supports 32/64-bit float PCM encoding.")
if encoding == "ULAW":
if bits_per_sample in (None, 8):
return "ULAW"
raise ValueError("wav only supports 8-bit mu-law encoding.")
if encoding == "ALAW":
if bits_per_sample in (None, 8):
return "ALAW"
raise ValueError("wav only supports 8-bit a-law encoding.")
raise ValueError(f"wav does not support {encoding}.")
def _get_subtype_for_sphere(encoding: str, bits_per_sample: int):
if encoding in (None, "PCM_S"):
return f"PCM_{bits_per_sample}" if bits_per_sample else "PCM_32"
if encoding in ("PCM_U", "PCM_F"):
raise ValueError(f"sph does not support {encoding} encoding.")
if encoding == "ULAW":
if bits_per_sample in (None, 8):
return "ULAW"
raise ValueError("sph only supports 8-bit for mu-law encoding.")
if encoding == "ALAW":
return "ALAW"
raise ValueError(f"sph does not support {encoding}.")
def _get_subtype(
dtype: torch.dtype,
format: str,
encoding: str,
bits_per_sample: int):
if format == "wav":
return _get_subtype_for_wav(dtype, encoding, bits_per_sample)
if format == "flac":
if encoding:
raise ValueError("flac does not support encoding.")
if not bits_per_sample:
return "PCM_16"
if bits_per_sample > 24:
raise ValueError("flac does not support bits_per_sample > 24.")
return "PCM_S8" if bits_per_sample == 8 else f"PCM_{bits_per_sample}"
if format in ("ogg", "vorbis"):
if encoding or bits_per_sample:
raise ValueError(
"ogg/vorbis does not support encoding/bits_per_sample.")
return "VORBIS"
if format == "sph":
return _get_subtype_for_sphere(encoding, bits_per_sample)
if format in ("nis", "nist"):
return "PCM_16"
raise ValueError(f"Unsupported format: {format}")
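# A minimal sketch illustrating how the helper above maps
# (dtype, format, encoding, bits_per_sample) onto soundfile subtypes.
def _example_subtype_mapping():
    assert _get_subtype(torch.int16, "wav", None, None) == "PCM_16"
    assert _get_subtype(torch.float32, "wav", "PCM_F", 64) == "DOUBLE"
    assert _get_subtype(torch.float32, "flac", None, None) == "PCM_16"
    assert _get_subtype(torch.float32, "ogg", None, None) == "VORBIS"
    assert _get_subtype(torch.float32, "sph", "ULAW", 8) == "ULAW"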
@_mod_utils.requires_soundfile()
def save(
filepath: str,
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
compression: Optional[float] = None,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
):
"""Save audio data to file.
Note:
The formats this function can handle depend on the soundfile installation.
This function is tested on the following formats;
* WAV
* 32-bit floating-point
* 32-bit signed integer
* 16-bit signed integer
* 8-bit unsigned integer
* FLAC
* OGG/VORBIS
* SPHERE
Note:
``filepath`` argument is intentionally annotated as ``str`` only, even though it accepts
        ``pathlib.Path`` object as well. This is for consistency with the ``"sox_io"`` backend,
        which has a restriction on type annotation due to TorchScript compiler compatibility.
Args:
filepath (str or pathlib.Path): Path to audio file.
        src (torch.Tensor): Audio data to save. Must be a 2D tensor.
sample_rate (int): sampling rate
channels_first (bool, optional): If ``True``, the given tensor is interpreted as `[channel, time]`,
otherwise `[time, channel]`.
        compression (float or None, optional): Not used.
            It is here only for interface compatibility reasons with the "sox_io" backend.
format (str or None, optional): Override the audio format.
When ``filepath`` argument is path-like object, audio format is
inferred from file extension. If the file extension is missing or
different, you can specify the correct format with this argument.
When ``filepath`` argument is file-like object,
this argument is required.
Valid values are ``"wav"``, ``"ogg"``, ``"vorbis"``,
``"flac"`` and ``"sph"``.
encoding (str or None, optional): Changes the encoding for supported formats.
            This argument is effective only for supported formats, such as
            ``"wav"``, ``"flac"`` and ``"sph"``. Valid values are;
- ``"PCM_S"`` (signed integer Linear PCM)
- ``"PCM_U"`` (unsigned integer Linear PCM)
- ``"PCM_F"`` (floating point PCM)
- ``"ULAW"`` (mu-law)
- ``"ALAW"`` (a-law)
bits_per_sample (int or None, optional): Changes the bit depth for the
supported formats.
When ``format`` is one of ``"wav"``, ``"flac"`` or ``"sph"``,
you can change the bit depth.
Valid values are ``8``, ``16``, ``24``, ``32`` and ``64``.
Supported formats/encodings/bit depth/compression are:
``"wav"``
- 32-bit floating-point PCM
- 32-bit signed integer PCM
- 24-bit signed integer PCM
- 16-bit signed integer PCM
- 8-bit unsigned integer PCM
- 8-bit mu-law
- 8-bit a-law
Note:
Default encoding/bit depth is determined by the dtype of
the input Tensor.
``"flac"``
- 8-bit
- 16-bit (default)
- 24-bit
``"ogg"``, ``"vorbis"``
- Doesn't accept changing configuration.
``"sph"``
- 8-bit signed integer PCM
- 16-bit signed integer PCM
- 24-bit signed integer PCM
- 32-bit signed integer PCM (default)
- 8-bit mu-law
- 8-bit a-law
- 16-bit a-law
- 24-bit a-law
- 32-bit a-law
"""
if src.ndim != 2:
raise ValueError(f"Expected 2D Tensor, got {src.ndim}D.")
if compression is not None:
warnings.warn(
'`save` function of "soundfile" backend does not support "compression" parameter. '
"The argument is silently ignored."
)
if hasattr(filepath, 'write'):
if format is None:
raise RuntimeError('`format` is required when saving to file object.')
ext = format.lower()
else:
ext = str(filepath).split(".")[-1].lower()
if bits_per_sample not in (None, 8, 16, 24, 32, 64):
raise ValueError("Invalid bits_per_sample.")
if bits_per_sample == 24:
warnings.warn("Saving audio with 24 bits per sample might warp samples near -1. "
"Using 16 bits per sample might be able to avoid this.")
subtype = _get_subtype(src.dtype, ext, encoding, bits_per_sample)
    # sph is an extension used in TED-LIUM, but soundfile does not recognize it as NIST format,
    # so we extend the extensions manually here.
if ext in ["nis", "nist", "sph"] and format is None:
format = "NIST"
if channels_first:
src = src.t()
soundfile.write(
file=filepath, data=src, samplerate=sample_rate, subtype=subtype, format=format
)
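# A minimal usage sketch; the output path, the in-memory buffer and the 8 kHz
# silence tensor are hypothetical placeholders.
def _example_soundfile_save():
    import io
    sample_rate = 8000
    waveform = torch.zeros(1, sample_rate)             # one second of silence, [channel, time]
    save("silence.wav", waveform, sample_rate)         # format inferred from the extension
    buffer = io.BytesIO()
    save(buffer, waveform, sample_rate, format="wav")  # file-like objects require `format`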
|
from torch import Tensor
from torch import nn
__all__ = [
"Wav2Letter",
]
class Wav2Letter(nn.Module):
r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech
Recognition System* [:footcite:`collobert2016wav2letter`].
:math:`\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{stride})}{2}`
Args:
num_classes (int, optional): Number of classes to be classified. (Default: ``40``)
input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum``
or ``mfcc`` (Default: ``waveform``).
num_features (int, optional): Number of input features that the network will receive (Default: ``1``).
"""
def __init__(self, num_classes: int = 40,
input_type: str = "waveform",
num_features: int = 1) -> None:
super(Wav2Letter, self).__init__()
acoustic_num_features = 250 if input_type == "waveform" else num_features
acoustic_model = nn.Sequential(
nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=2, padding=23),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True)
)
if input_type == "waveform":
waveform_model = nn.Sequential(
nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45),
nn.ReLU(inplace=True)
)
self.acoustic_model = nn.Sequential(waveform_model, acoustic_model)
if input_type in ["power_spectrum", "mfcc"]:
self.acoustic_model = acoustic_model
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (torch.Tensor): Tensor of dimension (batch_size, num_features, input_length).
Returns:
Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).
"""
x = self.acoustic_model(x)
x = nn.functional.log_softmax(x, dim=1)
return x
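# A minimal usage sketch; the batch size and input length below are
# illustrative values only.
def _example_wav2letter_usage():
    import torch
    model = Wav2Letter(num_classes=40, input_type="waveform", num_features=1)
    waveform = torch.randn(4, 1, 16000)   # (batch_size, num_features, input_length)
    log_probs = model(waveform)           # (batch_size, num_classes, output_length)
    return log_probs.shape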
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import warnings
from math import sqrt
from typing import Tuple, List, Optional, Union
import torch
from torch import nn
from torch import Tensor
from torch.nn import functional as F
__all__ = [
"Tacotron2",
]
def _get_linear_layer(
in_dim: int, out_dim: int, bias: bool = True, w_init_gain: str = "linear"
) -> torch.nn.Linear:
r"""Linear layer with xavier uniform initialization.
Args:
in_dim (int): Size of each input sample.
out_dim (int): Size of each output sample.
bias (bool, optional): If set to ``False``, the layer will not learn an additive bias. (Default: ``True``)
w_init_gain (str, optional): Parameter passed to ``torch.nn.init.calculate_gain``
for setting the gain parameter of ``xavier_uniform_``. (Default: ``linear``)
Returns:
(torch.nn.Linear): The corresponding linear layer.
"""
linear = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
linear.weight, gain=torch.nn.init.calculate_gain(w_init_gain)
)
return linear
def _get_conv1d_layer(
in_channels: int,
out_channels: int,
kernel_size: int = 1,
stride: int = 1,
padding: Optional[Union[str, int, Tuple[int]]] = None,
dilation: int = 1,
bias: bool = True,
w_init_gain: str = "linear",
) -> torch.nn.Conv1d:
r"""1D convolution with xavier uniform initialization.
Args:
in_channels (int): Number of channels in the input image.
out_channels (int): Number of channels produced by the convolution.
        kernel_size (int, optional): Size of the convolving kernel. (Default: ``1``)
        stride (int, optional): Stride of the convolution. (Default: ``1``)
        padding (str, int or tuple, optional): Padding added to both sides of the input.
            (Default: dilation * (kernel_size - 1) / 2)
        dilation (int, optional): Spacing between kernel elements. (Default: ``1``)
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias. (Default: ``True``)
w_init_gain (str, optional): Parameter passed to ``torch.nn.init.calculate_gain``
for setting the gain parameter of ``xavier_uniform_``. (Default: ``linear``)
Returns:
(torch.nn.Conv1d): The corresponding Conv1D layer.
"""
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
conv1d = torch.nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
torch.nn.init.xavier_uniform_(
conv1d.weight, gain=torch.nn.init.calculate_gain(w_init_gain)
)
return conv1d
def _get_mask_from_lengths(lengths: Tensor) -> Tensor:
r"""Returns a binary mask based on ``lengths``. The ``i``-th row and ``j``-th column of the mask
is ``1`` if ``j`` is smaller than ``i``-th element of ``lengths.
Args:
lengths (Tensor): The length of each element in the batch, with shape (n_batch, ).
Returns:
mask (Tensor): The binary mask, with shape (n_batch, max of ``lengths``).
"""
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, device=lengths.device, dtype=lengths.dtype)
mask = (ids < lengths.unsqueeze(1)).byte()
mask = torch.le(mask, 0)
return mask
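# A minimal sketch: for lengths [3, 1] the mask is True exactly at the padded
# positions that the attention should ignore.
def _example_length_mask():
    lengths = torch.tensor([3, 1])
    mask = _get_mask_from_lengths(lengths)
    # tensor([[False, False, False],
    #         [False,  True,  True]])
    return mask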
class _LocationLayer(nn.Module):
r"""Location layer used in the Attention model.
Args:
attention_n_filter (int): Number of filters for attention model.
attention_kernel_size (int): Kernel size for attention model.
attention_hidden_dim (int): Dimension of attention hidden representation.
"""
def __init__(
self,
attention_n_filter: int,
attention_kernel_size: int,
attention_hidden_dim: int,
):
super().__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = _get_conv1d_layer(
2,
attention_n_filter,
kernel_size=attention_kernel_size,
padding=padding,
bias=False,
stride=1,
dilation=1,
)
self.location_dense = _get_linear_layer(
attention_n_filter, attention_hidden_dim, bias=False, w_init_gain="tanh"
)
def forward(self, attention_weights_cat: Tensor) -> Tensor:
r"""Location layer used in the Attention model.
Args:
attention_weights_cat (Tensor): Cumulative and previous attention weights
with shape (n_batch, 2, max of ``text_lengths``).
Returns:
processed_attention (Tensor): Cumulative and previous attention weights
with shape (n_batch, ``attention_hidden_dim``).
"""
# (n_batch, attention_n_filter, text_lengths.max())
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
# (n_batch, text_lengths.max(), attention_hidden_dim)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class _Attention(nn.Module):
r"""Locally sensitive attention model.
Args:
attention_rnn_dim (int): Number of hidden units for RNN.
encoder_embedding_dim (int): Number of embedding dimensions in the Encoder.
attention_hidden_dim (int): Dimension of attention hidden representation.
attention_location_n_filter (int): Number of filters for Attention model.
attention_location_kernel_size (int): Kernel size for Attention model.
"""
def __init__(
self,
attention_rnn_dim: int,
encoder_embedding_dim: int,
attention_hidden_dim: int,
attention_location_n_filter: int,
attention_location_kernel_size: int,
) -> None:
super().__init__()
self.query_layer = _get_linear_layer(
attention_rnn_dim, attention_hidden_dim, bias=False, w_init_gain="tanh"
)
self.memory_layer = _get_linear_layer(
encoder_embedding_dim, attention_hidden_dim, bias=False, w_init_gain="tanh"
)
self.v = _get_linear_layer(attention_hidden_dim, 1, bias=False)
self.location_layer = _LocationLayer(
attention_location_n_filter,
attention_location_kernel_size,
attention_hidden_dim,
)
self.score_mask_value = -float("inf")
def _get_alignment_energies(
self, query: Tensor, processed_memory: Tensor, attention_weights_cat: Tensor
) -> Tensor:
r"""Get the alignment vector.
Args:
query (Tensor): Decoder output with shape (n_batch, n_mels * n_frames_per_step).
processed_memory (Tensor): Processed Encoder outputs
with shape (n_batch, max of ``text_lengths``, attention_hidden_dim).
attention_weights_cat (Tensor): Cumulative and previous attention weights
with shape (n_batch, 2, max of ``text_lengths``).
Returns:
            alignment (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``).
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(
torch.tanh(processed_query + processed_attention_weights + processed_memory)
)
alignment = energies.squeeze(2)
return alignment
def forward(
self,
attention_hidden_state: Tensor,
memory: Tensor,
processed_memory: Tensor,
attention_weights_cat: Tensor,
mask: Tensor,
) -> Tuple[Tensor, Tensor]:
r"""Pass the input through the Attention model.
Args:
attention_hidden_state (Tensor): Attention rnn last output with shape (n_batch, ``attention_rnn_dim``).
memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``).
processed_memory (Tensor): Processed Encoder outputs
with shape (n_batch, max of ``text_lengths``, ``attention_hidden_dim``).
attention_weights_cat (Tensor): Previous and cumulative attention weights
with shape (n_batch, current_num_frames * 2, max of ``text_lengths``).
mask (Tensor): Binary mask for padded data with shape (n_batch, current_num_frames).
Returns:
attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``).
attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``).
"""
alignment = self._get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat
)
alignment = alignment.masked_fill(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
class _Prenet(nn.Module):
r"""Prenet Module. It is consists of ``len(output_size)`` linear layers.
Args:
in_dim (int): The size of each input sample.
        out_sizes (list): The output dimension of each linear layer.
"""
def __init__(self, in_dim: int, out_sizes: List[int]) -> None:
super().__init__()
in_sizes = [in_dim] + out_sizes[:-1]
self.layers = nn.ModuleList(
[
_get_linear_layer(in_size, out_size, bias=False)
for (in_size, out_size) in zip(in_sizes, out_sizes)
]
)
def forward(self, x: Tensor) -> Tensor:
r"""Pass the input through Prenet.
Args:
x (Tensor): The input sequence to Prenet with shape (n_batch, in_dim).
Return:
            x (Tensor): Tensor with shape (n_batch, ``out_sizes[-1]``).
"""
for linear in self.layers:
x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
return x
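# A minimal sketch: a prenet mapping 80-dimensional frames through two 256-unit
# layers (illustrative sizes).
def _example_prenet():
    prenet = _Prenet(in_dim=80, out_sizes=[256, 256])
    frames = torch.rand(8, 80)   # (n_batch, in_dim)
    out = prenet(frames)         # (n_batch, 256); dropout stays active even in eval mode
    return out.shape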
class _Postnet(nn.Module):
r"""Postnet Module.
Args:
n_mels (int): Number of mel bins.
postnet_embedding_dim (int): Postnet embedding dimension.
postnet_kernel_size (int): Postnet kernel size.
postnet_n_convolution (int): Number of postnet convolutions.
"""
def __init__(
self,
n_mels: int,
postnet_embedding_dim: int,
postnet_kernel_size: int,
postnet_n_convolution: int,
):
super().__init__()
self.convolutions = nn.ModuleList()
for i in range(postnet_n_convolution):
in_channels = n_mels if i == 0 else postnet_embedding_dim
out_channels = n_mels if i == (postnet_n_convolution - 1) else postnet_embedding_dim
init_gain = "linear" if i == (postnet_n_convolution - 1) else "tanh"
num_features = n_mels if i == (postnet_n_convolution - 1) else postnet_embedding_dim
self.convolutions.append(
nn.Sequential(
_get_conv1d_layer(
in_channels,
out_channels,
kernel_size=postnet_kernel_size,
stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1,
w_init_gain=init_gain,
),
nn.BatchNorm1d(num_features),
)
)
self.n_convs = len(self.convolutions)
def forward(self, x: Tensor) -> Tensor:
r"""Pass the input through Postnet.
Args:
x (Tensor): The input sequence with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``).
Return:
x (Tensor): Tensor with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``).
"""
for i, conv in enumerate(self.convolutions):
if i < self.n_convs - 1:
x = F.dropout(torch.tanh(conv(x)), 0.5, training=self.training)
else:
x = F.dropout(conv(x), 0.5, training=self.training)
return x
class _Encoder(nn.Module):
r"""Encoder Module.
Args:
encoder_embedding_dim (int): Number of embedding dimensions in the encoder.
encoder_n_convolution (int): Number of convolution layers in the encoder.
encoder_kernel_size (int): The kernel size in the encoder.
    Examples
        >>> encoder = _Encoder(512, 3, 5)
        >>> input = torch.rand(10, 512, 30)
        >>> input_lengths = torch.full((10,), 30, dtype=torch.int64)
        >>> output = encoder(input, input_lengths)  # shape: (10, 30, 512)
"""
def __init__(
self,
encoder_embedding_dim: int,
encoder_n_convolution: int,
encoder_kernel_size: int,
) -> None:
super().__init__()
self.convolutions = nn.ModuleList()
for _ in range(encoder_n_convolution):
conv_layer = nn.Sequential(
_get_conv1d_layer(
encoder_embedding_dim,
encoder_embedding_dim,
kernel_size=encoder_kernel_size,
stride=1,
padding=int((encoder_kernel_size - 1) / 2),
dilation=1,
w_init_gain="relu",
),
nn.BatchNorm1d(encoder_embedding_dim),
)
self.convolutions.append(conv_layer)
self.lstm = nn.LSTM(
encoder_embedding_dim,
int(encoder_embedding_dim / 2),
1,
batch_first=True,
bidirectional=True,
)
self.lstm.flatten_parameters()
def forward(self, x: Tensor, input_lengths: Tensor) -> Tensor:
r"""Pass the input through the Encoder.
Args:
x (Tensor): The input sequences with shape (n_batch, encoder_embedding_dim, n_seq).
input_lengths (Tensor): The length of each input sequence with shape (n_batch, ).
Return:
x (Tensor): A tensor with shape (n_batch, n_seq, encoder_embedding_dim).
"""
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
input_lengths = input_lengths.cpu()
x = nn.utils.rnn.pack_padded_sequence(x, input_lengths, batch_first=True)
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
return outputs
class _Decoder(nn.Module):
r"""Decoder with Attention model.
Args:
n_mels (int): number of mel bins
n_frames_per_step (int): number of frames processed per step, only 1 is supported
encoder_embedding_dim (int): the number of embedding dimensions in the encoder.
decoder_rnn_dim (int): number of units in decoder LSTM
decoder_max_step (int): maximum number of output mel spectrograms
decoder_dropout (float): dropout probability for decoder LSTM
decoder_early_stopping (bool): stop decoding when all samples are finished
attention_rnn_dim (int): number of units in attention LSTM
attention_hidden_dim (int): dimension of attention hidden representation
attention_location_n_filter (int): number of filters for attention model
attention_location_kernel_size (int): kernel size for attention model
attention_dropout (float): dropout probability for attention LSTM
prenet_dim (int): number of ReLU units in prenet layers
gate_threshold (float): probability threshold for stop token
"""
def __init__(
self,
n_mels: int,
n_frames_per_step: int,
encoder_embedding_dim: int,
decoder_rnn_dim: int,
decoder_max_step: int,
decoder_dropout: float,
decoder_early_stopping: bool,
attention_rnn_dim: int,
attention_hidden_dim: int,
attention_location_n_filter: int,
attention_location_kernel_size: int,
attention_dropout: float,
prenet_dim: int,
gate_threshold: float,
) -> None:
super().__init__()
self.n_mels = n_mels
self.n_frames_per_step = n_frames_per_step
self.encoder_embedding_dim = encoder_embedding_dim
self.attention_rnn_dim = attention_rnn_dim
self.decoder_rnn_dim = decoder_rnn_dim
self.prenet_dim = prenet_dim
self.decoder_max_step = decoder_max_step
self.gate_threshold = gate_threshold
self.attention_dropout = attention_dropout
self.decoder_dropout = decoder_dropout
self.decoder_early_stopping = decoder_early_stopping
self.prenet = _Prenet(n_mels * n_frames_per_step, [prenet_dim, prenet_dim])
self.attention_rnn = nn.LSTMCell(
prenet_dim + encoder_embedding_dim, attention_rnn_dim
)
self.attention_layer = _Attention(
attention_rnn_dim,
encoder_embedding_dim,
attention_hidden_dim,
attention_location_n_filter,
attention_location_kernel_size,
)
self.decoder_rnn = nn.LSTMCell(
attention_rnn_dim + encoder_embedding_dim, decoder_rnn_dim, True
)
self.linear_projection = _get_linear_layer(
decoder_rnn_dim + encoder_embedding_dim, n_mels * n_frames_per_step
)
self.gate_layer = _get_linear_layer(
decoder_rnn_dim + encoder_embedding_dim, 1, bias=True, w_init_gain="sigmoid"
)
def _get_initial_frame(self, memory: Tensor) -> Tensor:
r"""Gets all zeros frames to use as the first decoder input.
Args:
memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``).
Returns:
            decoder_input (Tensor): All-zero frames with shape
                (n_batch, ``n_mels * n_frames_per_step``).
"""
n_batch = memory.size(0)
dtype = memory.dtype
device = memory.device
decoder_input = torch.zeros(
n_batch, self.n_mels * self.n_frames_per_step, dtype=dtype, device=device
)
return decoder_input
def _initialize_decoder_states(
self, memory: Tensor
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
r"""Initializes attention rnn states, decoder rnn states, attention
weights, attention cumulative weights, attention context, stores memory
and stores processed memory.
Args:
memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``).
Returns:
attention_hidden (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``).
            attention_cell (Tensor): Cell state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``).
            decoder_hidden (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``).
            decoder_cell (Tensor): Cell state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``).
attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``).
attention_weights_cum (Tensor): Cumulated attention weights with shape (n_batch, max of ``text_lengths``).
attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``).
processed_memory (Tensor): Processed encoder outputs
with shape (n_batch, max of ``text_lengths``, ``attention_hidden_dim``).
"""
n_batch = memory.size(0)
max_time = memory.size(1)
dtype = memory.dtype
device = memory.device
attention_hidden = torch.zeros(
n_batch, self.attention_rnn_dim, dtype=dtype, device=device
)
attention_cell = torch.zeros(
n_batch, self.attention_rnn_dim, dtype=dtype, device=device
)
decoder_hidden = torch.zeros(
n_batch, self.decoder_rnn_dim, dtype=dtype, device=device
)
decoder_cell = torch.zeros(
n_batch, self.decoder_rnn_dim, dtype=dtype, device=device
)
attention_weights = torch.zeros(n_batch, max_time, dtype=dtype, device=device)
attention_weights_cum = torch.zeros(
n_batch, max_time, dtype=dtype, device=device
)
attention_context = torch.zeros(
n_batch, self.encoder_embedding_dim, dtype=dtype, device=device
)
processed_memory = self.attention_layer.memory_layer(memory)
return (
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory,
)
def _parse_decoder_inputs(self, decoder_inputs: Tensor) -> Tensor:
r"""Prepares decoder inputs.
Args:
decoder_inputs (Tensor): Inputs used for teacher-forced training, i.e. mel-specs,
with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``)
Returns:
inputs (Tensor): Processed decoder inputs with shape (max of ``mel_specgram_lengths``, n_batch, ``n_mels``).
"""
# (n_batch, n_mels, mel_specgram_lengths.max()) -> (n_batch, mel_specgram_lengths.max(), n_mels)
decoder_inputs = decoder_inputs.transpose(1, 2)
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0),
int(decoder_inputs.size(1) / self.n_frames_per_step),
-1,
)
# (n_batch, mel_specgram_lengths.max(), n_mels) -> (mel_specgram_lengths.max(), n_batch, n_mels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
def _parse_decoder_outputs(
self, mel_specgram: Tensor, gate_outputs: Tensor, alignments: Tensor
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Prepares decoder outputs for output
Args:
mel_specgram (Tensor): mel spectrogram with shape (max of ``mel_specgram_lengths``, n_batch, ``n_mels``)
gate_outputs (Tensor): predicted stop token with shape (max of ``mel_specgram_lengths``, n_batch)
alignments (Tensor): sequence of attention weights from the decoder
with shape (max of ``mel_specgram_lengths``, n_batch, max of ``text_lengths``)
Returns:
mel_specgram (Tensor): mel spectrogram with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``)
gate_outputs (Tensor): predicted stop token with shape (n_batch, max of ``mel_specgram_lengths``)
alignments (Tensor): sequence of attention weights from the decoder
with shape (n_batch, max of ``mel_specgram_lengths``, max of ``text_lengths``)
"""
# (mel_specgram_lengths.max(), n_batch, text_lengths.max())
# -> (n_batch, mel_specgram_lengths.max(), text_lengths.max())
alignments = alignments.transpose(0, 1).contiguous()
# (mel_specgram_lengths.max(), n_batch) -> (n_batch, mel_specgram_lengths.max())
gate_outputs = gate_outputs.transpose(0, 1).contiguous()
# (mel_specgram_lengths.max(), n_batch, n_mels) -> (n_batch, mel_specgram_lengths.max(), n_mels)
mel_specgram = mel_specgram.transpose(0, 1).contiguous()
# decouple frames per step
shape = (mel_specgram.shape[0], -1, self.n_mels)
mel_specgram = mel_specgram.view(*shape)
# (n_batch, mel_specgram_lengths.max(), n_mels) -> (n_batch, n_mels, T_out)
mel_specgram = mel_specgram.transpose(1, 2)
return mel_specgram, gate_outputs, alignments
def decode(
self,
decoder_input: Tensor,
attention_hidden: Tensor,
attention_cell: Tensor,
decoder_hidden: Tensor,
decoder_cell: Tensor,
attention_weights: Tensor,
attention_weights_cum: Tensor,
attention_context: Tensor,
memory: Tensor,
processed_memory: Tensor,
mask: Tensor,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
r"""Decoder step using stored states, attention and memory
Args:
decoder_input (Tensor): Output of the Prenet with shape (n_batch, ``prenet_dim``).
attention_hidden (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``).
            attention_cell (Tensor): Cell state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``).
            decoder_hidden (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``).
            decoder_cell (Tensor): Cell state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``).
attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``).
attention_weights_cum (Tensor): Cumulated attention weights with shape (n_batch, max of ``text_lengths``).
attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``).
memory (Tensor): Encoder output with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``).
processed_memory (Tensor): Processed Encoder outputs
with shape (n_batch, max of ``text_lengths``, ``attention_hidden_dim``).
mask (Tensor): Binary mask for padded data with shape (n_batch, current_num_frames).
Returns:
decoder_output: Predicted mel spectrogram for the current frame with shape (n_batch, ``n_mels``).
gate_prediction (Tensor): Prediction of the stop token with shape (n_batch, ``1``).
attention_hidden (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``).
            attention_cell (Tensor): Cell state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``).
            decoder_hidden (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``).
            decoder_cell (Tensor): Cell state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``).
attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``).
attention_weights_cum (Tensor): Cumulated attention weights with shape (n_batch, max of ``text_lengths``).
attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``).
"""
cell_input = torch.cat((decoder_input, attention_context), -1)
attention_hidden, attention_cell = self.attention_rnn(
cell_input, (attention_hidden, attention_cell)
)
attention_hidden = F.dropout(
attention_hidden, self.attention_dropout, self.training
)
attention_weights_cat = torch.cat(
(attention_weights.unsqueeze(1), attention_weights_cum.unsqueeze(1)), dim=1
)
attention_context, attention_weights = self.attention_layer(
attention_hidden, memory, processed_memory, attention_weights_cat, mask
)
attention_weights_cum += attention_weights
decoder_input = torch.cat((attention_hidden, attention_context), -1)
decoder_hidden, decoder_cell = self.decoder_rnn(
decoder_input, (decoder_hidden, decoder_cell)
)
decoder_hidden = F.dropout(decoder_hidden, self.decoder_dropout, self.training)
decoder_hidden_attention_context = torch.cat(
(decoder_hidden, attention_context), dim=1
)
decoder_output = self.linear_projection(decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return (
decoder_output,
gate_prediction,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
)
def forward(
self, memory: Tensor, mel_specgram_truth: Tensor, memory_lengths: Tensor
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Decoder forward pass for training.
Args:
memory (Tensor): Encoder outputs
with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``).
mel_specgram_truth (Tensor): Decoder ground-truth mel-specs for teacher forcing
with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``).
memory_lengths (Tensor): Encoder output lengths for attention masking
(the same as ``text_lengths``) with shape (n_batch, ).
Returns:
mel_specgram (Tensor): Predicted mel spectrogram
with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``).
gate_outputs (Tensor): Predicted stop token for each timestep
with shape (n_batch, max of ``mel_specgram_lengths``).
alignments (Tensor): Sequence of attention weights from the decoder
with shape (n_batch, max of ``mel_specgram_lengths``, max of ``text_lengths``).
"""
decoder_input = self._get_initial_frame(memory).unsqueeze(0)
decoder_inputs = self._parse_decoder_inputs(mel_specgram_truth)
decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
decoder_inputs = self.prenet(decoder_inputs)
mask = _get_mask_from_lengths(memory_lengths)
(
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory,
) = self._initialize_decoder_states(memory)
mel_outputs, gate_outputs, alignments = [], [], []
while len(mel_outputs) < decoder_inputs.size(0) - 1:
decoder_input = decoder_inputs[len(mel_outputs)]
(
mel_output,
gate_output,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
) = self.decode(
decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask,
)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output.squeeze()]
alignments += [attention_weights]
mel_specgram, gate_outputs, alignments = self._parse_decoder_outputs(
torch.stack(mel_outputs), torch.stack(gate_outputs), torch.stack(alignments)
)
return mel_specgram, gate_outputs, alignments
def _get_go_frame(self, memory: Tensor) -> Tensor:
"""Gets all zeros frames to use as the first decoder input
args:
memory (Tensor): Encoder outputs
with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``).
returns:
decoder_input (Tensor): All zeros frames with shape(n_batch, ``n_mels`` * ``n_frame_per_step``).
"""
n_batch = memory.size(0)
dtype = memory.dtype
device = memory.device
decoder_input = torch.zeros(
n_batch, self.n_mels * self.n_frames_per_step, dtype=dtype, device=device
)
return decoder_input
@torch.jit.export
def infer(self,
memory: Tensor,
memory_lengths: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Decoder inference
Args:
memory (Tensor): Encoder outputs
with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``).
memory_lengths (Tensor): Encoder output lengths for attention masking
(the same as ``text_lengths``) with shape (n_batch, ).
Returns:
mel_specgram (Tensor): Predicted mel spectrogram
with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``).
mel_specgram_lengths (Tensor): The length of the predicted mel spectrogram with shape (n_batch, ).
gate_outputs (Tensor): Predicted stop token for each timestep
with shape (n_batch, max of ``mel_specgram_lengths``).
alignments (Tensor): Sequence of attention weights from the decoder
with shape (n_batch, max of ``mel_specgram_lengths``, max of ``text_lengths``).
"""
batch_size, device = memory.size(0), memory.device
decoder_input = self._get_go_frame(memory)
mask = _get_mask_from_lengths(memory_lengths)
(
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory,
) = self._initialize_decoder_states(memory)
mel_specgram_lengths = torch.zeros([batch_size], dtype=torch.int32, device=device)
finished = torch.zeros([batch_size], dtype=torch.bool, device=device)
mel_specgrams: List[Tensor] = []
gate_outputs: List[Tensor] = []
alignments: List[Tensor] = []
for _ in range(self.decoder_max_step):
decoder_input = self.prenet(decoder_input)
(
mel_specgram,
gate_output,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
) = self.decode(
decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask,
)
mel_specgrams.append(mel_specgram.unsqueeze(0))
gate_outputs.append(gate_output.transpose(0, 1))
alignments.append(attention_weights)
mel_specgram_lengths[~finished] += 1
finished |= torch.sigmoid(gate_output.squeeze(1)) > self.gate_threshold
if self.decoder_early_stopping and torch.all(finished):
break
decoder_input = mel_specgram
if len(mel_specgrams) == self.decoder_max_step:
warnings.warn(
"Reached max decoder steps. The generated spectrogram might not cover "
"the whole transcript.")
mel_specgrams = torch.cat(mel_specgrams, dim=0)
gate_outputs = torch.cat(gate_outputs, dim=0)
alignments = torch.cat(alignments, dim=0)
mel_specgrams, gate_outputs, alignments = self._parse_decoder_outputs(
mel_specgrams, gate_outputs, alignments
)
return mel_specgrams, mel_specgram_lengths, gate_outputs, alignments
class Tacotron2(nn.Module):
r"""Tacotron2 model based on the implementation from
`Nvidia <https://github.com/NVIDIA/DeepLearningExamples/>`_.
The original implementation was introduced in
*Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions*
[:footcite:`shen2018natural`].
Args:
mask_padding (bool, optional): Use mask padding (Default: ``False``).
n_mels (int, optional): Number of mel bins (Default: ``80``).
n_symbol (int, optional): Number of symbols for the input text (Default: ``148``).
n_frames_per_step (int, optional): Number of frames processed per step, only 1 is supported (Default: ``1``).
symbol_embedding_dim (int, optional): Input embedding dimension (Default: ``512``).
encoder_n_convolution (int, optional): Number of encoder convolutions (Default: ``3``).
encoder_kernel_size (int, optional): Encoder kernel size (Default: ``5``).
encoder_embedding_dim (int, optional): Encoder embedding dimension (Default: ``512``).
decoder_rnn_dim (int, optional): Number of units in decoder LSTM (Default: ``1024``).
decoder_max_step (int, optional): Maximum number of output mel spectrograms (Default: ``2000``).
decoder_dropout (float, optional): Dropout probability for decoder LSTM (Default: ``0.1``).
decoder_early_stopping (bool, optional): Stop decoding once all samples are finished (Default: ``True``).
attention_rnn_dim (int, optional): Number of units in attention LSTM (Default: ``1024``).
attention_hidden_dim (int, optional): Dimension of attention hidden representation (Default: ``128``).
attention_location_n_filter (int, optional): Number of filters for attention model (Default: ``32``).
attention_location_kernel_size (int, optional): Kernel size for attention model (Default: ``31``).
attention_dropout (float, optional): Dropout probability for attention LSTM (Default: ``0.1``).
prenet_dim (int, optional): Number of ReLU units in prenet layers (Default: ``256``).
postnet_n_convolution (int, optional): Number of postnet convolutions (Default: ``5``).
postnet_kernel_size (int, optional): Postnet kernel size (Default: ``5``).
postnet_embedding_dim (int, optional): Postnet embedding dimension (Default: ``512``).
gate_threshold (float, optional): Probability threshold for stop token (Default: ``0.5``).
"""
def __init__(
self,
mask_padding: bool = False,
n_mels: int = 80,
n_symbol: int = 148,
n_frames_per_step: int = 1,
symbol_embedding_dim: int = 512,
encoder_embedding_dim: int = 512,
encoder_n_convolution: int = 3,
encoder_kernel_size: int = 5,
decoder_rnn_dim: int = 1024,
decoder_max_step: int = 2000,
decoder_dropout: float = 0.1,
decoder_early_stopping: bool = True,
attention_rnn_dim: int = 1024,
attention_hidden_dim: int = 128,
attention_location_n_filter: int = 32,
attention_location_kernel_size: int = 31,
attention_dropout: float = 0.1,
prenet_dim: int = 256,
postnet_n_convolution: int = 5,
postnet_kernel_size: int = 5,
postnet_embedding_dim: int = 512,
gate_threshold: float = 0.5,
) -> None:
super().__init__()
self.mask_padding = mask_padding
self.n_mels = n_mels
self.n_frames_per_step = n_frames_per_step
self.embedding = nn.Embedding(n_symbol, symbol_embedding_dim)
std = sqrt(2.0 / (n_symbol + symbol_embedding_dim))
val = sqrt(3.0) * std
self.embedding.weight.data.uniform_(-val, val)
self.encoder = _Encoder(
encoder_embedding_dim, encoder_n_convolution, encoder_kernel_size
)
self.decoder = _Decoder(
n_mels,
n_frames_per_step,
encoder_embedding_dim,
decoder_rnn_dim,
decoder_max_step,
decoder_dropout,
decoder_early_stopping,
attention_rnn_dim,
attention_hidden_dim,
attention_location_n_filter,
attention_location_kernel_size,
attention_dropout,
prenet_dim,
gate_threshold,
)
self.postnet = _Postnet(
n_mels, postnet_embedding_dim, postnet_kernel_size, postnet_n_convolution
)
def forward(
self,
tokens: Tensor,
token_lengths: Tensor,
mel_specgram: Tensor,
mel_specgram_lengths: Tensor,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
r"""Pass the input through the Tacotron2 model. This is in teacher
forcing mode, which is generally used for training.
The input ``tokens`` should be padded with zeros to length max of ``token_lengths``.
The input ``mel_specgram`` should be padded with zeros to length max of ``mel_specgram_lengths``.
Args:
tokens (Tensor): The input tokens to Tacotron2 with shape `(n_batch, max of token_lengths)`.
token_lengths (Tensor): The valid length of each sample in ``tokens`` with shape `(n_batch, )`.
mel_specgram (Tensor): The target mel spectrogram
with shape `(n_batch, n_mels, max of mel_specgram_lengths)`.
mel_specgram_lengths (Tensor): The length of each mel spectrogram with shape `(n_batch, )`.
Returns:
[Tensor, Tensor, Tensor, Tensor]:
Tensor
Mel spectrogram before Postnet with shape `(n_batch, n_mels, max of mel_specgram_lengths)`.
Tensor
Mel spectrogram after Postnet with shape `(n_batch, n_mels, max of mel_specgram_lengths)`.
Tensor
The output for stop token at each time step with shape `(n_batch, max of mel_specgram_lengths)`.
Tensor
Sequence of attention weights from the decoder with
shape `(n_batch, max of mel_specgram_lengths, max of token_lengths)`.
"""
embedded_inputs = self.embedding(tokens).transpose(1, 2)
encoder_outputs = self.encoder(embedded_inputs, token_lengths)
mel_specgram, gate_outputs, alignments = self.decoder(
encoder_outputs, mel_specgram, memory_lengths=token_lengths
)
mel_specgram_postnet = self.postnet(mel_specgram)
mel_specgram_postnet = mel_specgram + mel_specgram_postnet
if self.mask_padding:
mask = _get_mask_from_lengths(mel_specgram_lengths)
mask = mask.expand(self.n_mels, mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
mel_specgram.masked_fill_(mask, 0.0)
mel_specgram_postnet.masked_fill_(mask, 0.0)
gate_outputs.masked_fill_(mask[:, 0, :], 1e3)
return mel_specgram, mel_specgram_postnet, gate_outputs, alignments
@torch.jit.export
def infer(self, tokens: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Tensor, Tensor]:
r"""Using Tacotron2 for inference. The input is a batch of encoded
sentences (``tokens``) and its corresponding lengths (``lengths``). The
output is the generated mel spectrograms, its corresponding lengths, and
the attention weights from the decoder.
The input `tokens` should be padded with zeros to length max of ``lengths``.
Args:
tokens (Tensor): The input tokens to Tacotron2 with shape `(n_batch, max of lengths)`.
lengths (Tensor or None, optional):
The valid length of each sample in ``tokens`` with shape `(n_batch, )`.
If ``None``, it is assumed that the all the tokens are valid. Default: ``None``
Returns:
(Tensor, Tensor, Tensor):
Tensor
The predicted mel spectrogram with shape `(n_batch, n_mels, max of mel_specgram_lengths)`.
Tensor
The length of the predicted mel spectrogram with shape `(n_batch, )`.
Tensor
Sequence of attention weights from the decoder with shape
`(n_batch, max of mel_specgram_lengths, max of lengths)`.
"""
n_batch, max_length = tokens.shape
if lengths is None:
lengths = torch.tensor([max_length]).expand(n_batch).to(tokens.device, tokens.dtype)
assert lengths is not None # For TorchScript compiler
embedded_inputs = self.embedding(tokens).transpose(1, 2)
encoder_outputs = self.encoder(embedded_inputs, lengths)
mel_specgram, mel_specgram_lengths, _, alignments = self.decoder.infer(
encoder_outputs, lengths
)
mel_outputs_postnet = self.postnet(mel_specgram)
mel_outputs_postnet = mel_specgram + mel_outputs_postnet
alignments = alignments.unfold(1, n_batch, n_batch).transpose(0, 2)
return mel_outputs_postnet, mel_specgram_lengths, alignments
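# A minimal inference sketch, written as an illustrative helper (not used by the
# model code) and assuming the default hyperparameters above. Token IDs are
# random placeholders rather than a real text encoding.
def _tacotron2_infer_example():
    model = Tacotron2()
    model.eval()
    tokens = torch.randint(0, 148, (2, 50))   # (n_batch, max of lengths)
    lengths = torch.tensor([50, 42])          # valid length of each sample
    with torch.no_grad():
        mel, mel_lengths, alignments = model.infer(tokens, lengths)
    # mel:         (n_batch, n_mels=80, max of mel_specgram_lengths)
    # mel_lengths: (n_batch, )
    # alignments:  (n_batch, max of mel_specgram_lengths, max of lengths)
    return mel, mel_lengths, alignments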
|
"""Implements Conv-TasNet with building blocks of it.
Based on https://github.com/naplab/Conv-TasNet/tree/e66d82a8f956a69749ec8a4ae382217faa097c5c
"""
from typing import Tuple, Optional
import torch
class ConvBlock(torch.nn.Module):
"""1D Convolutional block.
Args:
io_channels (int): The number of input/output channels, <B, Sc>
hidden_channels (int): The number of channels in the internal layers, <H>.
kernel_size (int): The convolution kernel size of the middle layer, <P>.
padding (int): Padding value of the convolution in the middle layer.
dilation (int, optional): Dilation value of the convolution in the middle layer.
no_residual (bool, optional): Disable residual block/output.
Note:
This implementation corresponds to the "non-causal" setting in the paper.
"""
def __init__(
self,
io_channels: int,
hidden_channels: int,
kernel_size: int,
padding: int,
dilation: int = 1,
no_residual: bool = False,
):
super().__init__()
self.conv_layers = torch.nn.Sequential(
torch.nn.Conv1d(
in_channels=io_channels, out_channels=hidden_channels, kernel_size=1
),
torch.nn.PReLU(),
torch.nn.GroupNorm(num_groups=1, num_channels=hidden_channels, eps=1e-08),
torch.nn.Conv1d(
in_channels=hidden_channels,
out_channels=hidden_channels,
kernel_size=kernel_size,
padding=padding,
dilation=dilation,
groups=hidden_channels,
),
torch.nn.PReLU(),
torch.nn.GroupNorm(num_groups=1, num_channels=hidden_channels, eps=1e-08),
)
self.res_out = (
None
if no_residual
else torch.nn.Conv1d(
in_channels=hidden_channels, out_channels=io_channels, kernel_size=1
)
)
self.skip_out = torch.nn.Conv1d(
in_channels=hidden_channels, out_channels=io_channels, kernel_size=1
)
def forward(
self, input: torch.Tensor
) -> Tuple[Optional[torch.Tensor], torch.Tensor]:
feature = self.conv_layers(input)
if self.res_out is None:
residual = None
else:
residual = self.res_out(feature)
skip_out = self.skip_out(feature)
return residual, skip_out
class MaskGenerator(torch.nn.Module):
"""TCN (Temporal Convolution Network) Separation Module
Generates masks for separation.
Args:
input_dim (int): Input feature dimension, <N>.
num_sources (int): The number of sources to separate.
kernel_size (int): The convolution kernel size of conv blocks, <P>.
num_feats (int): Input/output feature dimension of conv blocks, <B, Sc>.
num_hidden (int): Intermediate feature dimension of conv blocks, <H>.
num_layers (int): The number of conv blocks in one stack, <X>.
num_stacks (int): The number of conv block stacks, <R>.
msk_activate (str): The activation function of the mask output.
Note:
This implementation corresponds to the "non-causal" setting in the paper.
"""
def __init__(
self,
input_dim: int,
num_sources: int,
kernel_size: int,
num_feats: int,
num_hidden: int,
num_layers: int,
num_stacks: int,
msk_activate: str,
):
super().__init__()
self.input_dim = input_dim
self.num_sources = num_sources
self.input_norm = torch.nn.GroupNorm(
num_groups=1, num_channels=input_dim, eps=1e-8
)
self.input_conv = torch.nn.Conv1d(
in_channels=input_dim, out_channels=num_feats, kernel_size=1
)
self.receptive_field = 0
self.conv_layers = torch.nn.ModuleList([])
for s in range(num_stacks):
for l in range(num_layers):
multi = 2 ** l
self.conv_layers.append(
ConvBlock(
io_channels=num_feats,
hidden_channels=num_hidden,
kernel_size=kernel_size,
dilation=multi,
padding=multi,
# The last ConvBlock does not need residual
no_residual=(l == (num_layers - 1) and s == (num_stacks - 1)),
)
)
self.receptive_field += (
kernel_size if s == 0 and l == 0 else (kernel_size - 1) * multi
)
self.output_prelu = torch.nn.PReLU()
self.output_conv = torch.nn.Conv1d(
in_channels=num_feats, out_channels=input_dim * num_sources, kernel_size=1,
)
if msk_activate == "sigmoid":
self.mask_activate = torch.nn.Sigmoid()
elif msk_activate == "relu":
self.mask_activate = torch.nn.ReLU()
else:
raise ValueError(f"Unsupported activation {msk_activate}")
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Generate separation mask.
Args:
input (torch.Tensor): 3D Tensor with shape [batch, features, frames]
Returns:
Tensor: shape [batch, num_sources, features, frames]
"""
batch_size = input.shape[0]
feats = self.input_norm(input)
feats = self.input_conv(feats)
output = 0.0
for layer in self.conv_layers:
residual, skip = layer(feats)
if residual is not None: # the last conv layer does not produce residual
feats = feats + residual
output = output + skip
output = self.output_prelu(output)
output = self.output_conv(output)
output = self.mask_activate(output)
return output.view(batch_size, self.num_sources, self.input_dim, -1)
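# A worked example of the receptive-field arithmetic in ``MaskGenerator.__init__``
# above, written as an illustrative helper. With the default ConvTasNet settings
# (kernel_size=3, num_layers=8, num_stacks=3) the sum evaluates to 1531 encoder
# frames.
def _receptive_field_example(kernel_size: int = 3, num_layers: int = 8, num_stacks: int = 3) -> int:
    receptive_field = 0
    for s in range(num_stacks):
        for l in range(num_layers):
            multi = 2 ** l
            # The first block contributes its full kernel; later blocks add (k - 1) * dilation.
            receptive_field += kernel_size if s == 0 and l == 0 else (kernel_size - 1) * multi
    return receptive_field  # 1531 with the defaults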
class ConvTasNet(torch.nn.Module):
"""Conv-TasNet: a fully-convolutional time-domain audio separation network
*Conv-TasNet: Surpassing Ideal Time–Frequency Magnitude Masking for Speech Separation*
[:footcite:`Luo_2019`].
Args:
num_sources (int, optional): The number of sources to split.
enc_kernel_size (int, optional): The convolution kernel size of the encoder/decoder, <L>.
enc_num_feats (int, optional): The feature dimensions passed to mask generator, <N>.
msk_kernel_size (int, optional): The convolution kernel size of the mask generator, <P>.
msk_num_feats (int, optional): The input/output feature dimension of conv block in the mask generator, <B, Sc>.
msk_num_hidden_feats (int, optional): The internal feature dimension of conv block of the mask generator, <H>.
msk_num_layers (int, optional): The number of layers in one conv block of the mask generator, <X>.
msk_num_stacks (int, optional): The number of conv blocks of the mask generator, <R>.
msk_activate (str, optional): The activation function of the mask output (Default: ``sigmoid``).
Note:
This implementation corresponds to the "non-causal" setting in the paper.
"""
def __init__(
self,
num_sources: int = 2,
# encoder/decoder parameters
enc_kernel_size: int = 16,
enc_num_feats: int = 512,
# mask generator parameters
msk_kernel_size: int = 3,
msk_num_feats: int = 128,
msk_num_hidden_feats: int = 512,
msk_num_layers: int = 8,
msk_num_stacks: int = 3,
msk_activate: str = "sigmoid",
):
super().__init__()
self.num_sources = num_sources
self.enc_num_feats = enc_num_feats
self.enc_kernel_size = enc_kernel_size
self.enc_stride = enc_kernel_size // 2
self.encoder = torch.nn.Conv1d(
in_channels=1,
out_channels=enc_num_feats,
kernel_size=enc_kernel_size,
stride=self.enc_stride,
padding=self.enc_stride,
bias=False,
)
self.mask_generator = MaskGenerator(
input_dim=enc_num_feats,
num_sources=num_sources,
kernel_size=msk_kernel_size,
num_feats=msk_num_feats,
num_hidden=msk_num_hidden_feats,
num_layers=msk_num_layers,
num_stacks=msk_num_stacks,
msk_activate=msk_activate,
)
self.decoder = torch.nn.ConvTranspose1d(
in_channels=enc_num_feats,
out_channels=1,
kernel_size=enc_kernel_size,
stride=self.enc_stride,
padding=self.enc_stride,
bias=False,
)
def _align_num_frames_with_strides(
self, input: torch.Tensor
) -> Tuple[torch.Tensor, int]:
"""Pad input Tensor so that the end of the input tensor corresponds with
1. (if kernel size is odd) the center of the last convolution kernel
or 2. (if kernel size is even) the end of the first half of the last convolution kernel
Assumption:
The resulting Tensor will be padded with the size of stride (== kernel_size // 2)
on both ends in Conv1d
|<--- k_1 --->|
| | |<-- k_n-1 -->|
| | | |<--- k_n --->|
| | | | |
| | | | |
| v v v |
|<---->|<--- input signal --->|<--->|<---->|
stride PAD stride
Args:
input (torch.Tensor): 3D Tensor with shape (batch_size, channels==1, frames)
Returns:
Tensor: Padded Tensor
int: Number of paddings performed
"""
batch_size, num_channels, num_frames = input.shape
is_odd = self.enc_kernel_size % 2
num_strides = (num_frames - is_odd) // self.enc_stride
num_remainings = num_frames - (is_odd + num_strides * self.enc_stride)
if num_remainings == 0:
return input, 0
num_paddings = self.enc_stride - num_remainings
pad = torch.zeros(
batch_size,
num_channels,
num_paddings,
dtype=input.dtype,
device=input.device,
)
return torch.cat([input, pad], 2), num_paddings
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Perform source separation. Generate audio source waveforms.
Args:
input (torch.Tensor): 3D Tensor with shape [batch, channel==1, frames]
Returns:
Tensor: 3D Tensor with shape [batch, channel==num_sources, frames]
"""
if input.ndim != 3 or input.shape[1] != 1:
raise ValueError(
f"Expected 3D tensor (batch, channel==1, frames). Found: {input.shape}"
)
# B: batch size
# L: input frame length
# L': padded input frame length
# F: feature dimension
# M: feature frame length
# S: number of sources
padded, num_pads = self._align_num_frames_with_strides(input) # B, 1, L'
batch_size, num_padded_frames = padded.shape[0], padded.shape[2]
feats = self.encoder(padded) # B, F, M
masked = self.mask_generator(feats) * feats.unsqueeze(1) # B, S, F, M
masked = masked.view(
batch_size * self.num_sources, self.enc_num_feats, -1
) # B*S, F, M
decoded = self.decoder(masked) # B*S, 1, L'
output = decoded.view(
batch_size, self.num_sources, num_padded_frames
) # B, S, L'
if num_pads > 0:
output = output[..., :-num_pads] # B, S, L
return output
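# A minimal usage sketch, written as an illustrative helper: a random mixture
# stands in for real audio, and the shapes follow the ``forward`` docstring above.
def _conv_tasnet_example():
    model = ConvTasNet(num_sources=2)
    model.eval()
    mixture = torch.rand(3, 1, 8000)     # (batch, channel==1, frames)
    with torch.no_grad():
        separated = model(mixture)       # (batch, num_sources, frames)
    assert separated.shape == (3, 2, 8000)
    return separated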
|
from .wav2letter import Wav2Letter
from .wavernn import WaveRNN
from .conv_tasnet import ConvTasNet
from .deepspeech import DeepSpeech
from .tacotron2 import Tacotron2
from .wav2vec2 import (
Wav2Vec2Model,
wav2vec2_model,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
hubert_base,
hubert_large,
hubert_xlarge,
)
__all__ = [
'Wav2Letter',
'WaveRNN',
'ConvTasNet',
'DeepSpeech',
'Wav2Vec2Model',
'wav2vec2_model',
'wav2vec2_base',
'wav2vec2_large',
'wav2vec2_large_lv60k',
'hubert_base',
'hubert_large',
'hubert_xlarge',
'Tacotron2',
]
|
import torch
__all__ = ["DeepSpeech"]
class FullyConnected(torch.nn.Module):
"""
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
dropout: Dropout probability applied after the activation. ``0.0`` disables dropout.
relu_max_clip: Upper bound used to clip the ReLU activation. (Default: ``20``)
"""
def __init__(self,
n_feature: int,
n_hidden: int,
dropout: float,
relu_max_clip: int = 20) -> None:
super(FullyConnected, self).__init__()
self.fc = torch.nn.Linear(n_feature, n_hidden, bias=True)
self.relu_max_clip = relu_max_clip
self.dropout = dropout
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.fc(x)
x = torch.nn.functional.relu(x)
x = torch.nn.functional.hardtanh(x, 0, self.relu_max_clip)
if self.dropout:
x = torch.nn.functional.dropout(x, self.dropout, self.training)
return x
class DeepSpeech(torch.nn.Module):
"""
DeepSpeech model architecture from *Deep Speech: Scaling up end-to-end speech recognition*
[:footcite:`hannun2014deep`].
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
n_class: Number of output classes
dropout: Dropout probability for each FullyConnected layer. (Default: ``0.0``)
"""
def __init__(
self,
n_feature: int,
n_hidden: int = 2048,
n_class: int = 40,
dropout: float = 0.0,
) -> None:
super(DeepSpeech, self).__init__()
self.n_hidden = n_hidden
self.fc1 = FullyConnected(n_feature, n_hidden, dropout)
self.fc2 = FullyConnected(n_hidden, n_hidden, dropout)
self.fc3 = FullyConnected(n_hidden, n_hidden, dropout)
self.bi_rnn = torch.nn.RNN(
n_hidden, n_hidden, num_layers=1, nonlinearity="relu", bidirectional=True
)
self.fc4 = FullyConnected(n_hidden, n_hidden, dropout)
self.out = torch.nn.Linear(n_hidden, n_class)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x (torch.Tensor): Tensor of dimension (batch, channel, time, feature).
Returns:
Tensor: Predictor tensor of dimension (batch, time, class).
"""
# N x C x T x F
x = self.fc1(x)
# N x C x T x H
x = self.fc2(x)
# N x C x T x H
x = self.fc3(x)
# N x C x T x H
x = x.squeeze(1)
# N x T x H
x = x.transpose(0, 1)
# T x N x H
x, _ = self.bi_rnn(x)
# The fifth (non-recurrent) layer takes both the forward and backward units as inputs
x = x[:, :, :self.n_hidden] + x[:, :, self.n_hidden:]
# T x N x H
x = self.fc4(x)
# T x N x H
x = self.out(x)
# T x N x n_class
x = x.permute(1, 0, 2)
# N x T x n_class
x = torch.nn.functional.log_softmax(x, dim=2)
# N x T x n_class
return x
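# A minimal usage sketch, written as an illustrative helper: the feature count,
# time length, and class count are arbitrary placeholders chosen to show the
# tensor layout documented in ``forward``.
def _deepspeech_example():
    model = DeepSpeech(n_feature=40, n_hidden=256, n_class=29)
    model.eval()
    x = torch.rand(4, 1, 100, 40)    # (batch, channel, time, feature)
    with torch.no_grad():
        out = model(x)               # (batch, time, class) log-probabilities
    assert out.shape == (4, 100, 29)
    return out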
|
from typing import List, Tuple, Optional
import math
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
__all__ = [
"ResBlock",
"MelResNet",
"Stretch2d",
"UpsampleNetwork",
"WaveRNN",
]
class ResBlock(nn.Module):
r"""ResNet block based on *Efficient Neural Audio Synthesis* [:footcite:`kalchbrenner2018efficient`].
Args:
n_freq: the number of bins in a spectrogram. (Default: ``128``)
Examples
>>> resblock = ResBlock()
>>> input = torch.rand(10, 128, 512) # a random spectrogram
>>> output = resblock(input) # shape: (10, 128, 512)
"""
def __init__(self, n_freq: int = 128) -> None:
super().__init__()
self.resblock_model = nn.Sequential(
nn.Conv1d(in_channels=n_freq, out_channels=n_freq, kernel_size=1, bias=False),
nn.BatchNorm1d(n_freq),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=n_freq, out_channels=n_freq, kernel_size=1, bias=False),
nn.BatchNorm1d(n_freq)
)
def forward(self, specgram: Tensor) -> Tensor:
r"""Pass the input through the ResBlock layer.
Args:
specgram (Tensor): the input sequence to the ResBlock layer (n_batch, n_freq, n_time).
Return:
Tensor shape: (n_batch, n_freq, n_time)
"""
return self.resblock_model(specgram) + specgram
class MelResNet(nn.Module):
r"""MelResNet layer uses a stack of ResBlocks on spectrogram.
Args:
n_res_block: the number of ResBlock in stack. (Default: ``10``)
n_freq: the number of bins in a spectrogram. (Default: ``128``)
n_hidden: the number of hidden dimensions of resblock. (Default: ``128``)
n_output: the number of output dimensions of melresnet. (Default: ``128``)
kernel_size: the number of kernel size in the first Conv1d layer. (Default: ``5``)
Examples
>>> melresnet = MelResNet()
>>> input = torch.rand(10, 128, 512) # a random spectrogram
>>> output = melresnet(input) # shape: (10, 128, 508)
"""
def __init__(self,
n_res_block: int = 10,
n_freq: int = 128,
n_hidden: int = 128,
n_output: int = 128,
kernel_size: int = 5) -> None:
super().__init__()
ResBlocks = [ResBlock(n_hidden) for _ in range(n_res_block)]
self.melresnet_model = nn.Sequential(
nn.Conv1d(in_channels=n_freq, out_channels=n_hidden, kernel_size=kernel_size, bias=False),
nn.BatchNorm1d(n_hidden),
nn.ReLU(inplace=True),
*ResBlocks,
nn.Conv1d(in_channels=n_hidden, out_channels=n_output, kernel_size=1)
)
def forward(self, specgram: Tensor) -> Tensor:
r"""Pass the input through the MelResNet layer.
Args:
specgram (Tensor): the input sequence to the MelResNet layer (n_batch, n_freq, n_time).
Return:
Tensor shape: (n_batch, n_output, n_time - kernel_size + 1)
"""
return self.melresnet_model(specgram)
class Stretch2d(nn.Module):
r"""Upscale the frequency and time dimensions of a spectrogram.
Args:
time_scale: the scale factor in time dimension
freq_scale: the scale factor in frequency dimension
Examples
>>> stretch2d = Stretch2d(time_scale=10, freq_scale=5)
>>> input = torch.rand(10, 100, 512) # a random spectrogram
>>> output = stretch2d(input) # shape: (10, 500, 5120)
"""
def __init__(self,
time_scale: int,
freq_scale: int) -> None:
super().__init__()
self.freq_scale = freq_scale
self.time_scale = time_scale
def forward(self, specgram: Tensor) -> Tensor:
r"""Pass the input through the Stretch2d layer.
Args:
specgram (Tensor): the input sequence to the Stretch2d layer (..., n_freq, n_time).
Return:
Tensor shape: (..., n_freq * freq_scale, n_time * time_scale)
"""
return specgram.repeat_interleave(self.freq_scale, -2).repeat_interleave(self.time_scale, -1)
class UpsampleNetwork(nn.Module):
r"""Upscale the dimensions of a spectrogram.
Args:
upsample_scales: the list of upsample scales.
n_res_block: the number of ResBlock in stack. (Default: ``10``)
n_freq: the number of bins in a spectrogram. (Default: ``128``)
n_hidden: the number of hidden dimensions of resblock. (Default: ``128``)
n_output: the number of output dimensions of melresnet. (Default: ``128``)
kernel_size: the number of kernel size in the first Conv1d layer. (Default: ``5``)
Examples
>>> upsamplenetwork = UpsampleNetwork(upsample_scales=[4, 4, 16])
>>> input = torch.rand(10, 128, 10) # a random spectrogram
>>> output = upsamplenetwork(input) # shapes: (10, 128, 1536), (10, 128, 1536)
"""
def __init__(self,
upsample_scales: List[int],
n_res_block: int = 10,
n_freq: int = 128,
n_hidden: int = 128,
n_output: int = 128,
kernel_size: int = 5) -> None:
super().__init__()
total_scale = 1
for upsample_scale in upsample_scales:
total_scale *= upsample_scale
self.total_scale: int = total_scale
self.indent = (kernel_size - 1) // 2 * total_scale
self.resnet = MelResNet(n_res_block, n_freq, n_hidden, n_output, kernel_size)
self.resnet_stretch = Stretch2d(total_scale, 1)
up_layers = []
for scale in upsample_scales:
stretch = Stretch2d(scale, 1)
conv = nn.Conv2d(in_channels=1,
out_channels=1,
kernel_size=(1, scale * 2 + 1),
padding=(0, scale),
bias=False)
conv.weight.data.fill_(1. / (scale * 2 + 1))
up_layers.append(stretch)
up_layers.append(conv)
self.upsample_layers = nn.Sequential(*up_layers)
def forward(self, specgram: Tensor) -> Tuple[Tensor, Tensor]:
r"""Pass the input through the UpsampleNetwork layer.
Args:
specgram (Tensor): the input sequence to the UpsampleNetwork layer (n_batch, n_freq, n_time)
Return:
Tensor shape: (n_batch, n_freq, (n_time - kernel_size + 1) * total_scale),
(n_batch, n_output, (n_time - kernel_size + 1) * total_scale)
where total_scale is the product of all elements in upsample_scales.
"""
resnet_output = self.resnet(specgram).unsqueeze(1)
resnet_output = self.resnet_stretch(resnet_output)
resnet_output = resnet_output.squeeze(1)
specgram = specgram.unsqueeze(1)
upsampling_output = self.upsample_layers(specgram)
upsampling_output = upsampling_output.squeeze(1)[:, :, self.indent:-self.indent]
return upsampling_output, resnet_output
class WaveRNN(nn.Module):
r"""WaveRNN model based on the implementation from `fatchord <https://github.com/fatchord/WaveRNN>`_.
The original implementation was introduced in *Efficient Neural Audio Synthesis*
[:footcite:`kalchbrenner2018efficient`]. The input channels of waveform and spectrogram have to be 1.
The product of `upsample_scales` must equal `hop_length`.
Args:
upsample_scales: the list of upsample scales.
n_classes: the number of output classes.
hop_length: the number of samples between the starts of consecutive frames.
n_res_block: the number of ResBlock in stack. (Default: ``10``)
n_rnn: the dimension of RNN layer. (Default: ``512``)
n_fc: the dimension of fully connected layer. (Default: ``512``)
kernel_size: the number of kernel size in the first Conv1d layer. (Default: ``5``)
n_freq: the number of bins in a spectrogram. (Default: ``128``)
n_hidden: the number of hidden dimensions of resblock. (Default: ``128``)
n_output: the number of output dimensions of melresnet. (Default: ``128``)
Example
>>> wavernn = WaveRNN(upsample_scales=[5,5,8], n_classes=512, hop_length=200)
>>> waveform, sample_rate = torchaudio.load(file)
>>> # waveform shape: (n_batch, n_channel, (n_time - kernel_size + 1) * hop_length)
>>> specgram = MelSpectrogram(sample_rate)(waveform) # shape: (n_batch, n_channel, n_freq, n_time)
>>> output = wavernn(waveform, specgram)
>>> # output shape: (n_batch, n_channel, (n_time - kernel_size + 1) * hop_length, n_classes)
"""
def __init__(self,
upsample_scales: List[int],
n_classes: int,
hop_length: int,
n_res_block: int = 10,
n_rnn: int = 512,
n_fc: int = 512,
kernel_size: int = 5,
n_freq: int = 128,
n_hidden: int = 128,
n_output: int = 128) -> None:
super().__init__()
self.kernel_size = kernel_size
self._pad = (kernel_size - 1 if kernel_size % 2 else kernel_size) // 2
self.n_rnn = n_rnn
self.n_aux = n_output // 4
self.hop_length = hop_length
self.n_classes = n_classes
self.n_bits: int = int(math.log2(self.n_classes))
total_scale = 1
for upsample_scale in upsample_scales:
total_scale *= upsample_scale
if total_scale != self.hop_length:
raise ValueError(f"Expected: total_scale == hop_length, but found {total_scale} != {hop_length}")
self.upsample = UpsampleNetwork(upsample_scales,
n_res_block,
n_freq,
n_hidden,
n_output,
kernel_size)
self.fc = nn.Linear(n_freq + self.n_aux + 1, n_rnn)
self.rnn1 = nn.GRU(n_rnn, n_rnn, batch_first=True)
self.rnn2 = nn.GRU(n_rnn + self.n_aux, n_rnn, batch_first=True)
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(n_rnn + self.n_aux, n_fc)
self.fc2 = nn.Linear(n_fc + self.n_aux, n_fc)
self.fc3 = nn.Linear(n_fc, self.n_classes)
def forward(self, waveform: Tensor, specgram: Tensor) -> Tensor:
r"""Pass the input through the WaveRNN model.
Args:
waveform: the input waveform to the WaveRNN layer (n_batch, 1, (n_time - kernel_size + 1) * hop_length)
specgram: the input spectrogram to the WaveRNN layer (n_batch, 1, n_freq, n_time)
Return:
Tensor: shape (n_batch, 1, (n_time - kernel_size + 1) * hop_length, n_classes)
"""
assert waveform.size(1) == 1, 'Require the input channel of waveform to be 1'
assert specgram.size(1) == 1, 'Require the input channel of specgram to be 1'
# remove channel dimension until the end
waveform, specgram = waveform.squeeze(1), specgram.squeeze(1)
batch_size = waveform.size(0)
h1 = torch.zeros(1, batch_size, self.n_rnn, dtype=waveform.dtype, device=waveform.device)
h2 = torch.zeros(1, batch_size, self.n_rnn, dtype=waveform.dtype, device=waveform.device)
# output of upsample:
# specgram: (n_batch, n_freq, (n_time - kernel_size + 1) * total_scale)
# aux: (n_batch, n_output, (n_time - kernel_size + 1) * total_scale)
specgram, aux = self.upsample(specgram)
specgram = specgram.transpose(1, 2)
aux = aux.transpose(1, 2)
aux_idx = [self.n_aux * i for i in range(5)]
a1 = aux[:, :, aux_idx[0]:aux_idx[1]]
a2 = aux[:, :, aux_idx[1]:aux_idx[2]]
a3 = aux[:, :, aux_idx[2]:aux_idx[3]]
a4 = aux[:, :, aux_idx[3]:aux_idx[4]]
x = torch.cat([waveform.unsqueeze(-1), specgram, a1], dim=-1)
x = self.fc(x)
res = x
x, _ = self.rnn1(x, h1)
x = x + res
res = x
x = torch.cat([x, a2], dim=-1)
x, _ = self.rnn2(x, h2)
x = x + res
x = torch.cat([x, a3], dim=-1)
x = self.fc1(x)
x = self.relu1(x)
x = torch.cat([x, a4], dim=-1)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
# bring back channel dimension
return x.unsqueeze(1)
@torch.jit.export
def infer(self, specgram: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]:
r"""Inference method of WaveRNN.
This function currently only supports multinomial sampling, which assumes the
network is trained on cross entropy loss.
Args:
specgram (Tensor):
Batch of spectrograms. Shape: `(n_batch, n_freq, n_time)`.
lengths (Tensor or None, optional):
Indicates the valid length of each audio in the batch.
Shape: `(batch, )`.
When the ``specgram`` contains spectrograms with different durations,
by providing ``lengths`` argument, the model will compute
the corresponding valid output lengths.
If ``None``, it is assumed that all the spectrograms in ``specgram``
have valid length. Default: ``None``.
Returns:
(Tensor, Optional[Tensor]):
Tensor
The inferred waveform of size `(n_batch, 1, n_time)`.
1 stands for a single channel.
Tensor or None
If ``lengths`` argument was provided, a Tensor of shape `(batch, )`
is returned.
It indicates the valid length in time axis of the output Tensor.
"""
device = specgram.device
dtype = specgram.dtype
specgram = torch.nn.functional.pad(specgram, (self._pad, self._pad))
specgram, aux = self.upsample(specgram)
if lengths is not None:
lengths = lengths * self.upsample.total_scale
output: List[Tensor] = []
b_size, _, seq_len = specgram.size()
h1 = torch.zeros((1, b_size, self.n_rnn), device=device, dtype=dtype)
h2 = torch.zeros((1, b_size, self.n_rnn), device=device, dtype=dtype)
x = torch.zeros((b_size, 1), device=device, dtype=dtype)
aux_split = [aux[:, self.n_aux * i: self.n_aux * (i + 1), :] for i in range(4)]
for i in range(seq_len):
m_t = specgram[:, :, i]
a1_t, a2_t, a3_t, a4_t = [a[:, :, i] for a in aux_split]
x = torch.cat([x, m_t, a1_t], dim=1)
x = self.fc(x)
_, h1 = self.rnn1(x.unsqueeze(1), h1)
x = x + h1[0]
inp = torch.cat([x, a2_t], dim=1)
_, h2 = self.rnn2(inp.unsqueeze(1), h2)
x = x + h2[0]
x = torch.cat([x, a3_t], dim=1)
x = F.relu(self.fc1(x))
x = torch.cat([x, a4_t], dim=1)
x = F.relu(self.fc2(x))
logits = self.fc3(x)
posterior = F.softmax(logits, dim=1)
x = torch.multinomial(posterior, 1).float()
# Transform label [0, 2 ** n_bits - 1] to waveform [-1, 1]
x = 2 * x / (2 ** self.n_bits - 1.0) - 1.0
output.append(x)
return torch.stack(output).permute(1, 2, 0), lengths
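# A minimal inference sketch, written as an illustrative helper: the spectrogram
# is a random placeholder, and the product of ``upsample_scales`` matches
# ``hop_length`` as required by the constructor.
def _wavernn_infer_example():
    model = WaveRNN(upsample_scales=[5, 5, 8], n_classes=512, hop_length=200)
    model.eval()
    specgram = torch.rand(1, 128, 5)                # (n_batch, n_freq, n_time)
    with torch.no_grad():
        waveform, lengths = model.infer(specgram)   # waveform: (n_batch, 1, n_time * hop_length)
    return waveform, lengths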
|
from .model import (
Wav2Vec2Model,
wav2vec2_model,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
hubert_base,
hubert_large,
hubert_xlarge,
)
from . import utils
__all__ = [
'Wav2Vec2Model',
'wav2vec2_model',
'wav2vec2_base',
'wav2vec2_large',
'wav2vec2_large_lv60k',
'hubert_base',
'hubert_large',
'hubert_xlarge',
'utils',
]
|
from typing import Optional, Tuple, List
import torch
from torch import Tensor
from torch.nn import Module
from . import components
class Wav2Vec2Model(Module):
"""torchaudio.models.Wav2Vec2Model(feature_extractor: torch.nn.Module, encoder: torch.nn.Module, aux: Optional[torch.nn.Module] = None)
Encoder model used in *wav2vec 2.0* [:footcite:`baevski2020wav2vec`].
Note:
To build the model, please use one of the factory functions.
Args:
feature_extractor (torch.nn.Module):
Feature extractor that extracts feature vectors from raw audio Tensor.
encoder (torch.nn.Module):
Encoder that converts the audio features into the sequence of probability
distribution (in negative log-likelihood) over labels.
aux (torch.nn.Module or None, optional):
Auxiliary module. If provided, the output from encoder is passed to this module.
""" # noqa: E501
def __init__(
self,
feature_extractor: Module,
encoder: Module,
aux: Optional[Module] = None,
):
super().__init__()
self.feature_extractor = feature_extractor
self.encoder = encoder
self.aux = aux
@torch.jit.export
def extract_features(
self,
waveforms: Tensor,
lengths: Optional[Tensor] = None,
num_layers: Optional[int] = None,
) -> Tuple[List[Tensor], Optional[Tensor]]:
"""Extract feature vectors from raw waveforms
This returns the list of outputs from the intermediate layers of
transformer block in encoder.
Args:
waveforms (Tensor): Audio tensor of shape `(batch, frames)`.
lengths (Tensor or None, optional):
Indicates the valid length of each audio in the batch.
Shape: `(batch, )`.
When the ``waveforms`` contains audios with different durations,
by providing ``lengths`` argument, the model will compute
the corresponding valid output lengths and apply proper mask in
transformer attention layer.
If ``None``, it is assumed that the entire audio waveform
length is valid.
num_layers (int or None, optional):
If given, limit the number of intermediate layers to go through.
Providing `1` will stop the computation after going through one
intermediate layer. If not given, the outputs from all the
intermediate layers are returned.
Returns:
(List[Tensor], Optional[Tensor]):
List of Tensors
Features from requested layers.
Each Tensor is of shape: `(batch, time frame, feature dimension)`
Tensor or None
If ``lengths`` argument was provided, a Tensor of shape `(batch, )`
is returned.
It indicates the valid length in time axis of each feature Tensor.
"""
x, lengths = self.feature_extractor(waveforms, lengths)
x = self.encoder.extract_features(x, lengths, num_layers)
return x, lengths
def forward(
self,
waveforms: Tensor,
lengths: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Compute the sequence of probability distribution over labels.
Args:
waveforms (Tensor): Audio tensor of shape `(batch, frames)`.
lengths (Tensor or None, optional):
Indicates the valid length of each audio in the batch.
Shape: `(batch, )`.
When the ``waveforms`` contains audios with different durations,
by providing ``lengths`` argument, the model will compute
the corresponding valid output lengths and apply proper mask in
transformer attention layer.
If ``None``, it is assumed that all the audio in ``waveforms``
have valid length. Default: ``None``.
Returns:
(Tensor, Optional[Tensor]):
Tensor
The sequences of probability distribution (in logit) over labels.
Shape: `(batch, frames, num labels)`.
Tensor or None
If ``lengths`` argument was provided, a Tensor of shape `(batch, )`
is returned.
It indicates the valid length in time axis of the output Tensor.
"""
x, lengths = self.feature_extractor(waveforms, lengths)
x = self.encoder(x, lengths)
if self.aux is not None:
x = self.aux(x)
return x, lengths
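# A minimal usage sketch, written as an illustrative helper that relies on the
# ``wav2vec2_base`` factory defined below. The waveform is a random placeholder;
# ``aux_num_out`` attaches the linear head so that ``forward`` emits per-frame logits.
def _wav2vec2_usage_example():
    model = wav2vec2_base(aux_num_out=32)
    model.eval()
    waveforms = torch.rand(2, 8000)                  # (batch, frames)
    lengths = torch.tensor([8000, 6400])
    with torch.no_grad():
        logits, out_lengths = model(waveforms, lengths)
        features, _ = model.extract_features(waveforms, lengths, num_layers=4)
    # logits:   (batch, time frames, aux_num_out)
    # features: list of Tensors, one per traversed transformer layer,
    #           each of shape (batch, time frames, encoder_embed_dim)
    return logits, out_lengths, features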
def wav2vec2_model(
extractor_mode: str,
extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]],
extractor_conv_bias: bool,
encoder_embed_dim: int,
encoder_projection_dropout: float,
encoder_pos_conv_kernel: int,
encoder_pos_conv_groups: int,
encoder_num_layers: int,
encoder_num_heads: int,
encoder_attention_dropout: float,
encoder_ff_interm_features: int,
encoder_ff_interm_dropout: float,
encoder_dropout: float,
encoder_layer_norm_first: bool,
encoder_layer_drop: float,
aux_num_out: Optional[int],
) -> Wav2Vec2Model:
# Overriding the signature so that the return type is correct on Sphinx
"""wav2vec2_model(extractor_mode: str, extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], extractor_conv_bias: bool, encoder_embed_dim: int, encoder_projection_dropout: float, encoder_pos_conv_kernel: int, encoder_pos_conv_groups: int, encoder_num_layers: int, encoder_num_heads: int, encoder_attention_dropout: float, encoder_ff_interm_features: int, encoder_ff_interm_dropout: float, encoder_dropout: float, encoder_layer_norm_first: bool, encoder_layer_drop: float, aux_num_out: Optional[int]) -> torchaudio.models.Wav2Vec2Model
Build a custom Wav2Vec2Model
Note:
The "feature extractor" below corresponds to
`ConvFeatureExtractionModel <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L736>`__
in the original ``fairseq`` implementation.
This is referred as "(convolutional) feature encoder" in the *wav2vec 2.0*
[:footcite:`baevski2020wav2vec`] paper.
The "encoder" below corresponds to `TransformerEncoder <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L817>`__,
and this is referred as "Transformer" in the paper.
Args:
extractor_mode (str): Operation mode of feature extractor.
Valid values are ``"group_norm"`` or ``"layer_norm"``.
If ``"group_norm"``, then a single normalization is applied
in the first convolution block. Otherwise, all the convolution
blocks will have layer normalization.
This option corresponds to ``extractor_mode`` from ``fairseq``.
extractor_conv_layer_config (list of integer tuples or None):
Configuration of convolution layers in feature extractor.
List of convolution configuration,
i.e. ``[(output_channel, kernel_size, stride), ...]``
If ``None`` is provided, then the following default value is used.
.. code-block:: python
[
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
]
This option corresponds to ``conv_feature_layers`` from ``fairseq``.
extractor_conv_bias (bool):
Whether to include bias term to each convolution operation.
This option corresponds to ``conv_bias`` from ``fairseq``.
encoder_embed_dim (int):
The dimension of embedding in encoder.
This option corresponds to ``encoder_embed_dim`` from ``fairseq``.
encoder_projection_dropout (float):
The dropout probability applied after the input feature is projected
to ``encoder_embed_dim``.
This option corresponds to ``dropout_input`` from ``fairseq``.
encoder_pos_conv_kernel (int):
The kernel size of convolutional positional embeddings.
This option corresponds to ``conv_pos`` from ``fairseq``.
encoder_pos_conv_groups (int):
The number of groups of convolutional positional embeddings.
This option corresponds to ``conv_pos_groups`` from ``fairseq``.
encoder_num_layers (int):
The number of self attention layers in transformer block.
This option corresponds to ``encoder_layers`` from ``fairseq``.
encoder_num_heads (int):
The number of heads in self attention layers.
This option corresponds to ``encoder_attention_heads`` from ``fairseq``.
encoder_attention_dropout (float):
The dropout probability applied after softmax in self-attention layer.
This option corresponds to ``attention_dropout`` from ``fairseq``.
encoder_ff_interm_features (int):
The dimension of hidden features in feed forward layer.
This option corresponds to ``encoder_ffn_embed_dim`` from ``fairseq``.
encoder_ff_interm_dropout (float):
The dropout probability applied in feedforward layer.
This option corresponds to ``activation_dropout`` from ``fairseq``.
encoder_dropout (float):
The dropout probability applied at the end of feed forward layer.
This option corresponds to ``dropout`` from ``fairseq``.
encoder_layer_norm_first (bool):
Control the order of layer norm in transformer layer and each encoder layer.
If True, in transformer layer, layer norm is applied before features are fed
to encoder layers. In encoder layer, two layer norms are applied before and after
self attention.
If False, in transformer layer, layer norm is applied after features are fed
to encoder layers. In encoder layer, two layer norms are applied after self
attention, before and after feed forward.
This option corresponds to ``layer_norm_first`` from ``fairseq``.
encoder_layer_drop (float):
Probability to drop each encoder layer during training.
This option corresponds to ``layerdrop`` from ``fairseq``.
aux_num_out (int or None):
When provided, attach an extra linear layer on top of encoder, which can be
used for fine-tuning.
Returns:
Wav2Vec2Model:
The resulting model.
""" # noqa: E501
if extractor_conv_layer_config is None:
extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2
feature_extractor = components._get_feature_extractor(
extractor_mode, extractor_conv_layer_config, extractor_conv_bias)
encoder = components._get_encoder(
in_features=extractor_conv_layer_config[-1][0],
embed_dim=encoder_embed_dim,
dropout_input=encoder_projection_dropout,
pos_conv_kernel=encoder_pos_conv_kernel,
pos_conv_groups=encoder_pos_conv_groups,
num_layers=encoder_num_layers,
num_heads=encoder_num_heads,
attention_dropout=encoder_attention_dropout,
ff_interm_features=encoder_ff_interm_features,
ff_interm_dropout=encoder_ff_interm_dropout,
dropout=encoder_dropout,
layer_norm_first=encoder_layer_norm_first,
layer_drop=encoder_layer_drop,
)
aux = None
if aux_num_out is not None:
aux = torch.nn.Linear(in_features=encoder_embed_dim, out_features=aux_num_out)
return Wav2Vec2Model(feature_extractor, encoder, aux)
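# A worked example of the temporal scale implied by the default
# ``extractor_conv_layer_config`` above, written as an illustrative helper:
# the strides multiply to 5 * 2**6 = 320, so each encoder frame advances by
# 320 samples (20 ms at 16 kHz), with a receptive field of 400 samples (25 ms).
def _default_extractor_hop() -> int:
    hop = 1
    for _, _, stride in [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2:
        hop *= stride
    return hop  # 320 samples per output frame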
def wav2vec2_base(
encoder_projection_dropout: float = 0.1,
encoder_attention_dropout: float = 0.1,
encoder_ff_interm_dropout: float = 0.1,
encoder_dropout: float = 0.1,
encoder_layer_drop: float = 0.1,
aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
# Overriding the signature so that the return type is correct on Sphinx
"""wav2vec2_base(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model
Build Wav2Vec2Model with "base" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]
Args:
encoder_projection_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_attention_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_ff_interm_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_layer_drop (float):
See :py:func:`wav2vec2_model`.
aux_num_out (int or None, optional):
See :py:func:`wav2vec2_model`.
Returns:
Wav2Vec2Model:
The resulting model.
""" # noqa: E501
return wav2vec2_model(
extractor_mode="group_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=768,
encoder_projection_dropout=encoder_projection_dropout,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=12,
encoder_num_heads=12,
encoder_attention_dropout=encoder_attention_dropout,
encoder_ff_interm_features=3072,
encoder_ff_interm_dropout=encoder_ff_interm_dropout,
encoder_dropout=encoder_dropout,
encoder_layer_norm_first=False,
encoder_layer_drop=encoder_layer_drop,
aux_num_out=aux_num_out,
)
def wav2vec2_large(
encoder_projection_dropout: float = 0.1,
encoder_attention_dropout: float = 0.1,
encoder_ff_interm_dropout: float = 0.1,
encoder_dropout: float = 0.1,
encoder_layer_drop: float = 0.1,
aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
# Overriding the signature so that the return type is correct on Sphinx
"""wav2vec2_large(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model
Build Wav2Vec2Model with "large" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]
Args:
encoder_projection_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_attention_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_ff_interm_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_layer_drop (float):
See :py:func:`wav2vec2_model`.
aux_num_out (int or None, optional):
See :py:func:`wav2vec2_model`.
Returns:
Wav2Vec2Model:
The resulting model.
""" # noqa: E501
return wav2vec2_model(
extractor_mode="group_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=1024,
encoder_projection_dropout=encoder_projection_dropout,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=24,
encoder_num_heads=16,
encoder_attention_dropout=encoder_attention_dropout,
encoder_ff_interm_features=4096,
encoder_ff_interm_dropout=encoder_ff_interm_dropout,
encoder_dropout=encoder_dropout,
encoder_layer_norm_first=False,
encoder_layer_drop=encoder_layer_drop,
aux_num_out=aux_num_out,
)
def wav2vec2_large_lv60k(
encoder_projection_dropout: float = 0.1,
encoder_attention_dropout: float = 0.0,
encoder_ff_interm_dropout: float = 0.1,
encoder_dropout: float = 0.0,
encoder_layer_drop: float = 0.1,
aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
# Overriding the signature so that the return type is correct on Sphinx
"""wav2vec2_large_lv60k( encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.1, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.1, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model
Build Wav2Vec2Model with "large lv-60k" architecture from *wav2vec 2.0* [:footcite:`baevski2020wav2vec`]
Args:
encoder_projection_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_attention_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_ff_interm_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_layer_drop (float):
See :py:func:`wav2vec2_model`.
aux_num_out (int or None, optional):
See :py:func:`wav2vec2_model`.
Returns:
Wav2Vec2Model:
The resulting model.
""" # noqa: E501
return wav2vec2_model(
extractor_mode="layer_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=True,
encoder_embed_dim=1024,
encoder_projection_dropout=encoder_projection_dropout,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=24,
encoder_num_heads=16,
encoder_attention_dropout=encoder_attention_dropout,
encoder_ff_interm_features=4096,
encoder_ff_interm_dropout=encoder_ff_interm_dropout,
encoder_dropout=encoder_dropout,
encoder_layer_norm_first=True,
encoder_layer_drop=encoder_layer_drop,
aux_num_out=aux_num_out,
)
def hubert_base(
encoder_projection_dropout: float = 0.1,
encoder_attention_dropout: float = 0.1,
encoder_ff_interm_dropout: float = 0.0,
encoder_dropout: float = 0.1,
encoder_layer_drop: float = 0.05,
aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
# Overriding the signature so that the return type is correct on Sphinx
"""hubert_base(encoder_projection_dropout: float = 0.1, encoder_attention_dropout: float = 0.1, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.1, encoder_layer_drop: float = 0.05, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model
Build HuBERT model with "base" architecture from *HuBERT* [:footcite:`hsu2021hubert`]
Args:
encoder_projection_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_attention_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_ff_interm_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_layer_drop (float):
See :py:func:`wav2vec2_model`.
aux_num_out (int or None, optional):
See :py:func:`wav2vec2_model`.
Returns:
Wav2Vec2Model:
The resulting model.
""" # noqa: E501
return wav2vec2_model(
extractor_mode='group_norm',
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=768,
encoder_projection_dropout=encoder_projection_dropout,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=12,
encoder_num_heads=12,
encoder_attention_dropout=encoder_attention_dropout,
encoder_ff_interm_features=3072,
encoder_ff_interm_dropout=encoder_ff_interm_dropout,
encoder_dropout=encoder_dropout,
encoder_layer_norm_first=False,
encoder_layer_drop=encoder_layer_drop,
aux_num_out=aux_num_out,
)
def hubert_large(
encoder_projection_dropout: float = 0.0,
encoder_attention_dropout: float = 0.0,
encoder_ff_interm_dropout: float = 0.0,
encoder_dropout: float = 0.0,
encoder_layer_drop: float = 0.0,
aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
# Overriding the signature so that the return type is correct on Sphinx
"""hubert_large(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model
Build HuBERT model with "large" architecture from *HuBERT* [:footcite:`hsu2021hubert`]
Args:
encoder_projection_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_attention_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_ff_interm_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_layer_drop (float):
See :py:func:`wav2vec2_model`.
aux_num_out (int or None, optional):
See :py:func:`wav2vec2_model`.
Returns:
Wav2Vec2Model:
The resulting model.
""" # noqa: E501
return wav2vec2_model(
extractor_mode='layer_norm',
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=1024,
encoder_projection_dropout=encoder_projection_dropout,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=24,
encoder_num_heads=16,
encoder_attention_dropout=encoder_attention_dropout,
encoder_ff_interm_features=4096,
encoder_ff_interm_dropout=encoder_ff_interm_dropout,
encoder_dropout=encoder_dropout,
encoder_layer_norm_first=True,
encoder_layer_drop=encoder_layer_drop,
aux_num_out=aux_num_out,
)
def hubert_xlarge(
encoder_projection_dropout: float = 0.0,
encoder_attention_dropout: float = 0.0,
encoder_ff_interm_dropout: float = 0.0,
encoder_dropout: float = 0.0,
encoder_layer_drop: float = 0.0,
aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
# Overriding the signature so that the return type is correct on Sphinx
"""hubert_xlarge(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, aux_num_out: Optional[int] = None) -> torchaudio.models.Wav2Vec2Model
Build HuBERT model with "extra large" architecture from *HuBERT* [:footcite:`hsu2021hubert`]
Args:
encoder_projection_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_attention_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_ff_interm_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_dropout (float):
See :py:func:`wav2vec2_model`.
encoder_layer_drop (float):
See :py:func:`wav2vec2_model`.
aux_num_out (int or None, optional):
See :py:func:`wav2vec2_model`.
Returns:
Wav2Vec2Model:
The resulting model.
""" # noqa: E501
return wav2vec2_model(
extractor_mode='layer_norm',
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=1280,
encoder_projection_dropout=encoder_projection_dropout,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=48,
encoder_num_heads=16,
encoder_attention_dropout=encoder_attention_dropout,
encoder_ff_interm_features=5120,
encoder_ff_interm_dropout=encoder_ff_interm_dropout,
encoder_dropout=encoder_dropout,
encoder_layer_norm_first=True,
encoder_layer_drop=encoder_layer_drop,
aux_num_out=aux_num_out,
)
|